├── AUTHORS ├── README.md ├── docs ├── images │ └── lthread_scheduler.png ├── index.rst ├── disk_io.rst ├── intro.rst ├── examples.rst ├── socket.rst ├── lthread.rst ├── Makefile └── conf.py ├── .gitignore ├── .readthedocs.yaml ├── tests ├── lthread_sleep.c ├── lthread_join.c ├── lthread_pipe.c ├── lthread_unit_test_compute.c ├── lthread_io.c └── lthread_socket.c ├── examples ├── udp_client.c ├── udp_server.c └── webserver.c ├── CMakeLists.txt ├── LICENSE └── src ├── lthread_poller.h ├── lthread_poller.c ├── lthread.h ├── lthread_epoll.c ├── lthread_kqueue.c ├── lthread_io.c ├── lthread_int.h ├── lthread_compute.c ├── lthread_sched.c ├── lthread.c ├── lthread_socket.c ├── queue.h └── tree.h /AUTHORS: -------------------------------------------------------------------------------- 1 | Hasan Alayli 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | lthread 2 | ======= 3 | 4 | [Docs](http://lthread.readthedocs.org/en/latest/intro.html) 5 | 6 | -------------------------------------------------------------------------------- /docs/images/lthread_scheduler.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/halayli/lthread/HEAD/docs/images/lthread_scheduler.png -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | User Documentation 2 | ================== 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | intro 8 | lthread 9 | socket 10 | disk_io 11 | examples 12 | 13 | .. 
_feature-docs: 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.la 2 | *.lo 3 | aclocal.m4 4 | autom4te.cache 5 | config.guess 6 | config.log 7 | config.status 8 | config.sub 9 | configure 10 | depcomp 11 | install-sh 12 | ltmain.sh 13 | Makefile 14 | Makefile.in 15 | missing 16 | src/.deps 17 | src/.libs 18 | *.DS_Store 19 | *.swp 20 | build* 21 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.11" 13 | 14 | # Build documentation in the docs/ directory with Sphinx 15 | sphinx: 16 | configuration: docs/conf.py 17 | -------------------------------------------------------------------------------- /tests/lthread_sleep.c: -------------------------------------------------------------------------------- 1 | #include "lthread.h" 2 | #include 3 | #include 4 | 5 | void a(void *x); 6 | 7 | void 8 | a(void *x) 9 | { 10 | int i = 3; 11 | struct timeval t1 = {0, 0}; 12 | struct timeval t2 = {0, 0}; 13 | lthread_detach(); 14 | int sleep_for = 2000; 15 | while (i--) { 16 | gettimeofday(&t1, NULL); 17 | lthread_sleep(sleep_for); 18 | gettimeofday(&t2, NULL); 19 | printf("a (%d): elapsed is: %lf, slept is %d\n", i, 20 | ((t2.tv_sec * 1000.0) + t2.tv_usec /1000.0) - 21 | ((t1.tv_sec * 1000.0) + t1.tv_usec/1000.0), sleep_for); 22 | } 23 | } 24 | 25 | 26 | int 27 | main(int argc, char **argv) 28 | { 29 | lthread_t *lt = NULL; 30 | 31 | lthread_create(<, a, NULL); 32 | lthread_run(); 33 | 34 | return 0; 35 | } 36 | 
-------------------------------------------------------------------------------- /tests/lthread_join.c: -------------------------------------------------------------------------------- 1 | #include "lthread.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | void a(void *x); 8 | void b(void *x); 9 | 10 | void 11 | b(void *x) 12 | { 13 | DEFINE_LTHREAD; 14 | printf("b is running\n"); 15 | lthread_compute_begin(); 16 | sleep(1); 17 | lthread_compute_end(); 18 | printf("b is exiting\n"); 19 | } 20 | 21 | void 22 | a(void *x) 23 | { 24 | lthread_t *lt_new = NULL; 25 | 26 | DEFINE_LTHREAD; 27 | lthread_detach(); 28 | 29 | printf("a is running\n"); 30 | lthread_create(<_new, b, NULL); 31 | lthread_join(lt_new, NULL, 10); 32 | printf("a is done joining on b.\n"); 33 | } 34 | 35 | 36 | int 37 | main(int argc, char **argv) 38 | { 39 | lthread_t *lt = NULL; 40 | 41 | lthread_create(<, a, NULL); 42 | lthread_run(); 43 | 44 | return 0; 45 | } 46 | -------------------------------------------------------------------------------- /tests/lthread_pipe.c: -------------------------------------------------------------------------------- 1 | #include "lthread.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | void 9 | write_pipe(void *arg) 10 | { 11 | int *pipes = arg; 12 | char buf[100] = "Hello world!"; 13 | printf("fd of 1 is %d\n", pipes[1]); 14 | lthread_write(pipes[1], buf, strlen(buf)); 15 | } 16 | 17 | void 18 | read_pipe(void *arg) 19 | { 20 | int *pipes = arg; 21 | char buf[100]; 22 | printf("fd of 0 is %d\n", pipes[0]); 23 | lthread_read(pipes[0], buf, 100, 0); 24 | printf("read %s\n", buf); 25 | } 26 | 27 | int 28 | main(int argc, char **argv) 29 | { 30 | lthread_t *lt = NULL; 31 | int pipes[2]; 32 | lthread_pipe(pipes); 33 | 34 | lthread_create(<, read_pipe, pipes); 35 | lthread_create(<, write_pipe, pipes); 36 | lthread_run(); 37 | 38 | return 0; 39 | } 40 | 
-------------------------------------------------------------------------------- /docs/disk_io.rst: -------------------------------------------------------------------------------- 1 | Disk IO 2 | ======= 3 | 4 | The way disk async functions are implemented in lthread is by using a native 5 | worker thread in the background to execute the actual read/write calls to disk. 6 | When an lthread calls :c:func:`lthread_io_read()` or :c:func:`lthread_io_write` 7 | a job is put on a queue for the native thread to pick up and the actual lthread 8 | yields until the read/write is done. 9 | 10 | Use :c:func:`lthread_io_read()` or :c:func:`lthread_io_write` when 11 | fd is a file descriptor to a file. 12 | 13 | lthread_io_read 14 | --------------- 15 | .. c:function:: ssize_t lthread_io_read(int fd, void *buf, size_t nbytes) 16 | 17 | An async version of read(2) for disk IO. 18 | 19 | lthread_io_write 20 | ---------------- 21 | .. c:function:: ssize_t lthread_io_write(int fd, const void *buf, size_t buf_len) 22 | 23 | An async version of write(2) for disk IO. 24 | 25 | 26 | lthread_sendfile 27 | ----------------- 28 | .. c:function:: int lthread_sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr) 29 | 30 | An lthread version of sendfile(2). `man 2 sendfile` for more details. 31 | 32 | .. note:: Available on FreeBSD only. 
33 | -------------------------------------------------------------------------------- /examples/udp_client.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | void 13 | udp_client(void *args) 14 | { 15 | struct sockaddr_in dest; 16 | socklen_t dest_len = sizeof(dest); 17 | int s; 18 | int ret; 19 | char buf[64] = "hello world!"; 20 | lthread_detach(); 21 | 22 | if ((s=lthread_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP))==-1) { 23 | perror("error"); 24 | return; 25 | } 26 | 27 | memset((char *) &dest, 0, sizeof(dest)); 28 | dest.sin_family = AF_INET; 29 | dest.sin_port = htons(5556); 30 | if (inet_aton("127.0.0.1", &dest.sin_addr)==0) { 31 | fprintf(stderr, "inet_aton() failed\n"); 32 | exit(1); 33 | } 34 | 35 | while (1) { 36 | ret = lthread_sendto(s, buf, 64, 0, (struct sockaddr *)&dest, dest_len); 37 | printf("ret returned %d\n", ret); 38 | lthread_sleep(1000); 39 | } 40 | 41 | close(s); 42 | } 43 | 44 | int 45 | main(void) 46 | { 47 | lthread_t *lt; 48 | lthread_create(<, udp_client, NULL); 49 | lthread_run(); 50 | 51 | return 0; 52 | } 53 | -------------------------------------------------------------------------------- /examples/udp_server.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | void 13 | udp_server(void *args) 14 | { 15 | struct sockaddr_in listener; 16 | struct sockaddr_in client; 17 | socklen_t listener_len = sizeof(listener); 18 | int s; 19 | int ret; 20 | char buf[64]; 21 | lthread_detach(); 22 | 23 | if ((s=lthread_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP))==-1) { 24 | perror("error"); 25 | return; 26 | } 27 | 28 | memset((char *) &listener, 0, sizeof(listener)); 29 | listener.sin_family = AF_INET; 30 | 
listener.sin_port = htons(5556); 31 | listener.sin_addr.s_addr = htonl(INADDR_ANY); 32 | if (bind(s, (struct sockaddr *)&listener, sizeof(listener)) == -1) { 33 | perror("Cannot bind"); 34 | return; 35 | } 36 | 37 | while (1) { 38 | ret = lthread_recvfrom(s, buf, 64, 0, (struct sockaddr *)&client, &listener_len, 1000); 39 | printf("ret returned %d: %s\n", ret, buf); 40 | } 41 | 42 | close(s); 43 | } 44 | 45 | int 46 | main(void) 47 | { 48 | lthread_t *lt; 49 | lthread_create(<, udp_server, NULL); 50 | lthread_run(); 51 | 52 | return 0; 53 | } 54 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required (VERSION 2.6) 2 | project (lthread) 3 | SET(CMAKE_BUILD_TYPE release) 4 | SET (CMAKE_C_FLAGS_RELEASE "-Werror -g -Wall") 5 | 6 | set(LTHREAD_VERSION_MAJOR 1) 7 | set(LTHREAD_VERSION_MINOR 0) 8 | set(LTHREAD_VERSION_PATCH 0) 9 | set(LTHREAD_VERSION_STRING ${LTHREAD_VERSION_MAJOR}.${LTHREAD_VERSION_MINOR}.${LTHREAD_VERSION_PATCH}) 10 | 11 | set(lthread_files src/lthread.c src/lthread_socket.c 12 | src/lthread_sched.c src/lthread_io.c 13 | src/lthread_poller.c src/lthread_compute.c) 14 | 15 | add_library(lthread ${lthread_files}) 16 | set_property(SOURCE ${lthread_files} PROPERTY COMPILE_FLAGS "-O2") 17 | 18 | install(TARGETS lthread DESTINATION lib) 19 | install(FILES src/lthread.h DESTINATION include) 20 | 21 | set(UNIT_TEST_PATH ${CMAKE_SOURCE_DIR}/tests) 22 | set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${UNIT_TEST_PATH}) 23 | include_directories(src) 24 | 25 | add_executable(lthread_sleep tests/lthread_sleep.c) 26 | target_link_libraries(lthread_sleep lthread pthread) 27 | 28 | add_executable(lthread_io tests/lthread_io.c) 29 | target_link_libraries(lthread_io lthread pthread) 30 | 31 | add_executable(lthread_join tests/lthread_join.c) 32 | target_link_libraries(lthread_join lthread pthread) 33 | 34 | enable_testing() 35 | 
add_test(lthread_sleep ${UNIT_TEST_PATH}/lthread_sleep) 36 | add_test(lthread_join ${UNIT_TEST_PATH}/lthread_join) 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2012, Hasan Alayli 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 | * SUCH DAMAGE. 
24 | */ 25 | -------------------------------------------------------------------------------- /tests/lthread_unit_test_compute.c: -------------------------------------------------------------------------------- 1 | #include "lthread.h" 2 | #include 3 | #include 4 | 5 | void a(lthread_t *lt, void *x); 6 | 7 | void 8 | test(char *p) 9 | { 10 | printf("char *p is %p, %c\n", p, *p); 11 | *p = 'b'; 12 | } 13 | 14 | void 15 | a(lthread_t *lt ,void *arg) 16 | { 17 | int i = 3; 18 | struct timeval t1 = {0, 0}; 19 | struct timeval t2 = {0, 0}; 20 | char x = 'a'; 21 | char *y = &x; 22 | DEFINE_LTHREAD; 23 | lthread_detach(); 24 | 25 | printf("char *p is %p, %c\n", &x, x); 26 | while (i--) { 27 | lthread_compute_begin(); 28 | test(y); 29 | printf("I am A in a compute thread \n"); 30 | lthread_compute_end(); 31 | printf("char *p became %p, %c\n", &x, x); 32 | printf("A going for a nap\n"); 33 | lthread_sleep(3000); 34 | printf("A after sleep: %d\n", i); 35 | } 36 | printf("a is exiting\n"); 37 | } 38 | 39 | void 40 | b(lthread_t *lt ,void *x) 41 | { 42 | int i = 3; 43 | struct timeval t1 = {0, 0}; 44 | struct timeval t2 = {0, 0}; 45 | DEFINE_LTHREAD; 46 | lthread_detach(); 47 | 48 | lthread_sleep(1000); 49 | while (i--) { 50 | printf(" I am B before begin\n"); 51 | lthread_compute_begin(); 52 | printf("I am B in a compute thread \n"); 53 | sleep(10); 54 | lthread_compute_end(); 55 | printf("B after sleep\n"); 56 | } 57 | printf("b is exiting\n"); 58 | } 59 | 60 | int 61 | main(int argc, char **argv) 62 | { 63 | lthread_t *lt = NULL; 64 | 65 | lthread_create(<, a, NULL); 66 | lthread_create(<, b, NULL); 67 | lthread_run(); 68 | 69 | return 0; 70 | } 71 | -------------------------------------------------------------------------------- /tests/lthread_io.c: -------------------------------------------------------------------------------- 1 | #include "lthread.h" 2 | #include 3 | #include 4 | #include 5 | 6 | void a(void *x); 7 | char line[] = "this is an lthread io test\n"; 8 | 9 | 
void 10 | a(void *arg) 11 | { 12 | int i, x; 13 | i = x = 50; 14 | struct timeval t1 = {0, 0}; 15 | struct timeval t2 = {0, 0}; 16 | lthread_detach(); 17 | int fd = 0; 18 | double total = 0; 19 | 20 | fd = open("/tmp/lthread_io.txt", O_CREAT | O_APPEND | O_WRONLY, 0640); 21 | lthread_io_write(fd, line, sizeof(line)); 22 | while (i--) { 23 | gettimeofday(&t1, NULL); 24 | lthread_io_write(fd, line, sizeof(line)); 25 | //write(fd, line, sizeof(line)); 26 | gettimeofday(&t2, NULL); 27 | total += ((t2.tv_sec * 1000000) + t2.tv_usec) - 28 | ((t1.tv_sec * 1000000) + t1.tv_usec); 29 | printf("elapsed is: %ld\n", 30 | ((t2.tv_sec * 1000000) + t2.tv_usec) - 31 | ((t1.tv_sec * 1000000) + t1.tv_usec)); 32 | 33 | } 34 | printf("io_write took %lf msec (avg: %lf usec) to write %d lines\n", total, total / (double)x, x); 35 | } 36 | 37 | void 38 | b(void *x) 39 | { 40 | int i = 5; 41 | struct timeval t1 = {0, 0}; 42 | struct timeval t2 = {0, 0}; 43 | lthread_detach(); 44 | int sleep_for = 2000; 45 | while (i--) { 46 | gettimeofday(&t1, NULL); 47 | lthread_sleep(sleep_for); 48 | gettimeofday(&t2, NULL); 49 | printf("a (%d): elapsed is: %lf, slept is %d\n", i, 50 | ((t2.tv_sec * 1000.0) + t2.tv_usec /1000.0) - 51 | ((t1.tv_sec * 1000.0) + t1.tv_usec/1000.0), sleep_for); 52 | } 53 | } 54 | 55 | int 56 | main(int argc, char **argv) 57 | { 58 | lthread_t *lt = NULL; 59 | 60 | lthread_create(<, a, NULL); 61 | //lthread_create(<, b, NULL); 62 | lthread_run(); 63 | 64 | return 0; 65 | } 66 | -------------------------------------------------------------------------------- /src/lthread_poller.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Lthread 3 | * Copyright (C) 2012, Hasan Alayli 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. 
Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 | * SUCH DAMAGE. 
25 | * 26 | * lthread_poller.h 27 | */ 28 | 29 | 30 | #ifndef LTHREAD_POLLER_H 31 | #define LTHREAD_POLLER_H 32 | 33 | #if defined(__FreeBSD__) || defined(__APPLE__) 34 | #include 35 | #define POLL_EVENT_TYPE struct kevent 36 | #else 37 | #include 38 | #define POLL_EVENT_TYPE struct epoll_event 39 | #endif 40 | #include 41 | 42 | struct lthread_sched; 43 | struct lthread; 44 | enum lthread_event; 45 | 46 | int _lthread_poller_create(void); 47 | int _lthread_poller_poll(struct timespec t); 48 | void _lthread_poller_ev_register_rd(int fd); 49 | void _lthread_poller_ev_register_wr(int fd); 50 | void _lthread_poller_ev_clear_wr(int fd); 51 | void _lthread_poller_ev_clear_rd(int fd); 52 | void _lthread_poller_ev_register_trigger(void); 53 | void _lthread_poller_ev_trigger(struct lthread_sched *sched); 54 | void _lthread_poller_ev_clear_trigger(void); 55 | void _lthread_poller_set_fd_ready(struct lthread *lt, int fd, 56 | enum lthread_event, int is_eof); 57 | 58 | int _lthread_poller_ev_get_event(POLL_EVENT_TYPE *ev); 59 | int _lthread_poller_ev_get_fd(POLL_EVENT_TYPE *ev); 60 | int _lthread_poller_ev_is_eof(POLL_EVENT_TYPE *ev); 61 | int _lthread_poller_ev_is_read(POLL_EVENT_TYPE *ev); 62 | int _lthread_poller_ev_is_write(POLL_EVENT_TYPE *ev); 63 | 64 | #endif 65 | -------------------------------------------------------------------------------- /src/lthread_poller.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Lthread 3 | * Copyright (C) 2012, Hasan Alayli 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. 
Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 | * SUCH DAMAGE. 25 | * 26 | * poller.c 27 | */ 28 | 29 | 30 | #if defined(__FreeBSD__) || defined(__APPLE__) 31 | #include "lthread_kqueue.c" 32 | #else 33 | #include "lthread_epoll.c" 34 | #endif 35 | 36 | #include "lthread_int.h" 37 | 38 | void 39 | _lthread_poller_set_fd_ready(struct lthread *lt, int fd, enum lthread_event e, 40 | int is_eof) 41 | { 42 | /* 43 | * not all scheduled fds in the poller are guaranteed to have triggered, 44 | * deschedule them all and cancel events in poller so they don't trigger later. 
45 | */ 46 | int i; 47 | if (lt->ready_fds == 0) { 48 | for (i = 0; i < lt->nfds; i++) 49 | if (lt->pollfds[i].events & POLLIN) { 50 | _lthread_poller_ev_clear_rd(lt->pollfds[i].fd); 51 | _lthread_desched_event(lt->pollfds[i].fd, LT_EV_READ); 52 | } else if (lt->pollfds[i].events & POLLOUT) { 53 | _lthread_poller_ev_clear_wr(lt->pollfds[i].fd); 54 | _lthread_desched_event(lt->pollfds[i].fd, LT_EV_WRITE); 55 | } 56 | } 57 | 58 | 59 | lt->pollfds[lt->ready_fds].fd = fd; 60 | if (e == LT_EV_WRITE) 61 | lt->pollfds[lt->ready_fds].events = POLLOUT; 62 | else 63 | lt->pollfds[lt->ready_fds].events = POLLIN; 64 | 65 | if (is_eof) 66 | lt->pollfds[lt->ready_fds].events |= POLLHUP; 67 | 68 | lt->ready_fds++; 69 | } 70 | -------------------------------------------------------------------------------- /tests/lthread_socket.c: -------------------------------------------------------------------------------- 1 | #include "lthread.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | struct conn { 13 | int fd; 14 | }; 15 | 16 | void 17 | socket_client_server_reader(void *cli_fd) 18 | { 19 | int fd = ((struct conn *)cli_fd)->fd; 20 | char buf[100]; 21 | int r = 0; 22 | while (1) { 23 | r = lthread_read(fd, buf, 100, 10); 24 | if (r > 0) 25 | printf("read from server: %.*s\n", r, buf); 26 | } 27 | } 28 | 29 | void 30 | socket_client_server_writer(void *cli_fd) 31 | { 32 | char buf[100] = "Hello from client!"; 33 | int fd = ((struct conn *)cli_fd)->fd; 34 | printf("fd is %d\n", fd); 35 | while (1) { 36 | lthread_write(fd, buf, strlen(buf)); 37 | lthread_sleep(1000); 38 | } 39 | } 40 | 41 | void 42 | client(void *arg) 43 | { 44 | int s, t, len; 45 | int i = 10; 46 | struct sockaddr_un remote; 47 | char str[100]; 48 | lthread_t *lt; 49 | remote.sun_family = PF_UNIX; 50 | strcpy(remote.sun_path, "/tmp/lthread.sock"); 51 | len = strlen(remote.sun_path) + sizeof(remote.sun_family); 52 | if ((s = lthread_socket(PF_UNIX, 
SOCK_STREAM, 0)) == -1) { 53 | perror("socket"); 54 | exit(1); 55 | } 56 | if (lthread_connect(s, (struct sockaddr *)&remote, len, 0) == -1) { 57 | perror("connect"); 58 | exit(1); 59 | } 60 | struct conn *c = calloc(1, sizeof(struct conn)); 61 | c->fd = s; 62 | lthread_create(<, socket_client_server_reader, c); 63 | lthread_create(<, socket_client_server_writer, c); 64 | } 65 | 66 | void 67 | socket_server_client_reader(void *cli_fd) 68 | { 69 | int fd = ((struct conn *)cli_fd)->fd; 70 | char buf[100]; 71 | int r = 0; 72 | printf("socket_server_client_reader started\n"); 73 | while (1) { 74 | r = lthread_read(fd, buf, 100, 0); 75 | printf("read from client: %.*s\n", r, buf); 76 | } 77 | } 78 | 79 | void 80 | socket_server_client_writer(void *cli_fd) 81 | { 82 | int fd = ((struct conn *)cli_fd)->fd; 83 | char buf[100] = "Hello from server!"; 84 | printf("socket_server_client_writer started\n"); 85 | while (1) { 86 | lthread_write(fd, buf, strlen(buf)); 87 | lthread_sleep(1000); 88 | } 89 | } 90 | 91 | void 92 | server(void *arg) 93 | { 94 | int fd = 0; 95 | int cli_fd = 0; 96 | int len = 0; 97 | lthread_t *lt; 98 | struct sockaddr_un local, remote; 99 | fd = lthread_socket(PF_UNIX, SOCK_STREAM, 0); 100 | 101 | local.sun_family = PF_UNIX; 102 | strcpy(local.sun_path, "/tmp/lthread.sock"); 103 | unlink(local.sun_path); 104 | len = strlen(local.sun_path) + sizeof(local.sun_family); 105 | 106 | bind(fd, (struct sockaddr *)&local, len); 107 | listen(fd, 100); 108 | 109 | while (1) { 110 | len = sizeof(struct sockaddr_un); 111 | cli_fd = lthread_accept(fd, (struct sockaddr *)&remote, &len); 112 | struct conn *c = calloc(1, sizeof(struct conn)); 113 | c->fd = cli_fd; 114 | lthread_create(<, socket_server_client_reader, c); 115 | lthread_create(<, socket_server_client_writer, c); 116 | } 117 | } 118 | 119 | int 120 | main(int argc, char **argv) 121 | { 122 | lthread_t *lt = NULL; 123 | 124 | lthread_create(<, server, NULL); 125 | lthread_create(<, client, NULL); 126 | 
lthread_run(); 127 | 128 | return 0; 129 | } 130 | -------------------------------------------------------------------------------- /docs/intro.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | lthread is a multicore/multithread coroutine library written in C. It uses [Sam Rushing's](https://github.com/samrushing) _swap function to swap lthreads. What's special about lthread is that it allows you to make *blocking calls* and *expensive* computations, blocking IO inside a coroutine, providing you with the advantages of coroutines and pthreads. See the http server example below. 5 | 6 | lthreads are created in userspace and don't require kernel intervention, they are light weight and ideal for socket programming. Each lthread have separate stack, and the stack is madvise(2)-ed to save space, allowing you to create thousands(tested with a million lthreads) of coroutines and maintain a low memory footprint. The scheduler is hidden from the user and is created automagically in each pthread, allowing the user to take advantage of cpu cores and distribute the load by creating several pthreads, each running it's own lthread scheduler and handling its own share of coroutines. Locks are necessary when accessing global variables from lthreads running in different pthreads, and lthreads must not block on pthread condition variables as this will block the whole lthread scheduler in the pthread. 7 | 8 | .. image:: images/lthread_scheduler.png 9 | 10 | To run an lthread scheduler in each pthread, launch a pthread and create lthreads using lthread_create() followed by lthread_run() in each pthread. 11 | 12 | **Scheduler** 13 | 14 | The scheduler is build around epoll/kqueue and uses an rbtree to track which lthreads needs to run next. 
15 | 16 | If you need to execute an expensive computation or make a blocking call inside an lthread, you can surround the block of code with `lthread_compute_begin()` and `lthread_compute_end()`, which moves the lthread into an lthread_compute_scheduler that runs in its own pthread to avoid blocking other lthreads. lthread_compute_schedulers are created when needed and they die after 60 seconds of inactivity. `lthread_compute_begin()` tries to pick an already created and free lthread_compute_scheduler before it creates a new one. 17 | 18 | Installation 19 | ------------ 20 | 21 | Currently, lthread is supported on FreeBSD, OS X, and Linux (x86 & 64bit arch). 22 | 23 | To build and install, simply: 24 | 25 | .. code-block:: Shell 26 | 27 | cmake . 28 | sudo make install 29 | 30 | :: 31 | 32 | Linking 33 | ------- 34 | 35 | `#include ` 36 | 37 | Pass `-llthread` to gcc to use lthread in your program. 38 | 39 | C++11 Bindings 40 | -------------- 41 | 42 | Docs and instructions to download/install can be found `here `_. 43 | 44 | 45 | License 46 | ------- 47 | 48 | Copyright (C) 2012, Hasan Alayli 49 | 50 | Redistribution and use in source and binary forms, with or without 51 | modification, are permitted provided that the following conditions 52 | are met: 53 | 1. Redistributions of source code must retain the above copyright 54 | notice, this list of conditions and the following disclaimer. 55 | 2. Redistributions in binary form must reproduce the above copyright 56 | notice, this list of conditions and the following disclaimer in the 57 | documentation and/or other materials provided with the distribution. 58 | 59 | THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 60 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 61 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 62 | ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 63 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 64 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 65 | OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 66 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 67 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 68 | OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 69 | SUCH DAMAGE. 70 | -------------------------------------------------------------------------------- /examples/webserver.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | struct cli_info { 8 | /* other stuff if needed*/ 9 | struct sockaddr_in peer_addr; 10 | int fd; 11 | }; 12 | 13 | typedef struct cli_info cli_info_t; 14 | 15 | char *reply = "HTTP/1.0 200 OK\r\nContent-length: 11\r\n\r\nHello Kannan"; 16 | 17 | unsigned long long int 18 | fibonacci(unsigned long long int n) 19 | { 20 | if (n == 0) 21 | return 0; 22 | if (n == 1) 23 | return 1; 24 | 25 | return fibonacci(n - 1) + fibonacci(n - 2); 26 | } 27 | 28 | void 29 | http_serv(void *arg) 30 | { 31 | cli_info_t *cli_info = arg; 32 | char *buf = NULL; 33 | unsigned long long int ret = 0; 34 | char ipstr[INET6_ADDRSTRLEN]; 35 | lthread_detach(); 36 | 37 | inet_ntop(AF_INET, &cli_info->peer_addr.sin_addr, ipstr, INET_ADDRSTRLEN); 38 | printf("Accepted connection on IP %s\n", ipstr); 39 | 40 | if ((buf = malloc(1024)) == NULL) 41 | return; 42 | 43 | /* read data from client or timeout in 5 secs */ 44 | ret = lthread_recv(cli_info->fd, buf, 1024, 0, 5000); 45 | 46 | /* did we timeout before the user has sent us anything? 
*/ 47 | if (ret == -2) { 48 | lthread_close(cli_info->fd); 49 | free(buf); 50 | free(arg); 51 | return; 52 | } 53 | 54 | /* 55 | * Run an expensive computation without blocking other lthreads. 56 | * lthread_compute_begin() will yield http_serv coroutine and resumes 57 | * it in a compute scheduler that runs in a pthread. If a compute scheduler 58 | * is already available and free it will be used otherwise a compute scheduler 59 | * is created and launched in a new pthread. After the compute scheduler 60 | * resumes the lthread it will wait 60 seconds for a new job and dies after 60 61 | * of inactivity. 62 | */ 63 | lthread_compute_begin(); 64 | /* make an expensive call without blocking other coroutines */ 65 | ret = fibonacci(55); 66 | lthread_compute_end(); 67 | printf("Computation completed\n"); 68 | /* reply back to user */ 69 | lthread_send(cli_info->fd, reply, strlen(reply), 0); 70 | lthread_close(cli_info->fd); 71 | free(buf); 72 | free(arg); 73 | } 74 | 75 | void 76 | listener(lthread_t *lt, void *arg) 77 | { 78 | int cli_fd = 0; 79 | int lsn_fd = 0; 80 | int opt = 1; 81 | int ret = 0; 82 | struct sockaddr_in peer_addr = {}; 83 | struct sockaddr_in sin = {}; 84 | socklen_t addrlen = sizeof(peer_addr); 85 | lthread_t *cli_lt = NULL; 86 | cli_info_t *cli_info = NULL; 87 | char ipstr[INET6_ADDRSTRLEN]; 88 | 89 | DEFINE_LTHREAD; 90 | 91 | /* create listening socket */ 92 | lsn_fd = lthread_socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); 93 | if (lsn_fd == -1) 94 | return; 95 | 96 | if (setsockopt(lsn_fd, SOL_SOCKET, SO_REUSEADDR, &opt,sizeof(int)) == -1) 97 | perror("failed to set SOREUSEADDR on socket"); 98 | 99 | sin.sin_family = PF_INET; 100 | sin.sin_addr.s_addr = INADDR_ANY; 101 | sin.sin_port = htons(3128); 102 | 103 | /* bind to the listening port */ 104 | ret = bind(lsn_fd, (struct sockaddr *)&sin, sizeof(sin)); 105 | if (ret == -1) { 106 | perror("Failed to bind on port 3128"); 107 | return; 108 | } 109 | 110 | printf("Starting listener on 3128\n"); 111 | 
112 | listen(lsn_fd, 128); 113 | 114 | while (1) { 115 | /* block until a new connection arrives */ 116 | cli_fd = lthread_accept(lsn_fd, (struct sockaddr*)&peer_addr, &addrlen); 117 | if (cli_fd == -1) { 118 | perror("Failed to accept connection"); 119 | return; 120 | } 121 | 122 | if ((cli_info = malloc(sizeof(cli_info_t))) == NULL) { 123 | close(cli_fd); 124 | continue; 125 | } 126 | cli_info->peer_addr = peer_addr; 127 | cli_info->fd = cli_fd; 128 | /* launch a new lthread that takes care of this client */ 129 | ret = lthread_create(&cli_lt, http_serv, cli_info); 130 | } 131 | } 132 | 133 | int 134 | main(int argc, char **argv) 135 | { 136 | lthread_t *lt = NULL; 137 | 138 | lthread_create(<, listener, NULL); 139 | lthread_run(); 140 | 141 | return 0; 142 | } 143 | -------------------------------------------------------------------------------- /docs/examples.rst: -------------------------------------------------------------------------------- 1 | Examples 2 | ======== 3 | 4 | Webserver 5 | --------- 6 | 7 | 8 | `gcc -I/usr/local/include -llthread test.c -o test` 9 | 10 | .. 
code-block:: C 11 | 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | struct cli_info { 19 | /* other stuff if needed*/ 20 | struct sockaddr_in peer_addr; 21 | int fd; 22 | }; 23 | 24 | typedef struct cli_info cli_info_t; 25 | 26 | char *reply = "HTTP/1.0 200 OK\r\nContent-length: 11\r\n\r\nHello World"; 27 | 28 | unsigned int 29 | fibonacci(unsigned int n) 30 | { 31 | if (n == 0) 32 | return 0; 33 | if (n == 1) 34 | return 1; 35 | 36 | return fibonacci(n - 1) + fibonacci(n - 2); 37 | } 38 | 39 | void 40 | http_serv(lthread_t *lt, void *arg) 41 | { 42 | cli_info_t *cli_info = arg; 43 | char *buf = NULL; 44 | int ret = 0; 45 | char ipstr[INET6_ADDRSTRLEN]; 46 | lthread_detach(); 47 | 48 | inet_ntop(AF_INET, &cli_info->peer_addr.sin_addr, ipstr, INET_ADDRSTRLEN); 49 | printf("Accepted connection on IP %s\n", ipstr); 50 | 51 | if ((buf = malloc(1024)) == NULL) 52 | return; 53 | 54 | /* read data from client or timeout in 5 secs */ 55 | ret = lthread_recv(cli_info->fd, buf, 1024, 0, 5000); 56 | 57 | /* did we timeout before the user has sent us anything? */ 58 | if (ret == -2) { 59 | lthread_close(cli_info->fd); 60 | free(buf); 61 | free(arg); 62 | return; 63 | } 64 | 65 | /* 66 | * Run an expensive computation without blocking other lthreads. 67 | * lthread_compute_begin() will yield http_serv coroutine and resumes 68 | * it in a compute scheduler that runs in a pthread. If a compute scheduler 69 | * is already available and free it will be used otherwise a compute scheduler 70 | * is created and launched in a new pthread. After the compute scheduler 71 | * resumes the lthread it will wait 60 seconds for a new job and dies after 60 72 | * of inactivity. 
73 | */ 74 | lthread_compute_begin(); 75 | /* make an expensive call without blocking other coroutines */ 76 | ret = fibonacci(35); 77 | lthread_compute_end(); 78 | 79 | /* reply back to user */ 80 | lthread_send(cli_info->fd, reply, strlen(reply), 0); 81 | lthread_close(cli_info->fd); 82 | free(buf); 83 | free(arg); 84 | } 85 | 86 | void 87 | listener(void *arg) 88 | { 89 | int cli_fd = 0; 90 | int lsn_fd = 0; 91 | int opt = 1; 92 | int ret = 0; 93 | struct sockaddr_in peer_addr = {}; 94 | struct sockaddr_in sin = {}; 95 | socklen_t addrlen = sizeof(peer_addr); 96 | lthread_t *cli_lt = NULL; 97 | cli_info_t *cli_info = NULL; 98 | char ipstr[INET6_ADDRSTRLEN]; 99 | lthread_detach(); 100 | 101 | DEFINE_LTHREAD; 102 | 103 | /* create listening socket */ 104 | lsn_fd = lthread_socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); 105 | if (lsn_fd == -1) 106 | return; 107 | 108 | if (setsockopt(lsn_fd, SOL_SOCKET, SO_REUSEADDR, &opt,sizeof(int)) == -1) 109 | perror("failed to set SOREUSEADDR on socket"); 110 | 111 | sin.sin_family = PF_INET; 112 | sin.sin_addr.s_addr = INADDR_ANY; 113 | sin.sin_port = htons(3128); 114 | 115 | /* bind to the listening port */ 116 | ret = bind(lsn_fd, (struct sockaddr *)&sin, sizeof(sin)); 117 | if (ret == -1) { 118 | perror("Failed to bind on port 3128"); 119 | return; 120 | } 121 | 122 | printf("Starting listener on 3128\n"); 123 | 124 | listen(lsn_fd, 128); 125 | 126 | while (1) { 127 | /* block until a new connection arrives */ 128 | cli_fd = lthread_accept(lsn_fd, (struct sockaddr*)&peer_addr, &addrlen); 129 | if (cli_fd == -1) { 130 | perror("Failed to accept connection"); 131 | return; 132 | } 133 | 134 | if ((cli_info = malloc(sizeof(cli_info_t))) == NULL) { 135 | close(cli_fd); 136 | continue; 137 | } 138 | cli_info->peer_addr = peer_addr; 139 | cli_info->fd = cli_fd; 140 | /* launch a new lthread that takes care of this client */ 141 | ret = lthread_create(&cli_lt, http_serv, cli_info); 142 | } 143 | } 144 | 145 | int 146 | main(int argc, 
char **argv) 147 | { 148 | lthread_t *lt = NULL; 149 | 150 | lthread_create(<, listener, NULL); 151 | lthread_run(); 152 | 153 | return 0; 154 | } 155 | 156 | :: 157 | -------------------------------------------------------------------------------- /src/lthread.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Lthread 3 | * Copyright (C) 2012, Hasan Alayli 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 | * SUCH DAMAGE. 
25 | * 26 | * lthread.h 27 | */ 28 | 29 | 30 | #ifndef LTHREAD_H 31 | #define LTHREAD_H 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | 39 | #define DEFINE_LTHREAD (lthread_set_funcname(__func__)) 40 | 41 | #ifndef LTHREAD_INT_H 42 | struct lthread; 43 | struct lthread_cond; 44 | typedef struct lthread lthread_t; 45 | typedef struct lthread_cond lthread_cond_t; 46 | #endif 47 | 48 | char *lthread_summary(); 49 | 50 | typedef void (*lthread_func)(void *); 51 | #ifdef __cplusplus 52 | extern "C" { 53 | #endif 54 | 55 | int lthread_create(lthread_t **new_lt, lthread_func, void *arg); 56 | void lthread_cancel(lthread_t *lt); 57 | void lthread_run(void); 58 | int lthread_join(lthread_t *lt, void **ptr, uint64_t timeout); 59 | void lthread_detach(void); 60 | void lthread_detach2(lthread_t *lt); 61 | void lthread_exit(void *ptr); 62 | void lthread_sleep(uint64_t msecs); 63 | void lthread_wakeup(lthread_t *lt); 64 | int lthread_cond_create(lthread_cond_t **c); 65 | int lthread_cond_wait(lthread_cond_t *c, uint64_t timeout); 66 | void lthread_cond_signal(lthread_cond_t *c); 67 | void lthread_cond_broadcast(lthread_cond_t *c); 68 | int lthread_init(size_t size); 69 | void *lthread_get_data(void); 70 | void lthread_set_data(void *data); 71 | lthread_t *lthread_current(); 72 | 73 | /* socket related functions */ 74 | int lthread_socket(int, int, int); 75 | int lthread_pipe(int fildes[2]); 76 | int lthread_accept(int fd, struct sockaddr *, socklen_t *); 77 | int lthread_close(int fd); 78 | void lthread_set_funcname(const char *f); 79 | uint64_t lthread_id(); 80 | struct lthread* lthread_self(void); 81 | int lthread_connect(int fd, struct sockaddr *, socklen_t, uint64_t timeout); 82 | ssize_t lthread_recv(int fd, void *buf, size_t buf_len, int flags, 83 | uint64_t timeout); 84 | ssize_t lthread_read(int fd, void *buf, size_t length, uint64_t timeout); 85 | ssize_t lthread_readline(int fd, char **buf, size_t max, uint64_t timeout); 86 | ssize_t 
lthread_recv_exact(int fd, void *buf, size_t buf_len, int flags, 87 | uint64_t timeout); 88 | ssize_t lthread_read_exact(int fd, void *buf, size_t length, uint64_t timeout); 89 | ssize_t lthread_recvmsg(int fd, struct msghdr *message, int flags, 90 | uint64_t timeout); 91 | ssize_t lthread_recvfrom(int fd, void *buf, size_t length, int flags, 92 | struct sockaddr *address, socklen_t *address_len, uint64_t timeout); 93 | 94 | ssize_t lthread_send(int fd, const void *buf, size_t buf_len, int flags); 95 | ssize_t lthread_write(int fd, const void *buf, size_t buf_len); 96 | ssize_t lthread_sendmsg(int fd, const struct msghdr *message, int flags); 97 | ssize_t lthread_sendto(int fd, const void *buf, size_t length, int flags, 98 | const struct sockaddr *dest_addr, socklen_t dest_len); 99 | ssize_t lthread_writev(int fd, struct iovec *iov, int iovcnt); 100 | int lthread_wait_read(int fd, int timeout_ms); 101 | int lthread_wait_write(int fd, int timeout_ms); 102 | #ifdef __FreeBSD__ 103 | int lthread_sendfile(int fd, int s, off_t offset, size_t nbytes, 104 | struct sf_hdtr *hdtr); 105 | #endif 106 | ssize_t lthread_io_write(int fd, void *buf, size_t nbytes); 107 | ssize_t lthread_io_read(int fd, void *buf, size_t nbytes); 108 | int lthread_poll(struct pollfd *fds, nfds_t nfds, int timeout); 109 | 110 | int lthread_compute_begin(void); 111 | void lthread_compute_end(void); 112 | #ifdef __cplusplus 113 | } 114 | #endif 115 | 116 | #endif 117 | -------------------------------------------------------------------------------- /src/lthread_epoll.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Lthread 3 | * Copyright (C) 2012, Hasan Alayli 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. 
Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 | * SUCH DAMAGE. 
25 | * 26 | * lthread_epoll.c 27 | */ 28 | 29 | #include "lthread_int.h" 30 | #include 31 | #include 32 | #include 33 | #include 34 | 35 | int 36 | _lthread_poller_create(void) 37 | { 38 | return (epoll_create(1024)); 39 | } 40 | 41 | inline int 42 | _lthread_poller_poll(struct timespec t) 43 | { 44 | struct lthread_sched *sched = lthread_get_sched(); 45 | 46 | return (epoll_wait(sched->poller_fd, sched->eventlist, LT_MAX_EVENTS, 47 | t.tv_sec*1000.0 + t.tv_nsec/1000000.0)); 48 | } 49 | 50 | inline void 51 | _lthread_poller_ev_clear_rd(int fd) 52 | { 53 | struct epoll_event ev; 54 | int ret = 0; 55 | struct lthread_sched *sched = lthread_get_sched(); 56 | 57 | ev.data.fd = fd; 58 | ev.events = EPOLLIN | EPOLLONESHOT | EPOLLRDHUP; 59 | ret = epoll_ctl(sched->poller_fd, EPOLL_CTL_DEL, fd, &ev); 60 | assert(ret != -1); 61 | } 62 | 63 | inline void 64 | _lthread_poller_ev_clear_wr(int fd) 65 | { 66 | struct epoll_event ev; 67 | int ret = 0; 68 | struct lthread_sched *sched = lthread_get_sched(); 69 | 70 | ev.data.fd = fd; 71 | ev.events = EPOLLOUT | EPOLLONESHOT | EPOLLRDHUP; 72 | ret = epoll_ctl(sched->poller_fd, EPOLL_CTL_DEL, fd, &ev); 73 | assert(ret != -1); 74 | } 75 | 76 | inline void 77 | _lthread_poller_ev_register_rd(int fd) 78 | { 79 | struct epoll_event ev; 80 | int ret = 0; 81 | struct lthread_sched *sched = lthread_get_sched(); 82 | 83 | ev.events = EPOLLIN | EPOLLONESHOT | EPOLLRDHUP; 84 | ev.data.fd = fd; 85 | ret = epoll_ctl(sched->poller_fd, EPOLL_CTL_MOD, fd, &ev); 86 | if (ret < 0) 87 | ret = epoll_ctl(sched->poller_fd, EPOLL_CTL_ADD, fd, &ev); 88 | assert(ret != -1); 89 | } 90 | 91 | inline void 92 | _lthread_poller_ev_register_wr(int fd) 93 | { 94 | struct epoll_event ev; 95 | int ret = 0; 96 | struct lthread_sched *sched = lthread_get_sched(); 97 | 98 | ev.events = EPOLLOUT | EPOLLONESHOT | EPOLLRDHUP; 99 | ev.data.fd = fd; 100 | ret = epoll_ctl(sched->poller_fd, EPOLL_CTL_MOD, fd, &ev); 101 | if (ret < 0) 102 | ret = epoll_ctl(sched->poller_fd, 
EPOLL_CTL_ADD, fd, &ev); 103 | assert(ret != -1); 104 | } 105 | 106 | inline int 107 | _lthread_poller_ev_get_fd(struct epoll_event *ev) 108 | { 109 | return (ev->data.fd); 110 | } 111 | 112 | inline int 113 | _lthread_poller_ev_get_event(struct epoll_event *ev) 114 | { 115 | return (ev->events); 116 | } 117 | 118 | inline int 119 | _lthread_poller_ev_is_eof(struct epoll_event *ev) 120 | { 121 | return (ev->events & EPOLLHUP); 122 | } 123 | 124 | inline int 125 | _lthread_poller_ev_is_write(struct epoll_event *ev) 126 | { 127 | return (ev->events & EPOLLOUT); 128 | } 129 | 130 | inline int 131 | _lthread_poller_ev_is_read(struct epoll_event *ev) 132 | { 133 | return (ev->events & EPOLLIN); 134 | } 135 | 136 | inline void 137 | _lthread_poller_ev_register_trigger(void) 138 | { 139 | struct lthread_sched *sched = lthread_get_sched(); 140 | int ret = 0; 141 | struct epoll_event ev; 142 | 143 | if (!sched->eventfd) { 144 | sched->eventfd = eventfd(0, EFD_NONBLOCK); 145 | assert(sched->eventfd != -1); 146 | } 147 | ev.events = EPOLLIN; 148 | ev.data.fd = sched->eventfd; 149 | ret = epoll_ctl(sched->poller_fd, EPOLL_CTL_ADD, sched->eventfd, &ev); 150 | assert(ret != -1); 151 | } 152 | 153 | inline void 154 | _lthread_poller_ev_clear_trigger(void) 155 | { 156 | uint64_t tmp; 157 | struct lthread_sched *sched = lthread_get_sched(); 158 | assert(read(sched->eventfd, &tmp, sizeof(uint64_t)) == sizeof(uint64_t)); 159 | } 160 | 161 | inline void 162 | _lthread_poller_ev_trigger(struct lthread_sched *sched) 163 | { 164 | uint64_t tmp = 2; 165 | assert(write(sched->eventfd, &tmp, sizeof(uint64_t)) == sizeof(uint64_t)); 166 | } 167 | -------------------------------------------------------------------------------- /src/lthread_kqueue.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Lthread 3 | * Copyright (C) 2012, Hasan Alayli 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are 
permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 | * SUCH DAMAGE. 
25 | * 26 | * lthread_kqueue.c 27 | */ 28 | 29 | #include "lthread_int.h" 30 | #include 31 | #include 32 | 33 | static inline void 34 | _lthread_poller_flush_events(void) 35 | { 36 | struct lthread_sched *sched = lthread_get_sched(); 37 | struct timespec tm = {0, 0}; 38 | 39 | assert(kevent(sched->poller_fd, sched->changelist, 40 | sched->nevents, NULL, 0, &tm) == 0); 41 | sched->nevents = 0; 42 | } 43 | 44 | int 45 | _lthread_poller_create(void) 46 | { 47 | return kqueue(); 48 | } 49 | 50 | inline int 51 | _lthread_poller_poll(struct timespec t) 52 | { 53 | struct lthread_sched *sched = lthread_get_sched(); 54 | return (kevent(sched->poller_fd, sched->changelist, sched->nevents, 55 | sched->eventlist, LT_MAX_EVENTS, &t)); 56 | } 57 | 58 | inline void 59 | _lthread_poller_ev_clear_rd(int fd) 60 | { 61 | struct kevent change; 62 | struct lthread_sched *sched = lthread_get_sched(); 63 | 64 | EV_SET(&change, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); 65 | assert(kevent(sched->poller_fd, &change, 1, NULL, 0, NULL) != -1); 66 | } 67 | 68 | inline void 69 | _lthread_poller_ev_clear_wr(int fd) 70 | { 71 | struct kevent change; 72 | struct lthread_sched *sched = lthread_get_sched(); 73 | 74 | EV_SET(&change, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); 75 | assert(kevent(sched->poller_fd, &change, 1, NULL, 0, NULL) != -1); 76 | } 77 | 78 | inline void 79 | _lthread_poller_ev_register_rd(int fd) 80 | { 81 | struct lthread_sched *sched = lthread_get_sched(); 82 | if (sched->nevents == LT_MAX_EVENTS) 83 | _lthread_poller_flush_events(); 84 | EV_SET(&sched->changelist[sched->nevents++], fd, EVFILT_READ, 85 | EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, sched->current_lthread); 86 | } 87 | 88 | inline void 89 | _lthread_poller_ev_register_wr(int fd) 90 | { 91 | struct lthread_sched *sched = lthread_get_sched(); 92 | if (sched->nevents == LT_MAX_EVENTS) 93 | _lthread_poller_flush_events(); 94 | EV_SET(&sched->changelist[sched->nevents++], fd, EVFILT_WRITE, 95 | EV_ADD | EV_ENABLE | 
EV_ONESHOT, 0, 0, sched->current_lthread); 96 | } 97 | 98 | inline void 99 | _lthread_poller_ev_register_trigger(void) 100 | { 101 | struct lthread_sched *sched = lthread_get_sched(); 102 | struct kevent change; 103 | struct kevent event; 104 | struct timespec tm = {0, 0}; 105 | 106 | EV_SET(&change, -1, EVFILT_USER, EV_ADD, 0, 0, 0); 107 | assert(kevent(sched->poller_fd, &change, 1, &event, 0, &tm) != -1); 108 | sched->eventfd = -1; 109 | } 110 | 111 | inline void 112 | _lthread_poller_ev_clear_trigger(void) 113 | { 114 | struct lthread_sched *sched = lthread_get_sched(); 115 | struct kevent change; 116 | struct kevent event; 117 | struct timespec tm = {0, 0}; 118 | 119 | EV_SET(&change, -1, EVFILT_USER, 0, EV_CLEAR, 0, 0); 120 | assert(kevent(sched->poller_fd, &change, 1, &event, 0, &tm) != -1); 121 | } 122 | 123 | inline void 124 | _lthread_poller_ev_trigger(struct lthread_sched *sched) 125 | { 126 | struct kevent change; 127 | struct kevent event; 128 | struct timespec tm = {0, 0}; 129 | 130 | EV_SET(&change, -1, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0); 131 | assert(kevent(sched->poller_fd, &change, 1, &event, 0, &tm) != -1); 132 | } 133 | 134 | inline int 135 | _lthread_poller_ev_get_fd(struct kevent *ev) 136 | { 137 | return ev->ident; 138 | } 139 | 140 | inline int 141 | _lthread_poller_ev_get_event(struct kevent *ev) 142 | { 143 | return ev->filter; 144 | } 145 | 146 | inline int 147 | _lthread_poller_ev_is_eof(struct kevent *ev) 148 | { 149 | return ev->flags & EV_EOF; 150 | } 151 | 152 | inline int 153 | _lthread_poller_ev_is_write(struct kevent *ev) 154 | { 155 | return ev->filter == EVFILT_WRITE; 156 | } 157 | 158 | inline int 159 | _lthread_poller_ev_is_read(struct kevent *ev) 160 | { 161 | return ev->filter == EVFILT_READ; 162 | } 163 | -------------------------------------------------------------------------------- /docs/socket.rst: -------------------------------------------------------------------------------- 1 | Socket 2 | ====== 3 | 4 | 
lthread_socket 5 | -------------- 6 | .. c:function:: int lthread_socket(int domain, int type, int protocol) 7 | 8 | Creates a new socket(2) and sets it to non-blocking. 9 | 10 | Parameters can be found in `man socket`. 11 | 12 | lthread_pipe 13 | ------------ 14 | .. c:function:: int lthread_pipe(int fildes[2]) 15 | 16 | lthread version of pipe(2), with socket set to non-blocking. 17 | 18 | lthread_accept 19 | -------------- 20 | .. c:function:: int lthread_accept(int fd, struct sockaddr *, socklen_t *) 21 | 22 | lthread version of accept(2). `man 2 accept` for more details. 23 | 24 | lthread_close 25 | ------------- 26 | .. c:function:: int lthread_close(int fd) 27 | 28 | lthread version of close(2). `man 2 close` for more details. 29 | 30 | lthread_connect 31 | --------------- 32 | .. c:function:: int lthread_connect(int fd, struct sockaddr *, socklen_t, uint64_t timeout) 33 | 34 | lthread version of connect(2) with additional timeout parameter. 35 | 36 | :return: new fd > 0 on success. 37 | :return: -1 on failure. 38 | :return: -2 on timeout. 39 | 40 | lthread_recv 41 | ------------ 42 | .. c:function:: ssize_t lthread_recv(int fd, void *buf, size_t buf_len, int flags, uint64_t timeout) 43 | 44 | lthread version of recv(2), with additional timeout parameter. 45 | 46 | :return: Returns number of bytes read, -1 on failure and -2 on timeout. 47 | 48 | lthread_read 49 | ------------ 50 | .. c:function:: ssize_t lthread_read(int fd, void *buf, size_t length, uint64_t timeout) 51 | 52 | lthread version of read(2), with additional timeout parameter. 53 | 54 | :return: Returns number of bytes read. 55 | :return: 0 if socket is closed. 56 | :return: -1 on failure. 57 | :return: -2 on timeout. 58 | 59 | 60 | lthread_readline 61 | ---------------- 62 | .. c:function:: ssize_t lthread_readline(int fd, char **buf, size_t max, uint64_t timeout) 63 | 64 | Keeps reading from fd until it hits a \\n or `max` bytes. 65 | 66 | :param int fd: file descriptor. 
67 | :param char **buf: Ptr->ptr that will contain the line read(must be freed). 68 | :param size_t max: Maximum number of bytes to read before finding \\n. 69 | :param timeout: Milliseconds to wait on reading before timing out. 70 | 71 | :return: Number of bytes read. 72 | :return: 0 if socket is closed. 73 | :return: -1 on failure. 74 | :return: -2 on timeout. 75 | 76 | lthread_recv_exact 77 | ------------------ 78 | .. c:function:: ssize_t lthread_recv_exact(int fd, void *buf, size_t buf_len, int flags, uint64_t timeout) 79 | 80 | Blocks until exact number of bytes are read. 81 | 82 | :return: Number of bytes read. 83 | :return: 0 if socket is closed. 84 | :return: -1 on failure. 85 | :return: -2 on timeout. 86 | 87 | 88 | lthread_read_exact 89 | ------------------ 90 | .. c:function:: ssize_t lthread_read_exact(int fd, void *buf, size_t length, uint64_t timeout) 91 | 92 | Blocks until exact number of bytes are read. 93 | 94 | :return: Number of bytes read. 95 | :return: 0 if socket is closed. 96 | :return: -1 on failure. 97 | :return: -2 on timeout. 98 | 99 | 100 | lthread_recvmsg 101 | --------------- 102 | .. c:function:: ssize_t lthread_recvmsg(int fd, struct msghdr *message, int flags, uint64_t timeout) 103 | 104 | lthread version of recvmsg(2). `man 2 recvmsg` for more details. 105 | 106 | :return: Returns number of bytes read, -1 on failure and -2 on timeout. 107 | 108 | lthread_recvfrom 109 | ---------------- 110 | .. c:function:: ssize_t lthread_recvfrom(int fd, void *buf, size_t length, int flags,\ 111 | struct sockaddr *address,\ 112 | socklen_t *address_len, uint64_t timeout) 113 | 114 | lthread version of recvfrom(2). `man 2 recvfrom` for more details. 115 | 116 | :return: Returns number of bytes read. 117 | :return: 0 if socket is closed. 118 | :return: -1 on failure. 119 | :return: -2 on timeout. 120 | 121 | 122 | lthread_send 123 | -------------- 124 | .. 
c:function:: ssize_t lthread_send(int fd, const void *buf, size_t buf_len, int flags) 125 | 126 | lthread version of send(2). `man 2 send` for more details. 127 | 128 | lthread_write 129 | ------------- 130 | .. c:function:: ssize_t lthread_write(int fd, const void *buf, size_t buf_len) 131 | 132 | lthread version of write(2). `man 2 write` for more details. 133 | 134 | lthread_sendmsg 135 | --------------- 136 | .. c:function:: ssize_t lthread_sendmsg(int fd, const struct msghdr *message, int flags) 137 | 138 | lthread version of sendmsg(2). `man 2 sendmsg` for more details. 139 | 140 | lthread_sendto 141 | -------------- 142 | .. c:function:: ssize_t lthread_sendto(int fd, const void *buf, size_t length,\ 143 | int flags, const struct sockaddr *dest_addr,\ 144 | socklen_t dest_len) 145 | 146 | lthread version of sendto(2). `man 2 sendto` for more details. 147 | 148 | lthread_writev 149 | -------------- 150 | .. c:function:: ssize_t lthread_writev(int fd, struct iovec *iov, int iovcnt) 151 | 152 | lthread version of writev(2). `man 2 writev` for more details. 153 | 154 | lthread_wait_read 155 | ----------------- 156 | .. c:function:: int lthread_wait_read(int fd, int timeout_ms) 157 | 158 | Waits for an fd to become readable. 159 | 160 | :return: 0 on success. 161 | :return: -2 on timeout. 162 | 163 | 164 | lthread_wait_write 165 | ------------------ 166 | .. c:function:: int lthread_wait_write(int fd, int timeout_ms) 167 | 168 | Waits for an fd to become writable. 169 | 170 | :return: 0 on success. 171 | :return: -2 on timeout. 172 | 173 | lthread_poll 174 | ------------ 175 | .. c:function:: int lthread_poll(struct pollfd *fds, nfds_t nfds, int timeout) 176 | 177 | Lthread version of poll(2) 178 | 179 | :return: -1 on poll(2) error (when timeout == 0). 180 | :return: 0 on timeout. 181 | :return: > 0 to indicate the # of fds returned. 182 | 183 | .. note:: If timeout == 0, poll(2) is called directly and the lthread never goes to sleep. 
184 | -------------------------------------------------------------------------------- /src/lthread_io.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Lthread 3 | * Copyright (C) 2012, Hasan Alayli 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 | * SUCH DAMAGE. 
25 | * 26 | * lthread_io.c 27 | */ 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include "lthread_int.h" 34 | 35 | #define IO_WORKERS 2 36 | 37 | static void _lthread_io_add(struct lthread *lt); 38 | static void *_lthread_io_worker(void *arg); 39 | 40 | static pthread_once_t key_once = PTHREAD_ONCE_INIT; 41 | 42 | struct lthread_io_worker { 43 | struct lthread_q lthreads; 44 | pthread_mutex_t run_mutex; 45 | pthread_cond_t run_mutex_cond; 46 | pthread_mutex_t lthreads_mutex; 47 | }; 48 | 49 | static struct lthread_io_worker io_workers[IO_WORKERS]; 50 | 51 | static void 52 | once_routine(void) 53 | { 54 | pthread_t pthread; 55 | struct lthread_io_worker *io_worker = NULL; 56 | int i = 0; 57 | 58 | for (i = 0; i < IO_WORKERS; i++) { 59 | io_worker = &io_workers[i]; 60 | 61 | assert(pthread_mutex_init(&io_worker->lthreads_mutex, NULL) == 0); 62 | assert(pthread_mutex_init(&io_worker->run_mutex, NULL) == 0); 63 | assert(pthread_create(&pthread, 64 | NULL, _lthread_io_worker, io_worker) == 0); 65 | TAILQ_INIT(&io_worker->lthreads); 66 | 67 | } 68 | } 69 | 70 | void 71 | _lthread_io_worker_init() 72 | { 73 | assert(pthread_once(&key_once, once_routine) == 0); 74 | } 75 | 76 | static void * 77 | _lthread_io_worker(void *arg) 78 | { 79 | struct lthread_io_worker *io_worker = arg; 80 | struct lthread *lt = NULL; 81 | 82 | assert(pthread_once(&key_once, once_routine) == 0); 83 | 84 | while (1) { 85 | 86 | while (1) { 87 | assert(pthread_mutex_lock(&io_worker->lthreads_mutex) == 0); 88 | 89 | /* we have no work to do, break and wait */ 90 | if (TAILQ_EMPTY(&io_worker->lthreads)) { 91 | assert(pthread_mutex_unlock(&io_worker->lthreads_mutex) == 0); 92 | break; 93 | } 94 | 95 | lt = TAILQ_FIRST(&io_worker->lthreads); 96 | TAILQ_REMOVE(&io_worker->lthreads, lt, io_next); 97 | 98 | assert(pthread_mutex_unlock(&io_worker->lthreads_mutex) == 0); 99 | 100 | if (lt->state & BIT(LT_ST_WAIT_IO_READ)) { 101 | lt->io.ret = read(lt->io.fd, lt->io.buf, lt->io.nbytes); 
102 | lt->io.err = (lt->io.ret == -1) ? errno : 0; 103 | } else if (lt->state & BIT(LT_ST_WAIT_IO_WRITE)) { 104 | lt->io.ret = write(lt->io.fd, lt->io.buf, lt->io.nbytes); 105 | lt->io.err = (lt->io.ret == -1) ? errno : 0; 106 | } else 107 | assert(0); 108 | 109 | /* resume it back on the prev scheduler */ 110 | assert(pthread_mutex_lock(<->sched->defer_mutex) == 0); 111 | TAILQ_INSERT_TAIL(<->sched->defer, lt, defer_next); 112 | assert(pthread_mutex_unlock(<->sched->defer_mutex) == 0); 113 | 114 | /* signal the prev scheduler in case it was sleeping in a poll */ 115 | _lthread_poller_ev_trigger(lt->sched); 116 | } 117 | 118 | assert(pthread_mutex_lock(&io_worker->run_mutex) == 0); 119 | pthread_cond_wait(&io_worker->run_mutex_cond, 120 | &io_worker->run_mutex); 121 | assert(pthread_mutex_unlock(&io_worker->run_mutex) == 0); 122 | 123 | } 124 | 125 | } 126 | 127 | static void 128 | _lthread_io_add(struct lthread *lt) 129 | { 130 | static uint32_t io_selector = 0; 131 | struct lthread_io_worker *io_worker = &io_workers[io_selector++]; 132 | io_selector = io_selector % IO_WORKERS; 133 | 134 | LIST_INSERT_HEAD(<->sched->busy, lt, busy_next); 135 | 136 | assert(pthread_mutex_lock(&io_worker->lthreads_mutex) == 0); 137 | TAILQ_INSERT_TAIL(&io_worker->lthreads, lt, io_next); 138 | assert(pthread_mutex_unlock(&io_worker->lthreads_mutex) == 0); 139 | 140 | /* wakeup pthread if it was sleeping */ 141 | assert(pthread_mutex_lock(&io_worker->run_mutex) == 0); 142 | assert(pthread_cond_signal(&io_worker->run_mutex_cond) == 0); 143 | assert(pthread_mutex_unlock(&io_worker->run_mutex) == 0); 144 | 145 | _lthread_yield(lt); 146 | 147 | /* restore errno we got from io worker, if any */ 148 | if (lt->io.ret == -1) 149 | errno = lt->io.err; 150 | } 151 | 152 | ssize_t 153 | lthread_io_read(int fd, void *buf, size_t nbytes) 154 | { 155 | struct lthread *lt = lthread_get_sched()->current_lthread; 156 | lt->state |= BIT(LT_ST_WAIT_IO_READ); 157 | lt->io.buf = buf; 158 | lt->io.fd = fd; 
159 | lt->io.nbytes = nbytes; 160 | 161 | _lthread_io_add(lt); 162 | lt->state &= CLEARBIT(LT_ST_WAIT_IO_READ); 163 | 164 | return (lt->io.ret); 165 | } 166 | 167 | ssize_t 168 | lthread_io_write(int fd, void *buf, size_t nbytes) 169 | { 170 | struct lthread *lt = lthread_get_sched()->current_lthread; 171 | lt->state |= BIT(LT_ST_WAIT_IO_WRITE); 172 | lt->io.buf = buf; 173 | lt->io.nbytes = nbytes; 174 | lt->io.fd = fd; 175 | 176 | _lthread_io_add(lt); 177 | lt->state &= CLEARBIT(LT_ST_WAIT_IO_WRITE); 178 | 179 | return (lt->io.ret); 180 | } 181 | -------------------------------------------------------------------------------- /docs/lthread.rst: -------------------------------------------------------------------------------- 1 | Lthread 2 | ======= 3 | 4 | lthread_create 5 | -------------- 6 | .. c:function:: int thread_create(lthread_t **new_lt, lthread_func func, void *arg) 7 | 8 | Creates a new lthread. 9 | 10 | :param lthread_t** new_lt: a ptr->ptr to store the new lthread structure on 11 | success 12 | :param lthread_func func: Function to run in an lthread. 13 | :param void* arg: Argument to pass to `func` when called. 14 | 15 | :return: 0 on success with new_lt pointing to the new lthread. 16 | :return: -1 on failure with `errno` specifying the reason. 17 | 18 | .. c:type:: lthread_func: void (*)(void*) 19 | 20 | lthread_sleep 21 | -------------- 22 | .. c:function:: void lthread_sleep(uint64_t msecs) 23 | 24 | Causes an lthread to sleep for `msecs` milliseconds. 25 | 26 | :param uint64_t msecs: Number of milliseconds to sleep. `msecs=0` causes the 27 | lthread to yield and allow other lthreads to resume 28 | before it continues. 29 | 30 | 31 | lthread_cancel 32 | -------------- 33 | .. c:function:: void lthread_cancel(lthread_t *lt) 34 | 35 | Cancels lthread and prepares it to be removed from lthread scheduler. If it 36 | was waiting for events, the events will get cancelled. 
If an lthread was 37 | joining on it, the lthread joining will get scheduled to run. 38 | 39 | :param lthread_t* lt: lthread to cancel 40 | 41 | lthread_run 42 | ----------- 43 | .. c:function:: void lthread_run(void) 44 | 45 | Runs lthread scheduler until all lthreads return. 46 | 47 | lthread_join 48 | ------------ 49 | .. c:function:: int lthread_join(lthread_t *lt, void **ptr, uint64_t timeout) 50 | 51 | Blocks the calling lthread until lt has exited or a timeout occurred. In 52 | case of timeout, lthread_join returns -2 and lt doesn't get freed. If target 53 | lthread was cancelled, it returns -1 and the target lthread will be freed. 54 | \*\*ptr will get populated by lthread_exit(). ptr cannot be from lthread's 55 | stack space. Joining on a joined lthread has undefined behavior. 56 | 57 | :param lthread_t* lt: lthread to join on. 58 | :param void** ptr: optional, this ptr will be populated by :c:func:`lthread_exit()`. 59 | :param uint64_t timeout: How long to wait trying to join on lt before timing out. 60 | 61 | :return: 0 on success. 62 | :return: -1 if target lthread got cancelled. 63 | :return: -2 on timeout. 64 | 65 | .. ATTENTION:: Joining on a joined lthread has undefined behavior 66 | 67 | lthread_detach 68 | -------------- 69 | .. c:function:: void lthread_detach(void) 70 | 71 | Marks the current lthread as detached, causing it to get freed once it exits. 72 | Otherwise :c:func:`lthread_join()` must be called on the lthread to free it 73 | up. If an lthread wasn't marked as detached and wasn't joined on then 74 | a memory leak occurs. 75 | 76 | lthread_detach2 77 | ---------------- 78 | .. c:function:: void lthread_detach2(lthread_t *lt) 79 | 80 | Same as :c:func:`lthread_detach()` except that it doesn't have to be called 81 | from within the lthread function. The lthread to detach is passed as a param. 82 | 83 | :param lthread_t* lt: Lthread to detach. 84 | 85 | 86 | lthread_exit 87 | ------------ 88 | .. 
:return: 0 if it was signaled.
140 | 141 | 142 | lthread_cond_broadcast 143 | ---------------------- 144 | .. c:function:: void lthread_cond_broadcast(lthread_cond_t *c) 145 | 146 | Signals all lthreads blocked on :c:func:`lthread_cond_wait()` to wake up and resume. 147 | 148 | :param lthread_cond_t* c: condition variable created by :c:func:`lthread_cond_create()` 149 | and shared between lthreads requiring synchronization. 150 | 151 | lthread_set_data 152 | ---------------- 153 | .. c:function:: void lthread_set_data(void *data) 154 | 155 | Sets data bound to the lthread. This value can be retrieved anywhere in 156 | the lthread using :c:func:`lthread_get_data()`. 157 | 158 | :param void* data: value to be set. 159 | 160 | 161 | lthread_get_data 162 | ---------------- 163 | .. c:function:: void *lthread_get_data(void) 164 | 165 | Returns the value set for the current lthread. 166 | 167 | :return: Value set by :c:func:`lthread_set_data()` 168 | 169 | 170 | lthread_current 171 | --------------- 172 | .. c:function:: lthread_t *lthread_current() 173 | 174 | Returns a pointer to the current lthread. 175 | 176 | :return: ptr to the current lthread running. 177 | 178 | 179 | lthread_compute_begin 180 | --------------------- 181 | .. c:function:: int lthread_compute_begin(void) 182 | 183 | Resumes lthread inside a pthread to run expensive computations or make a 184 | blocking call like `gethostbyname()`. This call *must* be followed by 185 | :c:func:`lthread_compute_end()` after the computation and/or blocking calls 186 | statements have been made, to resume the lthread in its original lthread scheduler. 187 | No lthread_* calls can be made during the 2 calls. 188 | 189 | :return: 0 on success. 190 | :return: -1 if lthread failed to resume it in a pthread. 191 | 192 | lthread_compute_end 193 | ------------------- 194 | .. c:function:: void lthread_compute_end(void) 195 | 196 | Moves lthread from pthread back to the lthread scheduler it was running on. 
197 | 198 | DEFINE_LTHREAD 199 | -------------- 200 | 201 | .. c:macro:: DEFINE_LTHREAD(name) 202 | 203 | Sets the name of the function inside the lthread structure for easier 204 | crash debugging. Must be called inside the lthread. 205 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
@echo "Please use \`make <target>' where <target> is one of"
59 | 60 | dirhtml: 61 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 62 | @echo 63 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 64 | 65 | singlehtml: 66 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 67 | @echo 68 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 69 | 70 | pickle: 71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 72 | @echo 73 | @echo "Build finished; now you can process the pickle files." 74 | 75 | json: 76 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 77 | @echo 78 | @echo "Build finished; now you can process the JSON files." 79 | 80 | htmlhelp: 81 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 82 | @echo 83 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 84 | ".hhp project file in $(BUILDDIR)/htmlhelp." 85 | 86 | qthelp: 87 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 88 | @echo 89 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 90 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 91 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Lthread.qhcp" 92 | @echo "To view the help file:" 93 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Lthread.qhc" 94 | 95 | devhelp: 96 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 97 | @echo 98 | @echo "Build finished." 99 | @echo "To view the help file:" 100 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Lthread" 101 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Lthread" 102 | @echo "# devhelp" 103 | 104 | epub: 105 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 106 | @echo 107 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 108 | 109 | latex: 110 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 111 | @echo 112 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 
113 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 114 | "(use \`make latexpdf' here to do that automatically)." 115 | 116 | latexpdf: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo "Running LaTeX files through pdflatex..." 119 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 120 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 121 | 122 | latexpdfja: 123 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 124 | @echo "Running LaTeX files through platex and dvipdfmx..." 125 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 126 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 127 | 128 | text: 129 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 130 | @echo 131 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 132 | 133 | man: 134 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 135 | @echo 136 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 137 | 138 | texinfo: 139 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 140 | @echo 141 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 142 | @echo "Run \`make' in that directory to run these through makeinfo" \ 143 | "(use \`make info' here to do that automatically)." 144 | 145 | info: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo "Running Texinfo files through makeinfo..." 148 | make -C $(BUILDDIR)/texinfo info 149 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 150 | 151 | gettext: 152 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 153 | @echo 154 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 155 | 156 | changes: 157 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 158 | @echo 159 | @echo "The overview file is in $(BUILDDIR)/changes." 
160 | 161 | linkcheck: 162 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 163 | @echo 164 | @echo "Link check complete; look for any errors in the above output " \ 165 | "or in $(BUILDDIR)/linkcheck/output.txt." 166 | 167 | doctest: 168 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 169 | @echo "Testing of doctests in the sources finished, look at the " \ 170 | "results in $(BUILDDIR)/doctest/output.txt." 171 | 172 | coverage: 173 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 174 | @echo "Testing of coverage in the sources finished, look at the " \ 175 | "results in $(BUILDDIR)/coverage/python.txt." 176 | 177 | xml: 178 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 179 | @echo 180 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 181 | 182 | pseudoxml: 183 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 184 | @echo 185 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 186 | -------------------------------------------------------------------------------- /src/lthread_int.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Lthread 3 | * Copyright (C) 2012, Hasan Alayli 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 
13 | * 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 | * SUCH DAMAGE. 25 | * 26 | * lthread_int.c 27 | */ 28 | 29 | 30 | #ifndef LTHREAD_INT_H 31 | #define LTHREAD_INT_H 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | 39 | #include "lthread_poller.h" 40 | #include "queue.h" 41 | #include "tree.h" 42 | 43 | #define LT_MAX_EVENTS (1024) 44 | #define MAX_STACK_SIZE (128*1024) /* 128k */ 45 | 46 | #define BIT(x) (1 << (x)) 47 | #define CLEARBIT(x) ~(1 << (x)) 48 | 49 | struct lthread; 50 | struct lthread_sched; 51 | struct lthread_compute_sched; 52 | struct lthread_io_sched; 53 | struct lthread_cond; 54 | 55 | LIST_HEAD(lthread_l, lthread); 56 | TAILQ_HEAD(lthread_q, lthread); 57 | 58 | typedef void (*lthread_func)(void *); 59 | 60 | struct cpu_ctx { 61 | void *esp; 62 | void *ebp; 63 | void *eip; 64 | void *edi; 65 | void *esi; 66 | void *ebx; 67 | void *r1; 68 | void *r2; 69 | void *r3; 70 | void *r4; 71 | void *r5; 72 | }; 73 | 74 | enum lthread_event { 75 | LT_EV_READ, 76 | LT_EV_WRITE 77 | }; 78 | 79 | enum lthread_compute_st { 80 | LT_COMPUTE_BUSY, 81 | LT_COMPUTE_FREE, 82 | }; 83 | 84 | enum lthread_st { 85 | LT_ST_WAIT_READ, /* lthread waiting for READ on socket */ 86 | LT_ST_WAIT_WRITE, /* lthread 
waiting for WRITE on socket */ 87 | LT_ST_NEW, /* lthread spawned but needs initialization */ 88 | LT_ST_READY, /* lthread is ready to run */ 89 | LT_ST_EXITED, /* lthread has exited and needs cleanup */ 90 | LT_ST_BUSY, /* lthread is waiting on join/cond/compute/io */ 91 | LT_ST_SLEEPING, /* lthread is sleeping */ 92 | LT_ST_EXPIRED, /* lthread has expired and needs to run */ 93 | LT_ST_FDEOF, /* lthread socket has shut down */ 94 | LT_ST_DETACH, /* lthread frees when done, else it waits to join */ 95 | LT_ST_CANCELLED, /* lthread has been cancelled */ 96 | LT_ST_PENDING_RUNCOMPUTE, /* lthread needs to run in compute sched, step1 */ 97 | LT_ST_RUNCOMPUTE, /* lthread needs to run in compute sched (2), step2 */ 98 | LT_ST_WAIT_IO_READ, /* lthread waiting for READ IO to finish */ 99 | LT_ST_WAIT_IO_WRITE,/* lthread waiting for WRITE IO to finish */ 100 | LT_ST_WAIT_MULTI /* lthread waiting on multiple fds */ 101 | }; 102 | 103 | struct lthread { 104 | struct cpu_ctx ctx; /* cpu ctx info */ 105 | lthread_func fun; /* func lthread is running */ 106 | void *arg; /* func args passed to func */ 107 | void *data; /* user ptr attached to lthread */ 108 | size_t stack_size; /* current stack_size */ 109 | size_t last_stack_size; /* last yield stack_size */ 110 | enum lthread_st state; /* current lthread state */ 111 | struct lthread_sched *sched; /* scheduler lthread belongs to */ 112 | uint64_t birth; /* time lthread was born */ 113 | uint64_t id; /* lthread id */ 114 | int64_t fd_wait; /* fd we are waiting on */ 115 | char funcname[64]; /* optional func name */ 116 | struct lthread *lt_join; /* lthread we want to join on */ 117 | void **lt_exit_ptr; /* exit ptr for lthread_join */ 118 | void *stack; /* ptr to lthread_stack */ 119 | void *ebp; /* saved for compute sched */ 120 | uint32_t ops; /* num of ops since yield */ 121 | uint64_t sleep_usecs; /* how long lthread is sleeping */ 122 | RB_ENTRY(lthread) sleep_node; /* sleep tree node pointer */ 123 | RB_ENTRY(lthread) 
/* lthread_compute scheduler - when running in compute block */
struct lthread_sched;

extern pthread_key_t lthread_sched_key;
void print_timestamp(char *);

/* Returns the scheduler bound to the calling pthread (NULL if none). */
static inline struct lthread_sched*
lthread_get_sched(void)
{
    return pthread_getspecific(lthread_sched_key);
}

/* Microseconds elapsed between two _lthread_usec_now() samples. */
static inline uint64_t
_lthread_diff_usecs(uint64_t t1, uint64_t t2)
{
    return (t2 - t1);
}

/*
 * Current wall-clock time in microseconds.
 * tv_sec is widened to uint64_t before scaling: time_t * int is done in
 * the native width and overflows on platforms with a 32-bit time_t.
 */
static inline uint64_t
_lthread_usec_now(void)
{
    struct timeval t1 = {0, 0};
    gettimeofday(&t1, NULL);
    return (((uint64_t)t1.tv_sec * 1000000) + t1.tv_usec);
}
5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | 18 | # If extensions (or modules to document with autodoc) are in another directory, 19 | # add these directories to sys.path here. If the directory is relative to the 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. 21 | #sys.path.insert(0, os.path.abspath('.')) 22 | 23 | # -- General configuration ------------------------------------------------ 24 | 25 | # If your documentation needs a minimal Sphinx version, state it here. 26 | #needs_sphinx = '1.0' 27 | 28 | # Add any Sphinx extension module names here, as strings. They can be 29 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 30 | # ones. 31 | extensions = [] 32 | 33 | # Add any paths that contain templates here, relative to this directory. 34 | templates_path = ['_templates'] 35 | 36 | # The suffix of source filenames. 37 | source_suffix = '.rst' 38 | 39 | # The encoding of source files. 40 | #source_encoding = 'utf-8-sig' 41 | 42 | # The master toctree document. 43 | master_doc = 'index' 44 | 45 | # General information about the project. 46 | project = u'Lthread' 47 | copyright = u'2014, Hasan Alayli' 48 | 49 | # The version info for the project you're documenting, acts as replacement for 50 | # |version| and |release|, also used in various other places throughout the 51 | # built documents. 52 | # 53 | # The short X.Y version. 54 | version = '1.0' 55 | # The full version, including alpha/beta/rc tags. 56 | release = '1.0' 57 | 58 | # The language for content autogenerated by Sphinx. Refer to documentation 59 | # for a list of supported languages. 
60 | #language = None 61 | 62 | # There are two options for replacing |today|: either, you set today to some 63 | # non-false value, then it is used: 64 | #today = '' 65 | # Else, today_fmt is used as the format for a strftime call. 66 | #today_fmt = '%B %d, %Y' 67 | 68 | # List of patterns, relative to source directory, that match files and 69 | # directories to ignore when looking for source files. 70 | exclude_patterns = [] 71 | 72 | # The reST default role (used for this markup: `text`) to use for all 73 | # documents. 74 | #default_role = None 75 | 76 | # If true, '()' will be appended to :func: etc. cross-reference text. 77 | #add_function_parentheses = True 78 | 79 | # If true, the current module name will be prepended to all description 80 | # unit titles (such as .. function::). 81 | #add_module_names = True 82 | 83 | # If true, sectionauthor and moduleauthor directives will be shown in the 84 | # output. They are ignored by default. 85 | #show_authors = False 86 | 87 | # The name of the Pygments (syntax highlighting) style to use. 88 | pygments_style = 'sphinx' 89 | 90 | # A list of ignored prefixes for module index sorting. 91 | #modindex_common_prefix = [] 92 | 93 | # If true, keep warnings as "system message" paragraphs in the built documents. 94 | #keep_warnings = False 95 | 96 | 97 | # -- Options for HTML output ---------------------------------------------- 98 | 99 | # The theme to use for HTML and HTML Help pages. See the documentation for 100 | # a list of builtin themes. 101 | html_theme = 'default' 102 | 103 | # Theme options are theme-specific and customize the look and feel of a theme 104 | # further. For a list of options available for each theme, see the 105 | # documentation. 106 | #html_theme_options = {} 107 | 108 | # Add any paths that contain custom themes here, relative to this directory. 109 | #html_theme_path = [] 110 | 111 | # The name for this set of Sphinx documents. If None, it defaults to 112 | # " v documentation". 
113 | #html_title = None 114 | 115 | # A shorter title for the navigation bar. Default is the same as html_title. 116 | #html_short_title = None 117 | 118 | # The name of an image file (relative to this directory) to place at the top 119 | # of the sidebar. 120 | #html_logo = None 121 | 122 | # The name of an image file (within the static path) to use as favicon of the 123 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 124 | # pixels large. 125 | #html_favicon = None 126 | 127 | # Add any paths that contain custom static files (such as style sheets) here, 128 | # relative to this directory. They are copied after the builtin static files, 129 | # so a file named "default.css" will overwrite the builtin "default.css". 130 | html_static_path = ['_static'] 131 | 132 | # Add any extra paths that contain custom files (such as robots.txt or 133 | # .htaccess) here, relative to this directory. These files are copied 134 | # directly to the root of the documentation. 135 | #html_extra_path = [] 136 | 137 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 138 | # using the given strftime format. 139 | #html_last_updated_fmt = '%b %d, %Y' 140 | 141 | # If true, SmartyPants will be used to convert quotes and dashes to 142 | # typographically correct entities. 143 | #html_use_smartypants = True 144 | 145 | # Custom sidebar templates, maps document names to template names. 146 | #html_sidebars = {} 147 | 148 | # Additional templates that should be rendered to pages, maps page names to 149 | # template names. 150 | #html_additional_pages = {} 151 | 152 | # If false, no module index is generated. 153 | #html_domain_indices = True 154 | 155 | # If false, no index is generated. 156 | #html_use_index = True 157 | 158 | # If true, the index is split into individual pages for each letter. 159 | #html_split_index = False 160 | 161 | # If true, links to the reST sources are added to the pages. 
162 | #html_show_sourcelink = True 163 | 164 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 165 | #html_show_sphinx = True 166 | 167 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 168 | #html_show_copyright = True 169 | 170 | # If true, an OpenSearch description file will be output, and all pages will 171 | # contain a tag referring to it. The value of this option must be the 172 | # base URL from which the finished HTML is served. 173 | #html_use_opensearch = '' 174 | 175 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 176 | #html_file_suffix = None 177 | 178 | # Output file base name for HTML help builder. 179 | htmlhelp_basename = 'Lthreaddoc' 180 | 181 | 182 | # -- Options for LaTeX output --------------------------------------------- 183 | 184 | latex_elements = { 185 | # The paper size ('letterpaper' or 'a4paper'). 186 | #'papersize': 'letterpaper', 187 | 188 | # The font size ('10pt', '11pt' or '12pt'). 189 | #'pointsize': '10pt', 190 | 191 | # Additional stuff for the LaTeX preamble. 192 | #'preamble': '', 193 | } 194 | 195 | # Grouping the document tree into LaTeX files. List of tuples 196 | # (source start file, target name, title, 197 | # author, documentclass [howto, manual, or own class]). 198 | latex_documents = [ 199 | ('index', 'Lthread.tex', u'Lthread Documentation', 200 | u'Hasan Alayli', 'manual'), 201 | ] 202 | 203 | # The name of an image file (relative to this directory) to place at the top of 204 | # the title page. 205 | #latex_logo = None 206 | 207 | # For "manual" documents, if this is true, then toplevel headings are parts, 208 | # not chapters. 209 | #latex_use_parts = False 210 | 211 | # If true, show page references after internal links. 212 | #latex_show_pagerefs = False 213 | 214 | # If true, show URL addresses after external links. 215 | #latex_show_urls = False 216 | 217 | # Documents to append as an appendix to all manuals. 
218 | #latex_appendices = [] 219 | 220 | # If false, no module index is generated. 221 | #latex_domain_indices = True 222 | 223 | 224 | # -- Options for manual page output --------------------------------------- 225 | 226 | # One entry per manual page. List of tuples 227 | # (source start file, name, description, authors, manual section). 228 | man_pages = [ 229 | ('index', 'lthread', u'Lthread Documentation', 230 | [u'Hasan Alayli'], 1) 231 | ] 232 | 233 | # If true, show URL addresses after external links. 234 | #man_show_urls = False 235 | 236 | 237 | # -- Options for Texinfo output ------------------------------------------- 238 | 239 | # Grouping the document tree into Texinfo files. List of tuples 240 | # (source start file, target name, title, author, 241 | # dir menu entry, description, category) 242 | texinfo_documents = [ 243 | ('index', 'Lthread', u'Lthread Documentation', 244 | u'Hasan Alayli', 'Lthread', 'One line description of project.', 245 | 'Miscellaneous'), 246 | ] 247 | 248 | # Documents to append as an appendix to all manuals. 249 | #texinfo_appendices = [] 250 | 251 | # If false, no module index is generated. 252 | #texinfo_domain_indices = True 253 | 254 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 255 | #texinfo_show_urls = 'footnote' 256 | 257 | # If true, do not generate a @detailmenu in the "Top" node's menu. 258 | #texinfo_no_detailmenu = False 259 | # If true, do not generate a @detailmenu in the "Top" node's menu. 
260 | #texinfo_no_detailmenu = False 261 | on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 262 | if not on_rtd: # only import and set the theme if we're building docs locally 263 | import sphinx_rtd_theme 264 | html_theme = 'sphinx_rtd_theme' 265 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 266 | 267 | -------------------------------------------------------------------------------- /src/lthread_compute.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Lthread 3 | * Copyright (C) 2012, Hasan Alayli 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 | * SUCH DAMAGE. 
25 | * 26 | * lthread_compute.c 27 | */ 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | 40 | #include "lthread_int.h" 41 | 42 | enum {THREAD_TIMEOUT_BEFORE_EXIT = 60}; 43 | static pthread_key_t compute_sched_key; 44 | static pthread_once_t key_once = PTHREAD_ONCE_INIT; 45 | 46 | LIST_HEAD(compute_sched_l, lthread_compute_sched) compute_scheds = \ 47 | LIST_HEAD_INITIALIZER(compute_scheds); 48 | pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER; 49 | 50 | static void* _lthread_compute_run(void *arg); 51 | static void _lthread_compute_resume(struct lthread *lt); 52 | static struct lthread_compute_sched* _lthread_compute_sched_create(void); 53 | static void _lthread_compute_sched_free( 54 | struct lthread_compute_sched *compute_sched); 55 | 56 | struct lthread_compute_sched { 57 | struct cpu_ctx ctx; 58 | struct lthread_q lthreads; 59 | struct lthread *current_lthread; 60 | pthread_mutex_t run_mutex; 61 | pthread_cond_t run_mutex_cond; 62 | pthread_mutex_t lthreads_mutex; 63 | LIST_ENTRY(lthread_compute_sched) compute_next; 64 | enum lthread_compute_st compute_st; 65 | }; 66 | 67 | int 68 | lthread_compute_begin(void) 69 | { 70 | struct lthread_sched *sched = lthread_get_sched(); 71 | struct lthread_compute_sched *compute_sched = NULL, *tmp = NULL; 72 | struct lthread *lt = sched->current_lthread; 73 | 74 | /* search for an empty compute_scheduler */ 75 | assert(pthread_mutex_lock(&sched_mutex) == 0); 76 | LIST_FOREACH(tmp, &compute_scheds, compute_next) { 77 | if (tmp->compute_st == LT_COMPUTE_FREE) { 78 | compute_sched = tmp; 79 | break; 80 | } 81 | } 82 | 83 | /* create schedule if there is no scheduler available */ 84 | if (compute_sched == NULL) { 85 | if ((compute_sched = _lthread_compute_sched_create()) == NULL) { 86 | /* we failed to create a scheduler. Use the first scheduler 87 | * in the list, otherwise return failure. 
88 | */ 89 | compute_sched = LIST_FIRST(&compute_scheds); 90 | if (compute_sched == NULL) { 91 | assert(pthread_mutex_unlock(&sched_mutex) == 0); 92 | return -1; 93 | } 94 | } else { 95 | LIST_INSERT_HEAD(&compute_scheds, compute_sched, compute_next); 96 | } 97 | } 98 | 99 | lt->compute_sched = compute_sched; 100 | 101 | lt->state |= BIT(LT_ST_PENDING_RUNCOMPUTE); 102 | assert(pthread_mutex_lock(<->compute_sched->lthreads_mutex) == 0); 103 | TAILQ_INSERT_TAIL(<->compute_sched->lthreads, lt, compute_next); 104 | assert(pthread_mutex_unlock(<->compute_sched->lthreads_mutex) == 0); 105 | 106 | assert(pthread_mutex_unlock(&sched_mutex) == 0); 107 | 108 | /* yield function in scheduler to allow other lthreads to run while 109 | * this lthread runs in a pthread for expensive computations. 110 | */ 111 | _switch(<->sched->ctx, <->ctx); 112 | 113 | return (0); 114 | } 115 | 116 | void 117 | lthread_compute_end(void) 118 | { 119 | /* get current compute scheduler */ 120 | struct lthread_compute_sched *compute_sched = 121 | pthread_getspecific(compute_sched_key); 122 | struct lthread *lt = compute_sched->current_lthread; 123 | assert(compute_sched != NULL); 124 | _switch(&compute_sched->ctx, <->ctx); 125 | } 126 | 127 | void 128 | _lthread_compute_add(struct lthread *lt) 129 | { 130 | 131 | LIST_INSERT_HEAD(<->sched->busy, lt, busy_next); 132 | /* 133 | * lthread is in scheduler list at this point. lock mutex to change 134 | * state since the state is checked in scheduler as well. 
135 | */ 136 | assert(pthread_mutex_lock(<->compute_sched->lthreads_mutex) == 0); 137 | lt->state &= CLEARBIT(LT_ST_PENDING_RUNCOMPUTE); 138 | lt->state |= BIT(LT_ST_RUNCOMPUTE); 139 | assert(pthread_mutex_unlock(<->compute_sched->lthreads_mutex) == 0); 140 | 141 | /* wakeup pthread if it was sleeping */ 142 | assert(pthread_mutex_lock(<->compute_sched->run_mutex) == 0); 143 | assert(pthread_cond_signal(<->compute_sched->run_mutex_cond) == 0); 144 | assert(pthread_mutex_unlock(<->compute_sched->run_mutex) == 0); 145 | 146 | } 147 | 148 | static void 149 | _lthread_compute_sched_free(struct lthread_compute_sched *compute_sched) 150 | { 151 | assert(pthread_mutex_destroy(&compute_sched->run_mutex) == 0); 152 | assert(pthread_mutex_destroy(&compute_sched->lthreads_mutex) == 0); 153 | assert(pthread_cond_destroy(&compute_sched->run_mutex_cond) == 0); 154 | free(compute_sched); 155 | } 156 | 157 | static struct lthread_compute_sched* 158 | _lthread_compute_sched_create(void) 159 | { 160 | struct lthread_compute_sched *compute_sched = NULL; 161 | pthread_t pthread; 162 | 163 | if ((compute_sched = calloc(1, 164 | sizeof(struct lthread_compute_sched))) == NULL) 165 | return NULL; 166 | 167 | if (pthread_mutex_init(&compute_sched->run_mutex, NULL) != 0 || 168 | pthread_mutex_init(&compute_sched->lthreads_mutex, NULL) != 0 || 169 | pthread_cond_init(&compute_sched->run_mutex_cond, NULL) != 0) { 170 | free(compute_sched); 171 | return NULL; 172 | } 173 | 174 | if (pthread_create(&pthread, 175 | NULL, _lthread_compute_run, compute_sched) != 0) { 176 | _lthread_compute_sched_free(compute_sched); 177 | return NULL; 178 | } 179 | assert(pthread_detach(pthread) == 0); 180 | 181 | TAILQ_INIT(&compute_sched->lthreads); 182 | 183 | return compute_sched; 184 | } 185 | 186 | static void 187 | _lthread_compute_resume(struct lthread *lt) 188 | { 189 | _switch(<->ctx, <->compute_sched->ctx); 190 | } 191 | 192 | static void 193 | once_routine(void) 194 | { 195 | 
assert(pthread_key_create(&compute_sched_key, NULL) == 0); 196 | } 197 | 198 | static void* 199 | _lthread_compute_run(void *arg) 200 | { 201 | struct lthread_compute_sched *compute_sched = arg; 202 | struct lthread *lt = NULL; 203 | struct timespec timeout; 204 | int status = 0; 205 | int ret = 0; 206 | (void)ret; /* silence compiler */ 207 | 208 | assert(pthread_once(&key_once, once_routine) == 0); 209 | assert(pthread_setspecific(compute_sched_key, arg) == 0); 210 | 211 | while (1) { 212 | 213 | /* resume lthreads to run their computation or make a blocking call */ 214 | while (1) { 215 | assert(pthread_mutex_lock(&compute_sched->lthreads_mutex) == 0); 216 | 217 | /* we have no work to do, break and wait 60 secs then exit */ 218 | if (TAILQ_EMPTY(&compute_sched->lthreads)) { 219 | assert(pthread_mutex_unlock( 220 | &compute_sched->lthreads_mutex) == 0); 221 | break; 222 | } 223 | 224 | lt = TAILQ_FIRST(&compute_sched->lthreads); 225 | if (lt->state & BIT(LT_ST_PENDING_RUNCOMPUTE)) { 226 | assert(pthread_mutex_unlock( 227 | &compute_sched->lthreads_mutex) == 0); 228 | continue; 229 | } 230 | 231 | TAILQ_REMOVE(&compute_sched->lthreads, lt, compute_next); 232 | 233 | assert(pthread_mutex_unlock(&compute_sched->lthreads_mutex) == 0); 234 | 235 | compute_sched->current_lthread = lt; 236 | compute_sched->compute_st = LT_COMPUTE_BUSY; 237 | 238 | _lthread_compute_resume(lt); 239 | 240 | compute_sched->current_lthread = NULL; 241 | compute_sched->compute_st = LT_COMPUTE_FREE; 242 | 243 | /* resume it back on the prev scheduler */ 244 | assert(pthread_mutex_lock(<->sched->defer_mutex) == 0); 245 | TAILQ_INSERT_TAIL(<->sched->defer, lt, defer_next); 246 | lt->state &= CLEARBIT(LT_ST_RUNCOMPUTE); 247 | assert(pthread_mutex_unlock(<->sched->defer_mutex) == 0); 248 | 249 | /* signal the prev scheduler in case it was sleeping in a poll */ 250 | _lthread_poller_ev_trigger(lt->sched); 251 | } 252 | 253 | assert(pthread_mutex_lock(&compute_sched->run_mutex) == 0); 254 | /* wait 
if we have no work to do, exit */ 255 | timeout.tv_sec = time(NULL) + THREAD_TIMEOUT_BEFORE_EXIT; 256 | timeout.tv_nsec = 0; 257 | status = pthread_cond_timedwait(&compute_sched->run_mutex_cond, 258 | &compute_sched->run_mutex, &timeout); 259 | assert(pthread_mutex_unlock(&compute_sched->run_mutex) == 0); 260 | 261 | /* if we didn't timeout, then we got signaled to do some work */ 262 | if (status != ETIMEDOUT) 263 | continue; 264 | 265 | /* lock the global sched to check if we have any pending work to do */ 266 | assert(pthread_mutex_lock(&sched_mutex) == 0); 267 | 268 | assert(pthread_mutex_lock(&compute_sched->lthreads_mutex) == 0); 269 | if (TAILQ_EMPTY(&compute_sched->lthreads)) { 270 | 271 | LIST_REMOVE(compute_sched, compute_next); 272 | 273 | assert(pthread_mutex_unlock(&compute_sched->lthreads_mutex) == 0); 274 | assert(pthread_mutex_unlock(&sched_mutex) == 0); 275 | _lthread_compute_sched_free(compute_sched); 276 | break; 277 | } 278 | 279 | assert(pthread_mutex_unlock(&compute_sched->lthreads_mutex) == 0); 280 | assert(pthread_mutex_unlock(&sched_mutex) == 0); 281 | } 282 | 283 | 284 | return NULL; 285 | } 286 | -------------------------------------------------------------------------------- /src/lthread_sched.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Lthread 3 | * Copyright (C) 2012, Hasan Alayli 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 
13 | * 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 | * SUCH DAMAGE. 25 | * 26 | * lthread_sched.c 27 | */ 28 | 29 | 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | 39 | #include "lthread_int.h" 40 | #include "tree.h" 41 | 42 | #define FD_KEY(f,e) (((int64_t)(f) << (sizeof(int32_t) * 8)) | e) 43 | #define FD_EVENT(f) ((int32_t)(f)) 44 | #define FD_ONLY(f) ((f) >> ((sizeof(int32_t) * 8))) 45 | 46 | static inline int _lthread_sleep_cmp(struct lthread *l1, struct lthread *l2); 47 | static inline int _lthread_wait_cmp(struct lthread *l1, struct lthread *l2); 48 | 49 | static inline int 50 | _lthread_sleep_cmp(struct lthread *l1, struct lthread *l2) 51 | { 52 | if (l1->sleep_usecs < l2->sleep_usecs) 53 | return (-1); 54 | if (l1->sleep_usecs == l2->sleep_usecs) 55 | return (0); 56 | return (1); 57 | } 58 | 59 | static inline int 60 | _lthread_wait_cmp(struct lthread *l1, struct lthread *l2) 61 | { 62 | if (l1->fd_wait < l2->fd_wait) 63 | return (-1); 64 | if (l1->fd_wait == l2->fd_wait) 65 | return (0); 66 | return (1); 67 | } 68 | 69 | RB_GENERATE(lthread_rb_sleep, lthread, sleep_node, _lthread_sleep_cmp); 70 | RB_GENERATE(lthread_rb_wait, lthread, wait_node, 
_lthread_wait_cmp); 71 | 72 | static uint64_t _lthread_min_timeout(struct lthread_sched *); 73 | 74 | static int _lthread_poll(void); 75 | static void _lthread_resume_expired(struct lthread_sched *sched); 76 | static inline int _lthread_sched_isdone(struct lthread_sched *sched); 77 | 78 | static struct lthread find_lt; 79 | 80 | static int 81 | _lthread_poll(void) 82 | { 83 | struct lthread_sched *sched; 84 | sched = lthread_get_sched(); 85 | struct timespec t = {0, 0}; 86 | int ret = 0; 87 | uint64_t usecs = 0; 88 | 89 | sched->num_new_events = 0; 90 | usecs = _lthread_min_timeout(sched); 91 | 92 | /* never sleep if we have an lthread pending in the new queue */ 93 | if (usecs && TAILQ_EMPTY(&sched->ready)) { 94 | t.tv_sec = usecs / 1000000u; 95 | if (t.tv_sec != 0) 96 | t.tv_nsec = (usecs % 1000u) * 1000000u; 97 | else 98 | t.tv_nsec = usecs * 1000u; 99 | } else { 100 | return 0; 101 | t.tv_nsec = 0; 102 | t.tv_sec = 0; 103 | } 104 | 105 | 106 | while (1) { 107 | ret = _lthread_poller_poll(t); 108 | if (ret == -1 && errno == EINTR) { 109 | continue; 110 | } else if (ret == -1) { 111 | perror("error adding events to epoll/kqueue"); 112 | assert(0); 113 | } 114 | break; 115 | } 116 | 117 | sched->nevents = 0; 118 | sched->num_new_events = ret; 119 | 120 | return (0); 121 | } 122 | 123 | static uint64_t 124 | _lthread_min_timeout(struct lthread_sched *sched) 125 | { 126 | uint64_t t_diff_usecs = 0, min = 0; 127 | struct lthread *lt = NULL; 128 | 129 | t_diff_usecs = _lthread_diff_usecs(sched->birth, 130 | _lthread_usec_now()); 131 | min = sched->default_timeout; 132 | 133 | lt = RB_MIN(lthread_rb_sleep, &sched->sleeping); 134 | if (!lt) 135 | return (min); 136 | 137 | min = lt->sleep_usecs; 138 | if (min > t_diff_usecs) 139 | return (min - t_diff_usecs); 140 | else // we are running late on a thread, execute immediately 141 | return (0); 142 | 143 | return (0); 144 | } 145 | 146 | /* 147 | * Returns 0 if there is a pending job in scheduler or 1 if done and can exit. 
148 | */ 149 | static inline int 150 | _lthread_sched_isdone(struct lthread_sched *sched) 151 | { 152 | return (RB_EMPTY(&sched->waiting) && 153 | LIST_EMPTY(&sched->busy) && 154 | RB_EMPTY(&sched->sleeping) && 155 | TAILQ_EMPTY(&sched->ready)); 156 | } 157 | 158 | void 159 | lthread_run(void) 160 | { 161 | struct lthread_sched *sched; 162 | struct lthread *lt = NULL; 163 | struct lthread *lt_read = NULL, *lt_write = NULL, *lt_last_ready = NULL; 164 | int p = 0; 165 | int fd = 0; 166 | int is_eof = 0; 167 | 168 | sched = lthread_get_sched(); 169 | /* scheduler not initiliazed, and no lthreads where created */ 170 | if (sched == NULL) 171 | return; 172 | 173 | while (!_lthread_sched_isdone(sched)) { 174 | 175 | /* 1. start by checking if a sleeping thread needs to wakeup */ 176 | _lthread_resume_expired(sched); 177 | 178 | /* 2. check to see if we have any ready threads to run. 179 | * if new lthreads got added to the ready queue in process, they'll 180 | * run the next time we get here again. 181 | */ 182 | lt_last_ready = TAILQ_LAST(&sched->ready, lthread_q); 183 | while (!TAILQ_EMPTY(&sched->ready)) { 184 | lt = TAILQ_FIRST(&sched->ready); 185 | TAILQ_REMOVE(<->sched->ready, lt, ready_next); 186 | _lthread_resume(lt); 187 | if (lt == lt_last_ready) 188 | break; 189 | } 190 | 191 | /* 3. resume lthreads we received from lthread_compute, if any */ 192 | while (!TAILQ_EMPTY(&sched->defer)) { 193 | assert(pthread_mutex_lock(&sched->defer_mutex) == 0); 194 | lt = TAILQ_FIRST(&sched->defer); 195 | if (lt == NULL) { 196 | assert(pthread_mutex_unlock(&sched->defer_mutex) == 0); 197 | break; 198 | } 199 | TAILQ_REMOVE(&sched->defer, lt, defer_next); 200 | assert(pthread_mutex_unlock(&sched->defer_mutex) == 0); 201 | LIST_REMOVE(lt, busy_next); 202 | _lthread_resume(lt); 203 | } 204 | 205 | /* 4. check if we received any events after lthread_poll */ 206 | _lthread_poll(); 207 | 208 | /* 5. 
fire up lthreads that are ready to run */ 209 | while (sched->num_new_events) { 210 | p = --sched->num_new_events; 211 | 212 | fd = _lthread_poller_ev_get_fd(&sched->eventlist[p]); 213 | 214 | /* 215 | * We got signaled via trigger to wakeup from polling & rusume file io. 216 | * Those lthreads will get handled in step 4. 217 | */ 218 | if (fd == sched->eventfd) { 219 | _lthread_poller_ev_clear_trigger(); 220 | continue; 221 | } 222 | 223 | is_eof = _lthread_poller_ev_is_eof(&sched->eventlist[p]); 224 | if (is_eof) 225 | errno = ECONNRESET; 226 | 227 | #define HANDLE_EV(lt_wr, ev) \ 228 | lt_wr = _lthread_desched_event(fd, ev); \ 229 | if (lt_wr != NULL) { \ 230 | \ 231 | if (!(lt_wr->state & BIT(LT_ST_WAIT_MULTI))) { \ 232 | if (is_eof) \ 233 | lt_wr->state |= BIT(LT_ST_FDEOF); \ 234 | _lthread_resume(lt_wr); \ 235 | } else { \ 236 | /* \ 237 | * this lthread was waiting on multiple events, increment \ 238 | * ready_fds and place it on the ready queue to resume after we \ 239 | * finished counting all ready fds that the lthread was waiting \ 240 | * on. This is to emulate poll(2) return call. \ 241 | */ \ 242 | if (lt_wr->ready_fds == 0) \ 243 | TAILQ_INSERT_TAIL(&sched->ready, lt_wr, ready_next); \ 244 | _lthread_poller_set_fd_ready(lt_wr, fd, ev, is_eof); \ 245 | } \ 246 | } \ 247 | 248 | HANDLE_EV(lt_read, LT_EV_READ); 249 | HANDLE_EV(lt_write, LT_EV_WRITE); 250 | is_eof = 0; 251 | 252 | assert(lt_write != NULL || lt_read != NULL); 253 | } 254 | } 255 | 256 | _sched_free(sched); 257 | 258 | return; 259 | } 260 | 261 | /* 262 | * Cancels registered event in poller and deschedules (fd, ev) -> lt from 263 | * rbtree. This is safe to be called even if the lthread wasn't waiting on an 264 | * event. 
265 | */ 266 | void 267 | _lthread_cancel_event(struct lthread *lt) 268 | { 269 | if (lt->state & BIT(LT_ST_WAIT_READ)) { 270 | _lthread_poller_ev_clear_rd(FD_ONLY(lt->fd_wait)); 271 | lt->state &= CLEARBIT(LT_ST_WAIT_READ); 272 | } else if (lt->state & BIT(LT_ST_WAIT_WRITE)) { 273 | _lthread_poller_ev_clear_wr(FD_ONLY(lt->fd_wait)); 274 | lt->state &= CLEARBIT(LT_ST_WAIT_WRITE); 275 | } 276 | 277 | if (lt->fd_wait >= 0) 278 | _lthread_desched_event(FD_ONLY(lt->fd_wait), FD_EVENT(lt->fd_wait)); 279 | lt->fd_wait = -1; 280 | } 281 | 282 | /* 283 | * Deschedules an event by removing the (fd, ev) -> lt node from rbtree. 284 | * It also deschedules the lthread from sleeping in case it was in sleeping 285 | * tree. 286 | */ 287 | struct lthread * 288 | _lthread_desched_event(int fd, enum lthread_event e) 289 | { 290 | struct lthread *lt = NULL; 291 | struct lthread_sched *sched = lthread_get_sched(); 292 | find_lt.fd_wait = FD_KEY(fd, e); 293 | 294 | lt = RB_FIND(lthread_rb_wait, &sched->waiting, &find_lt); 295 | if (lt != NULL) { 296 | RB_REMOVE(lthread_rb_wait, <->sched->waiting, lt); 297 | _lthread_desched_sleep(lt); 298 | } 299 | 300 | return (lt); 301 | } 302 | 303 | /* 304 | * Schedules an lthread for a poller event. 305 | * Sets its state to LT_EV_(READ|WRITE) and inserts lthread in waiting rbtree. 306 | * When the event occurs, the state is cleared and node is removed by 307 | * _lthread_desched_event() called from lthread_run(). 308 | * 309 | * If event doesn't occur and lthread expired waiting, _lthread_cancel_event() 310 | * must be called. 311 | */ 312 | void 313 | _lthread_sched_event(struct lthread *lt, int fd, enum lthread_event e, 314 | uint64_t timeout) 315 | { 316 | struct lthread *lt_tmp = NULL; 317 | enum lthread_st st; 318 | if (lt->state & BIT(LT_ST_WAIT_READ) || lt->state & BIT(LT_ST_WAIT_WRITE)) { 319 | printf("Unexpected event. 
lt id %"PRIu64" fd %"PRId64" already in %"PRId32" state\n", 320 | lt->id, lt->fd_wait, lt->state); 321 | assert(0); 322 | } 323 | 324 | if (e == LT_EV_READ) { 325 | st = LT_ST_WAIT_READ; 326 | _lthread_poller_ev_register_rd(fd); 327 | } else if (e == LT_EV_WRITE) { 328 | st = LT_ST_WAIT_WRITE; 329 | _lthread_poller_ev_register_wr(fd); 330 | } else { 331 | assert(0); 332 | } 333 | 334 | lt->state |= BIT(st); 335 | lt->fd_wait = FD_KEY(fd, e); 336 | lt_tmp = RB_INSERT(lthread_rb_wait, <->sched->waiting, lt); 337 | assert(lt_tmp == NULL); 338 | if (timeout == -1) 339 | return; 340 | _lthread_sched_sleep(lt, timeout); 341 | lt->fd_wait = -1; 342 | lt->state &= CLEARBIT(st); 343 | } 344 | 345 | /* 346 | * Removes lthread from sleeping rbtree. 347 | * This can be called multiple times on the same lthread regardless if it was 348 | * sleeping or not. 349 | */ 350 | void 351 | _lthread_desched_sleep(struct lthread *lt) 352 | { 353 | if (lt->state & BIT(LT_ST_SLEEPING)) { 354 | RB_REMOVE(lthread_rb_sleep, <->sched->sleeping, lt); 355 | lt->state &= CLEARBIT(LT_ST_SLEEPING); 356 | lt->state |= BIT(LT_ST_READY); 357 | lt->state &= CLEARBIT(LT_ST_EXPIRED); 358 | } 359 | } 360 | 361 | /* 362 | * Schedules lthread to sleep for `msecs` by inserting lthread into sleeping 363 | * rbtree and setting the lthread state to LT_ST_SLEEPING. 364 | * lthread state is cleared upon resumption or expiry. 365 | */ 366 | void 367 | _lthread_sched_sleep(struct lthread *lt, uint64_t msecs) 368 | { 369 | struct lthread *lt_tmp = NULL; 370 | uint64_t usecs = msecs * 1000u; 371 | 372 | /* 373 | * if msecs is 0, we won't schedule lthread otherwise loop until 374 | * collision resolved(very rare) by incrementing usec++. 
375 | */ 376 | lt->sleep_usecs = _lthread_diff_usecs(lt->sched->birth, 377 | _lthread_usec_now()) + usecs; 378 | while (msecs) { 379 | lt_tmp = RB_INSERT(lthread_rb_sleep, <->sched->sleeping, lt); 380 | if (lt_tmp) { 381 | lt->sleep_usecs++; 382 | continue; 383 | } 384 | lt->state |= BIT(LT_ST_SLEEPING); 385 | break; 386 | } 387 | 388 | 389 | _lthread_yield(lt); 390 | if (msecs > 0) 391 | lt->state &= CLEARBIT(LT_ST_SLEEPING); 392 | 393 | lt->sleep_usecs = 0; 394 | } 395 | 396 | void 397 | _lthread_sched_busy_sleep(struct lthread *lt, uint64_t msecs) 398 | { 399 | 400 | LIST_INSERT_HEAD(<->sched->busy, lt, busy_next); 401 | lt->state |= BIT(LT_ST_BUSY); 402 | _lthread_sched_sleep(lt, msecs); 403 | lt->state &= CLEARBIT(LT_ST_BUSY); 404 | LIST_REMOVE(lt, busy_next); 405 | } 406 | 407 | /* 408 | * Resumes expired lthread and cancels its events whether it was waiting 409 | * on one or not, and deschedules it from sleeping rbtree in case it was 410 | * sleeping. 411 | */ 412 | static void 413 | _lthread_resume_expired(struct lthread_sched *sched) 414 | { 415 | struct lthread *lt = NULL; 416 | //struct lthread *lt_tmp = NULL; 417 | uint64_t t_diff_usecs = 0; 418 | 419 | /* current scheduler time */ 420 | t_diff_usecs = _lthread_diff_usecs(sched->birth, _lthread_usec_now()); 421 | 422 | while ((lt = RB_MIN(lthread_rb_sleep, &sched->sleeping)) != NULL) { 423 | 424 | if (lt->sleep_usecs <= t_diff_usecs) { 425 | _lthread_cancel_event(lt); 426 | _lthread_desched_sleep(lt); 427 | lt->state |= BIT(LT_ST_EXPIRED); 428 | 429 | /* don't clear expired if lthread exited/cancelled */ 430 | if (_lthread_resume(lt) != -1) 431 | lt->state &= CLEARBIT(LT_ST_EXPIRED); 432 | 433 | continue; 434 | } 435 | break; 436 | } 437 | } 438 | -------------------------------------------------------------------------------- /src/lthread.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Lthread 3 | * Copyright (C) 2012, Hasan Alayli 4 | * 5 | * 
Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 | * SUCH DAMAGE. 
25 | * 26 | * lthread.c 27 | */ 28 | 29 | 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | 43 | #include "lthread_int.h" 44 | #include "lthread_poller.h" 45 | 46 | extern int errno; 47 | 48 | static void _exec(void *lt); 49 | static void _lthread_init(struct lthread *lt); 50 | static void _lthread_key_create(void); 51 | static inline void _lthread_madvise(struct lthread *lt); 52 | 53 | pthread_key_t lthread_sched_key; 54 | static pthread_once_t key_once = PTHREAD_ONCE_INIT; 55 | 56 | 57 | 58 | int _switch(struct cpu_ctx *new_ctx, struct cpu_ctx *cur_ctx); 59 | #ifdef __i386__ 60 | __asm__ ( 61 | " .text \n" 62 | " .p2align 2,,3 \n" 63 | ".globl _switch \n" 64 | "_switch: \n" 65 | "__switch: \n" 66 | "movl 8(%esp), %edx # fs->%edx \n" 67 | "movl %esp, 0(%edx) # save esp \n" 68 | "movl %ebp, 4(%edx) # save ebp \n" 69 | "movl (%esp), %eax # save eip \n" 70 | "movl %eax, 8(%edx) \n" 71 | "movl %ebx, 12(%edx) # save ebx,esi,edi \n" 72 | "movl %esi, 16(%edx) \n" 73 | "movl %edi, 20(%edx) \n" 74 | "movl 4(%esp), %edx # ts->%edx \n" 75 | "movl 20(%edx), %edi # restore ebx,esi,edi \n" 76 | "movl 16(%edx), %esi \n" 77 | "movl 12(%edx), %ebx \n" 78 | "movl 0(%edx), %esp # restore esp \n" 79 | "movl 4(%edx), %ebp # restore ebp \n" 80 | "movl 8(%edx), %eax # restore eip \n" 81 | "movl %eax, (%esp) \n" 82 | "ret \n" 83 | ); 84 | #elif defined(__x86_64__) 85 | 86 | __asm__ ( 87 | " .text \n" 88 | " .p2align 4,,15 \n" 89 | ".globl _switch \n" 90 | ".globl __switch \n" 91 | "_switch: \n" 92 | "__switch: \n" 93 | " movq %rsp, 0(%rsi) # save stack_pointer \n" 94 | " movq %rbp, 8(%rsi) # save frame_pointer \n" 95 | " movq (%rsp), %rax # save insn_pointer \n" 96 | " movq %rax, 16(%rsi) \n" 97 | " movq %rbx, 24(%rsi) # save rbx,r12-r15 \n" 98 | " movq %r12, 32(%rsi) \n" 99 | " movq %r13, 40(%rsi) \n" 100 | " movq %r14, 48(%rsi) \n" 101 | " movq %r15, 56(%rsi) \n" 
102 | " movq 56(%rdi), %r15 \n" 103 | " movq 48(%rdi), %r14 \n" 104 | " movq 40(%rdi), %r13 # restore rbx,r12-r15 \n" 105 | " movq 32(%rdi), %r12 \n" 106 | " movq 24(%rdi), %rbx \n" 107 | " movq 8(%rdi), %rbp # restore frame_pointer \n" 108 | " movq 0(%rdi), %rsp # restore stack_pointer \n" 109 | " movq 16(%rdi), %rax # restore insn_pointer \n" 110 | " movq %rax, (%rsp) \n" 111 | " ret \n" 112 | ); 113 | #endif 114 | 115 | static void 116 | _exec(void *lt) 117 | { 118 | 119 | #if defined(__llvm__) && defined(__x86_64__) 120 | __asm__ ("movq 16(%%rbp), %[lt]" : [lt] "=r" (lt)); 121 | #endif 122 | ((struct lthread *)lt)->fun(((struct lthread *)lt)->arg); 123 | ((struct lthread *)lt)->state |= BIT(LT_ST_EXITED); 124 | 125 | _lthread_yield(lt); 126 | } 127 | 128 | void 129 | _lthread_yield(struct lthread *lt) 130 | { 131 | lt->ops = 0; 132 | _switch(<->sched->ctx, <->ctx); 133 | } 134 | 135 | void 136 | _lthread_free(struct lthread *lt) 137 | { 138 | free(lt->stack); 139 | free(lt); 140 | } 141 | 142 | int 143 | _lthread_resume(struct lthread *lt) 144 | { 145 | 146 | struct lthread_sched *sched = lthread_get_sched(); 147 | 148 | if (lt->state & BIT(LT_ST_CANCELLED)) { 149 | /* if an lthread was joining on it, schedule it to run */ 150 | if (lt->lt_join) { 151 | _lthread_desched_sleep(lt->lt_join); 152 | TAILQ_INSERT_TAIL(&sched->ready, lt->lt_join, ready_next); 153 | lt->lt_join = NULL; 154 | } 155 | /* if lthread is detached, then we can free it up */ 156 | if (lt->state & BIT(LT_ST_DETACH)) 157 | _lthread_free(lt); 158 | if (lt->state & BIT(LT_ST_BUSY)) 159 | LIST_REMOVE(lt, busy_next); 160 | return (-1); 161 | } 162 | 163 | if (lt->state & BIT(LT_ST_NEW)) 164 | _lthread_init(lt); 165 | 166 | sched->current_lthread = lt; 167 | _switch(<->ctx, <->sched->ctx); 168 | sched->current_lthread = NULL; 169 | _lthread_madvise(lt); 170 | 171 | if (lt->state & BIT(LT_ST_EXITED)) { 172 | if (lt->lt_join) { 173 | /* if lthread was sleeping, deschedule it so it doesn't expire. 
*/ 174 | _lthread_desched_sleep(lt->lt_join); 175 | TAILQ_INSERT_TAIL(&sched->ready, lt->lt_join, ready_next); 176 | lt->lt_join = NULL; 177 | } 178 | 179 | /* if lthread is detached, free it, otherwise lthread_join() will */ 180 | if (lt->state & BIT(LT_ST_DETACH)) 181 | _lthread_free(lt); 182 | return (-1); 183 | } else { 184 | /* place it in a compute scheduler if needed. */ 185 | if (lt->state & BIT(LT_ST_PENDING_RUNCOMPUTE)) { 186 | _lthread_compute_add(lt); 187 | } 188 | } 189 | 190 | return (0); 191 | } 192 | 193 | static inline void 194 | _lthread_madvise(struct lthread *lt) 195 | { 196 | size_t current_stack = (lt->stack + lt->stack_size) - lt->ctx.esp; 197 | size_t tmp; 198 | /* make sure function did not overflow stack, we can't recover from that */ 199 | assert(current_stack <= lt->stack_size); 200 | 201 | /* 202 | * free up stack space we no longer use. As long as we were using more than 203 | * pagesize bytes. 204 | */ 205 | if (current_stack < lt->last_stack_size && 206 | lt->last_stack_size > lt->sched->page_size) { 207 | /* round up to the nearest page size */ 208 | tmp = current_stack + (-current_stack & (lt->sched->page_size - 1)); 209 | assert(madvise(lt->stack, lt->stack_size - tmp, MADV_DONTNEED) == 0); 210 | } 211 | 212 | lt->last_stack_size = current_stack; 213 | } 214 | 215 | static void 216 | _lthread_key_destructor(void *data) 217 | { 218 | free(data); 219 | } 220 | 221 | static void 222 | _lthread_key_create(void) 223 | { 224 | assert(pthread_key_create(<hread_sched_key, 225 | _lthread_key_destructor) == 0); 226 | assert(pthread_setspecific(lthread_sched_key, NULL) == 0); 227 | 228 | return; 229 | } 230 | 231 | int 232 | lthread_init(size_t size) 233 | { 234 | return (sched_create(size)); 235 | } 236 | 237 | static void 238 | _lthread_init(struct lthread *lt) 239 | { 240 | void **stack = NULL; 241 | stack = (void **)(lt->stack + (lt->stack_size)); 242 | 243 | stack[-3] = NULL; 244 | stack[-2] = (void *)lt; 245 | lt->ctx.esp = (void 
*)stack - (4 * sizeof(void *)); 246 | lt->ctx.ebp = (void *)stack - (3 * sizeof(void *)); 247 | lt->ctx.eip = (void *)_exec; 248 | lt->state = BIT(LT_ST_READY); 249 | } 250 | 251 | void 252 | _sched_free(struct lthread_sched *sched) 253 | { 254 | close(sched->poller_fd); 255 | 256 | #if ! (defined(__FreeBSD__) && defined(__APPLE__)) 257 | close(sched->eventfd); 258 | #endif 259 | pthread_mutex_destroy(&sched->defer_mutex); 260 | 261 | free(sched); 262 | pthread_setspecific(lthread_sched_key, NULL); 263 | } 264 | 265 | int 266 | sched_create(size_t stack_size) 267 | { 268 | struct lthread_sched *new_sched; 269 | size_t sched_stack_size = 0; 270 | 271 | sched_stack_size = stack_size ? stack_size : MAX_STACK_SIZE; 272 | 273 | if ((new_sched = calloc(1, sizeof(struct lthread_sched))) == NULL) { 274 | perror("Failed to initialize scheduler\n"); 275 | return (errno); 276 | } 277 | 278 | assert(pthread_setspecific(lthread_sched_key, new_sched) == 0); 279 | _lthread_io_worker_init(); 280 | 281 | if ((new_sched->poller_fd = _lthread_poller_create()) == -1) { 282 | perror("Failed to initialize poller\n"); 283 | _sched_free(new_sched); 284 | return (errno); 285 | } 286 | _lthread_poller_ev_register_trigger(); 287 | 288 | if (pthread_mutex_init(&new_sched->defer_mutex, NULL) != 0) { 289 | perror("Failed to initialize defer_mutex\n"); 290 | _sched_free(new_sched); 291 | return (errno); 292 | } 293 | 294 | new_sched->stack_size = sched_stack_size; 295 | new_sched->page_size = getpagesize(); 296 | 297 | new_sched->spawned_lthreads = 0; 298 | new_sched->default_timeout = 3000000u; 299 | RB_INIT(&new_sched->sleeping); 300 | RB_INIT(&new_sched->waiting); 301 | new_sched->birth = _lthread_usec_now(); 302 | TAILQ_INIT(&new_sched->ready); 303 | TAILQ_INIT(&new_sched->defer); 304 | LIST_INIT(&new_sched->busy); 305 | 306 | bzero(&new_sched->ctx, sizeof(struct cpu_ctx)); 307 | 308 | return (0); 309 | } 310 | 311 | int 312 | lthread_create(struct lthread **new_lt, void *fun, void *arg) 313 
| { 314 | struct lthread *lt = NULL; 315 | assert(pthread_once(&key_once, _lthread_key_create) == 0); 316 | struct lthread_sched *sched = lthread_get_sched(); 317 | 318 | if (sched == NULL) { 319 | sched_create(0); 320 | sched = lthread_get_sched(); 321 | if (sched == NULL) { 322 | perror("Failed to create scheduler"); 323 | return (-1); 324 | } 325 | } 326 | 327 | if ((lt = calloc(1, sizeof(struct lthread))) == NULL) { 328 | perror("Failed to allocate memory for new lthread"); 329 | return (errno); 330 | } 331 | 332 | if (posix_memalign(<->stack, getpagesize(), sched->stack_size)) { 333 | free(lt); 334 | perror("Failed to allocate stack for new lthread"); 335 | return (errno); 336 | } 337 | 338 | lt->sched = sched; 339 | lt->stack_size = sched->stack_size; 340 | lt->state = BIT(LT_ST_NEW); 341 | lt->id = sched->spawned_lthreads++; 342 | lt->fun = fun; 343 | lt->fd_wait = -1; 344 | lt->arg = arg; 345 | lt->birth = _lthread_usec_now(); 346 | *new_lt = lt; 347 | TAILQ_INSERT_TAIL(<->sched->ready, lt, ready_next); 348 | 349 | return (0); 350 | } 351 | 352 | void 353 | lthread_set_data(void *data) 354 | { 355 | lthread_get_sched()->current_lthread->data = data; 356 | } 357 | 358 | void * 359 | lthread_get_data(void) 360 | { 361 | return (lthread_get_sched()->current_lthread->data); 362 | } 363 | 364 | struct lthread* 365 | lthread_current(void) 366 | { 367 | return (lthread_get_sched()->current_lthread); 368 | } 369 | 370 | void 371 | lthread_cancel(struct lthread *lt) 372 | { 373 | if (lt == NULL) 374 | return; 375 | 376 | lt->state |= BIT(LT_ST_CANCELLED); 377 | _lthread_desched_sleep(lt); 378 | _lthread_cancel_event(lt); 379 | /* 380 | * we don't schedule the cancelled lthread if it was running in a compute 381 | * scheduler or pending to run in a compute scheduler or in an io worker. 382 | * otherwise it could get freed while it's still running. 
383 | * when it's done in compute_scheduler, or io_worker - the scheduler will 384 | * attempt to run it and realize it's cancelled and abort the resumption. 385 | */ 386 | if (lt->state & BIT(LT_ST_PENDING_RUNCOMPUTE) || 387 | lt->state & BIT(LT_ST_WAIT_IO_READ) || 388 | lt->state & BIT(LT_ST_WAIT_IO_WRITE) || 389 | lt->state & BIT(LT_ST_RUNCOMPUTE)) 390 | return; 391 | TAILQ_INSERT_TAIL(<->sched->ready, lt, ready_next); 392 | } 393 | 394 | int 395 | lthread_cond_create(struct lthread_cond **c) 396 | { 397 | if ((*c = calloc(1, sizeof(struct lthread_cond))) == NULL) 398 | return (-1); 399 | 400 | TAILQ_INIT(&(*c)->blocked_lthreads); 401 | 402 | return (0); 403 | } 404 | 405 | int 406 | lthread_cond_wait(struct lthread_cond *c, uint64_t timeout) 407 | { 408 | struct lthread *lt = lthread_get_sched()->current_lthread; 409 | TAILQ_INSERT_TAIL(&c->blocked_lthreads, lt, cond_next); 410 | 411 | _lthread_sched_busy_sleep(lt, timeout); 412 | 413 | if (lt->state & BIT(LT_ST_EXPIRED)) { 414 | TAILQ_REMOVE(&c->blocked_lthreads, lt, cond_next); 415 | return (-2); 416 | } 417 | 418 | return (0); 419 | } 420 | 421 | void 422 | lthread_cond_signal(struct lthread_cond *c) 423 | { 424 | struct lthread *lt = TAILQ_FIRST(&c->blocked_lthreads); 425 | if (lt == NULL) 426 | return; 427 | TAILQ_REMOVE(&c->blocked_lthreads, lt, cond_next); 428 | _lthread_desched_sleep(lt); 429 | TAILQ_INSERT_TAIL(<hread_get_sched()->ready, lt, ready_next); 430 | } 431 | 432 | void 433 | lthread_cond_broadcast(struct lthread_cond *c) 434 | { 435 | struct lthread *lt = NULL; 436 | struct lthread *lttmp = NULL; 437 | 438 | TAILQ_FOREACH_SAFE(lt, &c->blocked_lthreads, cond_next, lttmp) { 439 | TAILQ_REMOVE(&c->blocked_lthreads, lt, cond_next); 440 | _lthread_desched_sleep(lt); 441 | TAILQ_INSERT_TAIL(<hread_get_sched()->ready, lt, ready_next); 442 | } 443 | } 444 | 445 | void 446 | lthread_sleep(uint64_t msecs) 447 | { 448 | struct lthread *lt = lthread_get_sched()->current_lthread; 449 | 450 | if (msecs == 
0) { 451 | TAILQ_INSERT_TAIL(<->sched->ready, lt, ready_next); 452 | _lthread_yield(lt); 453 | } else { 454 | _lthread_sched_sleep(lt, msecs); 455 | } 456 | } 457 | 458 | void 459 | _lthread_renice(struct lthread *lt) 460 | { 461 | lt->ops++; 462 | if (lt->ops < 5) 463 | return; 464 | 465 | TAILQ_INSERT_TAIL(<hread_get_sched()->ready, lt, ready_next); 466 | _lthread_yield(lt); 467 | } 468 | 469 | void 470 | lthread_wakeup(struct lthread *lt) 471 | { 472 | if (lt->state & BIT(LT_ST_SLEEPING)) { 473 | TAILQ_INSERT_TAIL(<->sched->ready, lt, ready_next); 474 | _lthread_desched_sleep(lt); 475 | } 476 | } 477 | 478 | void 479 | lthread_exit(void *ptr) 480 | { 481 | struct lthread *lt = lthread_get_sched()->current_lthread; 482 | if (lt->lt_join && lt->lt_join->lt_exit_ptr && ptr) 483 | *(lt->lt_join->lt_exit_ptr) = ptr; 484 | 485 | lt->state |= BIT(LT_ST_EXITED); 486 | _lthread_yield(lt); 487 | } 488 | 489 | int 490 | lthread_join(struct lthread *lt, void **ptr, uint64_t timeout) 491 | { 492 | struct lthread *current = lthread_get_sched()->current_lthread; 493 | lt->lt_join = current; 494 | current->lt_exit_ptr = ptr; 495 | int ret = 0; 496 | 497 | /* fail if the lthread has exited already */ 498 | if (lt->state & BIT(LT_ST_EXITED)) 499 | return (-1); 500 | 501 | _lthread_sched_busy_sleep(current, timeout); 502 | 503 | if (current->state & BIT(LT_ST_EXPIRED)) { 504 | lt->lt_join = NULL; 505 | return (-2); 506 | } 507 | 508 | if (lt->state & BIT(LT_ST_CANCELLED)) 509 | ret = -1; 510 | 511 | _lthread_free(lt); 512 | 513 | return (ret); 514 | } 515 | 516 | void 517 | lthread_detach(void) 518 | { 519 | struct lthread *current = lthread_get_sched()->current_lthread; 520 | current->state |= BIT(LT_ST_DETACH); 521 | } 522 | 523 | void 524 | lthread_detach2(struct lthread *lt) 525 | { 526 | lt->state |= BIT(LT_ST_DETACH); 527 | } 528 | 529 | 530 | void 531 | lthread_set_funcname(const char *f) 532 | { 533 | struct lthread *lt = lthread_get_sched()->current_lthread; 534 | 
strncpy(lt->funcname, f, 64); 535 | } 536 | 537 | uint64_t 538 | lthread_id(void) 539 | { 540 | return (lthread_get_sched()->current_lthread->id); 541 | } 542 | 543 | struct lthread* 544 | lthread_self(void) 545 | { 546 | return (lthread_get_sched()->current_lthread); 547 | } 548 | 549 | /* 550 | * convenience function for performance measurement. 551 | */ 552 | void 553 | lthread_print_timestamp(char *msg) 554 | { 555 | struct timeval t1 = {0, 0}; 556 | gettimeofday(&t1, NULL); 557 | printf("lt timestamp: sec: %ld usec: %ld (%s)\n", t1.tv_sec, (long) t1.tv_usec, msg); 558 | } 559 | -------------------------------------------------------------------------------- /src/lthread_socket.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Lthread 3 | * Copyright (C) 2012, Hasan Alayli 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 | * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 | * SUCH DAMAGE. 25 | * 26 | * lthread_socket.c 27 | */ 28 | 29 | 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | 39 | #include 40 | #include 41 | #include 42 | 43 | #include "lthread_int.h" 44 | 45 | #if defined(__FreeBSD__) || defined(__APPLE__) 46 | #define FLAG 47 | #else 48 | #define FLAG | MSG_NOSIGNAL 49 | #endif 50 | 51 | 52 | #define LTHREAD_WAIT(fn, event) \ 53 | fn \ 54 | { \ 55 | struct lthread *lt = lthread_get_sched()->current_lthread; \ 56 | _lthread_sched_event(lt, fd, event, timeout_ms); \ 57 | if (lt->state & BIT(LT_ST_FDEOF)) \ 58 | return (-1); \ 59 | if (lt->state & BIT(LT_ST_EXPIRED)) \ 60 | return (-2); \ 61 | return (0); \ 62 | } 63 | 64 | #define LTHREAD_RECV(x, y) \ 65 | x { \ 66 | ssize_t ret = 0; \ 67 | struct lthread *lt = lthread_get_sched()->current_lthread; \ 68 | while (1) { \ 69 | if (lt->state & BIT(LT_ST_FDEOF)) \ 70 | return (-1); \ 71 | _lthread_renice(lt); \ 72 | ret = y; \ 73 | if (ret == -1 && errno != EAGAIN) \ 74 | return (-1); \ 75 | if ((ret == -1 && errno == EAGAIN)) { \ 76 | _lthread_sched_event(lt, fd, LT_EV_READ, timeout); \ 77 | if (lt->state & BIT(LT_ST_EXPIRED)) \ 78 | return (-2); \ 79 | } \ 80 | if (ret >= 0) \ 81 | return (ret); \ 82 | } \ 83 | } \ 84 | 85 | #define LTHREAD_RECV_EXACT(x, y) \ 86 | x { \ 87 | ssize_t ret = 0; \ 88 | ssize_t recvd = 0; \ 89 | struct lthread *lt = lthread_get_sched()->current_lthread; \ 90 
| \ 91 | while (recvd != length) { \ 92 | if (lt->state & BIT(LT_ST_FDEOF)) \ 93 | return (-1); \ 94 | \ 95 | _lthread_renice(lt); \ 96 | ret = y; \ 97 | if (ret == 0) \ 98 | return (recvd); \ 99 | if (ret > 0) \ 100 | recvd += ret; \ 101 | if (ret == -1 && errno != EAGAIN) \ 102 | return (-1); \ 103 | if ((ret == -1 && errno == EAGAIN)) { \ 104 | _lthread_sched_event(lt, fd, LT_EV_READ, timeout); \ 105 | if (lt->state & BIT(LT_ST_EXPIRED)) \ 106 | return (-2); \ 107 | } \ 108 | } \ 109 | return (recvd); \ 110 | } \ 111 | 112 | 113 | #define LTHREAD_SEND(x, y) \ 114 | x { \ 115 | ssize_t ret = 0; \ 116 | ssize_t sent = 0; \ 117 | struct lthread *lt = lthread_get_sched()->current_lthread; \ 118 | while (sent != length) { \ 119 | if (lt->state & BIT(LT_ST_FDEOF)) \ 120 | return (-1); \ 121 | _lthread_renice(lt); \ 122 | ret = y; \ 123 | if (ret == 0) \ 124 | return (sent); \ 125 | if (ret > 0) \ 126 | sent += ret; \ 127 | if (ret == -1 && errno != EAGAIN) \ 128 | return (-1); \ 129 | if (ret == -1 && errno == EAGAIN) \ 130 | _lthread_sched_event(lt, fd, LT_EV_WRITE, 0); \ 131 | } \ 132 | return (sent); \ 133 | } \ 134 | 135 | #define LTHREAD_SEND_ONCE(x, y) \ 136 | x { \ 137 | ssize_t ret = 0; \ 138 | struct lthread *lt = lthread_get_sched()->current_lthread; \ 139 | while (1) { \ 140 | if (lt->state & BIT(LT_ST_FDEOF)) \ 141 | return (-1); \ 142 | ret = y; \ 143 | if (ret >= 0) \ 144 | return (ret); \ 145 | if (ret == -1 && errno != EAGAIN) \ 146 | return (-1); \ 147 | if (ret == -1 && errno == EAGAIN) \ 148 | _lthread_sched_event(lt, fd, LT_EV_WRITE, 0); \ 149 | } \ 150 | } \ 151 | 152 | const struct linger nolinger = { .l_onoff = 1, .l_linger = 1 }; 153 | 154 | int 155 | lthread_accept(int fd, struct sockaddr *addr, socklen_t *len) 156 | { 157 | int ret = -1; 158 | struct lthread *lt = lthread_get_sched()->current_lthread; 159 | 160 | while (1) { 161 | _lthread_renice(lt); 162 | ret = accept(fd, addr, len); 163 | if (ret == -1 && 164 | (errno == ENFILE || 165 | 
errno == EWOULDBLOCK || 166 | errno == EMFILE)) { 167 | _lthread_sched_event(lt, fd, LT_EV_READ, 0); 168 | continue; 169 | } 170 | 171 | if (ret > 0) 172 | break; 173 | 174 | if (ret == -1 && errno == ECONNABORTED) { 175 | perror("Cannot accept connection"); 176 | continue; 177 | } 178 | 179 | if (ret == -1 && errno != EWOULDBLOCK) { 180 | perror("Cannot accept connection"); 181 | return (-1); 182 | } 183 | 184 | } 185 | 186 | #ifndef __FreeBSD__ 187 | if ((fcntl(ret, F_SETFL, O_NONBLOCK)) == -1) { 188 | close(ret); 189 | perror("Failed to set socket properties"); 190 | return (-1); 191 | } 192 | #endif 193 | 194 | return (ret); 195 | } 196 | 197 | int 198 | lthread_close(int fd) 199 | { 200 | struct lthread *lt = NULL; 201 | 202 | /* wake up the lthreads waiting on this fd and notify them of close */ 203 | lt = _lthread_desched_event(fd, LT_EV_READ); 204 | if (lt) { 205 | TAILQ_INSERT_TAIL(<hread_get_sched()->ready, lt, ready_next); 206 | lt->state |= BIT(LT_ST_FDEOF); 207 | } 208 | 209 | lt = _lthread_desched_event(fd, LT_EV_WRITE); 210 | if (lt) { 211 | TAILQ_INSERT_TAIL(<hread_get_sched()->ready, lt, ready_next); 212 | lt->state |= BIT(LT_ST_FDEOF); 213 | } 214 | 215 | /* closing fd removes its registered events from poller */ 216 | return (close(fd)); 217 | } 218 | 219 | int 220 | lthread_socket(int domain, int type, int protocol) 221 | { 222 | int fd; 223 | #if defined(__FreeBSD__) || defined(__APPLE__) 224 | int set = 1; 225 | #endif 226 | 227 | if ((fd = socket(domain, type, protocol)) == -1) { 228 | perror("Failed to create a new socket"); 229 | return (-1); 230 | } 231 | 232 | if ((fcntl(fd, F_SETFL, O_NONBLOCK)) == -1) { 233 | close(fd); 234 | perror("Failed to set socket properties"); 235 | return (-1); 236 | } 237 | 238 | #if defined(__FreeBSD__) || defined(__APPLE__) 239 | if (setsockopt(fd, SOL_SOCKET, SO_NOSIGPIPE, &set, sizeof(int)) == -1) { 240 | close(fd); 241 | perror("Failed to set socket properties"); 242 | return (-1); 243 | } 244 | #endif 
245 | 246 | return (fd); 247 | } 248 | 249 | /* forward declare lthread_recv for use in readline */ 250 | ssize_t lthread_recv(int fd, void *buf, size_t buf_len, int flags, 251 | uint64_t timeout); 252 | 253 | ssize_t 254 | lthread_readline(int fd, char **buf, size_t max, uint64_t timeout) 255 | { 256 | size_t cur = 0; 257 | ssize_t r = 0; 258 | size_t total_read = 0; 259 | char *data = NULL; 260 | 261 | data = calloc(1, max + 1); 262 | if (data == NULL) 263 | return (-1); 264 | 265 | while (total_read < max) { 266 | r = lthread_recv(fd, data + total_read, 1, 0, timeout); 267 | 268 | if (r == 0 || r == -2 || r == -1) { 269 | free(data); 270 | return (r); 271 | } 272 | 273 | total_read += 1; 274 | if (data[cur++] == '\n') 275 | break; 276 | } 277 | 278 | *buf = data; 279 | 280 | return (total_read); 281 | } 282 | 283 | int 284 | lthread_pipe(int fildes[2]) 285 | { 286 | int ret = 0; 287 | 288 | ret = pipe(fildes); 289 | if (ret != 0) 290 | return (ret); 291 | 292 | ret = fcntl(fildes[0], F_SETFL, O_NONBLOCK); 293 | if (ret != 0) 294 | goto err; 295 | 296 | ret = fcntl(fildes[1], F_SETFL, O_NONBLOCK); 297 | if (ret != 0) 298 | goto err; 299 | 300 | return (0); 301 | 302 | err: 303 | close(fildes[0]); 304 | close(fildes[1]); 305 | return (ret); 306 | } 307 | 308 | LTHREAD_WAIT(int lthread_wait_read(int fd, int timeout_ms), LT_EV_READ); 309 | LTHREAD_WAIT(int lthread_wait_write(int fd, int timeout_ms), LT_EV_WRITE); 310 | 311 | LTHREAD_RECV( 312 | ssize_t lthread_recv(int fd, void *buf, size_t length, int flags, 313 | uint64_t timeout), 314 | recv(fd, buf, length, flags FLAG) 315 | ) 316 | 317 | LTHREAD_RECV( 318 | ssize_t lthread_read(int fd, void *buf, size_t length, uint64_t timeout), 319 | read(fd, buf, length) 320 | ) 321 | 322 | LTHREAD_RECV_EXACT( 323 | ssize_t lthread_recv_exact(int fd, void *buf, size_t length, int flags, 324 | uint64_t timeout), 325 | recv(fd, buf + recvd, length - recvd, flags FLAG) 326 | ) 327 | 328 | LTHREAD_RECV_EXACT( 329 | ssize_t 
lthread_read_exact(int fd, void *buf, size_t length, 330 | uint64_t timeout), 331 | read(fd, buf + recvd, length - recvd) 332 | ) 333 | 334 | LTHREAD_RECV( 335 | ssize_t lthread_recvmsg(int fd, struct msghdr *message, int flags, 336 | uint64_t timeout), 337 | recvmsg(fd, message, flags FLAG) 338 | ) 339 | 340 | LTHREAD_RECV( 341 | ssize_t lthread_recvfrom(int fd, void *buf, size_t length, int flags, 342 | struct sockaddr *address, socklen_t *address_len, uint64_t timeout), 343 | recvfrom(fd, buf, length, flags FLAG, address, address_len) 344 | ) 345 | 346 | LTHREAD_SEND( 347 | ssize_t lthread_send(int fd, const void *buf, size_t length, int flags), 348 | send(fd, ((char *)buf) + sent, length - sent, flags FLAG) 349 | ) 350 | 351 | LTHREAD_SEND( 352 | ssize_t lthread_write(int fd, const void *buf, size_t length), 353 | write(fd, ((char *)buf) + sent, length - sent) 354 | ) 355 | 356 | LTHREAD_SEND_ONCE( 357 | ssize_t lthread_sendmsg(int fd, const struct msghdr *message, int flags), 358 | sendmsg(fd, message, flags FLAG) 359 | ) 360 | 361 | LTHREAD_SEND_ONCE( 362 | ssize_t lthread_sendto(int fd, const void *buf, size_t length, int flags, 363 | const struct sockaddr *dest_addr, socklen_t dest_len), 364 | sendto(fd, buf, length, flags FLAG, dest_addr, dest_len) 365 | ) 366 | 367 | int 368 | lthread_connect(int fd, struct sockaddr *name, socklen_t namelen, 369 | uint64_t timeout) 370 | { 371 | 372 | int ret = 0; 373 | struct lthread *lt = lthread_get_sched()->current_lthread; 374 | 375 | while (1) { 376 | _lthread_renice(lt); 377 | ret = connect(fd, name, namelen); 378 | if (ret == 0) 379 | break; 380 | if (ret == -1 && (errno == EAGAIN || 381 | errno == EWOULDBLOCK || 382 | errno == EINPROGRESS)) { 383 | _lthread_sched_event(lt, fd, LT_EV_WRITE, timeout); 384 | if (lt->state & BIT(LT_ST_EXPIRED)) 385 | return (-2); 386 | 387 | continue; 388 | } else { 389 | break; 390 | } 391 | } 392 | 393 | return (ret); 394 | } 395 | 396 | ssize_t 397 | lthread_writev(int fd, struct 
iovec *iov, int iovcnt) 398 | { 399 | ssize_t total = 0; 400 | int iov_index = 0; 401 | struct lthread *lt = lthread_get_sched()->current_lthread; 402 | 403 | do { 404 | _lthread_renice(lt); 405 | ssize_t n = writev(fd, iov + iov_index, iovcnt - iov_index); 406 | if (n > 0) { 407 | int i = 0; 408 | total += n; 409 | for (i = iov_index; i < iovcnt && n > 0; i++) { 410 | if (n < iov[i].iov_len) { 411 | iov[i].iov_base += n; 412 | iov[i].iov_len -= n; 413 | n = 0; 414 | } else { 415 | n -= iov[i].iov_len; 416 | iov_index++; 417 | } 418 | } 419 | } else if (-1 == n && EAGAIN == errno) { 420 | _lthread_sched_event(lt, fd, LT_EV_WRITE, 0); 421 | } else { 422 | return (n); 423 | } 424 | } while (iov_index < iovcnt); 425 | 426 | return (total); 427 | } 428 | 429 | #ifdef __FreeBSD__ 430 | int 431 | lthread_sendfile(int fd, int s, off_t offset, size_t nbytes, 432 | struct sf_hdtr *hdtr) 433 | { 434 | 435 | off_t sbytes = 0; 436 | int ret = 0; 437 | struct lthread *lt = lthread_get_sched()->current_lthread; 438 | 439 | do { 440 | ret = sendfile(fd, s, offset, nbytes, hdtr, &sbytes, 0); 441 | 442 | if (ret == 0) 443 | return (0); 444 | 445 | if (sbytes) 446 | offset += sbytes; 447 | 448 | sbytes = 0; 449 | 450 | if (ret == -1 && EAGAIN == errno) 451 | _lthread_sched_event(lt, s, LT_EV_WRITE, 0); 452 | else if (ret == -1) 453 | return (-1); 454 | 455 | } while (1); 456 | } 457 | #endif 458 | 459 | 460 | int 461 | lthread_poll(struct pollfd *fds, nfds_t nfds, int timeout) 462 | { 463 | int i = 0; 464 | if (timeout == 0) 465 | return poll(fds, nfds, 0); 466 | 467 | struct lthread *lt = lthread_get_sched()->current_lthread; 468 | /* schedule fd events, pass -1 to avoid yielding */ 469 | for (i = 0; i < nfds; i++) { 470 | if (fds[i].events & POLLIN) 471 | _lthread_sched_event(lt, fds[i].fd, LT_EV_READ, -1); 472 | else if (fds[i].events & POLLOUT) 473 | _lthread_sched_event(lt, fds[i].fd, LT_EV_WRITE, -1); 474 | else 475 | assert(0); 476 | } 477 | 478 | lt->ready_fds = 0; 479 | 
lt->fd_wait = -1; 480 | /* clear wait_read/write flags set by _lthread_sched_event */ 481 | lt->state &= CLEARBIT(LT_ST_WAIT_READ); 482 | lt->state &= CLEARBIT(LT_ST_WAIT_WRITE); 483 | /* we are waiting on multiple fd events */ 484 | lt->state |= BIT(LT_ST_WAIT_MULTI); 485 | 486 | lt->pollfds = fds; 487 | lt->nfds = nfds; 488 | 489 | /* go to sleep until one or more of the fds are ready or until we timeout */ 490 | _lthread_sched_sleep(lt, (uint64_t)timeout); 491 | 492 | lt->pollfds = NULL; 493 | lt->nfds = 0; 494 | lt->state &= CLEARBIT(LT_ST_WAIT_MULTI); 495 | 496 | if (lt->state & BIT(LT_ST_EXPIRED)) 497 | return (0); 498 | 499 | return (lt->ready_fds); 500 | } 501 | -------------------------------------------------------------------------------- /src/queue.h: -------------------------------------------------------------------------------- 1 | /*- 2 | * Copyright (c) 1991, 1993 3 | * The Regents of the University of California. All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 4. Neither the name of the University nor the names of its contributors 14 | * may be used to endorse or promote products derived from this software 15 | * without specific prior written permission. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 | * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 | * SUCH DAMAGE. 28 | * 29 | * @(#)queue.h 8.5 (Berkeley) 8/20/94 30 | * $FreeBSD: src/sys/sys/queue.h,v 1.72.2.1.2.1 2009/10/25 01:10:29 kensmith Exp $ 31 | */ 32 | 33 | #ifndef _SYS_QUEUE_H_ 34 | #define _SYS_QUEUE_H_ 35 | 36 | #include 37 | 38 | /* 39 | * This file defines four types of data structures: singly-linked lists, 40 | * singly-linked tail queues, lists and tail queues. 41 | * 42 | * A singly-linked list is headed by a single forward pointer. The elements 43 | * are singly linked for minimum space and pointer manipulation overhead at 44 | * the expense of O(n) removal for arbitrary elements. New elements can be 45 | * added to the list after an existing element or at the head of the list. 46 | * Elements being removed from the head of the list should use the explicit 47 | * macro for this purpose for optimum efficiency. A singly-linked list may 48 | * only be traversed in the forward direction. Singly-linked lists are ideal 49 | * for applications with large datasets and few or no removals or for 50 | * implementing a LIFO queue. 51 | * 52 | * A singly-linked tail queue is headed by a pair of pointers, one to the 53 | * head of the list and the other to the tail of the list. The elements are 54 | * singly linked for minimum space and pointer manipulation overhead at the 55 | * expense of O(n) removal for arbitrary elements. 
New elements can be added 56 | * to the list after an existing element, at the head of the list, or at the 57 | * end of the list. Elements being removed from the head of the tail queue 58 | * should use the explicit macro for this purpose for optimum efficiency. 59 | * A singly-linked tail queue may only be traversed in the forward direction. 60 | * Singly-linked tail queues are ideal for applications with large datasets 61 | * and few or no removals or for implementing a FIFO queue. 62 | * 63 | * A list is headed by a single forward pointer (or an array of forward 64 | * pointers for a hash table header). The elements are doubly linked 65 | * so that an arbitrary element can be removed without a need to 66 | * traverse the list. New elements can be added to the list before 67 | * or after an existing element or at the head of the list. A list 68 | * may only be traversed in the forward direction. 69 | * 70 | * A tail queue is headed by a pair of pointers, one to the head of the 71 | * list and the other to the tail of the list. The elements are doubly 72 | * linked so that an arbitrary element can be removed without a need to 73 | * traverse the list. New elements can be added to the list before or 74 | * after an existing element, at the head of the list, or at the end of 75 | * the list. A tail queue may be traversed in either direction. 76 | * 77 | * For details on the use of these macros, see the queue(3) manual page. 
78 | * 79 | * 80 | * SLIST LIST STAILQ TAILQ 81 | * _HEAD + + + + 82 | * _HEAD_INITIALIZER + + + + 83 | * _ENTRY + + + + 84 | * _INIT + + + + 85 | * _EMPTY + + + + 86 | * _FIRST + + + + 87 | * _NEXT + + + + 88 | * _PREV - - - + 89 | * _LAST - - + + 90 | * _FOREACH + + + + 91 | * _FOREACH_SAFE + + + + 92 | * _FOREACH_REVERSE - - - + 93 | * _FOREACH_REVERSE_SAFE - - - + 94 | * _INSERT_HEAD + + + + 95 | * _INSERT_BEFORE - + - + 96 | * _INSERT_AFTER + + + + 97 | * _INSERT_TAIL - - + + 98 | * _CONCAT - - + + 99 | * _REMOVE_AFTER + - + - 100 | * _REMOVE_HEAD + - + - 101 | * _REMOVE + + + + 102 | * 103 | */ 104 | #ifdef QUEUE_MACRO_DEBUG 105 | /* Store the last 2 places the queue element or head was altered */ 106 | struct qm_trace { 107 | char * lastfile; 108 | int lastline; 109 | char * prevfile; 110 | int prevline; 111 | }; 112 | 113 | #define TRACEBUF struct qm_trace trace; 114 | #define TRASHIT(x) do {(x) = (void *)-1;} while (0) 115 | 116 | #define QMD_TRACE_HEAD(head) do { \ 117 | (head)->trace.prevline = (head)->trace.lastline; \ 118 | (head)->trace.prevfile = (head)->trace.lastfile; \ 119 | (head)->trace.lastline = __LINE__; \ 120 | (head)->trace.lastfile = __FILE__; \ 121 | } while (0) 122 | 123 | #define QMD_TRACE_ELEM(elem) do { \ 124 | (elem)->trace.prevline = (elem)->trace.lastline; \ 125 | (elem)->trace.prevfile = (elem)->trace.lastfile; \ 126 | (elem)->trace.lastline = __LINE__; \ 127 | (elem)->trace.lastfile = __FILE__; \ 128 | } while (0) 129 | 130 | #else 131 | #define QMD_TRACE_ELEM(elem) 132 | #define QMD_TRACE_HEAD(head) 133 | #define TRACEBUF 134 | #define TRASHIT(x) 135 | #endif /* QUEUE_MACRO_DEBUG */ 136 | 137 | /* 138 | * Singly-linked List declarations. 
139 | */ 140 | #define SLIST_HEAD(name, type) \ 141 | struct name { \ 142 | struct type *slh_first; /* first element */ \ 143 | } 144 | 145 | #define SLIST_HEAD_INITIALIZER(head) \ 146 | { NULL } 147 | 148 | #define SLIST_ENTRY(type) \ 149 | struct { \ 150 | struct type *sle_next; /* next element */ \ 151 | } 152 | 153 | /* 154 | * Singly-linked List functions. 155 | */ 156 | #define SLIST_EMPTY(head) ((head)->slh_first == NULL) 157 | 158 | #define SLIST_FIRST(head) ((head)->slh_first) 159 | 160 | #define SLIST_FOREACH(var, head, field) \ 161 | for ((var) = SLIST_FIRST((head)); \ 162 | (var); \ 163 | (var) = SLIST_NEXT((var), field)) 164 | 165 | #define SLIST_FOREACH_SAFE(var, head, field, tvar) \ 166 | for ((var) = SLIST_FIRST((head)); \ 167 | (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ 168 | (var) = (tvar)) 169 | 170 | #define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ 171 | for ((varp) = &SLIST_FIRST((head)); \ 172 | ((var) = *(varp)) != NULL; \ 173 | (varp) = &SLIST_NEXT((var), field)) 174 | 175 | #define SLIST_INIT(head) do { \ 176 | SLIST_FIRST((head)) = NULL; \ 177 | } while (0) 178 | 179 | #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ 180 | SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ 181 | SLIST_NEXT((slistelm), field) = (elm); \ 182 | } while (0) 183 | 184 | #define SLIST_INSERT_HEAD(head, elm, field) do { \ 185 | SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ 186 | SLIST_FIRST((head)) = (elm); \ 187 | } while (0) 188 | 189 | #define SLIST_NEXT(elm, field) ((elm)->field.sle_next) 190 | 191 | #define SLIST_REMOVE(head, elm, type, field) do { \ 192 | if (SLIST_FIRST((head)) == (elm)) { \ 193 | SLIST_REMOVE_HEAD((head), field); \ 194 | } \ 195 | else { \ 196 | struct type *curelm = SLIST_FIRST((head)); \ 197 | while (SLIST_NEXT(curelm, field) != (elm)) \ 198 | curelm = SLIST_NEXT(curelm, field); \ 199 | SLIST_REMOVE_AFTER(curelm, field); \ 200 | } \ 201 | TRASHIT((elm)->field.sle_next); \ 202 | } while (0) 203 | 204 | 
#define SLIST_REMOVE_AFTER(elm, field) do {				\
	SLIST_NEXT(elm, field) =					\
	    SLIST_NEXT(SLIST_NEXT(elm, field), field);			\
} while (0)

#define SLIST_REMOVE_HEAD(head, field) do {				\
	SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field);	\
} while (0)

/*
 * Singly-linked Tail queue declarations.
 *
 * stqh_last points at the tail element's stqe_next link (or at stqh_first
 * when empty), which is what makes O(1) tail insertion possible.
 */
#define STAILQ_HEAD(name, type)						\
struct name {								\
	struct type *stqh_first;/* first element */			\
	struct type **stqh_last;/* addr of last next element */		\
}

#define STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

#define STAILQ_ENTRY(type)						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}

/*
 * Singly-linked Tail queue functions.
 */
/* Appends head2's elements to head1 and leaves head2 empty.  O(1). */
#define STAILQ_CONCAT(head1, head2) do {				\
	if (!STAILQ_EMPTY((head2))) {					\
		*(head1)->stqh_last = (head2)->stqh_first;		\
		(head1)->stqh_last = (head2)->stqh_last;		\
		STAILQ_INIT((head2));					\
	}								\
} while (0)

#define STAILQ_EMPTY(head)	((head)->stqh_first == NULL)

#define STAILQ_FIRST(head)	((head)->stqh_first)

#define STAILQ_FOREACH(var, head, field)				\
	for((var) = STAILQ_FIRST((head));				\
	   (var);							\
	   (var) = STAILQ_NEXT((var), field))

/* Removal-safe variant: (tvar) caches the successor before the body runs. */
#define STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = STAILQ_FIRST((head));				\
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define STAILQ_INIT(head) do {						\
	STAILQ_FIRST((head)) = NULL;					\
	(head)->stqh_last = &STAILQ_FIRST((head));			\
} while (0)

#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {		\
	if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
	STAILQ_NEXT((tqelm), field) = (elm);				\
} while (0)

#define STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
	STAILQ_FIRST((head)) = (elm);					\
} while (0)

#define STAILQ_INSERT_TAIL(head, elm, field) do {			\
	STAILQ_NEXT((elm), field) = NULL;				\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &STAILQ_NEXT((elm), field);			\
} while (0)

/*
 * O(1): recovers the tail element from stqh_last, which points at the tail's
 * embedded stqe_next field; subtracting the field offset yields the element.
 */
#define STAILQ_LAST(head, type, field)					\
	(STAILQ_EMPTY((head)) ?						\
		NULL :							\
	        ((struct type *)(void *)				\
		((char *)((head)->stqh_last) - __offsetof(struct type, field))))

#define STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)

/* O(n): walks from the head to find (elm)'s predecessor.  (elm) must be on
 * the queue. */
#define STAILQ_REMOVE(head, elm, type, field) do {			\
	if (STAILQ_FIRST((head)) == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = STAILQ_FIRST((head));		\
		while (STAILQ_NEXT(curelm, field) != (elm))		\
			curelm = STAILQ_NEXT(curelm, field);		\
		STAILQ_REMOVE_AFTER(head, curelm, field);		\
	}								\
	TRASHIT((elm)->field.stqe_next);				\
} while (0)

#define STAILQ_REMOVE_HEAD(head, field) do {				\
	if ((STAILQ_FIRST((head)) =					\
	     STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL)		\
		(head)->stqh_last = &STAILQ_FIRST((head));		\
} while (0)

#define STAILQ_REMOVE_AFTER(head, elm, field) do {			\
	if ((STAILQ_NEXT(elm, field) =					\
	     STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
} while (0)

/* Exchanges the contents of two queues; empty queues get their stqh_last
 * re-pointed at their own stqh_first. */
#define STAILQ_SWAP(head1, head2, type) do {				\
	struct type *swap_first = STAILQ_FIRST(head1);			\
	struct type **swap_last = (head1)->stqh_last;			\
	STAILQ_FIRST(head1) = STAILQ_FIRST(head2);			\
	(head1)->stqh_last = (head2)->stqh_last;			\
	STAILQ_FIRST(head2) = swap_first;				\
	(head2)->stqh_last = swap_last;					\
	if (STAILQ_EMPTY(head1))					\
		(head1)->stqh_last = &STAILQ_FIRST(head1);		\
	if (STAILQ_EMPTY(head2))					\
		(head2)->stqh_last = &STAILQ_FIRST(head2);		\
} while (0)


/*
 * List declarations.
 *
 * Doubly linked via le_next / le_prev, where le_prev is the ADDRESS of the
 * previous element's le_next link (or of lh_first), giving O(1) removal
 * without a head pointer.
 */
#define LIST_HEAD(name, type)						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}

#define LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define LIST_ENTRY(type)						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}

/*
 * List functions.
 */

/* Kernel-only invariant checks; compile to nothing in userland builds. */
#if (defined(_KERNEL) && defined(INVARIANTS))
#define QMD_LIST_CHECK_HEAD(head, field) do {				\
	if (LIST_FIRST((head)) != NULL &&				\
	    LIST_FIRST((head))->field.le_prev !=			\
	     &LIST_FIRST((head)))					\
		panic("Bad list head %p first->prev != head", (head));	\
} while (0)

#define QMD_LIST_CHECK_NEXT(elm, field) do {				\
	if (LIST_NEXT((elm), field) != NULL &&				\
	    LIST_NEXT((elm), field)->field.le_prev !=			\
	     &((elm)->field.le_next))					\
		panic("Bad link elm %p next->prev != elm", (elm));	\
} while (0)

#define QMD_LIST_CHECK_PREV(elm, field) do {				\
	if (*(elm)->field.le_prev != (elm))				\
		panic("Bad link elm %p prev->next != elm", (elm));	\
} while (0)
#else
#define QMD_LIST_CHECK_HEAD(head, field)
#define QMD_LIST_CHECK_NEXT(elm, field)
#define QMD_LIST_CHECK_PREV(elm, field)
#endif /* (_KERNEL && INVARIANTS) */

#define LIST_EMPTY(head)	((head)->lh_first == NULL)

#define LIST_FIRST(head)	((head)->lh_first)

#define LIST_FOREACH(var, head, field)					\
	for ((var) = LIST_FIRST((head));				\
	    (var);							\
	    (var) = LIST_NEXT((var), field))

/* Removal-safe variant: (tvar) caches the successor before the body runs. */
#define LIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = LIST_FIRST((head));				\
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define LIST_INIT(head) do {						\
	LIST_FIRST((head)) = NULL;					\
} while (0)

#define LIST_INSERT_AFTER(listelm, elm, field) do {			\
	QMD_LIST_CHECK_NEXT(listelm, field);				\
	if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
		LIST_NEXT((listelm), field)->field.le_prev =		\
		    &LIST_NEXT((elm), field);				\
	LIST_NEXT((listelm), field) = (elm);				\
	(elm)->field.le_prev = &LIST_NEXT((listelm), field);		\
} while (0)

#define LIST_INSERT_BEFORE(listelm, elm, field) do {			\
	QMD_LIST_CHECK_PREV(listelm, field);				\
	(elm)->field.le_prev = (listelm)->field.le_prev;		\
	LIST_NEXT((elm), field) = (listelm);				\
	*(listelm)->field.le_prev = (elm);				\
	(listelm)->field.le_prev = &LIST_NEXT((elm), field);		\
} while (0)

#define LIST_INSERT_HEAD(head, elm, field) do {				\
	QMD_LIST_CHECK_HEAD((head), field);				\
	if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL)	\
		LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
	LIST_FIRST((head)) = (elm);					\
	(elm)->field.le_prev = &LIST_FIRST((head));			\
} while (0)

#define LIST_NEXT(elm, field)	((elm)->field.le_next)

/* O(1) removal thanks to le_prev; does not need the list head. */
#define LIST_REMOVE(elm, field) do {					\
	QMD_LIST_CHECK_NEXT(elm, field);				\
	QMD_LIST_CHECK_PREV(elm, field);				\
	if (LIST_NEXT((elm), field) != NULL)				\
		LIST_NEXT((elm), field)->field.le_prev = 		\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = LIST_NEXT((elm), field);		\
	TRASHIT((elm)->field.le_next);					\
	TRASHIT((elm)->field.le_prev);					\
} while (0)

#define LIST_SWAP(head1, head2, type, field) do {			\
	struct type *swap_tmp = LIST_FIRST((head1));			\
	LIST_FIRST((head1)) = LIST_FIRST((head2));			\
	LIST_FIRST((head2)) = swap_tmp;					\
	if ((swap_tmp = LIST_FIRST((head1))) != NULL)			\
		swap_tmp->field.le_prev = &LIST_FIRST((head1));		\
	if ((swap_tmp = LIST_FIRST((head2))) != NULL)			\
		swap_tmp->field.le_prev = &LIST_FIRST((head2));		\
} while (0)

/*
 * Tail queue declarations.
 *
 * Doubly linked with an O(1) tail: tqh_last / tqe_prev hold the ADDRESS of
 * the previous next-link, which is also what makes the TAILQ_LAST /
 * TAILQ_PREV struct-name casts below work.
 */
#define TAILQ_HEAD(name, type)						\
struct name {								\
	struct type *tqh_first;	/* first element */			\
	struct type **tqh_last;	/* addr of last next element */		\
	TRACEBUF							\
}

#define TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

#define TAILQ_ENTRY(type)						\
struct {								\
	struct type *tqe_next;	/* next element */			\
	struct type **tqe_prev;	/* address of previous next element */	\
	TRACEBUF							\
}

/*
 * Tail queue functions.
 */
/* Kernel-only invariant checks; compile to nothing in userland builds. */
#if (defined(_KERNEL) && defined(INVARIANTS))
#define QMD_TAILQ_CHECK_HEAD(head, field) do {				\
	if (!TAILQ_EMPTY(head) &&					\
	    TAILQ_FIRST((head))->field.tqe_prev !=			\
	     &TAILQ_FIRST((head)))					\
		panic("Bad tailq head %p first->prev != head", (head));	\
} while (0)

#define QMD_TAILQ_CHECK_TAIL(head, field) do {				\
	if (*(head)->tqh_last != NULL)					\
		panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head));	\
} while (0)

#define QMD_TAILQ_CHECK_NEXT(elm, field) do {				\
	if (TAILQ_NEXT((elm), field) != NULL &&				\
	    TAILQ_NEXT((elm), field)->field.tqe_prev !=			\
	     &((elm)->field.tqe_next))					\
		panic("Bad link elm %p next->prev != elm", (elm));	\
} while (0)

#define QMD_TAILQ_CHECK_PREV(elm, field) do {				\
	if (*(elm)->field.tqe_prev != (elm))				\
		panic("Bad link elm %p prev->next != elm", (elm));	\
} while (0)
#else
#define QMD_TAILQ_CHECK_HEAD(head, field)
/* NOTE(review): stub parameter is named "headname" vs "field" above; this
 * matches upstream FreeBSD and is harmless since the stub expands empty. */
#define QMD_TAILQ_CHECK_TAIL(head, headname)
#define QMD_TAILQ_CHECK_NEXT(elm, field)
#define QMD_TAILQ_CHECK_PREV(elm, field)
#endif /* (_KERNEL && INVARIANTS) */

/* Appends head2's elements to head1 and leaves head2 empty.  O(1). */
#define TAILQ_CONCAT(head1, head2, field) do {				\
	if (!TAILQ_EMPTY(head2)) {					\
		*(head1)->tqh_last = (head2)->tqh_first;		\
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;	\
		(head1)->tqh_last = (head2)->tqh_last;			\
		TAILQ_INIT((head2));					\
		QMD_TRACE_HEAD(head1);					\
		QMD_TRACE_HEAD(head2);					\
	}								\
} while (0)

#define TAILQ_EMPTY(head)	((head)->tqh_first == NULL)

#define TAILQ_FIRST(head)	((head)->tqh_first)

#define TAILQ_FOREACH(var, head, field)					\
	for ((var) = TAILQ_FIRST((head));				\
	    (var);							\
	    (var) = TAILQ_NEXT((var), field))

/* Removal-safe variant: (tvar) caches the successor before the body runs. */
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = TAILQ_FIRST((head));				\
	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var);							\
	    (var) = TAILQ_PREV((var), headname, field))

#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)	\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);	\
	    (var) = (tvar))

#define TAILQ_INIT(head) do {						\
	TAILQ_FIRST((head)) = NULL;					\
	(head)->tqh_last = &TAILQ_FIRST((head));			\
	QMD_TRACE_HEAD(head);						\
} while (0)

#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QMD_TAILQ_CHECK_NEXT(listelm, field);				\
	if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
		TAILQ_NEXT((elm), field)->field.tqe_prev = 		\
		    &TAILQ_NEXT((elm), field);				\
	else {								\
		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
		QMD_TRACE_HEAD(head);					\
	}								\
	TAILQ_NEXT((listelm), field) = (elm);				\
	(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field);		\
	QMD_TRACE_ELEM(&(elm)->field);					\
	QMD_TRACE_ELEM(&listelm->field);				\
} while (0)

#define TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	QMD_TAILQ_CHECK_PREV(listelm, field);				\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	TAILQ_NEXT((elm), field) = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field);		\
	QMD_TRACE_ELEM(&(elm)->field);					\
	QMD_TRACE_ELEM(&listelm->field);				\
} while (0)

#define TAILQ_INSERT_HEAD(head, elm, field) do {			\
	QMD_TAILQ_CHECK_HEAD(head, field);				\
	if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL)	\
		TAILQ_FIRST((head))->field.tqe_prev =			\
		    &TAILQ_NEXT((elm), field);				\
	else								\
		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
	TAILQ_FIRST((head)) = (elm);					\
	(elm)->field.tqe_prev = &TAILQ_FIRST((head));			\
	QMD_TRACE_HEAD(head);						\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)

#define TAILQ_INSERT_TAIL(head, elm, field) do {			\
	QMD_TAILQ_CHECK_TAIL(head, field);				\
	TAILQ_NEXT((elm), field) = NULL;				\
	(elm)->field.tqe_prev = (head)->tqh_last;			\
	*(head)->tqh_last = (elm);					\
	(head)->tqh_last = &TAILQ_NEXT((elm), field);			\
	QMD_TRACE_HEAD(head);						\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)

/*
 * TAILQ_LAST / TAILQ_PREV exploit that a TAILQ_ENTRY's (next, prev-addr)
 * pair has the same layout as the head's (first, last) pair, so a link
 * field can be reinterpreted as a struct headname to walk backwards.
 */
#define TAILQ_LAST(head, headname)					\
	(*(((struct headname *)((head)->tqh_last))->tqh_last))

#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)

#define TAILQ_PREV(elm, headname, field)				\
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))

/* O(1) removal; (elm) must be on the queue. */
#define TAILQ_REMOVE(head, elm, field) do {				\
	QMD_TAILQ_CHECK_NEXT(elm, field);				\
	QMD_TAILQ_CHECK_PREV(elm, field);				\
	if ((TAILQ_NEXT((elm), field)) != NULL)				\
		TAILQ_NEXT((elm), field)->field.tqe_prev = 		\
		    (elm)->field.tqe_prev;				\
	else {								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
		QMD_TRACE_HEAD(head);					\
	}								\
	*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field);		\
	TRASHIT((elm)->field.tqe_next);					\
	TRASHIT((elm)->field.tqe_prev);					\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)

/* Exchanges the contents of two queues, re-pointing self-referential
 * last-links for whichever side ends up empty. */
#define TAILQ_SWAP(head1, head2, type, field) do {			\
	struct type *swap_first = (head1)->tqh_first;			\
	struct type **swap_last = (head1)->tqh_last;			\
	(head1)->tqh_first = (head2)->tqh_first;			\
	(head1)->tqh_last = (head2)->tqh_last;				\
	(head2)->tqh_first = swap_first;				\
	(head2)->tqh_last = swap_last;					\
	if ((swap_first = (head1)->tqh_first) != NULL)			\
		swap_first->field.tqe_prev = &(head1)->tqh_first;	\
	else								\
		(head1)->tqh_last = &(head1)->tqh_first;		\
	if ((swap_first = (head2)->tqh_first) != NULL)			\
		swap_first->field.tqe_prev = &(head2)->tqh_first;	\
	else								\
		(head2)->tqh_last = &(head2)->tqh_first;		\
} while (0)

#endif /* !_SYS_QUEUE_H_ */
--------------------------------------------------------------------------------
/src/tree.h:
--------------------------------------------------------------------------------
/*-
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_SYS_TREE_H_
#define	_SYS_TREE_H_

#include <sys/cdefs.h>	/* NOTE(review): include target stripped by HTML
			 * extraction; <sys/cdefs.h> inferred from the
			 * __unused usage below -- confirm against upstream */

/*
 * This file defines data structures for different types of trees:
 * splay trees and red-black trees.
 *
 * A splay tree is a self-organizing data structure.  Every operation
 * on the tree causes a splay to happen.  The splay moves the requested
 * node to the root of the tree and partly rebalances it.
 *
 * This has the benefit that request locality causes faster lookups as
 * the requested nodes move to the top of the tree.  On the other hand,
 * every lookup causes memory writes.
 *
 * The Balance Theorem bounds the total access time for m operations
 * and n inserts on an initially empty tree as O((m + n)lg n).  The
 * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
 *
 * A red-black tree is a binary search tree with the node color as an
 * extra attribute.  It fulfills a set of conditions:
 *	- every search path from the root to a leaf consists of the
 *	  same number of black nodes,
 *	- each red node (except for the root) has a black parent,
 *	- each leaf node is black.
 *
 * Every operation on a red-black tree is bounded as O(lg n).
 * The maximum height of a red-black tree is 2lg (n+1).
 */

#define SPLAY_HEAD(name, type)						\
struct name {								\
	struct type *sph_root; /* root of the tree */			\
}

#define SPLAY_INITIALIZER(root)						\
	{ NULL }

#define SPLAY_INIT(root) do {						\
	(root)->sph_root = NULL;					\
} while (/*CONSTCOND*/ 0)

#define SPLAY_ENTRY(type)						\
struct {								\
	struct type *spe_left; /* left element */			\
	struct type *spe_right; /* right element */			\
}

#define SPLAY_LEFT(elm, field)		(elm)->field.spe_left
#define SPLAY_RIGHT(elm, field)		(elm)->field.spe_right
#define SPLAY_ROOT(head)		(head)->sph_root
#define SPLAY_EMPTY(head)		(SPLAY_ROOT(head) == NULL)

/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
#define SPLAY_ROTATE_RIGHT(head, tmp, field) do {			\
	SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field);	\
	SPLAY_RIGHT(tmp, field) = (head)->sph_root;			\
	(head)->sph_root = tmp;						\
} while (/*CONSTCOND*/ 0)

#define SPLAY_ROTATE_LEFT(head, tmp, field) do {			\
	SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field);	\
	SPLAY_LEFT(tmp, field) = (head)->sph_root;			\
	(head)->sph_root = tmp;						\
} while (/*CONSTCOND*/ 0)

/* Link the current root under the right-subtree accumulator and descend
 * left (top-down splay step). */
#define SPLAY_LINKLEFT(head, tmp, field) do {				\
	SPLAY_LEFT(tmp, field) = (head)->sph_root;			\
	tmp = (head)->sph_root;						\
	(head)->sph_root = SPLAY_LEFT((head)->sph_root, field);		\
} while (/*CONSTCOND*/ 0)

/* Mirror image of SPLAY_LINKLEFT: accumulate on the left, descend right. */
#define SPLAY_LINKRIGHT(head, tmp, field) do {				\
	SPLAY_RIGHT(tmp, field) = (head)->sph_root;			\
	tmp = (head)->sph_root;						\
	(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);	\
} while (/*CONSTCOND*/ 0)

/* Reattach the accumulated left/right partitions around the new root. */
#define SPLAY_ASSEMBLE(head, node, left, right, field) do {		\
	SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field);	\
	SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
	SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field);	\
	SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field);	\
} while (/*CONSTCOND*/ 0)

/* Generates prototypes and inline functions */

#define SPLAY_PROTOTYPE(name, type, field, cmp)				\
void name##_SPLAY(struct name *, struct type *);			\
void name##_SPLAY_MINMAX(struct name *, int);				\
struct type *name##_SPLAY_INSERT(struct name *, struct type *);		\
struct type *name##_SPLAY_REMOVE(struct name *, struct type *);		\
									\
/* Finds the node with the same key as elm */				\
static __inline struct type *						\
name##_SPLAY_FIND(struct name *head, struct type *elm)			\
{									\
	if (SPLAY_EMPTY(head))						\
		return(NULL);						\
	name##_SPLAY(head, elm);					\
	if ((cmp)(elm, (head)->sph_root) == 0)				\
		return (head->sph_root);				\
	return (NULL);							\
}									\
									\
static __inline struct type *						\
name##_SPLAY_NEXT(struct name *head, struct type *elm)			\
{									\
	name##_SPLAY(head, elm);					\
	if (SPLAY_RIGHT(elm, field) != NULL) {				\
		elm = SPLAY_RIGHT(elm, field);				\
		while (SPLAY_LEFT(elm, field) != NULL) {		\
			elm = SPLAY_LEFT(elm, field);			\
		}							\
	} else								\
		elm = NULL;						\
	return (elm);							\
}									\
									\
static __inline struct type *						\
name##_SPLAY_MIN_MAX(struct name *head, int val)			\
{									\
	name##_SPLAY_MINMAX(head, val);					\
	return (SPLAY_ROOT(head));					\
}

/* Main splay operation.
 * Moves node close to the key of elm to top
 *
 * NOTE(review): in name##_SPLAY_MINMAX below, the inner "if (__comp < 0)"
 * and "if (__comp > 0)" tests are constant within each branch (matching
 * upstream BSD tree.h); they are redundant but harmless, and are kept
 * verbatim since this is vendored code.
 */
#define SPLAY_GENERATE(name, type, field, cmp)				\
struct type *								\
name##_SPLAY_INSERT(struct name *head, struct type *elm)		\
{									\
    if (SPLAY_EMPTY(head)) {						\
	    SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL;	\
    } else {								\
	    int __comp;							\
	    name##_SPLAY(head, elm);					\
	    __comp = (cmp)(elm, (head)->sph_root);			\
	    if(__comp < 0) {						\
		    SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
		    SPLAY_RIGHT(elm, field) = (head)->sph_root;		\
		    SPLAY_LEFT((head)->sph_root, field) = NULL;		\
	    } else if (__comp > 0) {					\
		    SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
		    SPLAY_LEFT(elm, field) = (head)->sph_root;		\
		    SPLAY_RIGHT((head)->sph_root, field) = NULL;	\
	    } else							\
		    return ((head)->sph_root);				\
    }									\
    (head)->sph_root = (elm);						\
    return (NULL);							\
}									\
									\
struct type *								\
name##_SPLAY_REMOVE(struct name *head, struct type *elm)		\
{									\
	struct type *__tmp;						\
	if (SPLAY_EMPTY(head))						\
		return (NULL);						\
	name##_SPLAY(head, elm);					\
	if ((cmp)(elm, (head)->sph_root) == 0) {			\
		if (SPLAY_LEFT((head)->sph_root, field) == NULL) {	\
			(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
		} else {						\
			__tmp = SPLAY_RIGHT((head)->sph_root, field);	\
			(head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
			name##_SPLAY(head, elm);			\
			SPLAY_RIGHT((head)->sph_root, field) = __tmp;	\
		}							\
		return (elm);						\
	}								\
	return (NULL);							\
}									\
									\
void									\
name##_SPLAY(struct name *head, struct type *elm)			\
{									\
	struct type __node, *__left, *__right, *__tmp;			\
	int __comp;							\
\
	SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
	__left = __right = &__node;					\
\
	while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) {		\
		if (__comp < 0) {					\
			__tmp = SPLAY_LEFT((head)->sph_root, field);	\
			if (__tmp == NULL)				\
				break;					\
			if ((cmp)(elm, __tmp) < 0){			\
				SPLAY_ROTATE_RIGHT(head, __tmp, field);	\
				if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
					break;				\
			}						\
			SPLAY_LINKLEFT(head, __right, field);		\
		} else if (__comp > 0) {				\
			__tmp = SPLAY_RIGHT((head)->sph_root, field);	\
			if (__tmp == NULL)				\
				break;					\
			if ((cmp)(elm, __tmp) > 0){			\
				SPLAY_ROTATE_LEFT(head, __tmp, field);	\
				if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
					break;				\
			}						\
			SPLAY_LINKRIGHT(head, __left, field);		\
		}							\
	}								\
	SPLAY_ASSEMBLE(head, &__node, __left, __right, field);		\
}									\
									\
/* Splay with either the minimum or the maximum element			\
 * Used to find minimum or maximum element in tree.			\
 */									\
void name##_SPLAY_MINMAX(struct name *head, int __comp)			\
{									\
	struct type __node, *__left, *__right, *__tmp;			\
\
	SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
	__left = __right = &__node;					\
\
	while (1) {							\
		if (__comp < 0) {					\
			__tmp = SPLAY_LEFT((head)->sph_root, field);	\
			if (__tmp == NULL)				\
				break;					\
			if (__comp < 0){				\
				SPLAY_ROTATE_RIGHT(head, __tmp, field);	\
				if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
					break;				\
			}						\
			SPLAY_LINKLEFT(head, __right, field);		\
		} else if (__comp > 0) {				\
			__tmp = SPLAY_RIGHT((head)->sph_root, field);	\
			if (__tmp == NULL)				\
				break;					\
			if (__comp > 0) {				\
				SPLAY_ROTATE_LEFT(head, __tmp, field);	\
				if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
					break;				\
			}						\
			SPLAY_LINKRIGHT(head, __left, field);		\
		}							\
	}								\
	SPLAY_ASSEMBLE(head, &__node, __left, __right, field);		\
}

/* Sentinel comparison values for name##_SPLAY_MINMAX: splay the minimum
 * (NEGINF) or maximum (INF) element to the root. */
#define SPLAY_NEGINF	-1
#define SPLAY_INF	1

/*
 * Convenience wrappers: dispatch to the functions emitted by
 * SPLAY_PROTOTYPE/SPLAY_GENERATE for tree type "name".
 */
#define SPLAY_INSERT(name, x, y)	name##_SPLAY_INSERT(x, y)
#define SPLAY_REMOVE(name, x, y)	name##_SPLAY_REMOVE(x, y)
#define SPLAY_FIND(name, x, y)		name##_SPLAY_FIND(x, y)
#define SPLAY_NEXT(name, x, y)		name##_SPLAY_NEXT(x, y)
#define SPLAY_MIN(name, x)		(SPLAY_EMPTY(x) ? NULL	\
					: name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
#define SPLAY_MAX(name, x)		(SPLAY_EMPTY(x) ? NULL	\
					: name##_SPLAY_MIN_MAX(x, SPLAY_INF))

/* In-order traversal.  Each step splays, so the tree is restructured while
 * iterating; do not remove elements inside the loop body. */
#define SPLAY_FOREACH(x, name, head)					\
	for ((x) = SPLAY_MIN(name, head);				\
	     (x) != NULL;						\
	     (x) = SPLAY_NEXT(name, head, x))

/* Macros that define a red-black tree */
#define RB_HEAD(name, type)						\
struct name {								\
	struct type *rbh_root; /* root of the tree */			\
}

#define RB_INITIALIZER(root)						\
	{ NULL }

#define RB_INIT(root) do {						\
	(root)->rbh_root = NULL;					\
} while (/*CONSTCOND*/ 0)

/* Node colors for the red-black invariants documented above. */
#define RB_BLACK	0
#define RB_RED		1
#define RB_ENTRY(type)							\
struct {								\
	struct type *rbe_left;		/* left element */		\
	struct type *rbe_right;		/* right element */		\
	struct type *rbe_parent;	/* parent element */		\
	int rbe_color;			/* node color */		\
}

#define RB_LEFT(elm, field)		(elm)->field.rbe_left
#define RB_RIGHT(elm, field)		(elm)->field.rbe_right
#define RB_PARENT(elm, field)		(elm)->field.rbe_parent
#define RB_COLOR(elm, field)		(elm)->field.rbe_color
#define RB_ROOT(head)			(head)->rbh_root
#define RB_EMPTY(head)			(RB_ROOT(head) == NULL)

/* Initialize a freshly inserted node: new nodes start red so insertion
 * cannot violate the black-height invariant. */
#define RB_SET(elm, parent, field) do {					\
	RB_PARENT(elm, field) = parent;					\
	RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL;		\
	RB_COLOR(elm, field) = RB_RED;					\
} while (/*CONSTCOND*/ 0)

#define RB_SET_BLACKRED(black, red, field) do {				\
	RB_COLOR(black, field) = RB_BLACK;				\
	RB_COLOR(red, field) = RB_RED;					\
} while (/*CONSTCOND*/ 0)

#ifndef RB_AUGMENT 333 | #define RB_AUGMENT(x) do {} while (0) 334 | #endif 335 | 336 | #define RB_ROTATE_LEFT(head, elm, tmp, field) do { \ 337 | (tmp) = RB_RIGHT(elm, field); \ 338 | if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \ 339 | RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \ 340 | } \ 341 | RB_AUGMENT(elm); \ 342 | if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \ 343 | if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ 344 | RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ 345 | else \ 346 | RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ 347 | } else \ 348 | (head)->rbh_root = (tmp); \ 349 | RB_LEFT(tmp, field) = (elm); \ 350 | RB_PARENT(elm, field) = (tmp); \ 351 | RB_AUGMENT(tmp); \ 352 | if ((RB_PARENT(tmp, field))) \ 353 | RB_AUGMENT(RB_PARENT(tmp, field)); \ 354 | } while (/*CONSTCOND*/ 0) 355 | 356 | #define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \ 357 | (tmp) = RB_LEFT(elm, field); \ 358 | if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \ 359 | RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \ 360 | } \ 361 | RB_AUGMENT(elm); \ 362 | if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \ 363 | if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ 364 | RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ 365 | else \ 366 | RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ 367 | } else \ 368 | (head)->rbh_root = (tmp); \ 369 | RB_RIGHT(tmp, field) = (elm); \ 370 | RB_PARENT(elm, field) = (tmp); \ 371 | RB_AUGMENT(tmp); \ 372 | if ((RB_PARENT(tmp, field))) \ 373 | RB_AUGMENT(RB_PARENT(tmp, field)); \ 374 | } while (/*CONSTCOND*/ 0) 375 | 376 | /* Generates prototypes and inline functions */ 377 | #define RB_PROTOTYPE(name, type, field, cmp) \ 378 | RB_PROTOTYPE_INTERNAL(name, type, field, cmp,) 379 | #define RB_PROTOTYPE_STATIC(name, type, field, cmp) \ 380 | RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static) 381 | #define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \ 
382 | attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \ 383 | attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\ 384 | attr struct type *name##_RB_REMOVE(struct name *, struct type *); \ 385 | attr struct type *name##_RB_INSERT(struct name *, struct type *); \ 386 | attr struct type *name##_RB_FIND(struct name *, struct type *); \ 387 | attr struct type *name##_RB_NFIND(struct name *, struct type *); \ 388 | attr struct type *name##_RB_NEXT(struct type *); \ 389 | attr struct type *name##_RB_PREV(struct type *); \ 390 | attr struct type *name##_RB_MINMAX(struct name *, int); \ 391 | \ 392 | 393 | /* Main rb operation. 394 | * Moves node close to the key of elm to top 395 | */ 396 | #define RB_GENERATE(name, type, field, cmp) \ 397 | RB_GENERATE_INTERNAL(name, type, field, cmp,) 398 | #define RB_GENERATE_STATIC(name, type, field, cmp) \ 399 | RB_GENERATE_INTERNAL(name, type, field, cmp, __unused static) 400 | #define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \ 401 | attr void \ 402 | name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \ 403 | { \ 404 | struct type *parent, *gparent, *tmp; \ 405 | while ((parent = RB_PARENT(elm, field)) != NULL && \ 406 | RB_COLOR(parent, field) == RB_RED) { \ 407 | gparent = RB_PARENT(parent, field); \ 408 | if (parent == RB_LEFT(gparent, field)) { \ 409 | tmp = RB_RIGHT(gparent, field); \ 410 | if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ 411 | RB_COLOR(tmp, field) = RB_BLACK; \ 412 | RB_SET_BLACKRED(parent, gparent, field);\ 413 | elm = gparent; \ 414 | continue; \ 415 | } \ 416 | if (RB_RIGHT(parent, field) == elm) { \ 417 | RB_ROTATE_LEFT(head, parent, tmp, field);\ 418 | tmp = parent; \ 419 | parent = elm; \ 420 | elm = tmp; \ 421 | } \ 422 | RB_SET_BLACKRED(parent, gparent, field); \ 423 | RB_ROTATE_RIGHT(head, gparent, tmp, field); \ 424 | } else { \ 425 | tmp = RB_LEFT(gparent, field); \ 426 | if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ 427 | RB_COLOR(tmp, 
field) = RB_BLACK; \ 428 | RB_SET_BLACKRED(parent, gparent, field);\ 429 | elm = gparent; \ 430 | continue; \ 431 | } \ 432 | if (RB_LEFT(parent, field) == elm) { \ 433 | RB_ROTATE_RIGHT(head, parent, tmp, field);\ 434 | tmp = parent; \ 435 | parent = elm; \ 436 | elm = tmp; \ 437 | } \ 438 | RB_SET_BLACKRED(parent, gparent, field); \ 439 | RB_ROTATE_LEFT(head, gparent, tmp, field); \ 440 | } \ 441 | } \ 442 | RB_COLOR(head->rbh_root, field) = RB_BLACK; \ 443 | } \ 444 | \ 445 | attr void \ 446 | name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \ 447 | { \ 448 | struct type *tmp; \ 449 | while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \ 450 | elm != RB_ROOT(head)) { \ 451 | if (RB_LEFT(parent, field) == elm) { \ 452 | tmp = RB_RIGHT(parent, field); \ 453 | if (RB_COLOR(tmp, field) == RB_RED) { \ 454 | RB_SET_BLACKRED(tmp, parent, field); \ 455 | RB_ROTATE_LEFT(head, parent, tmp, field);\ 456 | tmp = RB_RIGHT(parent, field); \ 457 | } \ 458 | if ((RB_LEFT(tmp, field) == NULL || \ 459 | RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\ 460 | (RB_RIGHT(tmp, field) == NULL || \ 461 | RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\ 462 | RB_COLOR(tmp, field) = RB_RED; \ 463 | elm = parent; \ 464 | parent = RB_PARENT(elm, field); \ 465 | } else { \ 466 | if (RB_RIGHT(tmp, field) == NULL || \ 467 | RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\ 468 | struct type *oleft; \ 469 | if ((oleft = RB_LEFT(tmp, field)) \ 470 | != NULL) \ 471 | RB_COLOR(oleft, field) = RB_BLACK;\ 472 | RB_COLOR(tmp, field) = RB_RED; \ 473 | RB_ROTATE_RIGHT(head, tmp, oleft, field);\ 474 | tmp = RB_RIGHT(parent, field); \ 475 | } \ 476 | RB_COLOR(tmp, field) = RB_COLOR(parent, field);\ 477 | RB_COLOR(parent, field) = RB_BLACK; \ 478 | if (RB_RIGHT(tmp, field)) \ 479 | RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\ 480 | RB_ROTATE_LEFT(head, parent, tmp, field);\ 481 | elm = RB_ROOT(head); \ 482 | break; \ 483 | } \ 484 | } else { \ 
485 | tmp = RB_LEFT(parent, field); \ 486 | if (RB_COLOR(tmp, field) == RB_RED) { \ 487 | RB_SET_BLACKRED(tmp, parent, field); \ 488 | RB_ROTATE_RIGHT(head, parent, tmp, field);\ 489 | tmp = RB_LEFT(parent, field); \ 490 | } \ 491 | if ((RB_LEFT(tmp, field) == NULL || \ 492 | RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\ 493 | (RB_RIGHT(tmp, field) == NULL || \ 494 | RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\ 495 | RB_COLOR(tmp, field) = RB_RED; \ 496 | elm = parent; \ 497 | parent = RB_PARENT(elm, field); \ 498 | } else { \ 499 | if (RB_LEFT(tmp, field) == NULL || \ 500 | RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\ 501 | struct type *oright; \ 502 | if ((oright = RB_RIGHT(tmp, field)) \ 503 | != NULL) \ 504 | RB_COLOR(oright, field) = RB_BLACK;\ 505 | RB_COLOR(tmp, field) = RB_RED; \ 506 | RB_ROTATE_LEFT(head, tmp, oright, field);\ 507 | tmp = RB_LEFT(parent, field); \ 508 | } \ 509 | RB_COLOR(tmp, field) = RB_COLOR(parent, field);\ 510 | RB_COLOR(parent, field) = RB_BLACK; \ 511 | if (RB_LEFT(tmp, field)) \ 512 | RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\ 513 | RB_ROTATE_RIGHT(head, parent, tmp, field);\ 514 | elm = RB_ROOT(head); \ 515 | break; \ 516 | } \ 517 | } \ 518 | } \ 519 | if (elm) \ 520 | RB_COLOR(elm, field) = RB_BLACK; \ 521 | } \ 522 | \ 523 | attr struct type * \ 524 | name##_RB_REMOVE(struct name *head, struct type *elm) \ 525 | { \ 526 | struct type *child, *parent, *old = elm; \ 527 | int color; \ 528 | if (RB_LEFT(elm, field) == NULL) \ 529 | child = RB_RIGHT(elm, field); \ 530 | else if (RB_RIGHT(elm, field) == NULL) \ 531 | child = RB_LEFT(elm, field); \ 532 | else { \ 533 | struct type *left; \ 534 | elm = RB_RIGHT(elm, field); \ 535 | while ((left = RB_LEFT(elm, field)) != NULL) \ 536 | elm = left; \ 537 | child = RB_RIGHT(elm, field); \ 538 | parent = RB_PARENT(elm, field); \ 539 | color = RB_COLOR(elm, field); \ 540 | if (child) \ 541 | RB_PARENT(child, field) = parent; \ 542 | if (parent) { \ 543 | if 
(RB_LEFT(parent, field) == elm) \ 544 | RB_LEFT(parent, field) = child; \ 545 | else \ 546 | RB_RIGHT(parent, field) = child; \ 547 | RB_AUGMENT(parent); \ 548 | } else \ 549 | RB_ROOT(head) = child; \ 550 | if (RB_PARENT(elm, field) == old) \ 551 | parent = elm; \ 552 | (elm)->field = (old)->field; \ 553 | if (RB_PARENT(old, field)) { \ 554 | if (RB_LEFT(RB_PARENT(old, field), field) == old)\ 555 | RB_LEFT(RB_PARENT(old, field), field) = elm;\ 556 | else \ 557 | RB_RIGHT(RB_PARENT(old, field), field) = elm;\ 558 | RB_AUGMENT(RB_PARENT(old, field)); \ 559 | } else \ 560 | RB_ROOT(head) = elm; \ 561 | RB_PARENT(RB_LEFT(old, field), field) = elm; \ 562 | if (RB_RIGHT(old, field)) \ 563 | RB_PARENT(RB_RIGHT(old, field), field) = elm; \ 564 | if (parent) { \ 565 | left = parent; \ 566 | do { \ 567 | RB_AUGMENT(left); \ 568 | } while ((left = RB_PARENT(left, field)) != NULL); \ 569 | } \ 570 | goto color; \ 571 | } \ 572 | parent = RB_PARENT(elm, field); \ 573 | color = RB_COLOR(elm, field); \ 574 | if (child) \ 575 | RB_PARENT(child, field) = parent; \ 576 | if (parent) { \ 577 | if (RB_LEFT(parent, field) == elm) \ 578 | RB_LEFT(parent, field) = child; \ 579 | else \ 580 | RB_RIGHT(parent, field) = child; \ 581 | RB_AUGMENT(parent); \ 582 | } else \ 583 | RB_ROOT(head) = child; \ 584 | color: \ 585 | if (color == RB_BLACK) \ 586 | name##_RB_REMOVE_COLOR(head, parent, child); \ 587 | return (old); \ 588 | } \ 589 | \ 590 | /* Inserts a node into the RB tree */ \ 591 | attr struct type * \ 592 | name##_RB_INSERT(struct name *head, struct type *elm) \ 593 | { \ 594 | struct type *tmp; \ 595 | struct type *parent = NULL; \ 596 | int comp = 0; \ 597 | tmp = RB_ROOT(head); \ 598 | while (tmp) { \ 599 | parent = tmp; \ 600 | comp = (cmp)(elm, parent); \ 601 | if (comp < 0) \ 602 | tmp = RB_LEFT(tmp, field); \ 603 | else if (comp > 0) \ 604 | tmp = RB_RIGHT(tmp, field); \ 605 | else \ 606 | return (tmp); \ 607 | } \ 608 | RB_SET(elm, parent, field); \ 609 | if (parent != 
NULL) { \ 610 | if (comp < 0) \ 611 | RB_LEFT(parent, field) = elm; \ 612 | else \ 613 | RB_RIGHT(parent, field) = elm; \ 614 | RB_AUGMENT(parent); \ 615 | } else \ 616 | RB_ROOT(head) = elm; \ 617 | name##_RB_INSERT_COLOR(head, elm); \ 618 | return (NULL); \ 619 | } \ 620 | \ 621 | /* Finds the node with the same key as elm */ \ 622 | attr struct type * \ 623 | name##_RB_FIND(struct name *head, struct type *elm) \ 624 | { \ 625 | struct type *tmp = RB_ROOT(head); \ 626 | int comp; \ 627 | while (tmp) { \ 628 | comp = cmp(elm, tmp); \ 629 | if (comp < 0) \ 630 | tmp = RB_LEFT(tmp, field); \ 631 | else if (comp > 0) \ 632 | tmp = RB_RIGHT(tmp, field); \ 633 | else \ 634 | return (tmp); \ 635 | } \ 636 | return (NULL); \ 637 | } \ 638 | \ 639 | /* Finds the first node greater than or equal to the search key */ \ 640 | attr struct type * \ 641 | name##_RB_NFIND(struct name *head, struct type *elm) \ 642 | { \ 643 | struct type *tmp = RB_ROOT(head); \ 644 | struct type *res = NULL; \ 645 | int comp; \ 646 | while (tmp) { \ 647 | comp = cmp(elm, tmp); \ 648 | if (comp < 0) { \ 649 | res = tmp; \ 650 | tmp = RB_LEFT(tmp, field); \ 651 | } \ 652 | else if (comp > 0) \ 653 | tmp = RB_RIGHT(tmp, field); \ 654 | else \ 655 | return (tmp); \ 656 | } \ 657 | return (res); \ 658 | } \ 659 | \ 660 | /* ARGSUSED */ \ 661 | attr struct type * \ 662 | name##_RB_NEXT(struct type *elm) \ 663 | { \ 664 | if (RB_RIGHT(elm, field)) { \ 665 | elm = RB_RIGHT(elm, field); \ 666 | while (RB_LEFT(elm, field)) \ 667 | elm = RB_LEFT(elm, field); \ 668 | } else { \ 669 | if (RB_PARENT(elm, field) && \ 670 | (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ 671 | elm = RB_PARENT(elm, field); \ 672 | else { \ 673 | while (RB_PARENT(elm, field) && \ 674 | (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\ 675 | elm = RB_PARENT(elm, field); \ 676 | elm = RB_PARENT(elm, field); \ 677 | } \ 678 | } \ 679 | return (elm); \ 680 | } \ 681 | \ 682 | /* ARGSUSED */ \ 683 | attr struct type * \ 684 | 
name##_RB_PREV(struct type *elm) \ 685 | { \ 686 | if (RB_LEFT(elm, field)) { \ 687 | elm = RB_LEFT(elm, field); \ 688 | while (RB_RIGHT(elm, field)) \ 689 | elm = RB_RIGHT(elm, field); \ 690 | } else { \ 691 | if (RB_PARENT(elm, field) && \ 692 | (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \ 693 | elm = RB_PARENT(elm, field); \ 694 | else { \ 695 | while (RB_PARENT(elm, field) && \ 696 | (elm == RB_LEFT(RB_PARENT(elm, field), field)))\ 697 | elm = RB_PARENT(elm, field); \ 698 | elm = RB_PARENT(elm, field); \ 699 | } \ 700 | } \ 701 | return (elm); \ 702 | } \ 703 | \ 704 | attr struct type * \ 705 | name##_RB_MINMAX(struct name *head, int val) \ 706 | { \ 707 | struct type *tmp = RB_ROOT(head); \ 708 | struct type *parent = NULL; \ 709 | while (tmp) { \ 710 | parent = tmp; \ 711 | if (val < 0) \ 712 | tmp = RB_LEFT(tmp, field); \ 713 | else \ 714 | tmp = RB_RIGHT(tmp, field); \ 715 | } \ 716 | return (parent); \ 717 | } 718 | 719 | #define RB_NEGINF -1 720 | #define RB_INF 1 721 | 722 | #define RB_INSERT(name, x, y) name##_RB_INSERT(x, y) 723 | #define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y) 724 | #define RB_FIND(name, x, y) name##_RB_FIND(x, y) 725 | #define RB_NFIND(name, x, y) name##_RB_NFIND(x, y) 726 | #define RB_NEXT(name, x, y) name##_RB_NEXT(y) 727 | #define RB_PREV(name, x, y) name##_RB_PREV(y) 728 | #define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF) 729 | #define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) 730 | 731 | #define RB_FOREACH(x, name, head) \ 732 | for ((x) = RB_MIN(name, head); \ 733 | (x) != NULL; \ 734 | (x) = name##_RB_NEXT(x)) 735 | 736 | #define RB_FOREACH_FROM(x, name, y) \ 737 | for ((x) = (y); \ 738 | ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ 739 | (x) = (y)) 740 | 741 | #define RB_FOREACH_SAFE(x, name, head, y) \ 742 | for ((x) = RB_MIN(name, head); \ 743 | ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ 744 | (x) = (y)) 745 | 746 | #define RB_FOREACH_REVERSE(x, name, head) \ 747 | for ((x) = 
RB_MAX(name, head); \ 748 | (x) != NULL; \ 749 | (x) = name##_RB_PREV(x)) 750 | 751 | #define RB_FOREACH_REVERSE_FROM(x, name, y) \ 752 | for ((x) = (y); \ 753 | ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ 754 | (x) = (y)) 755 | 756 | #define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \ 757 | for ((x) = RB_MAX(name, head); \ 758 | ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ 759 | (x) = (y)) 760 | 761 | #endif /* _SYS_TREE_H_ */ 762 | --------------------------------------------------------------------------------