├── .gitmodules
├── .travis.yml
├── Makefile
├── README.md
├── bench.c
├── chan.c
├── coro.c
├── coro.h
├── main.c
├── queue.c
├── queue.h
├── uvc.c
└── uvc.h

/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "libuv"]
2 | 	path = libuv
3 | 	url = https://github.com/libuv/libuv.git
4 | 
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: c
2 | gcc:
3 |   - 4.8.2
4 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | CFLAGS= -g -DCORO_USE_VALGRIND -DCORO_UCONTEXT -D_GNU_SOURCE -std=c99
2 | LDLIBS= -luv -lpthread
3 | CC=gcc
4 | all:uvc chan
5 | uvc:coro.o uvc.o main.o
6 | chan:chan.o uvc.o coro.o
7 | 
8 | coro.o:coro.c
9 | 	$(CC) $(CFLAGS) -D_BSD_SOURCE -c -o coro.o coro.c
10 | uvc.o:uvc.c
11 | chan.o:chan.c
12 | main.o:main.c
13 | clean:
14 | 	rm -f *.o uvc chan
15 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# libuvc
> Cross-platform coroutines and asynchronous I/O

A binding library for libuv and libcoro that helps you write synchronous, callback-free, high-performance network programs. The goal is a network and coroutine framework for embedded systems and PCs.

The library has been tested on Linux (ARM, x86, x64) and Windows.

## Example: HTTP file download server

```C
static void download(void *ptr){

    uvc_io *fs = malloc(sizeof(uvc_io));
    uvc_io_create(fs, UVC_IO_FS);
    uvc_io *io = ptr;
    ssize_t cnt = 0;
    char buf[256];
    cnt = uvc_read(io, buf, sizeof(buf));
    if(cnt <= 0){
        goto err;
    }
    sprintf(buf, "HTTP/1.1 200 OK\r\nContent-Length: %d\r\n"
        "Content-Type: application/zip\r\n\r\n", 2735243);
    cnt = uvc_write(io, buf, strlen(buf));
    if(cnt != 0){
        goto err;
    }
    if(uvc_fs_open(fs, "/opt/nfshost/master.zip", O_RDONLY) < 0){
        printf("uvc_fs_open error\n");
        goto err;
    }
    while(1){
        cnt = uvc_fs_read(fs, buf, sizeof(buf));
        if(cnt > 0){
            //printf("uvc_fs_read ok\n");
            cnt = uvc_write(io, buf, cnt);
            if(cnt != 0){
                printf("write file err\n");
                goto err;
            }
        }else{
            printf("uvc_fs_read err:%zd\n", cnt);
            break;
        }
    }
err:
    uvc_fs_close(fs);
    free(fs);
    uvc_close(io);
    free(io);
    printf("connection exit\n");
    uvc_return();
}

void server(void *ptr)
{
    int ret = 0;
    uvc_io io;
    uvc_io *io_client;
    uvc_io_create(&io, UVC_IO_TCP);
    ret = uvc_tcp_bind(&io, "0.0.0.0", 8080);
    if(ret != 0){
        printf("error bind:%d\n", ret);
        exit(1);
    }
    printf("start listen\n");
    while(1){
        ret = uvc_listen(&io, 100);
        if(ret != 0){
            printf("error listen:%d\n", ret);
            exit(1);
        }

        io_client = (uvc_io *)malloc(sizeof(uvc_io));
        uvc_io_create(io_client, UVC_IO_TCP);
        ret = uvc_accept(&io, io_client);
        if(ret != 0){
            printf("error accept:%d\n", ret);
            exit(1);
        }
        //printf("get a new connection\n");
        uvc_create("download_task", 10*1024, download, io_client);
    }
    uvc_close(&io);
    uvc_return();
}


int main(){
    uvc_create("listener", 128, server, NULL);
    uvc_schedule();
}
```
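The examples shipped with the repository are all server-side. The following is a rough client-side sketch, not one of the library's own examples; it relies only on the prototypes listed in the API section further down. The `client` function, address, port, and request string are placeholders, and the `!= 0` check on `uvc_write` follows the convention of the server examples (0 means success).

```C
/* Hypothetical client sketch: connect to the server above, send a
 * minimal HTTP request, and print whatever comes back. Not part of
 * the repository; based only on the prototypes in the API section. */
static void client(void *ptr){
    uvc_io io;
    char buf[256];
    ssize_t cnt;
    uvc_io_create(&io, UVC_IO_TCP);
    if(uvc_tcp_connect(&io, "127.0.0.1", 8080) != 0){
        printf("connect failed\n");
        goto out;
    }
    const char *req = "GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n";
    if(uvc_write(&io, (void *)req, strlen(req)) != 0){
        goto out;
    }
    /* read until the peer closes the connection or an error occurs */
    while((cnt = uvc_read(&io, buf, sizeof(buf) - 1)) > 0){
        buf[cnt] = '\0';
        printf("%s", buf);
    }
out:
    uvc_close(&io);
    uvc_return();
}

int main(){
    uvc_create("client", 10 * 1024, client, NULL);
    uvc_schedule();
}
```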
## Example: Producer & Consumer

```C
void Producer(void *ptr){
    channel_t ch = *(int *)ptr;
    int o = 0;
    int i = 0;
    for(i = 0; i < 10; i++){
        o++;
        if(channel_write(ch, (void *)&o) != 0){
            printf("channel broken, Producer exit\n");
            break;
        }
        uvc_sleep(100);
    }
    printf("Producer send over\n");
    channel_close(ch);
    uvc_return();
}

void Consumer(void *ptr){
    channel_t ch = *(int *)ptr;
    int o = 0;
    while(1){
        if(channel_read(ch, (void *)&o) != 0){
            printf("channel broken, Consumer exit\n");
            break;
        }
        printf("Consumer read %d\n", o);
    }
    printf("Consumer recv over\n");
    uvc_return();
}

int main(){
    channel_t ch;
    ch = channel_create(0, sizeof(int));
    uvc_create("Producer", 10 * 1024, Producer, (void *)&ch);
    uvc_create("Consumer", 10 * 1024, Consumer, (void *)&ch);
    uvc_schedule();
}
```

## Feature highlights

* Cross-platform, with embedded-system support
* Lightweight
* Every I/O operation is asynchronous under the hood
* Synchronous, callback-free application logic
* Multithreading support (one thread running many coroutines, or many threads each running many coroutines)
* Channels like Go's chan, buffered or unbuffered

## API
```c
void uvc_create(char *name, unsigned int size, coro_func func, void *arg);
void uvc_return();
void uvc_yield();
void uvc_resume(uvc_ctx *ctx);
void uvc_schedule();
void uvc_sleep(uint64_t msec);
int uvc_io_create(uvc_io *io, uvc_io_type_t type);
/* TCP */
int uvc_tcp_bind(uvc_io *io, char *ip, short port);
ssize_t uvc_read(uvc_io *io, void *data, size_t len);
ssize_t uvc_read2(uvc_io *io, void *data, size_t len, uint64_t timeout);
ssize_t uvc_write(uvc_io *io, void *data, size_t len);
void uvc_close(uvc_io *io);
int uvc_tcp_connect(uvc_io *io, char *ip, short port);
int uvc_listen(uvc_io *io, int backlog);
int uvc_accept(uvc_io *io, uvc_io *c);
/* filesystem */
uv_file uvc_fs_open(uvc_io *io, char *path, int flags);
int uvc_fs_read(uvc_io *io, void *data, ssize_t size);
int uvc_fs_write(uvc_io *io, void *data, ssize_t size);
int uvc_fs_close(uvc_io *io);
int uvc_fs_stat(char *path, uv_stat_t *statbuf);
/* channels */
channel_t channel_create(int cnt, int elem_size);
int channel_close(channel_t c);
int channel_write(channel_t c, void *buf);
int channel_read(channel_t c, void *buf);
channel_t channel_select(int need_default, char *fmt, ...);
```

## Build
### Linux
* Build and install libuv, see https://github.com/libuv/libuv
* make

## Examples
* chan.c: channel example
* uvc.c: I/O example

## Channels

Channels are implemented like Go's chan and select; both buffered and unbuffered channels are supported, as the sketch below illustrates.
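Both README examples create an unbuffered channel with `channel_create(0, sizeof(int))`. The sketch below is illustrative only: the capacity of 4 and the `burst_producer` name are made up, and it assumes that a buffered channel behaves like Go's, where writes only block once the buffer is full.

```C
/* Illustrative sketch, not shipped with the repository: a channel
 * buffered for 4 ints. Capacity and names are arbitrary; semantics
 * are assumed to follow Go's buffered channels. */
void burst_producer(void *ptr){
    channel_t ch = *(int *)ptr;
    int v;
    for(v = 1; v <= 4; v++){
        /* with a capacity of 4, these writes should not block */
        if(channel_write(ch, (void *)&v) != 0){
            break;
        }
    }
    channel_close(ch);
    uvc_return();
}

int main(){
    channel_t ch = channel_create(4, sizeof(int)); /* room for 4 buffered elements */
    uvc_create("burst", 10 * 1024, burst_producer, (void *)&ch);
    uvc_create("Consumer", 10 * 1024, Consumer, (void *)&ch); /* Consumer from the example above */
    uvc_schedule();
}
```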
### channel_select

* `need_default`: if 0, the select blocks until one of the channels is ready; if non-zero and no channel is ready, the call returns immediately with a value that matches none of the given channels (the final else branch below).
* `fmt`: one 'r' or 'w' per channel argument, selecting the event to wait for on that channel.

### Example
```
channel_t c;
c = channel_select(1, "rw", ch1, ch2);
if(c == ch1){
    channel_read(ch1, &buf);
}else if(c == ch2){
    channel_write(ch2, &buf);
}else{
    //no channel active
}
```
--------------------------------------------------------------------------------
/bench.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include "uvc.h"
4 | 
5 | void task(void *arg){
6 | 	uvc_sleep(100000);
7 | }
8 | 
9 | int main(int argc, char **argv){
10 | 	int i = 0;
11 | 	for(i = 0; i < 100000; i++){
12 | 		uvc_create("worker", 128, task, NULL);
13 | 	}
14 | 	uvc_schedule();
15 | 
16 | }
17 | 
--------------------------------------------------------------------------------
/chan.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include <stdint.h>
5 | #include "uvc.h"
6 | 
7 | void Producer(void *ptr){
8 | 	channel_t ch = *(int *)ptr;
9 | 	int o = 0;
10 | 	int i = 0;
11 | 	for (i = 0; i<10; i++){
12 | 		o++;
13 | 		if (channel_write(ch, (void *)&o) != 0){
14 | 			printf("channel broken, Producer exit\n");
15 | 			break;
16 | 		}
17 | 		uvc_sleep(100);
18 | 	}
19 | 	printf("Producer send over\n");
20 | 	channel_close(ch);
21 | 	uvc_return();
22 | }
23 | 
24 | void Consumer(void *ptr){
25 | 
26 | 	channel_t ch = *(int *)ptr;
27 | 	int o = 0;
28 | 	while(1){
29 | 		if (channel_read(ch, (void *)&o) != 0){
30 | 			printf("channel broken, Consumer exit\n");
31 | 			break;
32 | 		}
33 | 		printf("Consumer read %d\n",o);
34 | 	}
35 | 	printf("Consumer recv over\n");
36 | 	uvc_return();
37 | 
38 | }
39 | 
40 | int main(){
41 | 	channel_t ch;
42 | 	ch = channel_create(0, sizeof(int));
43 | 	uvc_create("Producer",10 * 1024, Producer, (void *)&ch);
44 | 	uvc_create("Consumer",10 * 1024, Consumer, (void *)&ch);
45 | 	uvc_schedule();
46 | }
47 | 
--------------------------------------------------------------------------------
/coro.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2001-2011 Marc Alexander Lehmann
3 |  *
4 |  * Redistribution and use in source and binary forms, with or without modifica-
5 |  * tion, are permitted provided that the following conditions are met:
6 |  *
7 |  * 1. Redistributions of source code must retain the above copyright notice,
8 |  *    this list of conditions and the following disclaimer.
9 |  *
10 |  * 2. Redistributions in binary form must reproduce the above copyright
11 |  *    notice, this list of conditions and the following disclaimer in the
12 |  *    documentation and/or other materials provided with the distribution.
13 |  *
14 |  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
15 |  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
16 |  * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
17 |  * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
18 |  * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 |  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
20 |  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
21 |  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
22 |  * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
23 |  * OF THE POSSIBILITY OF SUCH DAMAGE.
24 | * 25 | * Alternatively, the contents of this file may be used under the terms of 26 | * the GNU General Public License ("GPL") version 2 or any later version, 27 | * in which case the provisions of the GPL are applicable instead of 28 | * the above. If you wish to allow the use of your version of this file 29 | * only under the terms of the GPL and not to allow others to use your 30 | * version of this file under the BSD license, indicate your decision 31 | * by deleting the provisions above and replace them with the notice 32 | * and other provisions required by the GPL. If you do not delete the 33 | * provisions above, a recipient may use your version of this file under 34 | * either the BSD or the GPL. 35 | * 36 | * This library is modelled strictly after Ralf S. Engelschalls article at 37 | * http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must 38 | * go to Ralf S. Engelschall . 39 | */ 40 | 41 | #include "coro.h" 42 | 43 | #include 44 | #include 45 | 46 | /*****************************************************************************/ 47 | /* ucontext/setjmp/asm backends */ 48 | /*****************************************************************************/ 49 | #if CORO_UCONTEXT || CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX || CORO_ASM 50 | 51 | # if CORO_UCONTEXT 52 | # include 53 | # endif 54 | 55 | # if !defined(STACK_ADJUST_PTR) 56 | # if __sgi 57 | /* IRIX is decidedly NON-unix */ 58 | # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8) 59 | # define STACK_ADJUST_SIZE(sp,ss) ((ss) - 8) 60 | # elif (__i386__ && CORO_LINUX) || (_M_IX86 && CORO_LOSER) 61 | # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss)) 62 | # define STACK_ADJUST_SIZE(sp,ss) (ss) 63 | # elif (__amd64__ && CORO_LINUX) || ((_M_AMD64 || _M_IA64) && CORO_LOSER) 64 | # define STACK_ADJUST_PTR(sp,ss) ((char *)(sp) + (ss) - 8) 65 | # define STACK_ADJUST_SIZE(sp,ss) (ss) 66 | # else 67 | # define STACK_ADJUST_PTR(sp,ss) (sp) 68 | # define STACK_ADJUST_SIZE(sp,ss) (ss) 69 | # endif 70 | # endif 71 | 72 | # include 73 | 74 | # if CORO_SJLJ 75 | # include 76 | # include 77 | # include 78 | # endif 79 | 80 | static coro_func coro_init_func; 81 | static void *coro_init_arg; 82 | static coro_context *new_coro, *create_coro; 83 | 84 | static void 85 | coro_init (void) 86 | { 87 | volatile coro_func func = coro_init_func; 88 | volatile void *arg = coro_init_arg; 89 | 90 | coro_transfer (new_coro, create_coro); 91 | 92 | #if __GCC_HAVE_DWARF2_CFI_ASM && __amd64 93 | __asm__(".cfi_undefined rip"); 94 | #endif 95 | 96 | func ((void *)arg); 97 | 98 | /* the new coro returned. bad. 
just abort() for now */ 99 | abort (); 100 | } 101 | 102 | # if CORO_SJLJ 103 | 104 | static volatile int trampoline_done; 105 | 106 | /* trampoline signal handler */ 107 | static void 108 | trampoline (int sig) 109 | { 110 | if (coro_setjmp (new_coro->env)) 111 | coro_init (); /* start it */ 112 | else 113 | trampoline_done = 1; 114 | } 115 | 116 | # endif 117 | 118 | # if CORO_ASM 119 | 120 | #if _WIN32 || __CYGWIN__ 121 | #define CORO_WIN_TIB 1 122 | #endif 123 | 124 | __asm__( 125 | "\t.text\n" 126 | #if _WIN32 || __CYGWIN__ 127 | "\t.globl _coro_transfer\n" 128 | "_coro_transfer:\n" 129 | #else 130 | "\t.globl coro_transfer\n" 131 | "coro_transfer:\n" 132 | #endif 133 | /* windows, of course, gives a shit on the amd64 ABI and uses different registers */ 134 | /* http://blogs.msdn.com/freik/archive/2005/03/17/398200.aspx */ 135 | #if __amd64 136 | 137 | #if _WIN32 || __CYGWIN__ 138 | #define NUM_SAVED 29 139 | "\tsubq $168, %rsp\t" /* one dummy qword to improve alignment */ 140 | "\tmovaps %xmm6, (%rsp)\n" 141 | "\tmovaps %xmm7, 16(%rsp)\n" 142 | "\tmovaps %xmm8, 32(%rsp)\n" 143 | "\tmovaps %xmm9, 48(%rsp)\n" 144 | "\tmovaps %xmm10, 64(%rsp)\n" 145 | "\tmovaps %xmm11, 80(%rsp)\n" 146 | "\tmovaps %xmm12, 96(%rsp)\n" 147 | "\tmovaps %xmm13, 112(%rsp)\n" 148 | "\tmovaps %xmm14, 128(%rsp)\n" 149 | "\tmovaps %xmm15, 144(%rsp)\n" 150 | "\tpushq %rsi\n" 151 | "\tpushq %rdi\n" 152 | "\tpushq %rbp\n" 153 | "\tpushq %rbx\n" 154 | "\tpushq %r12\n" 155 | "\tpushq %r13\n" 156 | "\tpushq %r14\n" 157 | "\tpushq %r15\n" 158 | #if CORO_WIN_TIB 159 | "\tpushq %fs:0x0\n" 160 | "\tpushq %fs:0x8\n" 161 | "\tpushq %fs:0xc\n" 162 | #endif 163 | "\tmovq %rsp, (%rcx)\n" 164 | "\tmovq (%rdx), %rsp\n" 165 | #if CORO_WIN_TIB 166 | "\tpopq %fs:0xc\n" 167 | "\tpopq %fs:0x8\n" 168 | "\tpopq %fs:0x0\n" 169 | #endif 170 | "\tpopq %r15\n" 171 | "\tpopq %r14\n" 172 | "\tpopq %r13\n" 173 | "\tpopq %r12\n" 174 | "\tpopq %rbx\n" 175 | "\tpopq %rbp\n" 176 | "\tpopq %rdi\n" 177 | "\tpopq %rsi\n" 178 | "\tmovaps (%rsp), %xmm6\n" 179 | "\tmovaps 16(%rsp), %xmm7\n" 180 | "\tmovaps 32(%rsp), %xmm8\n" 181 | "\tmovaps 48(%rsp), %xmm9\n" 182 | "\tmovaps 64(%rsp), %xmm10\n" 183 | "\tmovaps 80(%rsp), %xmm11\n" 184 | "\tmovaps 96(%rsp), %xmm12\n" 185 | "\tmovaps 112(%rsp), %xmm13\n" 186 | "\tmovaps 128(%rsp), %xmm14\n" 187 | "\tmovaps 144(%rsp), %xmm15\n" 188 | "\taddq $168, %rsp\n" 189 | #else 190 | #define NUM_SAVED 6 191 | "\tpushq %rbp\n" 192 | "\tpushq %rbx\n" 193 | "\tpushq %r12\n" 194 | "\tpushq %r13\n" 195 | "\tpushq %r14\n" 196 | "\tpushq %r15\n" 197 | "\tmovq %rsp, (%rdi)\n" 198 | "\tmovq (%rsi), %rsp\n" 199 | "\tpopq %r15\n" 200 | "\tpopq %r14\n" 201 | "\tpopq %r13\n" 202 | "\tpopq %r12\n" 203 | "\tpopq %rbx\n" 204 | "\tpopq %rbp\n" 205 | #endif 206 | "\tpopq %rcx\n" 207 | "\tjmpq *%rcx\n" 208 | 209 | #elif __i386 210 | 211 | #define NUM_SAVED 4 212 | "\tpushl %ebp\n" 213 | "\tpushl %ebx\n" 214 | "\tpushl %esi\n" 215 | "\tpushl %edi\n" 216 | #if CORO_WIN_TIB 217 | #undef NUM_SAVED 218 | #define NUM_SAVED 7 219 | "\tpushl %fs:0\n" 220 | "\tpushl %fs:4\n" 221 | "\tpushl %fs:8\n" 222 | #endif 223 | "\tmovl %esp, (%eax)\n" 224 | "\tmovl (%edx), %esp\n" 225 | #if CORO_WIN_TIB 226 | "\tpopl %fs:8\n" 227 | "\tpopl %fs:4\n" 228 | "\tpopl %fs:0\n" 229 | #endif 230 | "\tpopl %edi\n" 231 | "\tpopl %esi\n" 232 | "\tpopl %ebx\n" 233 | "\tpopl %ebp\n" 234 | "\tpopl %ecx\n" 235 | "\tjmpl *%ecx\n" 236 | 237 | #else 238 | #error unsupported architecture 239 | #endif 240 | ); 241 | 242 | # endif 243 | 244 | void 245 | coro_create 
(coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize) 246 | { 247 | coro_context nctx; 248 | # if CORO_SJLJ 249 | stack_t ostk, nstk; 250 | struct sigaction osa, nsa; 251 | sigset_t nsig, osig; 252 | # endif 253 | 254 | if (!coro) 255 | return; 256 | 257 | coro_init_func = coro; 258 | coro_init_arg = arg; 259 | 260 | new_coro = ctx; 261 | create_coro = &nctx; 262 | 263 | # if CORO_SJLJ 264 | /* we use SIGUSR2. first block it, then fiddle with it. */ 265 | 266 | sigemptyset (&nsig); 267 | sigaddset (&nsig, SIGUSR2); 268 | sigprocmask (SIG_BLOCK, &nsig, &osig); 269 | 270 | nsa.sa_handler = trampoline; 271 | sigemptyset (&nsa.sa_mask); 272 | nsa.sa_flags = SA_ONSTACK; 273 | 274 | if (sigaction (SIGUSR2, &nsa, &osa)) 275 | { 276 | perror ("sigaction"); 277 | abort (); 278 | } 279 | 280 | /* set the new stack */ 281 | nstk.ss_sp = STACK_ADJUST_PTR (sptr, ssize); /* yes, some platforms (IRIX) get this wrong. */ 282 | nstk.ss_size = STACK_ADJUST_SIZE (sptr, ssize); 283 | nstk.ss_flags = 0; 284 | 285 | if (sigaltstack (&nstk, &ostk) < 0) 286 | { 287 | perror ("sigaltstack"); 288 | abort (); 289 | } 290 | 291 | trampoline_done = 0; 292 | kill (getpid (), SIGUSR2); 293 | sigfillset (&nsig); sigdelset (&nsig, SIGUSR2); 294 | 295 | while (!trampoline_done) 296 | sigsuspend (&nsig); 297 | 298 | sigaltstack (0, &nstk); 299 | nstk.ss_flags = SS_DISABLE; 300 | if (sigaltstack (&nstk, 0) < 0) 301 | perror ("sigaltstack"); 302 | 303 | sigaltstack (0, &nstk); 304 | if (~nstk.ss_flags & SS_DISABLE) 305 | abort (); 306 | 307 | if (~ostk.ss_flags & SS_DISABLE) 308 | sigaltstack (&ostk, 0); 309 | 310 | sigaction (SIGUSR2, &osa, 0); 311 | sigprocmask (SIG_SETMASK, &osig, 0); 312 | 313 | # elif CORO_LOSER 314 | 315 | coro_setjmp (ctx->env); 316 | #if __CYGWIN__ && __i386 317 | ctx->env[8] = (long) coro_init; 318 | ctx->env[7] = (long) ((char *)sptr + ssize) - sizeof (long); 319 | #elif __CYGWIN__ && __x86_64 320 | ctx->env[7] = (long) coro_init; 321 | ctx->env[6] = (long) ((char *)sptr + ssize) - sizeof (long); 322 | #elif defined __MINGW32__ 323 | ctx->env[5] = (long) coro_init; 324 | ctx->env[4] = (long) ((char *)sptr + ssize) - sizeof (long); 325 | #elif defined _M_IX86 326 | ((_JUMP_BUFFER *)&ctx->env)->Eip = (long) coro_init; 327 | ((_JUMP_BUFFER *)&ctx->env)->Esp = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long); 328 | #elif defined _M_AMD64 329 | ((_JUMP_BUFFER *)&ctx->env)->Rip = (__int64) coro_init; 330 | ((_JUMP_BUFFER *)&ctx->env)->Rsp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64); 331 | #elif defined _M_IA64 332 | ((_JUMP_BUFFER *)&ctx->env)->StIIP = (__int64) coro_init; 333 | ((_JUMP_BUFFER *)&ctx->env)->IntSp = (__int64) STACK_ADJUST_PTR (sptr, ssize) - sizeof (__int64); 334 | #else 335 | #error "microsoft libc or architecture not supported" 336 | #endif 337 | 338 | # elif CORO_LINUX 339 | 340 | coro_setjmp (ctx->env); 341 | #if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (JB_PC) && defined (JB_SP) 342 | ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init; 343 | ctx->env[0].__jmpbuf[JB_SP] = (long) STACK_ADJUST_PTR (sptr, ssize) - sizeof (long); 344 | #elif __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 0 && defined (__mc68000__) 345 | ctx->env[0].__jmpbuf[0].__aregs[0] = (long int)coro_init; 346 | ctx->env[0].__jmpbuf[0].__sp = (int *) ((char *)sptr + ssize) - sizeof (long); 347 | #elif defined (__GNU_LIBRARY__) && defined (__i386__) 348 | ctx->env[0].__jmpbuf[0].__pc = (char *) coro_init; 349 | ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof 
(long); 350 | #elif defined (__GNU_LIBRARY__) && defined (__amd64__) 351 | ctx->env[0].__jmpbuf[JB_PC] = (long) coro_init; 352 | ctx->env[0].__jmpbuf[0].__sp = (void *) ((char *)sptr + ssize) - sizeof (long); 353 | #else 354 | #error "linux libc or architecture not supported" 355 | #endif 356 | 357 | # elif CORO_IRIX 358 | 359 | coro_setjmp (ctx->env, 0); 360 | ctx->env[JB_PC] = (__uint64_t)coro_init; 361 | ctx->env[JB_SP] = (__uint64_t)STACK_ADJUST_PTR (sptr, ssize) - sizeof (long); 362 | 363 | # elif CORO_ASM 364 | 365 | ctx->sp = (void **)(ssize + (char *)sptr); 366 | *--ctx->sp = (void *)abort; /* needed for alignment only */ 367 | *--ctx->sp = (void *)coro_init; 368 | 369 | #if CORO_WIN_TIB 370 | *--ctx->sp = 0; /* ExceptionList */ 371 | *--ctx->sp = (char *)sptr + ssize; /* StackBase */ 372 | *--ctx->sp = sptr; /* StackLimit */ 373 | #endif 374 | 375 | ctx->sp -= NUM_SAVED; 376 | memset (ctx->sp, 0, sizeof (*ctx->sp) * NUM_SAVED); 377 | 378 | # elif CORO_UCONTEXT 379 | 380 | getcontext (&(ctx->uc)); 381 | 382 | ctx->uc.uc_link = 0; 383 | ctx->uc.uc_stack.ss_sp = sptr; 384 | ctx->uc.uc_stack.ss_size = (size_t)ssize; 385 | ctx->uc.uc_stack.ss_flags = 0; 386 | 387 | makecontext (&(ctx->uc), (void (*)())coro_init, 0); 388 | 389 | # endif 390 | 391 | coro_transfer (create_coro, new_coro); 392 | } 393 | 394 | /*****************************************************************************/ 395 | /* pthread backend */ 396 | /*****************************************************************************/ 397 | #elif CORO_PTHREAD 398 | 399 | /* this mutex will be locked by the running coroutine */ 400 | pthread_mutex_t coro_mutex = PTHREAD_MUTEX_INITIALIZER; 401 | 402 | struct coro_init_args 403 | { 404 | coro_func func; 405 | void *arg; 406 | coro_context *self, *main; 407 | }; 408 | 409 | static pthread_t null_tid; 410 | 411 | /* I'd so love to cast pthread_mutex_unlock to void (*)(void *)... */ 412 | static void 413 | mutex_unlock_wrapper (void *arg) 414 | { 415 | pthread_mutex_unlock ((pthread_mutex_t *)arg); 416 | } 417 | 418 | static void * 419 | coro_init (void *args_) 420 | { 421 | struct coro_init_args *args = (struct coro_init_args *)args_; 422 | coro_func func = args->func; 423 | void *arg = args->arg; 424 | 425 | pthread_mutex_lock (&coro_mutex); 426 | 427 | /* we try to be good citizens and use deferred cancellation and cleanup handlers */ 428 | pthread_cleanup_push (mutex_unlock_wrapper, &coro_mutex); 429 | coro_transfer (args->self, args->main); 430 | func (arg); 431 | pthread_cleanup_pop (1); 432 | 433 | return 0; 434 | } 435 | 436 | void 437 | coro_transfer (coro_context *prev, coro_context *next) 438 | { 439 | pthread_cond_signal (&next->cv); 440 | pthread_cond_wait (&prev->cv, &coro_mutex); 441 | #if __FreeBSD__ /* freebsd is of course broken and needs manual testcancel calls... yay... 
*/ 442 | pthread_testcancel (); 443 | #endif 444 | } 445 | 446 | void 447 | coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize) 448 | { 449 | static coro_context nctx; 450 | static int once; 451 | 452 | if (!once) 453 | { 454 | once = 1; 455 | 456 | pthread_mutex_lock (&coro_mutex); 457 | pthread_cond_init (&nctx.cv, 0); 458 | null_tid = pthread_self (); 459 | } 460 | 461 | pthread_cond_init (&ctx->cv, 0); 462 | 463 | if (coro) 464 | { 465 | pthread_attr_t attr; 466 | struct coro_init_args args; 467 | 468 | args.func = coro; 469 | args.arg = arg; 470 | args.self = ctx; 471 | args.main = &nctx; 472 | 473 | pthread_attr_init (&attr); 474 | #if __UCLIBC__ 475 | /* exists, but is borked */ 476 | /*pthread_attr_setstacksize (&attr, (size_t)ssize);*/ 477 | #elif __CYGWIN__ 478 | /* POSIX, not here */ 479 | pthread_attr_setstacksize (&attr, (size_t)ssize); 480 | #else 481 | pthread_attr_setstack (&attr, sptr, (size_t)ssize); 482 | #endif 483 | pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS); 484 | pthread_create (&ctx->id, &attr, coro_init, &args); 485 | 486 | coro_transfer (args.main, args.self); 487 | } 488 | else 489 | ctx->id = null_tid; 490 | } 491 | 492 | void 493 | coro_destroy (coro_context *ctx) 494 | { 495 | if (!pthread_equal (ctx->id, null_tid)) 496 | { 497 | pthread_cancel (ctx->id); 498 | pthread_mutex_unlock (&coro_mutex); 499 | pthread_join (ctx->id, 0); 500 | pthread_mutex_lock (&coro_mutex); 501 | } 502 | 503 | pthread_cond_destroy (&ctx->cv); 504 | } 505 | 506 | /*****************************************************************************/ 507 | /* fiber backend */ 508 | /*****************************************************************************/ 509 | #elif CORO_FIBER 510 | 511 | #define WIN32_LEAN_AND_MEAN 512 | #if _WIN32_WINNT < 0x0400 513 | #undef _WIN32_WINNT 514 | #define _WIN32_WINNT 0x0400 515 | #endif 516 | #include 517 | 518 | VOID CALLBACK 519 | coro_init (PVOID arg) 520 | { 521 | coro_context *ctx = (coro_context *)arg; 522 | 523 | ctx->coro (ctx->arg); 524 | } 525 | 526 | void 527 | coro_transfer (coro_context *prev, coro_context *next) 528 | { 529 | if (!prev->fiber) 530 | { 531 | prev->fiber = GetCurrentFiber (); 532 | 533 | if (prev->fiber == 0 || prev->fiber == (void *)0x1e00) 534 | prev->fiber = ConvertThreadToFiber (0); 535 | } 536 | 537 | SwitchToFiber (next->fiber); 538 | } 539 | 540 | void 541 | coro_create (coro_context *ctx, coro_func coro, void *arg, void *sptr, size_t ssize) 542 | { 543 | ctx->fiber = 0; 544 | ctx->coro = coro; 545 | ctx->arg = arg; 546 | 547 | if (!coro) 548 | return; 549 | 550 | ctx->fiber = CreateFiber (ssize, coro_init, ctx); 551 | } 552 | 553 | void 554 | coro_destroy (coro_context *ctx) 555 | { 556 | DeleteFiber (ctx->fiber); 557 | } 558 | 559 | #else 560 | #error unsupported backend 561 | #endif 562 | 563 | /*****************************************************************************/ 564 | /* stack management */ 565 | /*****************************************************************************/ 566 | #if CORO_STACKALLOC 567 | 568 | #include 569 | 570 | #ifndef _WIN32 571 | # include 572 | #endif 573 | 574 | #if CORO_USE_VALGRIND 575 | # include 576 | #endif 577 | 578 | #if _POSIX_MAPPED_FILES 579 | # include 580 | # define CORO_MMAP 1 581 | # ifndef MAP_ANONYMOUS 582 | # ifdef MAP_ANON 583 | # define MAP_ANONYMOUS MAP_ANON 584 | # else 585 | # undef CORO_MMAP 586 | # endif 587 | # endif 588 | # include 589 | #else 590 | # undef CORO_MMAP 591 | #endif 592 | 593 | #if 
_POSIX_MEMORY_PROTECTION 594 | # ifndef CORO_GUARDPAGES 595 | # define CORO_GUARDPAGES 4 596 | # endif 597 | #else 598 | # undef CORO_GUARDPAGES 599 | #endif 600 | 601 | #if !CORO_MMAP 602 | # undef CORO_GUARDPAGES 603 | #endif 604 | 605 | #if !__i386 && !__x86_64 && !__powerpc && !__m68k && !__alpha && !__mips && !__sparc64 606 | # undef CORO_GUARDPAGES 607 | #endif 608 | 609 | #ifndef CORO_GUARDPAGES 610 | # define CORO_GUARDPAGES 0 611 | #endif 612 | 613 | #if !PAGESIZE 614 | #if !CORO_MMAP 615 | #define PAGESIZE 4096 616 | #else 617 | static size_t 618 | coro_pagesize (void) 619 | { 620 | static size_t pagesize; 621 | 622 | if (!pagesize) 623 | pagesize = sysconf (_SC_PAGESIZE); 624 | 625 | return pagesize; 626 | } 627 | 628 | #define PAGESIZE coro_pagesize () 629 | #endif 630 | #endif 631 | 632 | int 633 | coro_stack_alloc (struct coro_stack *stack, unsigned int size) 634 | { 635 | if (!size) 636 | size = 256 * 1024; 637 | 638 | stack->sptr = 0; 639 | stack->ssze = ((size_t)size * sizeof (void *) + PAGESIZE - 1) / PAGESIZE * PAGESIZE; 640 | 641 | #if CORO_FIBER 642 | 643 | stack->sptr = (void *)stack; 644 | return 1; 645 | 646 | #else 647 | 648 | size_t ssze = stack->ssze + CORO_GUARDPAGES * PAGESIZE; 649 | void *base; 650 | 651 | #if CORO_MMAP 652 | /* mmap supposedly does allocate-on-write for us */ 653 | base = mmap (0, ssze, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 654 | 655 | if (base == (void *)-1) 656 | { 657 | /* some systems don't let us have executable heap */ 658 | /* we assume they won't need executable stack in that case */ 659 | base = mmap (0, ssze, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 660 | 661 | if (base == (void *)-1) 662 | return 0; 663 | } 664 | 665 | #if CORO_GUARDPAGES 666 | mprotect (base, CORO_GUARDPAGES * PAGESIZE, PROT_NONE); 667 | #endif 668 | 669 | base = (void*)((char *)base + CORO_GUARDPAGES * PAGESIZE); 670 | #else 671 | base = malloc (ssze); 672 | if (!base) 673 | return 0; 674 | #endif 675 | 676 | #if CORO_USE_VALGRIND 677 | stack->valgrind_id = VALGRIND_STACK_REGISTER ((char *)base, ((char *)base) + ssze - CORO_GUARDPAGES * PAGESIZE); 678 | #endif 679 | 680 | stack->sptr = base; 681 | return 1; 682 | 683 | #endif 684 | } 685 | 686 | void 687 | coro_stack_free (struct coro_stack *stack) 688 | { 689 | #if CORO_FIBER 690 | /* nop */ 691 | #else 692 | #if CORO_USE_VALGRIND 693 | VALGRIND_STACK_DEREGISTER (stack->valgrind_id); 694 | #endif 695 | 696 | #if CORO_MMAP 697 | if (stack->sptr) 698 | munmap ((void*)((char *)stack->sptr - CORO_GUARDPAGES * PAGESIZE), 699 | stack->ssze + CORO_GUARDPAGES * PAGESIZE); 700 | #else 701 | free (stack->sptr); 702 | #endif 703 | #endif 704 | } 705 | 706 | #endif 707 | 708 | -------------------------------------------------------------------------------- /coro.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2001-2012 Marc Alexander Lehmann 3 | * 4 | * Redistribution and use in source and binary forms, with or without modifica- 5 | * tion, are permitted provided that the following conditions are met: 6 | * 7 | * 1. Redistributions of source code must retain the above copyright notice, 8 | * this list of conditions and the following disclaimer. 9 | * 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 
13 | * 14 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 15 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 16 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 17 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 18 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 20 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 21 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 22 | * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 23 | * OF THE POSSIBILITY OF SUCH DAMAGE. 24 | * 25 | * Alternatively, the contents of this file may be used under the terms of 26 | * the GNU General Public License ("GPL") version 2 or any later version, 27 | * in which case the provisions of the GPL are applicable instead of 28 | * the above. If you wish to allow the use of your version of this file 29 | * only under the terms of the GPL and not to allow others to use your 30 | * version of this file under the BSD license, indicate your decision 31 | * by deleting the provisions above and replace them with the notice 32 | * and other provisions required by the GPL. If you do not delete the 33 | * provisions above, a recipient may use your version of this file under 34 | * either the BSD or the GPL. 35 | * 36 | * This library is modelled strictly after Ralf S. Engelschalls article at 37 | * http://www.gnu.org/software/pth/rse-pmt.ps. So most of the credit must 38 | * go to Ralf S. Engelschall . 39 | * 40 | * This coroutine library is very much stripped down. You should either 41 | * build your own process abstraction using it or - better - just use GNU 42 | * Portable Threads, http://www.gnu.org/software/pth/. 43 | * 44 | */ 45 | 46 | /* 47 | * 2006-10-26 Include stddef.h on OS X to work around one of its bugs. 48 | * Reported by Michael_G_Schwern. 49 | * 2006-11-26 Use _setjmp instead of setjmp on GNU/Linux. 50 | * 2007-04-27 Set unwind frame info if gcc 3+ and ELF is detected. 51 | * Use _setjmp instead of setjmp on _XOPEN_SOURCE >= 600. 52 | * 2007-05-02 Add assembly versions for x86 and amd64 (to avoid reliance 53 | * on SIGUSR2 and sigaltstack in Crossfire). 54 | * 2008-01-21 Disable CFI usage on anything but GNU/Linux. 55 | * 2008-03-02 Switched to 2-clause BSD license with GPL exception. 56 | * 2008-04-04 New (but highly unrecommended) pthreads backend. 57 | * 2008-04-24 Reinstate CORO_LOSER (had wrong stack adjustments). 58 | * 2008-10-30 Support assembly method on x86 with and without frame pointer. 59 | * 2008-11-03 Use a global asm statement for CORO_ASM, idea by pippijn. 60 | * 2008-11-05 Hopefully fix misaligned stacks with CORO_ASM/SETJMP. 61 | * 2008-11-07 rbp wasn't saved in CORO_ASM on x86_64. 62 | * introduce coro_destroy, which is a nop except for pthreads. 63 | * speed up CORO_PTHREAD. Do no longer leak threads either. 64 | * coro_create now allows one to create source coro_contexts. 65 | * do not rely on makecontext passing a void * correctly. 66 | * try harder to get _setjmp/_longjmp. 67 | * major code cleanup/restructuring. 68 | * 2008-11-10 the .cfi hacks are no longer needed. 69 | * 2008-11-16 work around a freebsd pthread bug. 70 | * 2008-11-19 define coro_*jmp symbols for easier porting. 71 | * 2009-06-23 tentative win32-backend support for mingw32 (Yasuhiro Matsumoto). 
72 | * 2010-12-03 tentative support for uclibc (which lacks all sorts of things). 73 | * 2011-05-30 set initial callee-saved-registers to zero with CORO_ASM. 74 | * use .cfi_undefined rip on linux-amd64 for better backtraces. 75 | * 2011-06-08 maybe properly implement weird windows amd64 calling conventions. 76 | * 2011-07-03 rely on __GCC_HAVE_DWARF2_CFI_ASM for cfi detection. 77 | * 2011-08-08 cygwin trashes stacks, use pthreads with double stack on cygwin. 78 | * 2012-12-04 reduce misprediction penalty for x86/amd64 assembly switcher. 79 | * 2012-12-05 experimental fiber backend (allocates stack twice). 80 | * 2012-12-07 API version 3 - add coro_stack_alloc/coro_stack_free. 81 | * 2012-12-21 valgrind stack registering was broken. 82 | */ 83 | 84 | #ifndef CORO_H 85 | #define CORO_H 86 | 87 | #if __cplusplus 88 | extern "C" { 89 | #endif 90 | 91 | /* 92 | * This library consists of only three files 93 | * coro.h, coro.c and LICENSE (and optionally README) 94 | * 95 | * It implements what is known as coroutines, in a hopefully 96 | * portable way. 97 | * 98 | * All compiletime symbols must be defined both when including coro.h 99 | * (using libcoro) as well as when compiling coro.c (the implementation). 100 | * 101 | * You can manually specify which flavour you want. If you don't define 102 | * any of these, libcoro tries to choose a safe and fast default: 103 | * 104 | * -DCORO_UCONTEXT 105 | * 106 | * This flavour uses SUSv2's get/set/swap/makecontext functions that 107 | * unfortunately only some unices support, and is quite slow. 108 | * 109 | * -DCORO_SJLJ 110 | * 111 | * This flavour uses SUSv2's setjmp/longjmp and sigaltstack functions to 112 | * do it's job. Coroutine creation is much slower than UCONTEXT, but 113 | * context switching is a bit cheaper. It should work on almost all unices. 114 | * 115 | * -DCORO_LINUX 116 | * 117 | * CORO_SJLJ variant. 118 | * Old GNU/Linux systems (<= glibc-2.1) only work with this implementation 119 | * (it is very fast and therefore recommended over other methods, but 120 | * doesn't work with anything newer). 121 | * 122 | * -DCORO_LOSER 123 | * 124 | * CORO_SJLJ variant. 125 | * Microsoft's highly proprietary platform doesn't support sigaltstack, and 126 | * this selects a suitable workaround for this platform. It might not work 127 | * with your compiler though - it has only been tested with MSVC 6. 128 | * 129 | * -DCORO_FIBER 130 | * 131 | * Slower, but probably more portable variant for the Microsoft operating 132 | * system, using fibers. Ignores the passed stack and allocates it internally. 133 | * Also, due to bugs in cygwin, this does not work with cygwin. 134 | * 135 | * -DCORO_IRIX 136 | * 137 | * CORO_SJLJ variant. 138 | * For SGI's version of Microsoft's NT ;) 139 | * 140 | * -DCORO_ASM 141 | * 142 | * Hand coded assembly, known to work only on a few architectures/ABI: 143 | * GCC + x86/IA32 and amd64/x86_64 + GNU/Linux and a few BSDs. Fastest choice, 144 | * if it works. 145 | * 146 | * -DCORO_PTHREAD 147 | * 148 | * Use the pthread API. You have to provide and -lpthread. 149 | * This is likely the slowest backend, and it also does not support fork(), 150 | * so avoid it at all costs. 151 | * 152 | * If you define neither of these symbols, coro.h will try to autodetect 153 | * the best/safest model. To help with the autodetection, you should check 154 | * (e.g. using autoconf) and define the following symbols: HAVE_UCONTEXT_H 155 | * / HAVE_SETJMP_H / HAVE_SIGALTSTACK. 
156 | */ 157 | 158 | /* 159 | * Changes when the API changes incompatibly. 160 | * This is ONLY the API version - there is no ABI compatibility between releases. 161 | * 162 | * Changes in API version 2: 163 | * replaced bogus -DCORO_LOOSE with grammatically more correct -DCORO_LOSER 164 | * Changes in API version 3: 165 | * introduced stack management (CORO_STACKALLOC) 166 | */ 167 | #define CORO_VERSION 3 168 | 169 | #include 170 | 171 | /* 172 | * This is the type for the initialization function of a new coroutine. 173 | */ 174 | typedef void (*coro_func)(void *); 175 | 176 | /* 177 | * A coroutine state is saved in the following structure. Treat it as an 178 | * opaque type. errno and sigmask might be saved, but don't rely on it, 179 | * implement your own switching primitive if you need that. 180 | */ 181 | typedef struct coro_context coro_context; 182 | 183 | /* 184 | * This function creates a new coroutine. Apart from a pointer to an 185 | * uninitialised coro_context, it expects a pointer to the entry function 186 | * and the single pointer value that is given to it as argument. 187 | * 188 | * Allocating/deallocating the stack is your own responsibility. 189 | * 190 | * As a special case, if coro, arg, sptr and ssze are all zero, 191 | * then an "empty" coro_context will be created that is suitable 192 | * as an initial source for coro_transfer. 193 | * 194 | * This function is not reentrant, but putting a mutex around it 195 | * will work. 196 | */ 197 | void coro_create (coro_context *ctx, /* an uninitialised coro_context */ 198 | coro_func coro, /* the coroutine code to be executed */ 199 | void *arg, /* a single pointer passed to the coro */ 200 | void *sptr, /* start of stack area */ 201 | size_t ssze); /* size of stack area in bytes */ 202 | 203 | /* 204 | * The following prototype defines the coroutine switching function. It is 205 | * sometimes implemented as a macro, so watch out. 206 | * 207 | * This function is thread-safe and reentrant. 208 | */ 209 | #if 0 210 | void coro_transfer (coro_context *prev, coro_context *next); 211 | #endif 212 | 213 | /* 214 | * The following prototype defines the coroutine destroy function. It 215 | * is sometimes implemented as a macro, so watch out. It also serves no 216 | * purpose unless you want to use the CORO_PTHREAD backend, where it is 217 | * used to clean up the thread. You are responsible for freeing the stack 218 | * and the context itself. 219 | * 220 | * This function is thread-safe and reentrant. 221 | */ 222 | #if 0 223 | void coro_destroy (coro_context *ctx); 224 | #endif 225 | 226 | /*****************************************************************************/ 227 | /* optional stack management */ 228 | /*****************************************************************************/ 229 | /* 230 | * You can disable all of the stack management functions by 231 | * defining CORO_STACKALLOC to 0. Otherwise, they are enabled by default. 232 | * 233 | * If stack management is enabled, you can influence the implementation via these 234 | * symbols: 235 | * 236 | * -DCORO_USE_VALGRIND 237 | * 238 | * If defined, then libcoro will include valgrind/valgrind.h and register 239 | * and unregister stacks with valgrind. 240 | * 241 | * -DCORO_GUARDPAGES=n 242 | * 243 | * libcoro will try to use the specified number of guard pages to protect against 244 | * stack overflow. If n is 0, then the feature will be disabled. If it isn't 245 | * defined, then libcoro will choose a suitable default. 
If guardpages are not 246 | * supported on the platform, then the feature will be silently disabled. 247 | */ 248 | #ifndef CORO_STACKALLOC 249 | # define CORO_STACKALLOC 1 250 | #endif 251 | 252 | #if CORO_STACKALLOC 253 | 254 | /* 255 | * The only allowed operations on these struct members is to read the 256 | * "sptr" and "ssze" members to pass it to coro_create, to read the "sptr" 257 | * member to see if it is false, in which case the stack isn't allocated, 258 | * and to set the "sptr" member to 0, to indicate to coro_stack_free to 259 | * not actually do anything. 260 | */ 261 | 262 | struct coro_stack 263 | { 264 | void *sptr; 265 | size_t ssze; 266 | #if CORO_USE_VALGRIND 267 | int valgrind_id; 268 | #endif 269 | }; 270 | 271 | /* 272 | * Try to allocate a stack of at least the given size and return true if 273 | * successful, or false otherwise. 274 | * 275 | * The size is *NOT* specified in bytes, but in units of sizeof (void *), 276 | * i.e. the stack is typically 4(8) times larger on 32 bit(64 bit) platforms 277 | * then the size passed in. 278 | * 279 | * If size is 0, then a "suitable" stack size is chosen (usually 1-2MB). 280 | */ 281 | int coro_stack_alloc (struct coro_stack *stack, unsigned int size); 282 | 283 | /* 284 | * Free the stack allocated by coro_stack_alloc again. It is safe to 285 | * call this function on the coro_stack structure even if coro_stack_alloc 286 | * failed. 287 | */ 288 | void coro_stack_free (struct coro_stack *stack); 289 | 290 | #endif 291 | 292 | /* 293 | * That was it. No other user-serviceable parts below here. 294 | */ 295 | 296 | /*****************************************************************************/ 297 | 298 | #if !defined CORO_LOSER && !defined CORO_UCONTEXT \ 299 | && !defined CORO_SJLJ && !defined CORO_LINUX \ 300 | && !defined CORO_IRIX && !defined CORO_ASM \ 301 | && !defined CORO_PTHREAD && !defined CORO_FIBER 302 | # if defined WINDOWS && (defined __i386 || (__x86_64 || defined _M_IX86 || defined _M_AMD64)) 303 | # define CORO_ASM 1 304 | # elif defined WINDOWS || defined _WIN32 305 | # define CORO_LOSER 1 /* you don't win with windoze */ 306 | # elif __linux && (__i386 || (__x86_64 && !__ILP32)) 307 | # define CORO_ASM 1 308 | # elif defined HAVE_UCONTEXT_H 309 | # define CORO_UCONTEXT 1 310 | # elif defined HAVE_SETJMP_H && defined HAVE_SIGALTSTACK 311 | # define CORO_SJLJ 1 312 | # else 313 | error unknown or unsupported architecture 314 | # endif 315 | #endif 316 | 317 | /*****************************************************************************/ 318 | 319 | #if CORO_UCONTEXT 320 | 321 | # include 322 | 323 | struct coro_context 324 | { 325 | ucontext_t uc; 326 | }; 327 | 328 | # define coro_transfer(p,n) swapcontext (&((p)->uc), &((n)->uc)) 329 | # define coro_destroy(ctx) (void *)(ctx) 330 | 331 | #elif CORO_SJLJ || CORO_LOSER || CORO_LINUX || CORO_IRIX 332 | 333 | # if defined(CORO_LINUX) && !defined(_GNU_SOURCE) 334 | # define _GNU_SOURCE /* for glibc */ 335 | # endif 336 | 337 | # if !CORO_LOSER 338 | # include 339 | # endif 340 | 341 | /* solaris is hopelessly borked, it expands _XOPEN_UNIX to nothing */ 342 | # if __sun 343 | # undef _XOPEN_UNIX 344 | # define _XOPEN_UNIX 1 345 | # endif 346 | 347 | # include 348 | 349 | # if _XOPEN_UNIX > 0 || defined (_setjmp) 350 | # define coro_jmp_buf jmp_buf 351 | # define coro_setjmp(env) _setjmp (env) 352 | # define coro_longjmp(env) _longjmp ((env), 1) 353 | # elif CORO_LOSER 354 | # define coro_jmp_buf jmp_buf 355 | # define coro_setjmp(env) setjmp (env) 356 | # 
define coro_longjmp(env) longjmp ((env), 1) 357 | # else 358 | # define coro_jmp_buf sigjmp_buf 359 | # define coro_setjmp(env) sigsetjmp (env, 0) 360 | # define coro_longjmp(env) siglongjmp ((env), 1) 361 | # endif 362 | 363 | struct coro_context 364 | { 365 | coro_jmp_buf env; 366 | }; 367 | 368 | # define coro_transfer(p,n) do { if (!coro_setjmp ((p)->env)) coro_longjmp ((n)->env); } while (0) 369 | # define coro_destroy(ctx) (void *)(ctx) 370 | 371 | #elif CORO_ASM 372 | 373 | struct coro_context 374 | { 375 | void **sp; /* must be at offset 0 */ 376 | }; 377 | 378 | void __attribute__ ((__noinline__, __regparm__(2))) 379 | coro_transfer (coro_context *prev, coro_context *next); 380 | 381 | # define coro_destroy(ctx) (void *)(ctx) 382 | 383 | #elif CORO_PTHREAD 384 | 385 | # include 386 | 387 | extern pthread_mutex_t coro_mutex; 388 | 389 | struct coro_context 390 | { 391 | pthread_cond_t cv; 392 | pthread_t id; 393 | }; 394 | 395 | void coro_transfer (coro_context *prev, coro_context *next); 396 | void coro_destroy (coro_context *ctx); 397 | 398 | #elif CORO_FIBER 399 | 400 | struct coro_context 401 | { 402 | void *fiber; 403 | /* only used for initialisation */ 404 | coro_func coro; 405 | void *arg; 406 | }; 407 | 408 | void coro_transfer (coro_context *prev, coro_context *next); 409 | void coro_destroy (coro_context *ctx); 410 | 411 | #endif 412 | 413 | #if __cplusplus 414 | } 415 | #endif 416 | 417 | #endif 418 | 419 | -------------------------------------------------------------------------------- /main.c: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "uvc.h" 7 | 8 | 9 | void worker(void *ptr){ 10 | uvc_io *io=(uvc_io *)ptr; 11 | char buf[256]; 12 | ssize_t cnt=0; 13 | while(1){ 14 | cnt = uvc_read(io,buf,sizeof(buf)); 15 | if(cnt <=0){ 16 | break; 17 | } 18 | buf[cnt]='\0'; 19 | if(buf[0]=='q'){break;} 20 | //printf("server read: %s\n",buf); 21 | 22 | cnt=uvc_write(io,buf,strlen(buf)); 23 | if(cnt!=0){ 24 | break; 25 | } 26 | } 27 | uvc_close(io); 28 | free(io); 29 | printf("connection exit\n"); 30 | uvc_return(); 31 | } 32 | 33 | void http_hello(void *ptr){ 34 | 35 | uvc_io *io=ptr; 36 | ssize_t cnt=0; 37 | char buf[256]; 38 | cnt = uvc_read(io,buf,sizeof(buf)); 39 | if(cnt <=0){ 40 | goto err; 41 | } 42 | sprintf(buf,"HTTP/1.1 200 OK\r\nContent-Length: %d\r\nContent-Type: text/html; Charset=gb2312\r\n\r\n%s",strlen("hello wrold!"),"hello wrold!"); 43 | cnt=uvc_write(io,buf,strlen(buf)); 44 | if(cnt!=0){ 45 | goto err; 46 | } 47 | 48 | err: 49 | uvc_close(io); 50 | free(io); 51 | //printf("connection exit\n"); 52 | uvc_return(); 53 | } 54 | 55 | 56 | 57 | void download(void *ptr){ 58 | 59 | uvc_io *fs = malloc(sizeof(uvc_io)); 60 | uvc_io_create(fs,UVC_IO_FS); 61 | uvc_io *io=ptr; 62 | ssize_t cnt=0; 63 | char buf[2048]; 64 | cnt = uvc_read(io,buf,sizeof(buf)); 65 | if(cnt <=0){ 66 | goto err; 67 | } 68 | sprintf(buf,"HTTP/1.1 200 OK\r\nContent-Length: %d\r\nContent-Type: application/zip\r\n\r\n",2735243); 69 | cnt=uvc_write(io,buf,strlen(buf)); 70 | if(cnt!=0){ 71 | goto err; 72 | } 73 | if(uvc_fs_open(fs,"/opt/nfshost/master.zip",O_RDONLY) <0){ 74 | printf("uvc_fs_open error\n"); 75 | goto err; 76 | } 77 | while(1){ 78 | cnt = uvc_fs_read(fs,buf,sizeof(buf)); 79 | if(cnt>0){ 80 | //printf("uvc_fs_read ok\n"); 81 | cnt=uvc_write(io,buf,cnt); 82 | if(cnt!=0){ 83 | printf("write file err\n"); 84 | goto err; 85 | } 86 | }else{ 87 | printf("uvc_fs_read err:%d\n",cnt); 88 | break; 
89 | } 90 | 91 | 92 | } 93 | err: 94 | uvc_fs_close(fs); 95 | free(fs); 96 | uvc_close(io); 97 | free(io); 98 | printf("connection exit\n"); 99 | uvc_return(); 100 | } 101 | 102 | void server(void *ptr) 103 | { 104 | int ret=0; 105 | uvc_io io; 106 | uvc_io *io_client; 107 | uvc_io_create(&io,UVC_IO_TCP); 108 | ret = uvc_tcp_bind(&io,"0.0.0.0",8080); 109 | if(ret!=0){ 110 | printf("error bind:%d\n",ret); 111 | exit(1); 112 | } 113 | printf("start listen\n"); 114 | while(1){ 115 | ret = uvc_listen(&io,100); 116 | if(ret!=0){ 117 | printf("error listen:%d\n",ret); 118 | exit(1); 119 | } 120 | 121 | io_client = (uvc_io *)malloc(sizeof(uvc_io)); 122 | uvc_io_create(io_client,UVC_IO_TCP); 123 | ret = uvc_accept(&io,io_client); 124 | if(ret !=0){ 125 | printf("error accept:%d\n",ret); 126 | exit(1); 127 | } 128 | //printf("get a new connection\n"); 129 | uvc_create("hello",10*1024,download,io_client); 130 | } 131 | uvc_close(&io); 132 | uvc_return(); 133 | } 134 | 135 | int main(){ 136 | 137 | uvc_create("listen",128,server,NULL); 138 | uvc_schedule(); 139 | } 140 | -------------------------------------------------------------------------------- /queue.c: -------------------------------------------------------------------------------- 1 | #include "queue.h" 2 | 3 | 4 | queue_t *queue_middle(queue_t *queue) 5 | { 6 | queue_t *middle, *next; 7 | 8 | middle = queue_head(queue); 9 | 10 | if (middle == queue_last(queue)) 11 | { 12 | return middle; 13 | } 14 | 15 | next = queue_head(queue); 16 | 17 | for ( ;; ) 18 | { 19 | middle = queue_next(middle); 20 | 21 | next = queue_next(next); 22 | 23 | if (next == queue_last(queue)) 24 | { 25 | return middle; 26 | } 27 | 28 | next = queue_next(next); 29 | 30 | if (next == queue_last(queue)) 31 | { 32 | return middle; 33 | } 34 | } 35 | } 36 | 37 | 38 | /* the stable insertion sort */ 39 | 40 | void 41 | queue_sort(queue_t *queue, 42 | int (*cmp)(const queue_t *, const queue_t *)) 43 | { 44 | queue_t *q, *prev, *next; 45 | 46 | q = queue_head(queue); 47 | 48 | if (q == queue_last(queue)) 49 | { 50 | return; 51 | } 52 | 53 | for (q = queue_next(q); q != queue_sentinel(queue); q = next) 54 | { 55 | 56 | prev = queue_prev(q); 57 | next = queue_next(q); 58 | 59 | queue_remove(q); 60 | 61 | do 62 | { 63 | if (cmp(prev, q) <= 0) 64 | { 65 | break; 66 | } 67 | 68 | prev = queue_prev(prev); 69 | 70 | } 71 | while (prev != queue_sentinel(queue)); 72 | 73 | queue_insert_after(prev, q); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /queue.h: -------------------------------------------------------------------------------- 1 | #ifndef _queue_H_INCLUDED_ 2 | #define _queue_H_INCLUDED_ 3 | 4 | struct queue_s 5 | { 6 | struct queue_s *prev; 7 | struct queue_s *next; 8 | void *ext; 9 | }; 10 | typedef struct queue_s queue_t; 11 | 12 | 13 | 14 | 15 | #define queue_init(q) \ 16 | (q)->prev = q; \ 17 | (q)->next = q 18 | 19 | 20 | #define queue_empty(h) \ 21 | (h == (h)->prev) 22 | 23 | 24 | #define queue_insert_head(h, x) \ 25 | (x)->next = (h)->next; \ 26 | (x)->next->prev = x; \ 27 | (x)->prev = h; \ 28 | (h)->next = x 29 | 30 | 31 | #define queue_insert_after queue_insert_head 32 | 33 | 34 | #define queue_insert_tail(h, x) \ 35 | (x)->prev = (h)->prev; \ 36 | (x)->prev->next = x; \ 37 | (x)->next = h; \ 38 | (h)->prev = x 39 | 40 | 41 | #define queue_head(h) \ 42 | (h)->next 43 | 44 | 45 | #define queue_last(h) \ 46 | (h)->prev 47 | 48 | 49 | #define queue_sentinel(h) \ 50 | (h) 51 | 52 | 53 | #define queue_next(q) \ 54 | 
(q)->next 55 | 56 | 57 | #define queue_prev(q) \ 58 | (q)->prev 59 | 60 | 61 | #if (NGX_DEBUG) 62 | 63 | #define queue_remove(x) \ 64 | (x)->next->prev = (x)->prev; \ 65 | (x)->prev->next = (x)->next; \ 66 | (x)->prev = NULL; \ 67 | (x)->next = NULL 68 | 69 | #else 70 | 71 | #define queue_remove(x) \ 72 | (x)->next->prev = (x)->prev; \ 73 | (x)->prev->next = (x)->next 74 | 75 | #endif 76 | 77 | 78 | #define queue_split(h, q, n) \ 79 | (n)->prev = (h)->prev; \ 80 | (n)->prev->next = n; \ 81 | (n)->next = q; \ 82 | (h)->prev = (q)->prev; \ 83 | (h)->prev->next = h; \ 84 | (q)->prev = n; 85 | 86 | 87 | #define queue_add(h, n) \ 88 | (h)->prev->next = (n)->next; \ 89 | (n)->next->prev = (h)->prev; \ 90 | (h)->prev = (n)->prev; \ 91 | (h)->prev->next = h; 92 | 93 | 94 | #define queue_data(q, type, link) \ 95 | (type *) ((uint8_t *) q - offsetof(type, link)) 96 | 97 | 98 | queue_t *queue_middle(queue_t *queue); 99 | void queue_sort(queue_t *queue, int (*cmp)(const queue_t *, const queue_t *)); 100 | 101 | 102 | #endif /* _queue_H_INCLUDED_ */ 103 | -------------------------------------------------------------------------------- /uvc.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "coro.h" 7 | #include 8 | #include "uvc.h" 9 | #include "queue.h" 10 | 11 | 12 | #define UV_TASK_STACK_SIZE 1024*1024 13 | //----------------------------------------------defaultloop---------------------------------------------------- 14 | static uv_key_t uvc_key; 15 | static uv_once_t once; 16 | 17 | 18 | typedef struct { 19 | size_t size; 20 | size_t cnt; 21 | channel_t id; 22 | 23 | int start; 24 | int end; 25 | int cur_cnt; 26 | queue_t readq; 27 | queue_t writq; 28 | int closeing; 29 | //void cbuf[0];/*for unbuffered euqe to*/ 30 | uint8_t buf[0];/*for buffered copy to*/ 31 | }channel; 32 | 33 | #define MAX_CHANNEL_POOL 10240 34 | 35 | /*为了达到O1的效率,使用数组*/ 36 | /*为了防止被释放的chan index,被新的chan占用, 37 | 在chan中记录了ID,查找后进行对比,ID不会重复 38 | */ 39 | struct channel_pool_s{ 40 | uint32_t current_empty; 41 | uint32_t maxid; 42 | channel *channels[MAX_CHANNEL_POOL]; 43 | int cnt; 44 | }; 45 | typedef struct channel_pool_s channel_pool; 46 | typedef struct { 47 | uv_loop_t *loop; 48 | coro_context ctx; 49 | channel_pool pool; 50 | uv_timer_t schedule_timer; 51 | uvc_ctx *uv_task; 52 | uvc_ctx *schedule_task; 53 | uvc_ctx *runing_task; 54 | queue_t ready_queue; 55 | queue_t pending_queue; 56 | //stacks 57 | }uvc_thread_env; 58 | 59 | void uvc_init(void){ 60 | uv_key_create(&uvc_key); 61 | } 62 | 63 | static uvc_thread_env *uvc_get_env(){ 64 | uvc_ctx *ctx = NULL; 65 | uv_once(&once,uvc_init); 66 | uvc_thread_env *env=(uvc_thread_env *)uv_key_get(&uvc_key); 67 | if(env==NULL){ 68 | env=(uvc_thread_env *)malloc(sizeof(uvc_thread_env)); 69 | memset(env,0,sizeof(uvc_thread_env)); 70 | env->loop = uv_loop_new(); 71 | queue_init(&env->pending_queue); 72 | queue_init(&env->ready_queue); 73 | 74 | ctx = (uvc_ctx *)malloc(sizeof(uvc_ctx)); 75 | memset(ctx, 0, sizeof(uvc_ctx)); 76 | coro_stack_alloc(&ctx->stack, 0); 77 | coro_create(&ctx->cur, NULL, NULL, ctx->stack.sptr, ctx->stack.ssze); 78 | sprintf(ctx->name, "ROOT"); 79 | env->schedule_task = ctx; 80 | env->runing_task = ctx; 81 | ctx->status = UVC_STATUS_RUNING; 82 | uv_key_set(&uvc_key,env); 83 | } 84 | return env; 85 | } 86 | 87 | uv_loop_t* uvc_loop_default(){ 88 | uvc_thread_env *env=uvc_get_env(); 89 | #ifdef UVC_DEBUG 90 | if(env ==NULL || env->loop==NULL){ 91 | 
assert("env ==NULL || env->loop==NULL"); 92 | } 93 | #endif 94 | return env->loop; 95 | } 96 | 97 | static channel_pool *get_chan_pool(){ 98 | uvc_thread_env *env=uvc_get_env(); 99 | return &env->pool; 100 | } 101 | 102 | 103 | 104 | static uvc_ctx *uvc_self(){ 105 | uvc_thread_env *env=uvc_get_env(); 106 | #ifdef UVC_DEBUG 107 | if(env ==NULL || queue_empty(&env->stack)){ 108 | assert("env ==NULL || env->stack.top==NULL || env->stack.top->ctx ==NULL"); 109 | } 110 | #endif 111 | return env->runing_task; 112 | } 113 | 114 | 115 | 116 | #if OLD_LIBUV 117 | static void schedule_timer_cb(uv_timer_t *timer, int status){ 118 | #else 119 | void schedule_timer_cb(uv_timer_t *timer){ 120 | #endif 121 | return; 122 | } 123 | 124 | void uvc_task_uv(void *ptr){ 125 | uvc_thread_env *env = uvc_get_env(); 126 | env->uv_task = uvc_self(); 127 | uv_run(env->loop, UV_RUN_DEFAULT); 128 | env->uv_task = NULL; 129 | uvc_return(); 130 | } 131 | 132 | void uvc_schedule(){ 133 | uvc_ctx *ctx; 134 | queue_t *node; 135 | uvc_thread_env *env; 136 | env = uvc_get_env(); 137 | for (;;){ 138 | if (!queue_empty(&env->ready_queue) ){ 139 | node = queue_last(&env->ready_queue); 140 | queue_remove( node); 141 | ctx = queue_data(node, uvc_ctx, task_node); 142 | } 143 | else{ 144 | //只有当没有ready任务时才运行uvloop 145 | if (env->uv_task){ 146 | ctx = env->uv_task; 147 | } 148 | else{ 149 | printf("no task need run ,exit\n"); 150 | exit(0); 151 | } 152 | } 153 | 154 | if (ctx != NULL){ 155 | env->runing_task = ctx; 156 | ctx->status = UVC_STATUS_RUNING; 157 | //printf("task[%s] runing\n",ctx->name); 158 | uvc_resume(ctx); 159 | //printf("task[%s] stoping\n",ctx->name); 160 | } 161 | if (ctx->status == UVC_STATUS_DIE){ 162 | coro_stack_free(&ctx->stack); 163 | free(ctx); 164 | }else{ 165 | ctx->status = UVC_STATUS_PENDING; 166 | } 167 | 168 | 169 | } 170 | 171 | } 172 | 173 | //----------------------------------------------base---------------------------------------------------- 174 | 175 | 176 | void uvc_ctx_set_name(char *name){ 177 | uvc_ctx *ctx = uvc_self(); 178 | sprintf(ctx->name, "%s",name); 179 | } 180 | 181 | char *uvc_ctx_get_name(){ 182 | uvc_ctx *ctx = uvc_self(); 183 | return ctx->name; 184 | } 185 | 186 | #define YIELD(e) coro_transfer(&(e)->runing_task->cur, &(e)->schedule_task->cur) 187 | #define RESUME(e,c) coro_transfer( &(e)->schedule_task->cur,&(c)->cur) 188 | 189 | void uvc_return(){ 190 | uvc_ctx *ctx=uvc_self(); 191 | uvc_thread_env *env = uvc_get_env(); 192 | env->runing_task->status= UVC_STATUS_DIE; 193 | printf("task[%s] exit\n",uvc_ctx_get_name()); 194 | YIELD(env); 195 | //TODO 当前协程先出栈,然后resume到专门释放协程的协程。 196 | } 197 | 198 | void uvc_io_ready(uvc_ctx *ctx){ 199 | uvc_thread_env *env = uvc_get_env(); 200 | queue_insert_head(&env->ready_queue, &ctx->task_node); 201 | YIELD(env);; 202 | } 203 | 204 | void uvc_yield(){ 205 | uvc_thread_env *env=uvc_get_env(); 206 | YIELD(env);; 207 | } 208 | 209 | void uvc_ready(uvc_ctx *ctx){ 210 | if (ctx){ 211 | uvc_thread_env *env = uvc_get_env(); 212 | queue_insert_head(&env->ready_queue, &ctx->task_node); 213 | } 214 | } 215 | 216 | void uvc_resume(uvc_ctx *ctx){ 217 | uvc_thread_env *env=uvc_get_env(); 218 | ctx->status = UVC_STATUS_RUNING; 219 | env->runing_task = ctx; 220 | RESUME(env, ctx); 221 | } 222 | 223 | void uvc_switch(uvc_ctx *prev, uvc_ctx *next){ 224 | printf("[switch] %s -> %s\n", prev->name, next->name); 225 | uvc_thread_env *env = uvc_get_env(); 226 | next->status = UVC_STATUS_RUNING; 227 | prev->status = UVC_STATUS_READY; 228 | env->runing_task = 
next; 229 | coro_transfer(&prev->cur, &next->cur); 230 | } 231 | 232 | void uvc_create(char *name, unsigned int size, coro_func func, void *arg){ 233 | uvc_ctx *ctx = (uvc_ctx *)malloc(sizeof(uvc_ctx)); 234 | memset(ctx, 0, sizeof(uvc_ctx)); 235 | //ctx->data=arg; 236 | coro_stack_alloc(&ctx->stack, size); 237 | coro_create(&ctx->cur, func, arg, ctx->stack.sptr, ctx->stack.ssze); 238 | if (name == NULL || strlen(name) == 0){ 239 | sprintf(ctx->name, "coro"); 240 | } 241 | else{ 242 | sprintf(ctx->name, name); 243 | } 244 | uvc_ready(ctx); 245 | //uvc_resume(ctx); 246 | 247 | return; 248 | } 249 | 250 | 251 | static void uvc_timer_close_cb(uv_handle_t *handle){ 252 | uvc_io_ready((uvc_ctx *)handle->data); 253 | } 254 | 255 | #if OLD_LIBUV 256 | static void uvc_timer_cb(uv_timer_t* handle, int status){ 257 | #else 258 | static void uvc_timer_cb(uv_timer_t* handle){ 259 | #endif 260 | uvc_io_ready((uvc_ctx *)handle->data); 261 | } 262 | 263 | void uvc_sleep(uint64_t msec){ 264 | uvc_thread_env *env = uvc_get_env(); 265 | uv_timer_t timer; 266 | timer.data = uvc_self(); 267 | uv_timer_init(uvc_loop_default(), &timer); 268 | uv_timer_start(&timer, uvc_timer_cb, msec, 0); 269 | 270 | if (env->uv_task == NULL){ 271 | uvc_create("UV_LOOP", UV_TASK_STACK_SIZE, uvc_task_uv, NULL); 272 | } 273 | 274 | uvc_yield(); 275 | uv_close((uv_handle_t *)&timer, uvc_timer_close_cb); 276 | uvc_yield(); 277 | } 278 | 279 | int uvc_io_create(uvc_io *io, uvc_io_type_t type) 280 | { 281 | uv_handle_t *h; 282 | uvc_thread_env *env = uvc_get_env(); 283 | 284 | memset(io,0,sizeof(uvc_io)); 285 | switch(type){ 286 | case UVC_IO_TCP: 287 | h = (uv_handle_t *)malloc(sizeof(uv_tcp_t)); 288 | uv_tcp_init(uvc_loop_default(),(uv_tcp_t *)h); 289 | //io->cur=ctx; 290 | io->handle=h; 291 | break; 292 | case UVC_IO_FS: 293 | h = (uv_handle_t *)malloc(sizeof(uv_fs_t)); 294 | io->handle=h; 295 | break; 296 | 297 | default: 298 | assert("unknown handle type"); 299 | } 300 | if (env->uv_task == NULL){ 301 | uvc_create("UV_LOOP", UV_TASK_STACK_SIZE, uvc_task_uv, NULL); 302 | } 303 | return 0; 304 | } 305 | 306 | int uvc_tcp_bind(uvc_io *io,char *ip,short port){ 307 | int status; 308 | struct sockaddr_in addr; 309 | status = uv_ip4_addr(ip,port,&addr); 310 | if (status){ 311 | return status; 312 | } 313 | #if OLD_LIBUV 314 | return uv_tcp_bind((uv_tcp_t*)io->handle,(const struct sockaddr *)&addr); 315 | #else 316 | return uv_tcp_bind((uv_tcp_t*)io->handle,(const struct sockaddr *)&addr,0); 317 | #endif 318 | } 319 | 320 | static void uvc_alloc_cb(uv_handle_t* handle,size_t s,uv_buf_t* buf){ 321 | uvc_io *io= (uvc_io *)handle->data; 322 | buf->base=io->buf.base; 323 | buf->len=io->buf.len; 324 | 325 | } 326 | static void uvc_read_cb(uv_stream_t* stream,ssize_t nread,const uv_buf_t* buf) 327 | { 328 | uvc_io *io= (uvc_io *)stream->data; 329 | io->buf.base=buf->base; 330 | io->buf.len=buf->len; 331 | io->nread=nread; 332 | uvc_io_ready(io->cur); 333 | } 334 | 335 | static void uvc_write_cb(uv_write_t* req, int status) 336 | { 337 | uvc_io *io= (uvc_io *)req->data; 338 | io->return_status = status; 339 | uvc_io_ready(io->cur); 340 | } 341 | 342 | static void uvc_close_cb(uv_handle_t* handle) 343 | { 344 | //uvc_io *io=uvc_container_of(handle,uvc_io,handle); 345 | uvc_io *io=(uvc_io *)handle->data; 346 | free(io->handle); 347 | uvc_io_ready(io->cur); 348 | } 349 | 350 | static void uvc_connect_cb(uv_connect_t* req, int status) 351 | { 352 | uvc_io *io=(uvc_io *)req->data; 353 | io->return_status = status; 354 | uvc_io_ready(io->cur); 355 | 
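/* Like the other *_cb handlers in this file, this callback runs on the UV_LOOP
   task: the blocking-style wrapper stored the calling coroutine in io->cur,
   started the libuv request and yielded; the callback records the result and
   uvc_io_ready() pushes that coroutine onto the ready queue, then yields the
   loop task back to the scheduler. */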
} 356 | 357 | static void uvc_connection_cb(uv_stream_t* server, int status) 358 | { 359 | 360 | uvc_io *io=(uvc_io *)server->data; 361 | //uvc_io *io=((uvc_io*)(((char*)(server)) - offsetof(uvc_io, handle))); 362 | io->return_status=status; 363 | uvc_io_ready(io->cur); 364 | } 365 | 366 | static void uvc_fs_cb(uv_fs_t* req) 367 | { 368 | uvc_io *io=(uvc_io *)req->data; 369 | uvc_io_ready(io->cur); 370 | } 371 | static void uvc_fs_cb2(uv_fs_t* req) 372 | { 373 | uvc_ctx *ctx=(uvc_ctx *)req->data; 374 | uvc_io_ready(ctx); 375 | } 376 | 377 | static void uvc_after_work_cb(uv_work_t* req, int status) 378 | { 379 | uvc_ctx *ctx =(uvc_ctx *)req->data; 380 | uvc_io_ready(ctx); 381 | } 382 | #ifdef OLD_LIBUV 383 | static void uvc_iotimer_cb(uv_timer_t* handle, int status) 384 | #else 385 | static void uvc_iotimer_cb(uv_timer_t* handle) 386 | #endif 387 | { 388 | uvc_io *io= (uvc_io *)handle->data; 389 | io->timeout=1; 390 | uvc_io_ready(io->cur); 391 | } 392 | 393 | //----------------------------------------------network---------------------------------------------------- 394 | 395 | ssize_t uvc_read(uvc_io *io,void *data,size_t len){ 396 | ssize_t nread=0; 397 | io->buf.base=(char *)data; 398 | io->buf.len=len; 399 | io->handle->data=io; 400 | nread=uv_read_start((uv_stream_t *)io->handle,uvc_alloc_cb,uvc_read_cb); 401 | if(nread ==UV_EOF){ 402 | return nread; 403 | } 404 | io->cur =uvc_self(); 405 | uvc_yield(); 406 | uv_read_stop((uv_stream_t *)io->handle); 407 | return io->nread; 408 | } 409 | 410 | ssize_t uvc_read2(uvc_io *io,void *data,size_t len,uint64_t timeout){ 411 | ssize_t nread=0; 412 | io->buf.base=(char *)data; 413 | io->buf.len=len; 414 | 415 | uv_timer_t timer; 416 | timer.data=io; 417 | io->timeout=0; 418 | uv_timer_init(uvc_loop_default(),&timer); 419 | uv_timer_start(&timer,uvc_iotimer_cb,timeout,0); 420 | 421 | nread=uv_read_start((uv_stream_t *)io->handle,uvc_alloc_cb,uvc_read_cb); 422 | if(nread ==UV_EOF){ 423 | return nread; 424 | } 425 | io->cur =uvc_self(); 426 | uvc_yield( ); 427 | uv_read_stop((uv_stream_t *)io->handle); 428 | if(io->timeout == 0){ 429 | return ETIMEDOUT; 430 | } 431 | return io->nread; 432 | } 433 | 434 | ssize_t uvc_write(uvc_io *io,void *data,size_t len){ 435 | uv_buf_t buf; 436 | uv_write_t req; 437 | ssize_t nwrite=0; 438 | buf.base=(char *)data; 439 | buf.len=len; 440 | req.data=io; 441 | uv_write(&req,(uv_stream_t *)io->handle,&buf,1,uvc_write_cb); 442 | io->cur =uvc_self(); 443 | uvc_yield( ); 444 | return (ssize_t)io->return_status; 445 | } 446 | 447 | void uvc_close(uvc_io *io){ 448 | io->handle->data=io; 449 | uv_close(io->handle,uvc_close_cb); 450 | io->cur =uvc_self(); 451 | uvc_yield( ); 452 | } 453 | 454 | int uvc_tcp_connect(uvc_io *io,char *ip,short port){ 455 | int status; 456 | uv_connect_t req; 457 | struct sockaddr_in addr; 458 | status = uv_ip4_addr(ip,port,&addr); 459 | if (status){ 460 | return status; 461 | } 462 | 463 | req.data = io; 464 | uv_tcp_connect(&req, (uv_tcp_t *)io->handle, (const struct sockaddr*)&addr, uvc_connect_cb); 465 | io->cur =uvc_self(); 466 | uvc_yield( ); 467 | return io->return_status; 468 | } 469 | 470 | int uvc_listen(uvc_io *io,int backlog){ 471 | io->handle->data=io; 472 | uv_listen((uv_stream_t *)io->handle,backlog,uvc_connection_cb); 473 | io->cur =uvc_self(); 474 | uvc_yield( ); 475 | return io->return_status; 476 | } 477 | 478 | int uvc_accept( uvc_io *io,uvc_io *c){ 479 | return uv_accept((uv_stream_t *)io->handle,(uv_stream_t *)c->handle); 480 | } 481 | 482 | 
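/* Illustrative sketch (not part of the original source): a client coroutine
   written against the wrappers above. The logic reads as plain blocking code;
   each uvc_* call parks this coroutine until the matching libuv callback marks
   it ready again. Address and payload are placeholders. */
static void example_client(void *arg){
    char buf[256];
    ssize_t n;
    uvc_io io;
    (void)arg;
    uvc_io_create(&io, UVC_IO_TCP);
    if (uvc_tcp_connect(&io, "127.0.0.1", 8080) == 0){
        uvc_write(&io, "ping", 4);           /* suspends only this coroutine */
        n = uvc_read(&io, buf, sizeof(buf)); /* resumed from uvc_read_cb */
        if (n > 0){
            printf("example_client: got %d bytes\n", (int)n);
        }
    }
    uvc_close(&io);
    uvc_return();
}
/* Spawn it like any other coroutine:
   uvc_create("client", 10*1024, example_client, NULL); */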
//----------------------------------------------filesystem-------------------------------------------------- 483 | 484 | 485 | uv_file uvc_fs_open(uvc_io *io,char *path,int flasgs){ 486 | io->handle->data=io; 487 | uv_fs_open(uvc_loop_default(),(uv_fs_t *)io->handle,path,flasgs,0,uvc_fs_cb); 488 | io->cur =uvc_self(); 489 | uvc_yield( ); 490 | io->file = ((uv_fs_t *)(io->handle))->result; 491 | uv_fs_req_cleanup((uv_fs_t *)io->handle); 492 | return io->file; 493 | } 494 | 495 | int uvc_fs_read(uvc_io *io,void *data,ssize_t size){ 496 | io->handle->data=io; 497 | uv_buf_t buf; 498 | buf.len=size; 499 | buf.base=data; 500 | uv_fs_read(uvc_loop_default(),(uv_fs_t *)io->handle,io->file,&buf,1,-1,uvc_fs_cb); 501 | io->cur =uvc_self(); 502 | uvc_yield( ); 503 | uv_fs_req_cleanup((uv_fs_t *)io->handle); 504 | return ((uv_fs_t *)(io->handle))->result;; 505 | } 506 | 507 | int uvc_fs_write(uvc_io *io,void *data,ssize_t size){ 508 | uv_buf_t buf; 509 | io->handle->data=io; 510 | buf.len=size; 511 | buf.base=data; 512 | uv_fs_write(uvc_loop_default() ,(uv_fs_t *)io->handle,io->file,&buf,1,-1,uvc_fs_cb); 513 | io->cur =uvc_self(); 514 | uvc_yield( ); 515 | return ((uv_fs_t *)(io->handle))->result;; 516 | } 517 | 518 | int uvc_fs_close(uvc_io *io){ 519 | io->handle->data=io; 520 | uv_fs_close(uvc_loop_default(),(uv_fs_t *)io->handle,io->file,uvc_fs_cb); 521 | io->cur =uvc_self(); 522 | uvc_yield(); 523 | uv_fs_req_cleanup((uv_fs_t *)io->handle); 524 | free(io->handle); 525 | return 0; 526 | } 527 | 528 | int uvc_fs_stat( char *path,uv_stat_t *statbuf){ 529 | uv_fs_t req; 530 | req.data=uvc_self(); 531 | uv_fs_stat(uvc_loop_default(),&req,path,uvc_fs_cb2); 532 | 533 | uvc_yield(); 534 | memcpy(statbuf,&req.statbuf,sizeof(uv_stat_t)); 535 | uv_fs_req_cleanup(&req); 536 | return req.result; 537 | } 538 | 539 | //----------------------------------------------queue work-------------------------------------------------- 540 | 541 | int uvc_queue_work( uv_work_cb cb){ 542 | uv_work_t req; 543 | req.data=uvc_self(); 544 | uv_queue_work(uvc_loop_default(),&req,cb,uvc_after_work_cb); 545 | uvc_yield(); 546 | return 0; 547 | } 548 | 549 | 550 | //----------------------------------------------channel---------------------------------------------------- 551 | #define chanbuf_next_start(c) (((c)->start + 1) % (c)->cnt) 552 | #define chanbuf_next_end(c) (((c)->end + 1) % (c)->cnt) 553 | #define chanbuf_empty(c) ((c)->start == (c)->end) 554 | #define chanbuf_full(c) ((c)->cnt==0? 
1:(chanbuf_next_end(c) == (c)->start)) 555 | 556 | static void chanbuf_push(channel *chan,uint8_t *buf){ 557 | if(!chanbuf_full(chan)){ 558 | memcpy(chan->buf+chan->end*chan->size,buf,chan->size); 559 | chan->end=chanbuf_next_end(chan); 560 | } 561 | } 562 | 563 | static void chanbuf_pop(channel *chan,uint8_t *buf){ 564 | if(!chanbuf_full(chan)){ 565 | memcpy(buf,chan->buf+chan->start*chan->size,chan->size); 566 | chan->start=chanbuf_next_start(chan); 567 | } 568 | } 569 | 570 | 571 | 572 | channel_pool pool; 573 | 574 | //#define channel_pool_get(p,i) (p)->channels[(i)%MAX_CHANNEL_POOL] 575 | #define channel_pool_put(p,i,c) (p)->channels[(i)%MAX_CHANNEL_POOL]=(c) 576 | #define channel_pool_remove(p,i) (p)->channels[(i)%MAX_CHANNEL_POOL]=NULL 577 | static channel *channel_pool_get(channel_pool *pool, channel_t i){ 578 | channel *chan = pool->channels[(i) % MAX_CHANNEL_POOL]; 579 | if (chan && chan->id == i){ 580 | return chan; 581 | } 582 | return NULL; 583 | 584 | } 585 | 586 | static void channel_queue_put(queue_t *q, uvc_ctx *ctx){ 587 | queue_insert_head(q, &ctx->i_node); 588 | } 589 | 590 | static uvc_ctx *channel_queue_get(queue_t *q){ 591 | queue_t *node = NULL; 592 | node = queue_last(q); 593 | queue_remove(node); 594 | /*is selected*/ 595 | if (node->ext == NULL){ 596 | return queue_data(node, uvc_ctx, i_node); 597 | } 598 | else{ 599 | return (uvc_ctx *)node->ext; 600 | } 601 | 602 | } 603 | 604 | static int find_empty_slot(channel_pool *pool){ 605 | int i=0; 606 | 607 | for (i = pool->current_empty + 1; i != pool->current_empty; i++){ 608 | pool->maxid++; 609 | if(channel_pool_get(pool,i)==NULL){ 610 | pool->current_empty = i; 611 | return pool->maxid; 612 | } 613 | } 614 | return -1; 615 | } 616 | 617 | int channel_is_closed(channel_t c){ 618 | channel *chan = channel_pool_get(get_chan_pool(),c); 619 | if(chan!=NULL && chan->id == c){ 620 | return 1; 621 | } 622 | return 0; 623 | } 624 | 625 | 626 | 627 | 628 | channel_t channel_create(int cnt,int elem_size){ 629 | 630 | channel *c=NULL; 631 | int idx=0; 632 | channel_pool *pool=get_chan_pool(); 633 | idx=find_empty_slot(pool); 634 | if(idx<0){ 635 | return -1; 636 | } 637 | c=malloc(sizeof(channel) +(elem_size*cnt)); 638 | memset(c,0,sizeof(channel)); 639 | c->size=elem_size; 640 | c->cnt=cnt; 641 | c->id = idx; 642 | queue_init(&c->readq); 643 | queue_init(&c->writq); 644 | 645 | channel_pool_put(pool,idx,c); 646 | return idx; 647 | } 648 | 649 | int channel_close(channel_t c){ 650 | channel_pool *pool=get_chan_pool(); 651 | channel *chan; 652 | uvc_ctx *ctx; 653 | uvc_ctx *ctx_self =uvc_self(); 654 | chan=channel_pool_get(pool,c); 655 | if(chan == NULL ||chan->id !=c || chan->closeing==1){ 656 | return -1; 657 | } 658 | chan->closeing=1; 659 | 660 | 661 | //唤醒所有发送队列,让发生者知道已经发送失败。 662 | do 663 | { 664 | if(queue_empty(&chan->writq) )break; 665 | ctx = channel_queue_get(&chan->writq); 666 | uvc_ready(ctx); 667 | uvc_ready(ctx_self); 668 | uvc_yield(); 669 | }while(1); 670 | 671 | 672 | //如果chanbuf中还有数据,那么不能立即关闭管道, 673 | //否则发送放无法获知释放已经送达数据。 674 | if (chanbuf_empty(chan) && queue_empty(&chan->readq)){ 675 | channel_pool_remove(pool, c); 676 | free(chan); 677 | }else{ 678 | do 679 | { 680 | if(queue_empty(&chan->readq) )break; 681 | ctx = channel_queue_get(&chan->readq); 682 | if (ctx!=ctx_self){ 683 | printf("channel closeing,wakeup task[%s]\n",ctx->name); 684 | uvc_ready(ctx); 685 | uvc_ready(ctx_self); 686 | uvc_yield(); 687 | } 688 | }while(1); 689 | } 690 | 691 | return 0; 692 | } 693 | 694 | int channel_write(channel_t 
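/* channel_write(): buffered fast path -- if the ring has room the element is
   memcpy'd in and one parked reader (if any) is woken. Rendezvous path -- the
   writer enqueues itself on writq with ctx->cbuf pointing at the caller's data,
   then either yields until a reader arrives or, if a reader is already parked,
   wakes it so it can copy the element straight out of cbuf. */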
c,void *buf){ 695 | channel *chan = NULL; 696 | uvc_ctx *ctx; 697 | void *p=NULL; 698 | chan = channel_pool_get(get_chan_pool(), c); 699 | if(chan == NULL ||chan->id !=c || chan->closeing==1){ 700 | return -1; 701 | } 702 | 703 | if(!chanbuf_full(chan)){ 704 | //buffered channel,and buffer not full 705 | chanbuf_push(chan, buf); 706 | if(!queue_empty(&chan->readq)){ 707 | ctx = channel_queue_get(&chan->readq); 708 | uvc_ready(ctx); 709 | } 710 | //写入buffer的数据不管 711 | return 0; 712 | }else{ 713 | ctx = uvc_self(); 714 | ctx->cbuf = buf; 715 | channel_queue_put(&chan->writq, ctx); 716 | //当readq不为空的时候,writeq一定为空 717 | if(queue_empty(&chan->readq)){ 718 | uvc_yield(); 719 | }else{ 720 | ctx = channel_queue_get(&chan->readq); 721 | uvc_ready(ctx); 722 | } 723 | } 724 | //检查管道释放 725 | if(chan == NULL ||chan->id !=c || chan->closeing==1){ 726 | return -1; 727 | } 728 | return 0; 729 | } 730 | 731 | int channel_read(channel_t c,void *buf){ 732 | channel *chan = NULL; 733 | uvc_ctx *ctx=uvc_self(); 734 | void *p=NULL; 735 | queue_t *node = NULL; 736 | channel_pool *pool = get_chan_pool(); 737 | chan = channel_pool_get(pool,c); 738 | if(chan == NULL ||chan->id !=c)return -1; 739 | //当管道关闭的时候,buf中数据任然可读 740 | if (!chanbuf_empty(chan) ){ 741 | chanbuf_pop(chan,buf); 742 | if(chan->closeing==1 &&chanbuf_empty(chan)){ 743 | //管道关闭状态,且buf中数据已经读完,失败chan 744 | free(chan); 745 | channel_pool_remove(pool,c); 746 | return -1; 747 | } 748 | return 0; 749 | } 750 | else if (chan->closeing == 1){ 751 | return -1; 752 | } 753 | 754 | // 没有协程等待写,入队,调度 755 | if(queue_empty(&chan->writq) ){ 756 | channel_queue_put(&chan->readq, ctx); 757 | //queue_add(&chan->readq, &ctx->i_node); 758 | uvc_yield(); 759 | if(chan == NULL ||chan->id !=c || chan->closeing==1){ 760 | return -1; 761 | } 762 | ctx = channel_queue_get(&chan->writq); 763 | 764 | }else { 765 | ctx = channel_queue_get(&chan->writq); 766 | uvc_ready(ctx); 767 | //uvc_ready(uvc_self()); 768 | //uvc_yield(); 769 | //uvc_io_ready(ctx); 770 | //uvc_resume(ctx); 771 | } 772 | memcpy(buf, ctx->cbuf, chan->size); 773 | return 0; 774 | 775 | } 776 | 777 | int channel_readable(channel_t c){ 778 | channel *chan = NULL; 779 | chan=channel_pool_get(get_chan_pool(),c); 780 | if(chan == NULL||chan->id !=c)return -1; 781 | if (!queue_empty(&chan->writq) || !chanbuf_empty(chan)){ 782 | return 1; 783 | } 784 | return 0; 785 | } 786 | 787 | int channel_writeable(channel_t c){ 788 | channel *chan = NULL; 789 | chan = channel_pool_get(get_chan_pool(), c); 790 | if(chan == NULL||chan->id !=c)return -1; 791 | if (!queue_empty(&chan->readq) || !chanbuf_full(chan)){ 792 | return 1; 793 | } 794 | return 0; 795 | } 796 | 797 | 798 | int channel_select_remove(channel *chan){ 799 | uvc_ctx *ctx=uvc_self(); 800 | //queue_t *node; 801 | queue_t *q = queue_head(&chan->readq); 802 | for (; q != queue_sentinel(&chan->readq); q = queue_next(&chan->readq)) { 803 | if(q->ext == ctx){ 804 | queue_remove(q); 805 | } 806 | } 807 | 808 | for (; q != queue_sentinel(&chan->writq); q = queue_next(&chan->writq)) { 809 | if(q->ext == ctx){ 810 | queue_remove(q); 811 | } 812 | } 813 | return 0; 814 | } 815 | 816 | #define HEAP_SELECT_CNT 20 817 | channel_t channel_select(int need_default,char *fmt,...){ 818 | va_list argp; 819 | int cnt = strlen(fmt); 820 | queue_t select_node[HEAP_SELECT_CNT]; 821 | channel_t channels[HEAP_SELECT_CNT]; 822 | channel *chan=NULL; 823 | uvc_ctx *ctx=uvc_self(); 824 | channel_t c; 825 | int i = 0; 826 | if (cnt < 0)return -1; 827 | 828 | va_start(argp, fmt); 829 | for (i = 
0; ireadq, &select_node[i]); 856 | } 857 | else if(fmt[i]=='w'){ 858 | chan = channel_pool_get(get_chan_pool(), c); 859 | queue_insert_head(&chan->writq, &select_node[i]); 860 | }else if(fmt[i] != 'w' && fmt[i] != 'r'){ 861 | abort(); 862 | } 863 | } 864 | uvc_yield(); 865 | c = 0; 866 | for (i = 0; i < cnt; i++) 867 | { 868 | 869 | if (fmt[i] == 'r' && channel_readable(channels[i])){ 870 | assert(c == 0); 871 | c = channels[i]; 872 | } 873 | else if (fmt[i] == 'w' && channel_writeable(channels[i])){ 874 | assert(c == 0); 875 | c = channels[i]; 876 | } 877 | else{ 878 | queue_remove(&select_node[i]); 879 | } 880 | } 881 | assert(c != 0); 882 | //assert("won't be run here :\n"); 883 | 884 | 885 | return c; 886 | } 887 | 888 | 889 | 890 | //-----------------------------------------thread env----------------------------------------------- 891 | 892 | -------------------------------------------------------------------------------- /uvc.h: -------------------------------------------------------------------------------- 1 | #ifndef _UVC_H 2 | #define _UVC_H 3 | #include 4 | #include "coro.h" 5 | #include "queue.h" 6 | #if __cplusplus 7 | extern "C" { 8 | #endif 9 | typedef enum uvc_io_type_s{ 10 | UVC_IO_TCP, 11 | UVC_IO_UDP, 12 | UVC_IO_STREAM, 13 | UVC_IO_FS 14 | }uvc_io_type_t; 15 | 16 | typedef enum uvc_status_s{ 17 | UVC_STATUS_INIT, 18 | UVC_STATUS_PENDING, 19 | UVC_STATUS_READY, 20 | UVC_STATUS_RUNING, 21 | UVC_STATUS_DIE 22 | }uvc_status; 23 | 24 | struct _uvc_ctx{ 25 | char name[64]; 26 | coro_context *prev; 27 | coro_context cur; 28 | struct coro_stack stack; 29 | uv_timer_t timer; 30 | void *data; 31 | queue_t i_node;//for channel queue 32 | //queue_t s_node;//for stack queue 33 | queue_t task_node; 34 | uvc_status status; 35 | void *cbuf; 36 | 37 | }; 38 | typedef struct _uvc_ctx uvc_ctx; 39 | void uvc_init(); 40 | void uvc_create(char *name, unsigned int size, coro_func func, void *arg); 41 | void uvc_ctx_set_name(char *name); 42 | char *uvc_ctx_get_name(); 43 | void uvc_return( ); 44 | void uvc_yield( ); 45 | void uvc_resume(uvc_ctx *ctx); 46 | void uvc_switch(uvc_ctx *prev,uvc_ctx *next); 47 | void uvc_schedule(); 48 | typedef struct _uvc_io{ 49 | uvc_ctx *cur; 50 | uv_buf_t buf; 51 | ssize_t nread; 52 | int return_status; 53 | uv_handle_t *handle; 54 | int timeout; 55 | uv_file file; 56 | }uvc_io; 57 | uv_loop_t* uvc_loop_default(); 58 | void uvc_sleep(uint64_t msec); 59 | int uvc_io_create(uvc_io *io, uvc_io_type_t type); 60 | int uvc_tcp_bind(uvc_io *io, char *ip, short port); 61 | ssize_t uvc_read(uvc_io *io,void *data,size_t len); 62 | ssize_t uvc_read2(uvc_io *io,void *data,size_t len,uint64_t timeout); 63 | ssize_t uvc_write(uvc_io *io,void *data,size_t len); 64 | void uvc_close(uvc_io *io); 65 | int uvc_tcp_connect(uvc_io *io,char *ip,short port); 66 | int uvc_listen(uvc_io *io,int backlog); 67 | int uvc_accept( uvc_io *io,uvc_io *c); 68 | uv_file uvc_fs_open(uvc_io *io,char *path,int flasgs); 69 | int uvc_fs_read(uvc_io *io,void *data,ssize_t size); 70 | int uvc_fs_write(uvc_io *io,void *data,ssize_t size); 71 | int uvc_fs_close(uvc_io *io); 72 | int uvc_fs_stat(char *path,uv_stat_t *statbuf); 73 | int uvc_queue_work(uv_work_cb cb); 74 | 75 | typedef int32_t channel_t; 76 | channel_t channel_create(int cnt, int elem_size); 77 | int channel_close(channel_t c); 78 | int channel_write(channel_t c,void *buf); 79 | int channel_read(channel_t c,void *buf); 80 | channel_t channel_select(int need_default, char *fmt, ...); 81 | 82 | 83 | #if __cplusplus 84 | } 85 | #endif 86 | 87 
| 88 | #endif 89 | --------------------------------------------------------------------------------
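A minimal usage sketch for `channel_select`, assuming a coroutine that fans in from two channels (the function and variable names here are hypothetical): each `'r'`/`'w'` character in the format string pairs with one `channel_t` vararg, and the call yields until one of the registered channels is ready, returning that channel.

```c
/* Hypothetical fan-in consumer: need_default is 0, so channel_select() parks
   this coroutine until chs[0] or chs[1] becomes readable. */
void fan_in(void *ptr){
    channel_t *chs = (channel_t *)ptr;   /* two channels created by the caller */
    int v = 0;
    while (1){
        channel_t ready = channel_select(0, "rr", chs[0], chs[1]);
        if (channel_read(ready, (void *)&v) != 0){
            break;                        /* that channel was closed */
        }
        printf("fan_in: read %d from channel %d\n", v, (int)ready);
    }
    uvc_return();
}
```

Spawn it with `uvc_create` like any other coroutine and drive it with `uvc_schedule()` in `main`.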