├── c10k
│   └── .gitkeep
├── spring-reactive-lottery
│   └── .gitkeep
├── concurrent-servers
│   ├── .vscode
│   │   └── settings.json
│   ├── .DS_Store
│   ├── .gitignore
│   ├── uv-timer-sleep-demo.c
│   ├── uv-timer-work-demo.c
│   ├── utils.h
│   ├── threadspammer.c
│   ├── blocking-listener.c
│   ├── nonblocking-listener.c
│   ├── Makefile
│   ├── threadpool-server.py
│   ├── utils.c
│   ├── sequential-server.c
│   ├── threadpool-server-coroutine.py
│   ├── threaded-server.c
│   ├── simple-client.py
│   ├── server-test.py
│   ├── uv-server.c
│   ├── uv-isprime-server.c
│   ├── epoll-server.c
│   └── select-server.c
├── .gitignore
├── LICENSE
├── io-multiplexing
│   ├── c
│   │   ├── blocking-io.c
│   │   ├── epoll.c
│   │   └── select.c
│   └── go
│       └── epoll.go
└── README.md
/c10k/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/spring-reactive-lottery/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/concurrent-servers/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "files.associations": {
3 | "uv.h": "c"
4 | }
5 | }
--------------------------------------------------------------------------------
/concurrent-servers/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wx-chevalier/ms-seckilling-examples/master/concurrent-servers/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled class file
2 | *.class
3 |
4 | # Log file
5 | *.log
6 |
7 | # BlueJ files
8 | *.ctxt
9 |
10 | # Mobile Tools for Java (J2ME)
11 | .mtj.tmp/
12 |
13 | # Package Files #
14 | *.jar
15 | *.war
16 | *.nar
17 | *.ear
18 | *.zip
19 | *.tar.gz
20 | *.rar
21 |
22 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
23 | hs_err_pid*
24 |
--------------------------------------------------------------------------------
/concurrent-servers/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore all
2 | *
3 |
4 | # Unignore all with extensions
5 | !*.*
6 |
7 | # Unignore all dirs
8 | !*/
9 |
10 | ### Above combination will ignore all files without extension ###
11 |
12 | # Ignore files with extension `.class` & `.sm`
13 | *.class
14 | *.sm
15 |
16 | # Ignore `bin` dir
17 | bin/
18 | # or
19 | */bin/*
20 |
21 | # Unignore all `.jar` in `bin` dir
22 | !*/bin/*.jar
23 |
24 | # Ignore all `library.jar` in `bin` dir
25 | */bin/library.jar
26 |
27 | *.dSYM
--------------------------------------------------------------------------------
/concurrent-servers/uv-timer-sleep-demo.c:
--------------------------------------------------------------------------------
1 | // Demo of sleeping/blocking in a callback, and its effect on the event loop.
2 | //
3 | // Eli Bendersky [http://eli.thegreenplace.net]
4 | // This code is in the public domain.
5 | #include <inttypes.h>
6 | #include <stdio.h>
7 | #include <stdlib.h>
8 | #include <unistd.h>
9 |
10 | #include <uv.h>
11 |
12 | void on_timer(uv_timer_t* timer) {
13 | uint64_t timestamp = uv_hrtime();
14 | printf("on_timer [%" PRIu64 " ms]\n", (timestamp / 1000000) % 100000);
15 |
16 | // "Work"
17 | if (random() % 5 == 0) {
18 | printf("Sleeping...\n");
19 | sleep(3);
20 | }
21 | }
22 |
23 | int main(int argc, const char** argv) {
24 | uv_timer_t timer;
25 | uv_timer_init(uv_default_loop(), &timer);
26 | uv_timer_start(&timer, on_timer, 0, 1000);
27 | return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
28 | }
29 |
--------------------------------------------------------------------------------
/concurrent-servers/uv-timer-work-demo.c:
--------------------------------------------------------------------------------
1 | // Demo of dispatching sleeping/blocking in a callback to a work queue.
2 | //
3 | // Eli Bendersky [http://eli.thegreenplace.net]
4 | // This code is in the public domain.
5 | #include <inttypes.h>
6 | #include <stdio.h>
7 | #include <stdlib.h>
8 | #include <unistd.h>
9 |
10 | #include <uv.h>
11 |
12 | void on_after_work(uv_work_t* req, int status) {
13 | free(req);
14 | }
15 |
16 | void on_work(uv_work_t* req) {
17 | // "Work"
18 | if (random() % 5 == 0) {
19 | printf("Sleeping...\n");
20 | sleep(3);
21 | }
22 | }
23 |
24 | void on_timer(uv_timer_t* timer) {
25 | uint64_t timestamp = uv_hrtime();
26 | printf("on_timer [%" PRIu64 " ms]\n", (timestamp / 1000000) % 100000);
27 |
28 | uv_work_t* work_req = (uv_work_t*)malloc(sizeof(*work_req));
29 | uv_queue_work(uv_default_loop(), work_req, on_work, on_after_work);
30 | }
31 |
32 | int main(int argc, const char** argv) {
33 | uv_timer_t timer;
34 | uv_timer_init(uv_default_loop(), &timer);
35 | uv_timer_start(&timer, on_timer, 0, 1000);
36 | return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
37 | }
38 |
--------------------------------------------------------------------------------
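The work-queue demo above moves the blocking sleep onto libuv's thread pool, but it passes no data between the loop and the worker. A minimal sketch of the usual pattern, assuming a heap-allocated "baton" struct handed through the request's `data` pointer (the struct and its fields are illustrative, not part of this repo):

```c
#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

typedef struct {
  int input;   // filled in on the event-loop thread before queueing
  int result;  // filled in by the worker thread
} work_baton_t;

void do_work(uv_work_t* req) {
  // Runs on a thread-pool thread; blocking here does not stall the loop.
  work_baton_t* baton = (work_baton_t*)req->data;
  baton->result = baton->input * 2;
}

void after_work(uv_work_t* req, int status) {
  // Runs back on the event-loop thread once the worker has finished.
  work_baton_t* baton = (work_baton_t*)req->data;
  printf("result = %d (status %d)\n", baton->result, status);
  free(baton);
  free(req);
}

int main(void) {
  uv_work_t* req = malloc(sizeof(*req));
  work_baton_t* baton = malloc(sizeof(*baton));
  baton->input = 21;
  req->data = baton;
  uv_queue_work(uv_default_loop(), req, do_work, after_work);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}
```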
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 wx-chevalier
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/concurrent-servers/utils.h:
--------------------------------------------------------------------------------
1 | // Utility functions for socket servers in C.
2 | //
3 | // Eli Bendersky [http://eli.thegreenplace.net]
4 | // This code is in the public domain.
5 | #ifndef UTILS_H
6 | #define UTILS_H
7 |
8 | #include <netinet/in.h>
9 | #include <stddef.h>
10 | #include <sys/socket.h>
11 |
12 | // Dies (exits with a failure status) after printing the given printf-like
13 | // message to stdout.
14 | void die(char* fmt, ...);
15 |
16 | // Wraps malloc with error checking: dies if malloc fails.
17 | void* xmalloc(size_t size);
18 |
19 | // Dies (exits with a failure status) after printing the current perror status
20 | // prefixed with msg.
21 | void perror_die(char* msg);
22 |
23 | // Reports a peer connection to stdout. sa is the data populated by a successful
24 | // accept() call.
25 | void report_peer_connected(const struct sockaddr_in* sa, socklen_t salen);
26 |
27 | // Creates a bound and listening INET socket on the given port number. Returns
28 | // the socket fd when successful; dies in case of errors.
29 | int listen_inet_socket(int portnum);
30 |
31 | // Sets the given socket into non-blocking mode.
32 | void make_socket_non_blocking(int sockfd);
33 |
34 | #endif /* UTILS_H */
35 |
--------------------------------------------------------------------------------
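A minimal sketch (not a file from this repo) of how the helpers declared in utils.h are meant to be combined; the sequential and threaded servers below follow this accept-loop shape:

```c
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#include "utils.h"

int main(void) {
  int sockfd = listen_inet_socket(9090);  // creates, binds and listens; dies on error
  while (1) {
    struct sockaddr_in peer_addr;
    socklen_t peer_addr_len = sizeof(peer_addr);
    int newsockfd = accept(sockfd, (struct sockaddr*)&peer_addr, &peer_addr_len);
    if (newsockfd < 0) {
      perror_die("ERROR on accept");
    }
    report_peer_connected(&peer_addr, peer_addr_len);
    // A real server would serve the connection here (sequentially, in a
    // thread, or via an event loop); this sketch just closes it.
    close(newsockfd);
  }
}
```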
/concurrent-servers/threadspammer.c:
--------------------------------------------------------------------------------
1 | // Simple tool for measuring resource usage of a large number of idle threads
2 | // running simultaneously.
3 | //
4 | // Run it with some large argument, for example:
5 | //
6 | // $ ./threadspammer 10000
7 | //
8 | // And watch the process's resource usage via top or other tools.
9 | //
10 | // Eli Bendersky [http://eli.thegreenplace.net]
11 | // This code is in the public domain.
12 | #include <pthread.h>
13 | #include <stdint.h>
14 | #include <stdio.h>
15 | #include <stdlib.h>
16 | #include <unistd.h>
17 |
18 | void* threadfunc(void* p) {
19 | printf("%ld: thread 0x%0x\n", (long)p, (unsigned)pthread_self());
20 | while (1) {
21 | usleep(50 * 1000);
22 | }
23 | return NULL;
24 | }
25 |
26 | int main(int argc, const char** argv) {
27 | int nthreads = 10;
28 | if (argc > 1) {
29 | nthreads = atoi(argv[1]);
30 | }
31 | printf("Running with nthreads = %d\n", nthreads);
32 |
33 | for (long i = 0; i < nthreads; ++i) {
34 | pthread_t t;
35 |
36 | pthread_create(&t, NULL, threadfunc, (void*)i);
37 | usleep(1000);
38 | }
39 |
40 | printf("... waiting ... \n");
41 | while (1) {
42 | usleep(200 * 1000);
43 | }
44 |
45 | return 0;
46 | }
47 |
--------------------------------------------------------------------------------
/io-multiplexing/c/blocking-io.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include <strings.h>
5 | #include <unistd.h>
6 | #include <netinet/in.h>
7 | #include <sys/socket.h>
8 |
9 | #define SERV_PORT 8031
10 | #define BUFSIZE 1024
11 |
12 | int main(void)
13 | {
14 | int lfd, cfd;
15 | struct sockaddr_in serv_addr, clin_addr;
16 | socklen_t clin_len;
17 | char recvbuf[BUFSIZE];
18 | int len;
19 |
20 | lfd = socket(AF_INET, SOCK_STREAM, 0);
21 |
22 | serv_addr.sin_family = AF_INET;
23 | serv_addr.sin_addr.s_addr = htonl(INADDR_ANY);
24 | serv_addr.sin_port = htons(SERV_PORT);
25 |
26 | bind(lfd, (struct sockaddr *)&serv_addr, sizeof(serv_addr));
27 |
28 | listen(lfd, 128);
29 |
30 | while (1)
31 | {
32 | clin_len = sizeof(clin_addr);
33 | cfd = accept(lfd, (struct sockaddr *)&clin_addr, &clin_len);
34 | while ((len = read(cfd, recvbuf, BUFSIZE)) > 0)
35 | {
36 | write(STDOUT_FILENO, recvbuf, len); // echo whatever the client sent to the terminal
37 | // close the current client's connection only when the client sends "stop"
38 | if (strncasecmp(recvbuf, "stop", 4) == 0)
39 | {
40 | close(cfd);
41 | break;
42 | }
43 | }
44 | }
45 | close(lfd);
46 | return 0;
47 | }
--------------------------------------------------------------------------------
/concurrent-servers/blocking-listener.c:
--------------------------------------------------------------------------------
1 | // Simple blocking socket listener.
2 | //
3 | // Eli Bendersky [http://eli.thegreenplace.net]
4 | // This code is in the public domain.
5 | #include <stdint.h>
6 | #include <stdio.h>
7 | #include <stdlib.h>
8 | #include <sys/socket.h>
9 | #include <unistd.h>
10 |
11 | #include "utils.h"
12 |
13 | int main(int argc, const char** argv) {
14 | setvbuf(stdout, NULL, _IONBF, 0);
15 |
16 | int portnum = 9988;
17 | if (argc >= 2) {
18 | portnum = atoi(argv[1]);
19 | }
20 | printf("Listening on port %d\n", portnum);
21 |
22 | int sockfd = listen_inet_socket(portnum);
23 | struct sockaddr_in peer_addr;
24 | socklen_t peer_addr_len = sizeof(peer_addr);
25 |
26 | int newsockfd = accept(sockfd, (struct sockaddr*)&peer_addr, &peer_addr_len);
27 | if (newsockfd < 0) {
28 | perror_die("ERROR on accept");
29 | }
30 | report_peer_connected(&peer_addr, peer_addr_len);
31 |
32 | while (1) {
33 | uint8_t buf[1024];
34 | printf("Calling recv...\n");
35 | int len = recv(newsockfd, buf, sizeof buf, 0);
36 | if (len < 0) {
37 | perror_die("recv");
38 | } else if (len == 0) {
39 | printf("Peer disconnected; I'm done.\n");
40 | break;
41 | }
42 | printf("recv returned %d bytes\n", len);
43 | }
44 |
45 | close(newsockfd);
46 | close(sockfd);
47 |
48 | return 0;
49 | }
50 |
--------------------------------------------------------------------------------
/concurrent-servers/nonblocking-listener.c:
--------------------------------------------------------------------------------
1 | // Simple non-blocking socket listener.
2 | //
3 | // Eli Bendersky [http://eli.thegreenplace.net]
4 | // This code is in the public domain.
5 | #include <errno.h>
6 | #include <fcntl.h>
7 | #include <stdint.h>
8 | #include <stdio.h>
9 | #include <stdlib.h>
10 | #include <sys/socket.h>
11 | #include <unistd.h>
12 |
13 | #include "utils.h"
14 |
15 | int main(int argc, const char** argv) {
16 | setvbuf(stdout, NULL, _IONBF, 0);
17 |
18 | int portnum = 9988;
19 | if (argc >= 2) {
20 | portnum = atoi(argv[1]);
21 | }
22 | printf("Listening on port %d\n", portnum);
23 |
24 | int sockfd = listen_inet_socket(portnum);
25 | struct sockaddr_in peer_addr;
26 | socklen_t peer_addr_len = sizeof(peer_addr);
27 |
28 | int newsockfd = accept(sockfd, (struct sockaddr*)&peer_addr, &peer_addr_len);
29 | if (newsockfd < 0) {
30 | perror_die("ERROR on accept");
31 | }
32 | report_peer_connected(&peer_addr, peer_addr_len);
33 |
34 | // Set nonblocking mode on the socket.
35 | int flags = fcntl(newsockfd, F_GETFL, 0);
36 | if (flags == -1) {
37 | perror_die("fcntl F_GETFL");
38 | }
39 |
40 | if (fcntl(newsockfd, F_SETFL, flags | O_NONBLOCK) == -1) {
41 | perror_die("fcntl F_SETFL O_NONBLOCK");
42 | }
43 |
44 | while (1) {
45 | uint8_t buf[1024];
46 | printf("Calling recv...\n");
47 | int len = recv(newsockfd, buf, sizeof buf, 0);
48 | if (len < 0) {
49 | if (errno == EAGAIN || errno == EWOULDBLOCK) {
50 | // No data on the socket; sleep a bit and re-try recv().
51 | usleep(200 * 1000);
52 | continue;
53 | }
54 | perror_die("recv");
55 | } else if (len == 0) {
56 | printf("Peer disconnected; I'm done.\n");
57 | break;
58 | }
59 | printf("recv returned %d bytes\n", len);
60 | }
61 |
62 | close(newsockfd);
63 | close(sockfd);
64 |
65 | return 0;
66 | }
67 |
--------------------------------------------------------------------------------
/concurrent-servers/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for the C code in concurrent-servers.
2 | #
3 | # This code is in the public domain.
4 | CC = gcc
5 | CCFLAGS = -std=gnu99 -Wall -O3 -g -DNDEBUG -pthread
6 | LDFLAGS = -lpthread -pthread
7 |
8 | # It's possible to compile uv-server after installing libuv. The full
9 | # instructions for installation I used (including `make install`) are from:
10 | # https://github.com/libuv/libuv/blob/master/README.md.
11 | # libuv compiles into a shared library which is placed alongside the .a in the
12 | # installation directory.
13 | LDLIBUV = -luv -Wl,-rpath=/usr/local/lib
14 |
15 | EXECUTABLES = \
16 | sequential-server \
17 | select-server \
18 | epoll-server \
19 | uv-server \
20 | uv-timer-sleep-demo \
21 | uv-timer-work-demo \
22 | uv-isprime-server \
23 | threadspammer \
24 | blocking-listener \
25 | nonblocking-listener \
26 | threaded-server
27 |
28 | all: $(EXECUTABLES)
29 |
30 | sequential-server: utils.c sequential-server.c
31 | $(CC) $(CCFLAGS) $^ -o $@ $(LDFLAGS)
32 |
33 | select-server: utils.c select-server.c
34 | $(CC) $(CCFLAGS) $^ -o $@ $(LDFLAGS)
35 |
36 | threaded-server: utils.c threaded-server.c
37 | $(CC) $(CCFLAGS) $^ -o $@ $(LDFLAGS)
38 |
39 | epoll-server: utils.c epoll-server.c
40 | $(CC) $(CCFLAGS) $^ -o $@ $(LDFLAGS)
41 |
42 | uv-server: utils.c uv-server.c
43 | $(CC) $(CCFLAGS) $^ -o $@ $(LDFLAGS) $(LDLIBUV)
44 |
45 | uv-timer-sleep-demo: utils.c uv-timer-sleep-demo.c
46 | $(CC) $(CCFLAGS) $^ -o $@ $(LDFLAGS) $(LDLIBUV)
47 |
48 | uv-timer-work-demo: utils.c uv-timer-work-demo.c
49 | $(CC) $(CCFLAGS) $^ -o $@ $(LDFLAGS) $(LDLIBUV)
50 |
51 | uv-isprime-server: utils.c uv-isprime-server.c
52 | $(CC) $(CCFLAGS) $^ -o $@ $(LDFLAGS) $(LDLIBUV)
53 |
54 | threadspammer: threadspammer.c
55 | $(CC) $(CCFLAGS) $^ -o $@ $(LDFLAGS)
56 |
57 | blocking-listener: utils.c blocking-listener.c
58 | $(CC) $(CCFLAGS) $^ -o $@ $(LDFLAGS)
59 |
60 | nonblocking-listener: utils.c nonblocking-listener.c
61 | $(CC) $(CCFLAGS) $^ -o $@ $(LDFLAGS)
62 |
63 | .PHONY: clean format
64 |
65 | clean:
66 | rm -f $(EXECUTABLES) *.o
67 |
68 | format:
69 | clang-format -style=file -i *.c *.h
70 |
--------------------------------------------------------------------------------
/io-multiplexing/go/epoll.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "net"
6 | "os"
7 | "syscall"
8 | )
9 |
10 | const (
11 | EPOLLET = 1 << 31
12 | MaxEpollEvents = 32
13 | )
14 |
15 | func echo(fd int) {
16 | defer syscall.Close(fd)
17 | var buf [32 * 1024]byte
18 | for {
19 | nbytes, e := syscall.Read(fd, buf[:])
20 | if nbytes > 0 {
21 | fmt.Printf(">>> %s", buf[:nbytes])
22 | syscall.Write(fd, buf[:nbytes])
23 | fmt.Printf("<<< %s", buf[:nbytes])
24 | }
25 | if e != nil {
26 | break
27 | }
28 | }
29 | }
30 |
31 | func main() {
32 | var event syscall.EpollEvent
33 | var events [MaxEpollEvents]syscall.EpollEvent
34 |
35 | fd, err := syscall.Socket(syscall.AF_INET, syscall.O_NONBLOCK|syscall.SOCK_STREAM, 0)
36 | if err != nil {
37 | fmt.Println(err)
38 | os.Exit(1)
39 | }
40 | defer syscall.Close(fd)
41 |
42 | if err = syscall.SetNonblock(fd, true); err != nil {
43 | fmt.Println("setnonblock1: ", err)
44 | os.Exit(1)
45 | }
46 |
47 | addr := syscall.SockaddrInet4{Port: 2000}
48 | copy(addr.Addr[:], net.ParseIP("0.0.0.0").To4())
49 |
50 | syscall.Bind(fd, &addr)
51 | syscall.Listen(fd, 10)
52 |
53 | epfd, e := syscall.EpollCreate1(0)
54 | if e != nil {
55 | fmt.Println("epoll_create1: ", e)
56 | os.Exit(1)
57 | }
58 | defer syscall.Close(epfd)
59 |
60 | event.Events = syscall.EPOLLIN
61 | event.Fd = int32(fd)
62 | if e = syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, fd, &event); e != nil {
63 | fmt.Println("epoll_ctl: ", e)
64 | os.Exit(1)
65 | }
66 |
67 | for {
68 | nevents, e := syscall.EpollWait(epfd, events[:], -1)
69 | if e != nil {
70 | fmt.Println("epoll_wait: ", e)
71 | break
72 | }
73 |
74 | for ev := 0; ev < nevents; ev++ {
75 | if int(events[ev].Fd) == fd {
76 | connFd, _, err := syscall.Accept(fd)
77 | if err != nil {
78 | fmt.Println("accept: ", err)
79 | continue
80 | }
81 | syscall.SetNonblock(connFd, true)
82 | event.Events = syscall.EPOLLIN | EPOLLET
83 | event.Fd = int32(connFd)
84 | if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, connFd, &event); err != nil {
85 | fmt.Print("epoll_ctl: ", connFd, err)
86 | os.Exit(1)
87 | }
88 | } else {
89 | go echo(int(events[ev].Fd))
90 | }
91 | }
92 |
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
/concurrent-servers/threadpool-server.py:
--------------------------------------------------------------------------------
1 | # Threaded socket server - accepting multiple clients concurrently, by
2 | # dispatching them into a thread pool.
3 | #
4 | # Eli Bendersky [http://eli.thegreenplace.net]
5 | # This code is in the public domain.
6 | import argparse
7 | from concurrent.futures import ThreadPoolExecutor
8 | from enum import Enum
9 | import socket
10 | import sys
11 |
12 | ProcessingState = Enum('ProcessingState', 'WAIT_FOR_MSG IN_MSG')
13 |
14 |
15 | def serve_connection(sockobj, client_address):
16 | print('{0} connected'.format(client_address))
17 | sockobj.sendall(b'*')
18 | state = ProcessingState.WAIT_FOR_MSG
19 |
20 | while True:
21 | try:
22 | buf = sockobj.recv(1024)
23 | if not buf:
24 | break
25 | except IOError as e:
26 | break
27 | for b in buf:
28 | if state == ProcessingState.WAIT_FOR_MSG:
29 | if b == ord(b'^'):
30 | state = ProcessingState.IN_MSG
31 | elif state == ProcessingState.IN_MSG:
32 | if b == ord(b'$'):
33 | state = ProcessingState.WAIT_FOR_MSG
34 | else:
35 | sockobj.send(bytes([b + 1]))
36 | else:
37 | assert False
38 |
39 | print('{0} done'.format(client_address))
40 | sys.stdout.flush()
41 | sockobj.close()
42 |
43 |
44 | if __name__ == '__main__':
45 | argparser = argparse.ArgumentParser('Threadpool server')
46 | argparser.add_argument('--port', type=int, default=9090, help='Server port')
47 | argparser.add_argument('-n', type=int,
48 | default=64, help='Number of threads in pool')
49 | args = argparser.parse_args()
50 |
51 | pool = ThreadPoolExecutor(args.n)
52 | sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
53 | sockobj.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
54 | sockobj.bind(('localhost', args.port))
55 | sockobj.listen(15)
56 |
57 | try:
58 | while True:
59 | client_socket, client_address = sockobj.accept()
60 | pool.submit(serve_connection, client_socket, client_address)
61 | except KeyboardInterrupt as e:
62 | print(e)
63 | sockobj.close()
64 |
--------------------------------------------------------------------------------
/concurrent-servers/utils.c:
--------------------------------------------------------------------------------
1 | // Utility functions for socket servers in C.
2 | //
3 | // Eli Bendersky [http://eli.thegreenplace.net]
4 | // This code is in the public domain.
5 | #include "utils.h"
6 |
7 | #include <netdb.h>
8 | #include <stdarg.h>
9 | #include <stdio.h>
10 | #include <stdlib.h>
11 | #include <string.h>
12 | #include <sys/socket.h>
13 | #include <sys/types.h>
14 | #define _GNU_SOURCE
15 | #include <fcntl.h>
16 |
17 | #define N_BACKLOG 64
18 |
19 | void die(char* fmt, ...) {
20 | va_list args;
21 | va_start(args, fmt);
22 | vfprintf(stderr, fmt, args);
23 | va_end(args);
24 | fprintf(stderr, "\n");
25 | exit(EXIT_FAILURE);
26 | }
27 |
28 | void* xmalloc(size_t size) {
29 | void* ptr = malloc(size);
30 | if (!ptr) {
31 | die("malloc failed");
32 | }
33 | return ptr;
34 | }
35 |
36 | void perror_die(char* msg) {
37 | perror(msg);
38 | exit(EXIT_FAILURE);
39 | }
40 |
41 | void report_peer_connected(const struct sockaddr_in* sa, socklen_t salen) {
42 | char hostbuf[NI_MAXHOST];
43 | char portbuf[NI_MAXSERV];
44 | if (getnameinfo((struct sockaddr*)sa, salen, hostbuf, NI_MAXHOST, portbuf,
45 | NI_MAXSERV, 0) == 0) {
46 | printf("peer (%s, %s) connected\n", hostbuf, portbuf);
47 | } else {
48 | printf("peer (unknown) connected\n");
49 | }
50 | }
51 |
52 | int listen_inet_socket(int portnum) {
53 | int sockfd = socket(AF_INET, SOCK_STREAM, 0);
54 | if (sockfd < 0) {
55 | perror_die("ERROR opening socket");
56 | }
57 |
58 | // This helps avoid spurious EADDRINUSE when the previous instance of this
59 | // server died.
60 | int opt = 1;
61 | if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
62 | perror_die("setsockopt");
63 | }
64 |
65 | struct sockaddr_in serv_addr;
66 | memset(&serv_addr, 0, sizeof(serv_addr));
67 | serv_addr.sin_family = AF_INET;
68 | serv_addr.sin_addr.s_addr = INADDR_ANY;
69 | serv_addr.sin_port = htons(portnum);
70 |
71 | if (bind(sockfd, (struct sockaddr*)&serv_addr, sizeof(serv_addr)) < 0) {
72 | perror_die("ERROR on binding");
73 | }
74 |
75 | if (listen(sockfd, N_BACKLOG) < 0) {
76 | perror_die("ERROR on listen");
77 | }
78 |
79 | return sockfd;
80 | }
81 |
82 | void make_socket_non_blocking(int sockfd) {
83 | int flags = fcntl(sockfd, F_GETFL, 0);
84 | if (flags == -1) {
85 | perror_die("fcntl F_GETFL");
86 | }
87 |
88 | if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) == -1) {
89 | perror_die("fcntl F_SETFL O_NONBLOCK");
90 | }
91 | }
92 |
--------------------------------------------------------------------------------
/concurrent-servers/sequential-server.c:
--------------------------------------------------------------------------------
1 | // Sequential socket server - accepting one client at a time.
2 | //
3 | // Eli Bendersky [http://eli.thegreenplace.net]
4 | // This code is in the public domain.
5 | #include <netinet/in.h>
6 | #include <stdint.h>
7 | #include <stdio.h>
8 | #include <stdlib.h>
9 | #include <string.h>
10 | #include <sys/socket.h>
11 | #include <unistd.h>
12 |
13 | #include "utils.h"
14 |
15 | typedef enum { WAIT_FOR_MSG, IN_MSG } ProcessingState;
16 |
17 | void serve_connection(int sockfd) {
18 | // Clients attempting to connect and send data will succeed even before the
19 | // connection is accept()-ed by the server. Therefore, to better simulate
20 | // blocking of other clients while one is being served, do this "ack" from the
21 | // server which the client expects to see before proceeding.
22 | if (send(sockfd, "*", 1, 0) < 1) {
23 | perror_die("send");
24 | }
25 |
26 | ProcessingState state = WAIT_FOR_MSG;
27 |
28 | while (1) {
29 | uint8_t buf[1024];
30 | int len = recv(sockfd, buf, sizeof buf, 0);
31 | if (len < 0) {
32 | perror_die("recv");
33 | } else if (len == 0) {
34 | break;
35 | }
36 |
37 | for (int i = 0; i < len; ++i) {
38 | switch (state) {
39 | case WAIT_FOR_MSG:
40 | if (buf[i] == '^') {
41 | state = IN_MSG;
42 | }
43 | break;
44 | case IN_MSG:
45 | if (buf[i] == '$') {
46 | state = WAIT_FOR_MSG;
47 | } else {
48 | buf[i] += 1;
49 | if (send(sockfd, &buf[i], 1, 0) < 1) {
50 | perror("send error");
51 | close(sockfd);
52 | return;
53 | }
54 | }
55 | break;
56 | }
57 | }
58 | }
59 |
60 | close(sockfd);
61 | }
62 |
63 | int main(int argc, char** argv) {
64 | setvbuf(stdout, NULL, _IONBF, 0);
65 |
66 | int portnum = 9090;
67 | if (argc >= 2) {
68 | portnum = atoi(argv[1]);
69 | }
70 | printf("Serving on port %d\n", portnum);
71 |
72 | int sockfd = listen_inet_socket(portnum);
73 |
74 | while (1) {
75 | struct sockaddr_in peer_addr;
76 | socklen_t peer_addr_len = sizeof(peer_addr);
77 |
78 | int newsockfd =
79 | accept(sockfd, (struct sockaddr*)&peer_addr, &peer_addr_len);
80 |
81 | if (newsockfd < 0) {
82 | perror_die("ERROR on accept");
83 | }
84 |
85 | report_peer_connected(&peer_addr, peer_addr_len);
86 | serve_connection(newsockfd);
87 | printf("peer done\n");
88 | }
89 |
90 | return 0;
91 | }
92 |
--------------------------------------------------------------------------------
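The servers in this directory all speak the same framing protocol: the server sends a `*` ack on connect, bytes between `^` and `$` are echoed back incremented by one, and everything outside a frame is ignored. A hypothetical C client (not part of the repo) illustrating one exchange:

```c
#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  struct sockaddr_in addr = {0};
  addr.sin_family = AF_INET;
  addr.sin_port = htons(9090);
  inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
  if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) < 0) {
    perror("connect");
    return 1;
  }

  char ack;
  recv(fd, &ack, 1, 0);          // the server sends '*' before anything else
  send(fd, "^abc$", 5, 0);       // one frame: ^abc$

  char buf[16];
  int n = recv(fd, buf, sizeof buf, 0);
  printf("got %.*s\n", n, buf);  // expected: "bcd" (possibly split across reads)
  close(fd);
  return 0;
}
```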
/concurrent-servers/threadpool-server-coroutine.py:
--------------------------------------------------------------------------------
1 | # Threaded socket server - accepting multiple clients concurrently, by
2 | # dispatching them into a thread pool.
3 | #
4 | # The client protocol in this sample is implemented with coroutines rather than
5 | # with an explicit state machine, using the technique discussed in:
6 | # https://eli.thegreenplace.net/2009/08/29/co-routines-as-an-alternative-to-state-machines
7 | #
8 | # Eli Bendersky [http://eli.thegreenplace.net]
9 | # This code is in the public domain.
10 | import argparse
11 | from concurrent.futures import ThreadPoolExecutor
12 | import socket
13 | import sys
14 |
15 |
16 | def coroutine(func):
17 | def start(*args,**kwargs):
18 | cr = func(*args,**kwargs)
19 | next(cr)
20 | return cr
21 | return start
22 |
23 |
24 | @coroutine
25 | def client_protocol(target=None):
26 | while True:
27 | # Each iteration of this outer loop processes a whole "frame" (bytes
28 | # delimited by ^....$).
29 | b = (yield)
30 | if b == ord(b'^'):
31 | # Frame starts. Loop until end is encountered and send replies to
32 | # target.
33 | while True:
34 | b = (yield)
35 | if b == ord(b'$'):
36 | break
37 | target.send(bytes([b + 1]))
38 |
39 |
40 | @coroutine
41 | def reply_processor(sockobj):
42 | while True:
43 | reply = (yield)
44 | sockobj.send(reply)
45 |
46 |
47 | def serve_connection(sockobj, client_address):
48 | print('{0} connected'.format(client_address))
49 | sockobj.sendall(b'*')
50 | protocol = client_protocol(target=reply_processor(sockobj))
51 |
52 | while True:
53 | try:
54 | buf = sockobj.recv(1024)
55 | if not buf:
56 | break
57 | except IOError as e:
58 | break
59 | for b in buf:
60 | protocol.send(b)
61 |
62 | print('{0} done'.format(client_address))
63 | sys.stdout.flush()
64 | sockobj.close()
65 |
66 |
67 | if __name__ == '__main__':
68 | argparser = argparse.ArgumentParser('Threadpool server')
69 | argparser.add_argument('--port', type=int, default=9090, help='Server port')
70 | argparser.add_argument('-n', type=int,
71 | default=64, help='Number of threads in pool')
72 | args = argparser.parse_args()
73 |
74 | pool = ThreadPoolExecutor(args.n)
75 | sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
76 | sockobj.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
77 | sockobj.bind(('localhost', args.port))
78 | sockobj.listen(15)
79 |
80 | try:
81 | while True:
82 | client_socket, client_address = sockobj.accept()
83 | pool.submit(serve_connection, client_socket, client_address)
84 | except KeyboardInterrupt as e:
85 | print(e)
86 | sockobj.close()
87 |
--------------------------------------------------------------------------------
/concurrent-servers/threaded-server.c:
--------------------------------------------------------------------------------
1 | // Threaded socket server - accepting multiple clients concurrently, by creating
2 | // a new thread for each connecting client.
3 | //
4 | // Eli Bendersky [http://eli.thegreenplace.net]
5 | // This code is in the public domain.
6 | #include <netinet/in.h>
7 | #include <pthread.h>
8 | #include <stdint.h>
9 | #include <stdio.h>
10 | #include <stdlib.h>
11 | #include <string.h>
12 | #include <sys/socket.h>
13 | #include <unistd.h>
14 |
15 | #include "utils.h"
16 |
17 | typedef struct { int sockfd; } thread_config_t;
18 |
19 | typedef enum { WAIT_FOR_MSG, IN_MSG } ProcessingState;
20 |
21 | void serve_connection(int sockfd) {
22 | if (send(sockfd, "*", 1, 0) < 1) {
23 | perror_die("send");
24 | }
25 |
26 | ProcessingState state = WAIT_FOR_MSG;
27 |
28 | while (1) {
29 | uint8_t buf[1024];
30 | int len = recv(sockfd, buf, sizeof buf, 0);
31 | if (len < 0) {
32 | perror_die("recv");
33 | } else if (len == 0) {
34 | break;
35 | }
36 |
37 | for (int i = 0; i < len; ++i) {
38 | switch (state) {
39 | case WAIT_FOR_MSG:
40 | if (buf[i] == '^') {
41 | state = IN_MSG;
42 | }
43 | break;
44 | case IN_MSG:
45 | if (buf[i] == '$') {
46 | state = WAIT_FOR_MSG;
47 | } else {
48 | buf[i] += 1;
49 | if (send(sockfd, &buf[i], 1, 0) < 1) {
50 | perror("send error");
51 | close(sockfd);
52 | return;
53 | }
54 | }
55 | break;
56 | }
57 | }
58 | }
59 |
60 | close(sockfd);
61 | }
62 |
63 | void* server_thread(void* arg) {
64 | thread_config_t* config = (thread_config_t*)arg;
65 | int sockfd = config->sockfd;
66 | free(config);
67 |
68 | // This cast will work for Linux, but in general casting pthread_t to an
69 | // integral type isn't portable.
70 | unsigned long id = (unsigned long)pthread_self();
71 | printf("Thread %lu created to handle connection with socket %d\n", id,
72 | sockfd);
73 | serve_connection(sockfd);
74 | printf("Thread %lu done\n", id);
75 | return 0;
76 | }
77 |
78 | int main(int argc, char** argv) {
79 | setvbuf(stdout, NULL, _IONBF, 0);
80 |
81 | int portnum = 9090;
82 | if (argc >= 2) {
83 | portnum = atoi(argv[1]);
84 | }
85 | printf("Serving on port %d\n", portnum);
86 | fflush(stdout);
87 |
88 | int sockfd = listen_inet_socket(portnum);
89 |
90 | while (1) {
91 | struct sockaddr_in peer_addr;
92 | socklen_t peer_addr_len = sizeof(peer_addr);
93 |
94 | int newsockfd =
95 | accept(sockfd, (struct sockaddr*)&peer_addr, &peer_addr_len);
96 |
97 | if (newsockfd < 0) {
98 | perror_die("ERROR on accept");
99 | }
100 |
101 | report_peer_connected(&peer_addr, peer_addr_len);
102 | pthread_t the_thread;
103 |
104 | thread_config_t* config = (thread_config_t*)malloc(sizeof(*config));
105 | if (!config) {
106 | die("OOM");
107 | }
108 | config->sockfd = newsockfd;
109 | pthread_create(&the_thread, NULL, server_thread, config);
110 |
111 | // Detach the thread - when it's done, its resources will be cleaned up.
112 | // Since the main thread lives forever, it will outlive the serving threads.
113 | pthread_detach(the_thread);
114 | }
115 |
116 | return 0;
117 | }
118 |
--------------------------------------------------------------------------------
/concurrent-servers/simple-client.py:
--------------------------------------------------------------------------------
1 | # Simple client used to interact with concurrent servers.
2 | #
3 | # Launches N concurrent client connections, each executing a pre-set sequence of
4 | # sends to the server, and logs what was received back.
5 | #
6 | # Tested with Python 3.6
7 | #
8 | # Eli Bendersky [http://eli.thegreenplace.net]
9 | # This code is in the public domain.
10 | import argparse
11 | import logging
12 | import socket
13 | import sys
14 | import threading
15 | import time
16 |
17 |
18 | class ReadThread(threading.Thread):
19 | def __init__(self, name, sockobj):
20 | super().__init__()
21 | self.sockobj = sockobj
22 | self.name = name
23 | self.bufsize = 8 * 1024
24 |
25 | def run(self):
26 | fullbuf = b''
27 | while True:
28 | buf = self.sockobj.recv(self.bufsize)
29 | logging.info('{0} received {1}'.format(self.name, buf))
30 | fullbuf += buf
31 | if b'1111' in fullbuf:
32 | break
33 |
34 |
35 | def make_new_connection(name, host, port):
36 | """Creates a single socket connection to the host:port.
37 |
38 | Sends a pre-set sequence of messages to the server with pre-set delays; in
39 | parallel, reads from the socket in a separate thread.
40 | """
41 | sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
42 | sockobj.connect((host, port))
43 | if sockobj.recv(1) != b'*':
44 | logging.error('Something is wrong! Did not receive *')
45 | logging.info('{0} connected...'.format(name))
46 |
47 | rthread = ReadThread(name, sockobj)
48 | rthread.start()
49 |
50 | s = b'^abc$de^abte$f'
51 | logging.info('{0} sending {1}'.format(name, s))
52 | sockobj.send(s)
53 | time.sleep(1.0)
54 |
55 | s = b'xyz^123'
56 | logging.info('{0} sending {1}'.format(name, s))
57 | sockobj.send(s)
58 | time.sleep(1.0)
59 |
60 | # The 0000 sent to the server here will result in an echo of 1111, which is
61 | # a sign for the reading thread to terminate.
62 | # Add WXY after 0000 to enable kill-switch in some servers.
63 | s = b'25$^ab0000$abab'
64 | logging.info('{0} sending {1}'.format(name, s))
65 | sockobj.send(s)
66 | time.sleep(0.2)
67 |
68 | rthread.join()
69 | sockobj.close()
70 | logging.info('{0} disconnecting'.format(name))
71 |
72 |
73 | def main():
74 | argparser = argparse.ArgumentParser('Simple TCP client')
75 | argparser.add_argument('host', help='Server host name')
76 | argparser.add_argument('port', type=int, help='Server port')
77 | argparser.add_argument('-n', '--num_concurrent', type=int,
78 | default=1,
79 | help='Number of concurrent connections')
80 | args = argparser.parse_args()
81 |
82 | logging.basicConfig(
83 | level=logging.DEBUG,
84 | format='%(levelname)s:%(asctime)s:%(message)s')
85 |
86 | t1 = time.time()
87 | connections = []
88 | for i in range(args.num_concurrent):
89 | name = 'conn{0}'.format(i)
90 | tconn = threading.Thread(target=make_new_connection,
91 | args=(name, args.host, args.port))
92 | tconn.start()
93 | connections.append(tconn)
94 |
95 | for conn in connections:
96 | conn.join()
97 |
98 | print('Elapsed:', time.time() - t1)
99 |
100 |
101 | if __name__ == '__main__':
102 | main()
103 |
--------------------------------------------------------------------------------
/io-multiplexing/c/epoll.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include <errno.h>
5 | #include <fcntl.h>
6 | #include <unistd.h>
7 | #include <sys/types.h>
8 | #include <sys/socket.h>
9 | #include <sys/epoll.h>
10 | #include <netinet/in.h>
11 | #include <arpa/inet.h>
12 |
13 |
14 | #define SERV_PORT 8031
15 | #define MAX_EVENT_NUMBER 1024
16 | #define BUFFER_SIZE 10
17 |
18 | /* Register an EPOLLIN event for file descriptor fd in the epoll kernel event table referred to by epollfd */
19 | void addfd(int epollfd, int fd)
20 | {
21 | struct epoll_event event;
22 | event.data.fd = fd;
23 | event.events = EPOLLIN | EPOLLET;
24 | epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &event);
25 | int old_option = fcntl(fd, F_GETFL);
26 | int new_option = old_option | O_NONBLOCK;
27 | fcntl(fd, F_SETFL, new_option);
28 | }
29 |
30 | void et(struct epoll_event *events, int number, int epollfd, int listenfd)
31 | {
32 | char buf[BUFFER_SIZE];
33 | for (int i = 0; i < number; ++i)
34 | {
35 | int sockfd = events[i].data.fd;
36 | if (sockfd == listenfd)
37 | {
38 | struct sockaddr_in client_address;
39 | socklen_t length = sizeof(client_address);
40 | int connfd = accept(listenfd, (struct sockaddr *)&client_address, &length);
41 | printf("accepted a connection from %s:%d\n", inet_ntoa(client_address.sin_addr), ntohs(client_address.sin_port));
42 |
43 | addfd(epollfd, connfd);
44 | }
45 | else if (events[i].events & EPOLLIN)
46 | {
47 | /* With edge-triggered mode this event will not fire again for the same data, so we read in a loop to make sure the socket buffer is fully drained */
48 | while (1)
49 | {
50 | memset(buf, '\0', BUFFER_SIZE);
51 | int ret = recv(sockfd, buf, BUFFER_SIZE - 1, 0);
52 | if (ret < 0)
53 | {
54 | /* For non-blocking I/O, this condition means all data has been read; epoll can then trigger EPOLLIN on sockfd again to drive the next read */
55 | if ((errno == EAGAIN) || (errno == EWOULDBLOCK))
56 | {
57 | printf("read later\n");
58 | break;
59 | }
60 | close(sockfd);
61 | break;
62 | }
63 | else if (ret == 0)
64 | {
65 | printf("peer connection closed\n");
66 | close(sockfd); break; // the peer is gone, stop reading this fd
67 | }
68 | else
69 | {
70 | printf("get %d bytes of content: %s\n", ret, buf);
71 | }
72 | }
73 | }
74 | }
75 | }
76 |
77 | int main(void)
78 | {
79 | int lfd, epollfd, ret;
80 | struct sockaddr_in serv_addr;
81 |
82 | if ((lfd = socket(AF_INET, SOCK_STREAM, 0)) == -1)
83 | {
84 | perror("socket creation failed");
85 | exit(1);
86 | }
87 |
88 | int opt = 1;
89 | setsockopt(lfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
90 |
91 | memset(&serv_addr, 0, sizeof(serv_addr));
92 | serv_addr.sin_family = AF_INET;
93 | serv_addr.sin_addr.s_addr = htonl(INADDR_ANY);
94 | serv_addr.sin_port = htons(SERV_PORT);
95 |
96 | if (bind(lfd, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) == -1)
97 | {
98 | perror("bind failed");
99 | exit(1);
100 | }
101 |
102 | if (listen(lfd, 5) == -1)
103 | {
104 | perror("listen failed");
105 | exit(1);
106 | }
107 |
108 | struct epoll_event events[MAX_EVENT_NUMBER];
109 | if ((epollfd = epoll_create(5)) == -1)
110 | {
111 | perror("epoll_create failed");
112 | exit(1);
113 | }
114 |
115 | // add the server's listening fd lfd to the epoll event table referred to by epollfd, registering a readable event for lfd
116 | addfd(epollfd, lfd);
117 | while (1)
118 | {
119 | // block waiting for new client connections or incoming client data; returns the number of events to handle
120 | if ((ret = epoll_wait(epollfd, events, MAX_EVENT_NUMBER, -1)) < 0)
121 | {
122 | perror("epoll_wait failed");
123 | exit(1);
124 | }
125 |
126 | et(events, ret, epollfd, lfd);
127 | }
128 |
129 | close(lfd);
130 | return 0;
131 | }
--------------------------------------------------------------------------------
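epoll.c relies on close() implicitly removing a descriptor from the epoll interest set once the last reference to it goes away. A small sketch of doing the deregistration explicitly with EPOLL_CTL_DEL, which some prefer so the fd's lifecycle is visible in the code (the helper name is illustrative, not part of the repo):

```c
#include <sys/epoll.h>
#include <unistd.h>

// Explicitly deregister fd before closing it. Since Linux 2.6.9 the event
// argument may be NULL for EPOLL_CTL_DEL. close() alone would also drop the
// fd from the interest set, so this is optional but makes the intent explicit.
static void delfd(int epollfd, int fd) {
  epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, NULL);
  close(fd);
}
```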
/io-multiplexing/c/select.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include <errno.h>
5 | #include <unistd.h>
6 | #include <sys/types.h>
7 | #include <sys/time.h>
8 | #include <sys/socket.h>
9 | #include <sys/select.h>
10 | #include <netinet/in.h>
11 | #include <arpa/inet.h>
12 |
13 |
14 | #define SERV_PORT 8031
15 | #define BUFSIZE 1024
16 | #define FD_SET_SIZE 128
17 |
18 | int main(void)
19 | {
20 | int lfd, cfd, maxfd, retval;
21 | struct sockaddr_in serv_addr, clin_addr;
22 |
23 | socklen_t clin_len; // size of the address structure
24 |
25 | char recvbuf[BUFSIZE];
26 | int len;
27 |
28 | fd_set read_set, read_set_init;
29 |
30 | int client[FD_SET_SIZE];
31 | int i;
32 | int maxi = -1;
33 |
34 | if ((lfd = socket(AF_INET, SOCK_STREAM, 0)) == -1)
35 | {
36 | perror("socket creation failed");
37 | exit(1);
38 | }
39 |
40 | int opt = 1;
41 | setsockopt(lfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
42 |
43 | memset(&serv_addr, 0, sizeof(serv_addr));
44 | serv_addr.sin_family = AF_INET;
45 | serv_addr.sin_addr.s_addr = htonl(INADDR_ANY);
46 | serv_addr.sin_port = htons(SERV_PORT);
47 |
48 | if (bind(lfd, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) == -1)
49 | {
50 | perror("bind failed");
51 | exit(1);
52 | }
53 |
54 | if (listen(lfd, FD_SET_SIZE) == -1)
55 | {
56 | perror("listen failed");
57 | exit(1);
58 | }
59 |
60 | maxfd = lfd;
61 |
62 | for (i = 0; i < FD_SET_SIZE; ++i)
63 | {
64 | client[i] = -1;
65 | }
66 |
67 | FD_ZERO(&read_set_init);
68 | FD_SET(lfd, &read_set_init);
69 |
70 | while (1)
71 | {
72 | // reinitialize read_set at the start of every iteration
73 | read_set = read_set_init;
74 |
75 | // read_set was just reset, so the already-connected client fds (accepted in previous iterations) must be added back into read_set
76 | for (i = 0; i < FD_SET_SIZE; ++i)
77 | {
78 | if (client[i] > 0)
79 | {
80 | FD_SET(client[i], &read_set);
81 | }
82 | }
83 |
84 | printf("waiting in select\n");
85 | // blocks until some fd in read_set becomes readable; note that read_set contains the server's listening fd in addition to the client fds
86 | retval = select(maxfd + 1, &read_set, NULL, NULL, NULL);
87 | if (retval == -1)
88 | {
89 | perror("select error");
90 | }
91 | else if (retval == 0)
92 | {
93 | printf("timeout\n");
94 | continue;
95 | }
96 | printf("select returned\n");
97 |
98 | //------------------------------------------------------------------------------------------------
99 | // use FD_ISSET to check whether lfd (the server's listening fd) is readable; lfd becomes readable only when a new client connects
100 | if (FD_ISSET(lfd, &read_set))
101 | {
102 | clin_len = sizeof(clin_addr);
103 | if ((cfd = accept(lfd, (struct sockaddr *)&clin_addr, &clin_len)) == -1)
104 | {
105 | perror("accept error");
106 | continue;
107 | }
108 |
109 | for (i = 0; i < FD_SET_SIZE; ++i)
110 | {
111 | if (client[i] < 0)
112 | {
113 | // store the client fd in the client array
114 | client[i] = cfd;
115 | printf("client[%d]: accepted a connection from %s:%d\n", i, inet_ntoa(clin_addr.sin_addr), ntohs(clin_addr.sin_port));
116 | break;
117 | }
118 | }
119 |
120 | // the largest descriptor value must also be recomputed
121 | maxfd = (cfd > maxfd) ? cfd : maxfd;
122 | // maxi bounds the loop over valid client fds below, so we do not have to scan the whole client array
123 | maxi = (i >= maxi) ? ++i : maxi;
124 | }
125 | //------------------------------------------------------------------------------------------------
126 |
127 | for (i = 0; i < maxi; ++i)
128 | {
129 | if (client[i] < 0)
130 | {
131 | continue;
132 | }
133 |
134 | // if a client fd has data available, read it
135 | if (FD_ISSET(client[i], &read_set))
136 | {
137 | // Note: we deliberately do not read() in a while loop here; looping would block on a single client.
138 | // What if one read() cannot take everything?
139 | // If data is left unread, that fd is still readable, so the next select() returns immediately and reading continues here; the outer while(1) + select takes the place of an inner read loop.
140 | len = read(client[i], recvbuf, BUFSIZE);
141 | if (len > 0)
142 | {
143 | write(STDOUT_FILENO, recvbuf, len);
144 | }
145 | else if (len == 0)
146 | {
147 | // the client side closed the connection (read returned 0)
148 | close(client[i]);
149 | printf("client[%d] connection closed\n", i);
150 | FD_CLR(client[i], &read_set);
151 | client[i] = -1;
152 | break;
153 | }
154 | }
155 | }
156 | }
157 |
158 | close(lfd);
159 |
160 | return 0;
161 | }
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Contributors][contributors-shield]][contributors-url]
2 | [![Forks][forks-shield]][forks-url]
3 | [![Stargazers][stars-shield]][stars-url]
4 | [![Issues][issues-shield]][issues-url]
5 | [![MIT License][license-shield]][license-url]
6 |
7 |
8 | # seckilling-examples
9 |
10 | Practical examples for high-availability flash-sale ("seckilling") scenarios.
11 |
12 | Explore the docs » · View Demo · Report Bug · Request Feature
13 |
32 | # Introduction
33 |
35 |
36 | This README started from a blank template.
37 | **To avoid retyping too much info, do a search and replace with your text editor for the following:**
38 | `wx-chevalier`, `seckilling-examples`, `twitter_handle`, `email`
39 |
40 | ## Navigation
41 |
42 | # Getting Started
43 |
44 | To get a local copy up and running follow these simple steps.
45 |
46 | ## Prerequisites
47 |
48 | This is an example of how to list things you need to use the software and how to install them.
49 |
50 | - npm
51 |
52 | ```sh
53 | npm install npm@latest -g
54 | ```
55 |
56 | ## Installation
57 |
58 | 1. Clone the seckilling-examples
59 |
60 | ```sh
61 | git clone https://github.com/wx-chevalier/seckilling-examples.git
62 | ```
63 |
64 | 2. Install NPM packages
65 |
66 | ```sh
67 | npm install
68 | ```
69 |
70 |
71 |
72 | ## Usage
73 |
74 | Use this space to show useful examples of how a project can be used. Additional screenshots, code examples and demos work well in this space. You may also link to more resources.
75 |
76 | _For more examples, please refer to the [Documentation](https://example.com)_
77 |
78 | # About
79 |
80 |
81 |
82 | ## Roadmap
83 |
84 | See the [open issues](https://github.com/wx-chevalier/seckilling-examples/issues) for a list of proposed features (and known issues).
85 |
86 |
87 |
88 | ## Contributing
89 |
90 | Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.
91 |
92 | 1. Fork the Project
93 | 2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)
94 | 3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)
95 | 4. Push to the Branch (`git push origin feature/AmazingFeature`)
96 | 5. Open a Pull Request
97 |
98 |
99 |
100 | ## License
101 |
102 | Distributed under the MIT License. See `LICENSE` for more information.
103 |
104 |
105 |
106 | ## Acknowledgements
107 |
108 | - [Awesome-Lists](https://github.com/wx-chevalier/Awesome-Lists): 📚 Guide to Galaxy, a curated, worthy and up-to-date reading list of links for ITCS: coding, algorithms, software architecture, and AI. 💫 A curated selection of articles, books, resources, and project links covering programming, algorithms, software architecture, artificial intelligence, and related fields.
109 |
110 | - [Awesome-CS-Books](https://github.com/wx-chevalier/Awesome-CS-Books): :books: Awesome CS books/series (.pdf via git lfs) warehouse for geeks: programming languages, software engineering, Web, AI, server-side applications, infrastructure, front-end, etc. :dizzy: An archive of excellent books on computer science and related technology.
111 |
112 | ## Copyright & More | Further Reading
113 |
114 | All of the author's articles follow the [Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License](https://creativecommons.org/licenses/by-nc-nd/4.0/deed.zh); you are welcome to repost them as long as copyright is respected. You can also visit the [NGTE Books](https://ng-tech.icu/books/) home page to browse book lists covering knowledge systems, programming languages, software engineering, patterns and architecture, Web and front-end, server-side development practice and engineering architecture, distributed infrastructure, AI and deep learning, product operations and entrepreneurship, and more:
115 |
116 | [NGTE Books](https://ng-tech.icu/books/)
117 |
118 |
119 |
120 |
121 | [contributors-shield]: https://img.shields.io/github/contributors/wx-chevalier/seckilling-examples.svg?style=flat-square
122 | [contributors-url]: https://github.com/wx-chevalier/seckilling-examples/graphs/contributors
123 | [forks-shield]: https://img.shields.io/github/forks/wx-chevalier/seckilling-examples.svg?style=flat-square
124 | [forks-url]: https://github.com/wx-chevalier/seckilling-examples/network/members
125 | [stars-shield]: https://img.shields.io/github/stars/wx-chevalier/seckilling-examples.svg?style=flat-square
126 | [stars-url]: https://github.com/wx-chevalier/seckilling-examples/stargazers
127 | [issues-shield]: https://img.shields.io/github/issues/wx-chevalier/seckilling-examples.svg?style=flat-square
128 | [issues-url]: https://github.com/wx-chevalier/seckilling-examples/issues
129 | [license-shield]: https://img.shields.io/github/license/wx-chevalier/seckilling-examples.svg?style=flat-square
130 | [license-url]: https://github.com/wx-chevalier/seckilling-examples/blob/master/LICENSE.txt
131 |
--------------------------------------------------------------------------------
/concurrent-servers/server-test.py:
--------------------------------------------------------------------------------
1 | # Tests a concurrent server, by connecting multiple clients sending pre-set
2 | # messages, and comparing the echoes with expected values.
3 | #
4 | # Run with -h for full usage.
5 | #
6 | # Eli Bendersky [http://eli.thegreenplace.net]
7 | # This code is in the public domain.
8 | import argparse
9 | import itertools
10 | import logging
11 | import queue
12 | import socket
13 | import subprocess
14 | import sys
15 | import threading
16 | import time
17 |
18 |
19 | def server_runner(path, args, stop_event):
20 | """Runs the server as a subprocess until stop is requested.
21 |
22 | Run this function in a separate thread!
23 |
24 | path is the path to the server to run, with the given args. If 'path' ends
25 | with .py, a python interpreter is prepended. The args have to be a (possibly
26 | empty) iterable.
27 | stop_event is a threading.Event object; when it's set, the subprocess is
28 | killed and this function returns.
29 | """
30 | runcmd = ['python3.6', '-u', path] if path.endswith('.py') else [path]
31 | runcmd.extend(args)
32 | logging.info('server_runner: executing subprocess "{0}"'.format(runcmd))
33 | proc = subprocess.Popen(runcmd)
34 | logging.info('server_runner waiting for stop event')
35 | stop_event.wait()
36 | logging.info('server_runner sending kill to subprocess')
37 | proc.terminate()
38 | try:
39 | proc.wait(timeout=0.2)
40 | except subprocess.TimeoutExpired:
41 | logging.info('server_runner: subprocess did not die within timeout')
42 |
43 |
44 | def socket_reader(sockobj, outq, exit_event):
45 | """Reads from sockobj, 1 byte at a time; places results in outq.
46 |
47 | This function runs in a loop until the sockobj connection is closed or until
48 | exit_event is set.
49 | """
50 | while not exit_event.is_set():
51 | try:
52 | buf = sockobj.recv(1)
53 | if len(buf) < 1:
54 | break
55 | outq.put(buf)
56 | except socket.timeout:
57 | continue
58 | except OSError:
59 | break
60 |
61 |
62 | def assert_queue_contains(q, val, timeout=0.1):
63 | try:
64 | v = q.get(timeout=timeout)
65 | assert v == val
66 | except queue.Empty:
67 | assert False, f'queue was empty with timeout={timeout}'
68 |
69 |
70 | def assert_queue_empty(q, wait=0.1):
71 | time.sleep(wait)
72 | assert q.empty(), 'queue had {0} with wait={1}'.format(q.get(), wait)
73 |
74 |
75 | def client_thread_runner(client_body_func, port, initial_timeout=0.1):
76 | """Abstracts the function running within a client thread.
77 |
78 | Connects to the port with a socket, launches a reading thread and makes sure
79 | to shut down properly. client_body_func is the actual interaction with a
80 | socket, once connected.
81 | """
82 | sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
83 | sockobj.settimeout(initial_timeout)
84 | sockobj.connect(('localhost', port))
85 | logging.info('{0} connected to server'.format(client_body_func.__name__))
86 |
87 | readq = queue.Queue()
88 | exit_event = threading.Event()
89 | tread = threading.Thread(
90 | target=socket_reader,
91 | args=(sockobj, readq, exit_event))
92 | tread.start()
93 |
94 | try:
95 | client_body_func(sockobj, readq, initial_timeout)
96 | finally:
97 | # Closing the socket before killing the server helps the bound socket be
98 | # fully released on the server side; otherwise it may be kept alive by
99 | # the kernel for a while after the server process exits.
100 | sockobj.shutdown(socket.SHUT_RDWR)
101 | sockobj.close()
102 | exit_event.set()
103 | tread.join()
104 |
105 |
106 | def client0(sock, readq, initial_timeout):
107 | assert_queue_contains(readq, b'*', timeout=initial_timeout)
108 | assert_queue_empty(readq)
109 |
110 |
111 | def client1(sock, readq, initial_timeout):
112 | assert_queue_contains(readq, b'*', timeout=initial_timeout)
113 |
114 | sock.send(b'abcdef')
115 | assert_queue_empty(readq)
116 |
117 | sock.send(b'^')
118 | assert_queue_empty(readq)
119 |
120 | sock.send(b'f')
121 | assert_queue_contains(readq, b'g')
122 |
123 | sock.send(b'1234')
124 | assert_queue_contains(readq, b'2')
125 | assert_queue_contains(readq, b'3')
126 | assert_queue_contains(readq, b'4')
127 | assert_queue_contains(readq, b'5')
128 |
129 | sock.send(b'$')
130 | assert_queue_empty(readq)
131 | sock.send(b'1234')
132 | assert_queue_empty(readq)
133 |
134 | sock.send(b'^')
135 | sock.send(b'xy')
136 | assert_queue_contains(readq, b'y')
137 | assert_queue_contains(readq, b'z')
138 |
139 |
140 | def client2(sock, readq, initial_timeout):
141 | assert_queue_contains(readq, b'*', timeout=initial_timeout)
142 | sock.send(b'^ab$^kl$^80$50')
143 | for b in [b'b', b'c', b'l', b'm', b'9', b'1']:
144 | assert_queue_contains(readq, b)
145 | assert_queue_empty(readq)
146 |
147 |
148 | def client3(sock, readq, initial_timeout):
149 | assert_queue_contains(readq, b'*', timeout=initial_timeout)
150 | sock.send(b'^$^$^$^$^$^$$^$$$$foobarjoemoedoe^$$')
151 | assert_queue_empty(readq)
152 |
153 |
154 | def test_main():
155 | argparser = argparse.ArgumentParser('Server test')
156 | argparser.add_argument('server_path', help='path to the server executable')
157 | argparser.add_argument('-p', '--server-port', default=9090, type=int,
158 | help='the server listens on this port')
159 | argparser.add_argument('--timeout-bump', default=0.0, type=float,
160 | help='amount of time (in sec) by which to bump the '
161 | 'timeout between consecutive clients')
162 | argparser.add_argument('-n', '--num-clients', default=2, type=int,
163 | help='number of clients to launch simultaneously')
164 | argparser.add_argument('--loop', default=1, type=int,
165 | help='launch test in a loop')
166 | args = argparser.parse_args()
167 | assert args.num_clients >= 1
168 |
169 | logging.basicConfig(
170 | level=logging.DEBUG,
171 | format='%(levelname)s:%(asctime)s:%(message)s')
172 |
173 | # Launch the server in a thread, listening on the port.
174 | stop_event = threading.Event()
175 | server_thread = threading.Thread(
176 | target=server_runner,
177 | args=(args.server_path, [str(args.server_port)], stop_event))
178 | server_thread.start()
179 | time.sleep(0.3)
180 |
181 | TIMEOUT = 0.5 + (args.num_clients - 1) * args.timeout_bump
182 |
183 | for i in range(args.loop):
184 | logging.info('** Test iteration {}'.format(i))
185 | client_iter = itertools.cycle([client0, client1, client2, client3])
186 | threads = []
187 | for i in range(args.num_clients):
188 | tester_thread = threading.Thread(
189 | target=client_thread_runner,
190 | args=(next(client_iter), args.server_port, TIMEOUT))
191 | tester_thread.start()
192 | threads.append(tester_thread)
193 |
194 | time.sleep(TIMEOUT)
195 | for thread in threads:
196 | thread.join()
197 |
198 | stop_event.set()
199 |
200 |
201 | if __name__ == '__main__':
202 | test_main()
203 |
--------------------------------------------------------------------------------
/concurrent-servers/uv-server.c:
--------------------------------------------------------------------------------
1 | // Asynchronous socket server - accepting multiple clients concurrently,
2 | // using libuv's event loop.
3 | //
4 | // Eli Bendersky [http://eli.thegreenplace.net]
5 | // This code is in the public domain.
6 | #include <assert.h>
7 | #include <stdint.h>
8 | #include <stdio.h>
9 | #include <stdlib.h>
10 | #include <string.h>
11 | #include "uv.h"
12 |
13 | #include "utils.h"
14 |
15 | #define N_BACKLOG 64
16 |
17 | typedef enum { INITIAL_ACK, WAIT_FOR_MSG, IN_MSG } ProcessingState;
18 |
19 | #define SENDBUF_SIZE 1024
20 |
21 | typedef struct {
22 | ProcessingState state;
23 | char sendbuf[SENDBUF_SIZE];
24 | int sendbuf_end;
25 | uv_tcp_t* client;
26 | } peer_state_t;
27 |
28 | void on_alloc_buffer(uv_handle_t* handle, size_t suggested_size,
29 | uv_buf_t* buf) {
30 | buf->base = (char*)xmalloc(suggested_size);
31 | buf->len = suggested_size;
32 | }
33 |
34 | void on_client_closed(uv_handle_t* handle) {
35 | uv_tcp_t* client = (uv_tcp_t*)handle;
36 | // The client handle owns the peer state storing its address in the data
37 | // field, so we free it here.
38 | if (client->data) {
39 | free(client->data);
40 | }
41 | free(client);
42 | }
43 |
44 | void on_wrote_buf(uv_write_t* req, int status) {
45 | if (status) {
46 | die("Write error: %s\n", uv_strerror(status));
47 | }
48 | peer_state_t* peerstate = (peer_state_t*)req->data;
49 |
50 | // Kill switch for testing leaks in the server. When a client sends a message
51 | // ending with WXY (note the shift-by-1 in sendbuf), this signals the server
52 | // to clean up and exit, by stopping the default event loop. Running the
53 | // server under valgrind can now track memory leaks, and a run should be
54 | // clean except a single uv_tcp_t allocated for the client that sent the kill
55 | // signal (it's still connected when we stop the loop and exit).
56 | if (peerstate->sendbuf_end >= 3 &&
57 | peerstate->sendbuf[peerstate->sendbuf_end - 3] == 'X' &&
58 | peerstate->sendbuf[peerstate->sendbuf_end - 2] == 'Y' &&
59 | peerstate->sendbuf[peerstate->sendbuf_end - 1] == 'Z') {
60 | free(peerstate);
61 | free(req);
62 | uv_stop(uv_default_loop());
63 | return;
64 | }
65 |
66 | // The send buffer is done; move pointer back to 0.
67 | peerstate->sendbuf_end = 0;
68 | free(req);
69 | }
70 |
71 | void on_peer_read(uv_stream_t* client, ssize_t nread, const uv_buf_t* buf) {
72 | if (nread < 0) {
73 | if (nread != UV_EOF) {
74 | fprintf(stderr, "Read error: %s\n", uv_strerror(nread));
75 | }
76 | uv_close((uv_handle_t*)client, on_client_closed);
77 | } else if (nread == 0) {
78 | // From the documentation of uv_read_cb: nread might be 0, which does not
79 | // indicate an error or EOF. This is equivalent to EAGAIN or EWOULDBLOCK
80 | // under read(2).
81 | } else {
82 | // nread > 0
83 | assert(buf->len >= nread);
84 |
85 | peer_state_t* peerstate = (peer_state_t*)client->data;
86 | if (peerstate->state == INITIAL_ACK) {
87 | // If the initial ACK hasn't been sent for some reason, ignore whatever
88 | // the client sends in.
89 | free(buf->base);
90 | return;
91 | }
92 |
93 | // Run the protocol state machine.
94 | for (int i = 0; i < nread; ++i) {
95 | switch (peerstate->state) {
96 | case INITIAL_ACK:
97 | assert(0 && "can't reach here");
98 | break;
99 | case WAIT_FOR_MSG:
100 | if (buf->base[i] == '^') {
101 | peerstate->state = IN_MSG;
102 | }
103 | break;
104 | case IN_MSG:
105 | if (buf->base[i] == '$') {
106 | peerstate->state = WAIT_FOR_MSG;
107 | } else {
108 | assert(peerstate->sendbuf_end < SENDBUF_SIZE);
109 | peerstate->sendbuf[peerstate->sendbuf_end++] = buf->base[i] + 1;
110 | }
111 | break;
112 | }
113 | }
114 |
115 | if (peerstate->sendbuf_end > 0) {
116 | // We have data to send. The write buffer will point to the buffer stored
117 | // in the peer state for this client.
118 | uv_buf_t writebuf =
119 | uv_buf_init(peerstate->sendbuf, peerstate->sendbuf_end);
120 | uv_write_t* writereq = (uv_write_t*)xmalloc(sizeof(*writereq));
121 | writereq->data = peerstate;
122 | int rc;
123 | if ((rc = uv_write(writereq, (uv_stream_t*)client, &writebuf, 1,
124 | on_wrote_buf)) < 0) {
125 | die("uv_write failed: %s", uv_strerror(rc));
126 | }
127 | }
128 | }
129 | free(buf->base);
130 | }
131 |
132 | void on_wrote_init_ack(uv_write_t* req, int status) {
133 | if (status) {
134 | die("Write error: %s\n", uv_strerror(status));
135 | }
136 | peer_state_t* peerstate = (peer_state_t*)req->data;
137 | // Flip the peer state to WAIT_FOR_MSG, and start listening for incoming data
138 | // from this peer.
139 | peerstate->state = WAIT_FOR_MSG;
140 | peerstate->sendbuf_end = 0;
141 |
142 | int rc;
143 | if ((rc = uv_read_start((uv_stream_t*)peerstate->client, on_alloc_buffer,
144 | on_peer_read)) < 0) {
145 | die("uv_read_start failed: %s", uv_strerror(rc));
146 | }
147 |
148 | // Note: the write request doesn't own the peer state, hence we only free the
149 | // request itself, not the state.
150 | free(req);
151 | }
152 |
153 | void on_peer_connected(uv_stream_t* server_stream, int status) {
154 | if (status < 0) {
155 | fprintf(stderr, "Peer connection error: %s\n", uv_strerror(status));
156 | return;
157 | }
158 |
159 | // client will represent this peer; it's allocated on the heap and only
160 | // released when the client disconnects. The client holds a pointer to
161 | // peer_state_t in its data field; this peer state tracks the protocol state
162 | // with this client throughout interaction.
163 | uv_tcp_t* client = (uv_tcp_t*)xmalloc(sizeof(*client));
164 | int rc;
165 | if ((rc = uv_tcp_init(uv_default_loop(), client)) < 0) {
166 | die("uv_tcp_init failed: %s", uv_strerror(rc));
167 | }
168 | client->data = NULL;
169 |
170 | if (uv_accept(server_stream, (uv_stream_t*)client) == 0) {
171 | struct sockaddr_storage peername;
172 | int namelen = sizeof(peername);
173 | if ((rc = uv_tcp_getpeername(client, (struct sockaddr*)&peername,
174 | &namelen)) < 0) {
175 | die("uv_tcp_getpeername failed: %s", uv_strerror(rc));
176 | }
177 | report_peer_connected((const struct sockaddr_in*)&peername, namelen);
178 |
179 | // Initialize the peer state for a new client: we start by sending the peer
180 | // the initial '*' ack.
181 | peer_state_t* peerstate = (peer_state_t*)xmalloc(sizeof(*peerstate));
182 | peerstate->state = INITIAL_ACK;
183 | peerstate->sendbuf[0] = '*';
184 | peerstate->sendbuf_end = 1;
185 | peerstate->client = client;
186 | client->data = peerstate;
187 |
188 | // Enqueue the write request to send the ack; when it's done,
189 | // on_wrote_init_ack will be called. The peer state is passed to the write
190 | // request via the data pointer; the write request does not own this peer
191 | // state - it's owned by the client handle.
192 | uv_buf_t writebuf = uv_buf_init(peerstate->sendbuf, peerstate->sendbuf_end);
193 | uv_write_t* req = (uv_write_t*)xmalloc(sizeof(*req));
194 | req->data = peerstate;
195 | if ((rc = uv_write(req, (uv_stream_t*)client, &writebuf, 1,
196 | on_wrote_init_ack)) < 0) {
197 | die("uv_write failed: %s", uv_strerror(rc));
198 | }
199 | } else {
200 | uv_close((uv_handle_t*)client, on_client_closed);
201 | }
202 | }
203 |
204 | int main(int argc, const char** argv) {
205 | setvbuf(stdout, NULL, _IONBF, 0);
206 |
207 | int portnum = 9090;
208 | if (argc >= 2) {
209 | portnum = atoi(argv[1]);
210 | }
211 | printf("Serving on port %d\n", portnum);
212 |
213 | int rc;
214 | uv_tcp_t server_stream;
215 | if ((rc = uv_tcp_init(uv_default_loop(), &server_stream)) < 0) {
216 | die("uv_tcp_init failed: %s", uv_strerror(rc));
217 | }
218 |
219 | struct sockaddr_in server_address;
220 | if ((rc = uv_ip4_addr("0.0.0.0", portnum, &server_address)) < 0) {
221 | die("uv_ip4_addr failed: %s", uv_strerror(rc));
222 | }
223 |
224 | if ((rc = uv_tcp_bind(&server_stream, (const struct sockaddr*)&server_address,
225 | 0)) < 0) {
226 | die("uv_tcp_bind failed: %s", uv_strerror(rc));
227 | }
228 |
229 | // Listen on the socket for new peers to connect. When a new peer connects,
230 | // the on_peer_connected callback will be invoked.
231 | if ((rc = uv_listen((uv_stream_t*)&server_stream, N_BACKLOG,
232 | on_peer_connected)) < 0) {
233 | die("uv_listen failed: %s", uv_strerror(rc));
234 | }
235 |
236 | // Run the libuv event loop.
237 | uv_run(uv_default_loop(), UV_RUN_DEFAULT);
238 |
239 | // If uv_run returned, close the default loop before exiting.
240 | return uv_loop_close(uv_default_loop());
241 | }
242 |
--------------------------------------------------------------------------------
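
A minimal client sketch (illustrative only, not a file in this repo) for the framing protocol this server and the epoll/select servers below implement: the server greets a new peer with '*', the client frames each payload between '^' and '$', and every payload byte comes back shifted up by one. The host, port (127.0.0.1:9090) and the terse error handling are assumptions made for illustration.

// protocol-client-sketch.c -- hypothetical, illustrative only.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0) { perror("socket"); return 1; }

  struct sockaddr_in addr = {0};
  addr.sin_family = AF_INET;
  addr.sin_port = htons(9090);
  inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
  if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) < 0) {
    perror("connect");
    return 1;
  }

  // Wait for the server's initial '*' ack before sending anything.
  char ack;
  if (recv(fd, &ack, 1, 0) != 1 || ack != '*') {
    fprintf(stderr, "expected '*' ack\n");
    return 1;
  }

  // Send the payload "abc" framed by '^' and '$'.
  const char* msg = "^abc$";
  send(fd, msg, strlen(msg), 0);

  // Expect the shifted payload back: "bcd" (possibly split across reads).
  char reply[16] = {0};
  ssize_t n = recv(fd, reply, sizeof(reply) - 1, 0);
  printf("got %zd bytes: %s\n", n, reply);

  close(fd);
  return 0;
}
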
/concurrent-servers/uv-isprime-server.c:
--------------------------------------------------------------------------------
1 | // Primality testing server. Accepts a number and sends back "prime" or
2 | // "composite" after testing the number for primality (in the naive, slow way).
3 | //
4 | // Can be configured via an environment variable to do this in a blocking way,
5 | // without using libuv's work queue (MODE=BLOCK).
6 | //
7 | // Eli Bendersky [http://eli.thegreenplace.net]
8 | // This code is in the public domain.
9 | #include <assert.h>
10 | #include <ctype.h>
11 | #include <inttypes.h>
12 | #include <stdbool.h>
13 | #include <stdint.h>
14 | #include <stdio.h>
15 | #include <stdlib.h>
16 | #include <string.h>
17 | #include "uv.h"
18 |
19 | #include "utils.h"
20 |
21 | #define N_BACKLOG 64
22 |
23 | #define SENDBUF_SIZE 1024
24 |
25 | // State maintained for each connected client.
26 | typedef struct {
27 | uint64_t number;
28 | uv_tcp_t* client;
29 | char sendbuf[SENDBUF_SIZE];
30 | int sendbuf_end;
31 | } peer_state_t;
32 |
33 | // Sets sendbuf/sendbuf_end in the given state to the contents of the
34 | // NULL-terminated string passed as 'str'.
35 | void set_peer_sendbuf(peer_state_t* state, const char* str) {
36 | int i = 0;
37 | for (; str[i]; ++i) {
38 | assert(i < SENDBUF_SIZE);
39 | state->sendbuf[i] = str[i];
40 | }
41 | state->sendbuf_end = i;
42 | }
43 |
44 | void on_alloc_buffer(uv_handle_t* handle, size_t suggested_size,
45 | uv_buf_t* buf) {
46 | buf->base = (char*)xmalloc(suggested_size);
47 | buf->len = suggested_size;
48 | }
49 |
50 | void on_client_closed(uv_handle_t* handle) {
51 | uv_tcp_t* client = (uv_tcp_t*)handle;
52 | // The client handle owns the peer state; its address is stored in the
53 | // handle's data field, so we free it here.
54 | if (client->data) {
55 | free(client->data);
56 | }
57 | free(client);
58 | }
59 |
60 | // Naive primality test, iterating all the way to sqrt(n) to find numbers that
61 | // divide n.
62 | bool isprime(uint64_t n) {
63 | if (n % 2 == 0) {
64 | return n == 2 ? true : false;
65 | }
66 |
67 | for (uint64_t r = 3; r * r <= n; r += 2) {
68 | if (n % r == 0) {
69 | return false;
70 | }
71 | }
72 | return true;
73 | }
74 |
75 | void on_sent_response(uv_write_t* req, int status) {
76 | if (status) {
77 | die("Write error: %s\n", uv_strerror(status));
78 | }
79 | free(req);
80 | }
81 |
82 | // Runs in a separate thread, so it can do blocking/time-consuming operations.
83 | void on_work_submitted(uv_work_t* req) {
84 | peer_state_t* peerstate = (peer_state_t*)req->data;
85 | printf("work submitted: %" PRIu64 "\n", peerstate->number);
86 | if (isprime(peerstate->number)) {
87 | set_peer_sendbuf(peerstate, "prime\n");
88 | } else {
89 | set_peer_sendbuf(peerstate, "composite\n");
90 | }
91 | }
92 |
93 | void on_work_completed(uv_work_t* req, int status) {
94 | if (status) {
95 | die("on_work_completed error: %s\n", uv_strerror(status));
96 | }
97 | peer_state_t* peerstate = (peer_state_t*)req->data;
98 | printf("work completed: %" PRIu64 "\n", peerstate->number);
99 | uv_buf_t writebuf = uv_buf_init(peerstate->sendbuf, peerstate->sendbuf_end);
100 | uv_write_t* writereq = (uv_write_t*)xmalloc(sizeof(*writereq));
101 | writereq->data = peerstate;
102 | int rc;
103 | if ((rc = uv_write(writereq, (uv_stream_t*)peerstate->client, &writebuf, 1,
104 | on_sent_response)) < 0) {
105 | die("uv_write failed: %s", uv_strerror(rc));
106 | }
107 | free(req);
108 | }
109 |
110 | void on_peer_read(uv_stream_t* client, ssize_t nread, const uv_buf_t* buf) {
111 | if (nread < 0) {
112 | if (nread != UV_EOF) {
113 | fprintf(stderr, "Read error: %s\n", uv_strerror(nread));
114 | }
115 | uv_close((uv_handle_t*)client, on_client_closed);
116 | } else if (nread == 0) {
117 | // From the documentation of uv_read_cb: nread might be 0, which does not
118 | // indicate an error or EOF. This is equivalent to EAGAIN or EWOULDBLOCK
119 | // under read(2).
120 | } else {
121 | // nread > 0
122 | assert(buf->len >= nread);
123 | int rc;
124 |
125 | // Parse the number from the client request: assume for simplicity that the
126 | // whole request arrives at once and contains only digits (possibly followed
127 | // by non-digits like a newline).
128 | uint64_t number = 0;
129 | for (int i = 0; i < nread; ++i) {
130 | char c = buf->base[i];
131 | if (isdigit(c)) {
132 | number = number * 10 + (c - '0');
133 | } else {
134 | break;
135 | }
136 | }
137 | peer_state_t* peerstate = (peer_state_t*)client->data;
138 | peerstate->client = (uv_tcp_t*)client;
139 | peerstate->number = number;
140 |
141 | char* mode = getenv("MODE");
142 | if (mode && !strcmp(mode, "BLOCK")) {
143 | // BLOCK mode: compute isprime synchronously, blocking the callback.
144 | printf("Got %zu bytes\n", nread);
145 | printf("Num %" PRIu64 "\n", number);
146 |
147 | uint64_t t1 = uv_hrtime();
148 | if (isprime(number)) {
149 | set_peer_sendbuf(peerstate, "prime\n");
150 | } else {
151 | set_peer_sendbuf(peerstate, "composite\n");
152 | }
153 | uint64_t t2 = uv_hrtime();
154 | printf("Elapsed %" PRIu64 " ns\n", t2 - t1);
155 |
156 | uv_buf_t writebuf =
157 | uv_buf_init(peerstate->sendbuf, peerstate->sendbuf_end);
158 | uv_write_t* writereq = (uv_write_t*)xmalloc(sizeof(*writereq));
159 | writereq->data = peerstate;
160 | if ((rc = uv_write(writereq, (uv_stream_t*)client, &writebuf, 1,
161 | on_sent_response)) < 0) {
162 | die("uv_write failed: %s", uv_strerror(rc));
163 | }
164 | } else {
165 | // Otherwise, compute isprime on the work queue, without blocking the
166 | // callback.
167 | uv_work_t* work_req = (uv_work_t*)xmalloc(sizeof(*work_req));
168 | work_req->data = peerstate;
169 | if ((rc = uv_queue_work(uv_default_loop(), work_req, on_work_submitted,
170 | on_work_completed)) < 0) {
171 | die("uv_queue_work failed: %s", uv_strerror(rc));
172 | }
173 | }
174 | }
175 | free(buf->base);
176 | }
177 |
178 | void on_peer_connected(uv_stream_t* server, int status) {
179 | if (status < 0) {
180 | fprintf(stderr, "Peer connection error: %s\n", uv_strerror(status));
181 | return;
182 | }
183 |
184 | // client will represent this peer; it's allocated on the heap and only
185 | // released when the client disconnects. The client holds a pointer to
186 | // peer_state_t in its data field; this peer state tracks the protocol state
187 | // with this client throughout interaction.
188 | uv_tcp_t* client = (uv_tcp_t*)xmalloc(sizeof(*client));
189 | int rc;
190 | if ((rc = uv_tcp_init(uv_default_loop(), client)) < 0) {
191 | die("uv_tcp_init failed: %s", uv_strerror(rc));
192 | }
193 | client->data = NULL;
194 |
195 | if (uv_accept(server, (uv_stream_t*)client) == 0) {
196 | struct sockaddr_storage peername;
197 | int namelen = sizeof(peername);
198 | if ((rc = uv_tcp_getpeername(client, (struct sockaddr*)&peername,
199 | &namelen)) < 0) {
200 | die("uv_tcp_getpeername failed: %s", uv_strerror(rc));
201 | }
202 | report_peer_connected((const struct sockaddr_in*)&peername, namelen);
203 |
204 | // Initialize the peer state for a new client.
205 | peer_state_t* peerstate = (peer_state_t*)xmalloc(sizeof(*peerstate));
206 | peerstate->sendbuf_end = 0;
207 | client->data = peerstate;
208 |
209 | // Start reading on the peer socket.
210 | if ((rc = uv_read_start((uv_stream_t*)client, on_alloc_buffer,
211 | on_peer_read)) < 0) {
212 | die("uv_read_start failed: %s", uv_strerror(rc));
213 | }
214 | } else {
215 | uv_close((uv_handle_t*)client, on_client_closed);
216 | }
217 | }
218 |
219 | int main(int argc, const char** argv) {
220 | setvbuf(stdout, NULL, _IONBF, 0);
221 |
222 | int portnum = 8070;
223 | if (argc >= 2) {
224 | portnum = atoi(argv[1]);
225 | }
226 | printf("Serving on port %d\n", portnum);
227 |
228 | int rc;
229 | uv_tcp_t server;
230 | if ((rc = uv_tcp_init(uv_default_loop(), &server)) < 0) {
231 | die("uv_tcp_init failed: %s", uv_strerror(rc));
232 | }
233 |
234 | struct sockaddr_in addr;
235 | if ((rc = uv_ip4_addr("0.0.0.0", portnum, &addr)) < 0) {
236 | die("uv_ip4_addr failed: %s", uv_strerror(rc));
237 | }
238 |
239 | if ((rc = uv_tcp_bind(&server, (const struct sockaddr*)&addr, 0)) < 0) {
240 | die("uv_tcp_bind failed: %s", uv_strerror(rc));
241 | }
242 |
243 | // Listen on the socket for new peers to connect. When a new peer connects,
244 | // the on_peer_connected callback will be invoked.
245 | if ((rc = uv_listen((uv_stream_t*)&server, N_BACKLOG, on_peer_connected)) <
246 | 0) {
247 | die("uv_listen failed: %s", uv_strerror(rc));
248 | }
249 |
250 | // Run the libuv event loop.
251 | uv_run(uv_default_loop(), UV_RUN_DEFAULT);
252 |
253 | // If uv_run returned, close the default loop before exiting.
254 | return uv_loop_close(uv_default_loop());
255 | }
256 |
--------------------------------------------------------------------------------
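
A small sketch of a client for the primality server above: send one number and time how long the verdict takes to come back. Running several of these at once makes the difference between MODE=BLOCK (the handler blocks the event loop) and the default work-queue path visible. The host, port (127.0.0.1:8070) and the test number are assumptions for illustration; this is not a file in the repo.

// isprime-client-sketch.c -- hypothetical, illustrative only.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <time.h>
#include <unistd.h>

int main(void) {
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0) { perror("socket"); return 1; }

  struct sockaddr_in addr = {0};
  addr.sin_family = AF_INET;
  addr.sin_port = htons(8070);
  inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
  if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) < 0) {
    perror("connect");
    return 1;
  }

  // A large odd number, so the naive trial division takes a noticeable while.
  const char* request = "1000000000000002493\n";

  struct timespec t0, t1;
  clock_gettime(CLOCK_MONOTONIC, &t0);
  send(fd, request, strlen(request), 0);

  char reply[32] = {0};
  ssize_t n = recv(fd, reply, sizeof(reply) - 1, 0);
  clock_gettime(CLOCK_MONOTONIC, &t1);

  double elapsed = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
  printf("reply (%zd bytes): %s", n, reply);   // "prime\n" or "composite\n"
  printf("elapsed: %.3f s\n", elapsed);

  close(fd);
  return 0;
}
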
/concurrent-servers/epoll-server.c:
--------------------------------------------------------------------------------
1 | // Asynchronous socket server - accepting multiple clients concurrently,
2 | // multiplexing the connections with epoll.
3 | //
4 | // Eli Bendersky [http://eli.thegreenplace.net]
5 | // This code is in the public domain.
6 | #include <assert.h>
7 | #include <errno.h>
8 | #include <stdbool.h>
9 | #include <stdint.h>
10 | #include <stdio.h>
11 | #include <stdlib.h>
12 | #include <string.h>
13 | #include <sys/epoll.h>
14 | #include <sys/socket.h>
15 | #include <sys/types.h>
16 | #include <unistd.h>
17 |
18 | #include "utils.h"
19 |
20 | #define MAXFDS (16 * 1024)
21 |
22 | typedef enum { INITIAL_ACK, WAIT_FOR_MSG, IN_MSG } ProcessingState;
23 |
24 | #define SENDBUF_SIZE 1024
25 |
26 | typedef struct {
27 | ProcessingState state;
28 | uint8_t sendbuf[SENDBUF_SIZE];
29 | int sendbuf_end;
30 | int sendptr;
31 | } peer_state_t;
32 |
33 | // Each peer is globally identified by the file descriptor (fd) it's connected
34 | // on. As long as the peer is connected, the fd is unique to it. When a peer
35 | // disconnects, a new peer may connect and get the same fd. on_peer_connected
36 | // should initialize the state properly to remove any trace of the old peer on
37 | // the same fd.
38 | peer_state_t global_state[MAXFDS];
39 |
40 | // Callbacks (on_XXX functions) return this status to the main loop; the status
41 | // instructs the loop about the next steps for the fd for which the callback was
42 | // invoked.
43 | // want_read=true means we want to keep monitoring this fd for reading.
44 | // want_write=true means we want to keep monitoring this fd for writing.
45 | // When both are false it means the fd is no longer needed and can be closed.
46 | typedef struct {
47 | bool want_read;
48 | bool want_write;
49 | } fd_status_t;
50 |
51 | // These constants make creating fd_status_t values less verbose.
52 | const fd_status_t fd_status_R = {.want_read = true, .want_write = false};
53 | const fd_status_t fd_status_W = {.want_read = false, .want_write = true};
54 | const fd_status_t fd_status_RW = {.want_read = true, .want_write = true};
55 | const fd_status_t fd_status_NORW = {.want_read = false, .want_write = false};
56 |
57 | fd_status_t on_peer_connected(int sockfd, const struct sockaddr_in* peer_addr,
58 | socklen_t peer_addr_len) {
59 | assert(sockfd < MAXFDS);
60 | report_peer_connected(peer_addr, peer_addr_len);
61 |
62 | // Initialize state to send back a '*' to the peer immediately.
63 | peer_state_t* peerstate = &global_state[sockfd];
64 | peerstate->state = INITIAL_ACK;
65 | peerstate->sendbuf[0] = '*';
66 | peerstate->sendptr = 0;
67 | peerstate->sendbuf_end = 1;
68 |
69 | // Signal that this socket is ready for writing now.
70 | return fd_status_W;
71 | }
72 |
73 | fd_status_t on_peer_ready_recv(int sockfd) {
74 | assert(sockfd < MAXFDS);
75 | peer_state_t* peerstate = &global_state[sockfd];
76 |
77 | if (peerstate->state == INITIAL_ACK ||
78 | peerstate->sendptr < peerstate->sendbuf_end) {
79 | // Until the initial ACK has been sent to the peer, there's nothing we
80 | // want to receive. Also, wait until all data staged for sending is sent to
81 | // receive more data.
82 | return fd_status_W;
83 | }
84 |
85 | uint8_t buf[1024];
86 | int nbytes = recv(sockfd, buf, sizeof buf, 0);
87 | if (nbytes == 0) {
88 | // The peer disconnected.
89 | return fd_status_NORW;
90 | } else if (nbytes < 0) {
91 | if (errno == EAGAIN || errno == EWOULDBLOCK) {
92 | // The socket is not *really* ready for recv; wait until it is.
93 | return fd_status_R;
94 | } else {
95 | perror_die("recv");
96 | }
97 | }
98 | bool ready_to_send = false;
99 | for (int i = 0; i < nbytes; ++i) {
100 | switch (peerstate->state) {
101 | case INITIAL_ACK:
102 | assert(0 && "can't reach here");
103 | break;
104 | case WAIT_FOR_MSG:
105 | if (buf[i] == '^') {
106 | peerstate->state = IN_MSG;
107 | }
108 | break;
109 | case IN_MSG:
110 | if (buf[i] == '$') {
111 | peerstate->state = WAIT_FOR_MSG;
112 | } else {
113 | assert(peerstate->sendbuf_end < SENDBUF_SIZE);
114 | peerstate->sendbuf[peerstate->sendbuf_end++] = buf[i] + 1;
115 | ready_to_send = true;
116 | }
117 | break;
118 | }
119 | }
120 | // Report reading readiness iff there's nothing to send to the peer as a
121 | // result of the latest recv.
122 | return (fd_status_t){.want_read = !ready_to_send,
123 | .want_write = ready_to_send};
124 | }
125 |
126 | fd_status_t on_peer_ready_send(int sockfd) {
127 | assert(sockfd < MAXFDS);
128 | peer_state_t* peerstate = &global_state[sockfd];
129 |
130 | if (peerstate->sendptr >= peerstate->sendbuf_end) {
131 | // Nothing to send.
132 | return fd_status_RW;
133 | }
134 | int sendlen = peerstate->sendbuf_end - peerstate->sendptr;
135 | int nsent = send(sockfd, &peerstate->sendbuf[peerstate->sendptr], sendlen, 0);
136 | if (nsent == -1) {
137 | if (errno == EAGAIN || errno == EWOULDBLOCK) {
138 | return fd_status_W;
139 | } else {
140 | perror_die("send");
141 | }
142 | }
143 | if (nsent < sendlen) {
144 | peerstate->sendptr += nsent;
145 | return fd_status_W;
146 | } else {
147 | // Everything was sent successfully; reset the send queue.
148 | peerstate->sendptr = 0;
149 | peerstate->sendbuf_end = 0;
150 |
151 | // Special-case state transition if we were in INITIAL_ACK until now.
152 | if (peerstate->state == INITIAL_ACK) {
153 | peerstate->state = WAIT_FOR_MSG;
154 | }
155 |
156 | return fd_status_R;
157 | }
158 | }
159 |
160 | int main(int argc, const char** argv) {
161 | setvbuf(stdout, NULL, _IONBF, 0);
162 |
163 | int portnum = 9090;
164 | if (argc >= 2) {
165 | portnum = atoi(argv[1]);
166 | }
167 | printf("Serving on port %d\n", portnum);
168 |
169 | int listener_sockfd = listen_inet_socket(portnum);
170 | make_socket_non_blocking(listener_sockfd);
171 |
172 | int epollfd = epoll_create1(0);
173 | if (epollfd < 0) {
174 | perror_die("epoll_create1");
175 | }
176 |
177 | struct epoll_event accept_event;
178 | accept_event.data.fd = listener_sockfd;
179 | accept_event.events = EPOLLIN;
180 | if (epoll_ctl(epollfd, EPOLL_CTL_ADD, listener_sockfd, &accept_event) < 0) {
181 | perror_die("epoll_ctl EPOLL_CTL_ADD");
182 | }
183 |
184 | struct epoll_event* events = calloc(MAXFDS, sizeof(struct epoll_event));
185 | if (events == NULL) {
186 | die("Unable to allocate memory for epoll_events");
187 | }
188 |
189 | while (1) {
190 | int nready = epoll_wait(epollfd, events, MAXFDS, -1);
191 | for (int i = 0; i < nready; i++) {
192 | if (events[i].events & EPOLLERR) {
193 | perror_die("epoll_wait returned EPOLLERR");
194 | }
195 |
196 | if (events[i].data.fd == listener_sockfd) {
197 | // The listening socket is ready; this means a new peer is connecting.
198 |
199 | struct sockaddr_in peer_addr;
200 | socklen_t peer_addr_len = sizeof(peer_addr);
201 | int newsockfd = accept(listener_sockfd, (struct sockaddr*)&peer_addr,
202 | &peer_addr_len);
203 | if (newsockfd < 0) {
204 | if (errno == EAGAIN || errno == EWOULDBLOCK) {
205 | // This can happen due to the nonblocking socket mode; in this
206 | // case don't do anything, but print a notice (since these events
207 | // are extremely rare and interesting to observe...)
208 | printf("accept returned EAGAIN or EWOULDBLOCK\n");
209 | } else {
210 | perror_die("accept");
211 | }
212 | } else {
213 | make_socket_non_blocking(newsockfd);
214 | if (newsockfd >= MAXFDS) {
215 | die("socket fd (%d) >= MAXFDS (%d)", newsockfd, MAXFDS);
216 | }
217 |
218 | fd_status_t status =
219 | on_peer_connected(newsockfd, &peer_addr, peer_addr_len);
220 | struct epoll_event event = {0};
221 | event.data.fd = newsockfd;
222 | if (status.want_read) {
223 | event.events |= EPOLLIN;
224 | }
225 | if (status.want_write) {
226 | event.events |= EPOLLOUT;
227 | }
228 |
229 | if (epoll_ctl(epollfd, EPOLL_CTL_ADD, newsockfd, &event) < 0) {
230 | perror_die("epoll_ctl EPOLL_CTL_ADD");
231 | }
232 | }
233 | } else {
234 | // A peer socket is ready.
235 | if (events[i].events & EPOLLIN) {
236 | // Ready for reading.
237 | int fd = events[i].data.fd;
238 | fd_status_t status = on_peer_ready_recv(fd);
239 | struct epoll_event event = {0};
240 | event.data.fd = fd;
241 | if (status.want_read) {
242 | event.events |= EPOLLIN;
243 | }
244 | if (status.want_write) {
245 | event.events |= EPOLLOUT;
246 | }
247 | if (event.events == 0) {
248 | printf("socket %d closing\n", fd);
249 | if (epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, NULL) < 0) {
250 | perror_die("epoll_ctl EPOLL_CTL_DEL");
251 | }
252 | close(fd);
253 | } else if (epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &event) < 0) {
254 | perror_die("epoll_ctl EPOLL_CTL_MOD");
255 | }
256 | } else if (events[i].events & EPOLLOUT) {
257 | // Ready for writing.
258 | int fd = events[i].data.fd;
259 | fd_status_t status = on_peer_ready_send(fd);
260 | struct epoll_event event = {0};
261 | event.data.fd = fd;
262 |
263 | if (status.want_read) {
264 | event.events |= EPOLLIN;
265 | }
266 | if (status.want_write) {
267 | event.events |= EPOLLOUT;
268 | }
269 | if (event.events == 0) {
270 | printf("socket %d closing\n", fd);
271 | if (epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, NULL) < 0) {
272 | perror_die("epoll_ctl EPOLL_CTL_DEL");
273 | }
274 | close(fd);
275 | } else if (epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &event) < 0) {
276 | perror_die("epoll_ctl EPOLL_CTL_MOD");
277 | }
278 | }
279 | }
280 | }
281 | }
282 |
283 | return 0;
284 | }
285 |
--------------------------------------------------------------------------------
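
It may help to see the epoll API in isolation from the full server loop above. Below is a minimal, self-contained sketch (Linux-only, not part of the repo): register an fd with EPOLL_CTL_ADD, block in epoll_wait, then react to EPOLLIN. It is the same cycle the server runs for its listener and peer sockets, just demonstrated on a pipe instead of sockets.

// epoll-pipe-sketch.c -- hypothetical, illustrative only.
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void) {
  int pipefd[2];
  if (pipe(pipefd) < 0) { perror("pipe"); return 1; }

  int epollfd = epoll_create1(0);
  if (epollfd < 0) { perror("epoll_create1"); return 1; }

  // Watch the read end of the pipe for readability (level-triggered, like the
  // server above).
  struct epoll_event ev = {0};
  ev.events = EPOLLIN;
  ev.data.fd = pipefd[0];
  if (epoll_ctl(epollfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0) {
    perror("epoll_ctl");
    return 1;
  }

  // Make the fd readable; in the server, this is data arriving on a socket.
  const char* msg = "hello";
  write(pipefd[1], msg, strlen(msg));

  struct epoll_event events[8];
  int nready = epoll_wait(epollfd, events, 8, -1);
  for (int i = 0; i < nready; ++i) {
    if (events[i].events & EPOLLIN) {
      char buf[64] = {0};
      ssize_t nread = read(events[i].data.fd, buf, sizeof(buf) - 1);
      printf("fd %d readable, read %zd bytes: %s\n", events[i].data.fd, nread,
             buf);
    }
  }

  close(epollfd);
  close(pipefd[0]);
  close(pipefd[1]);
  return 0;
}
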
/concurrent-servers/select-server.c:
--------------------------------------------------------------------------------
1 | // Asynchronous socket server - accepting multiple clients concurrently,
2 | // multiplexing the connections with select.
3 | //
4 | // Eli Bendersky [http://eli.thegreenplace.net]
5 | // This code is in the public domain.
6 | #include <assert.h>
7 | #include <errno.h>
8 | #include <stdbool.h>
9 | #include <stdint.h>
10 | #include <stdio.h>
11 | #include <stdlib.h>
12 | #include <string.h>
13 | #include <sys/select.h>
14 | #include <sys/socket.h>
15 | #include <sys/types.h>
16 | #include <unistd.h>
17 |
18 | #include "utils.h"
19 |
20 | // Note: FD_SETSIZE is 1024 on Linux, which is tricky to change. This provides a
21 | // natural limit to the number of simultaneous FDs monitored by select().
22 | #define MAXFDS 1000
23 |
24 | typedef enum { INITIAL_ACK, WAIT_FOR_MSG, IN_MSG } ProcessingState;
25 |
26 | #define SENDBUF_SIZE 1024
27 |
28 | typedef struct {
29 | ProcessingState state;
30 |
31 | // sendbuf contains data the server has to send back to the client. The
32 | // on_peer_ready_recv handler populates this buffer, and on_peer_ready_send
33 | // drains it. sendbuf_end points to the last valid byte in the buffer, and
34 | // sendptr at the next byte to send.
35 | uint8_t sendbuf[SENDBUF_SIZE];
36 | int sendbuf_end;
37 | int sendptr;
38 | } peer_state_t;
39 |
40 | // Each peer is globally identified by the file descriptor (fd) it's connected
41 | // on. As long as the peer is connected, the fd is unique to it. When a peer
42 | // disconnects, a new peer may connect and get the same fd. on_peer_connected
43 | // should initialize the state properly to remove any trace of the old peer on
44 | // the same fd.
45 | peer_state_t global_state[MAXFDS];
46 |
47 | // Callbacks (on_XXX functions) return this status to the main loop; the status
48 | // instructs the loop about the next steps for the fd for which the callback was
49 | // invoked.
50 | // want_read=true means we want to keep monitoring this fd for reading.
51 | // want_write=true means we want to keep monitoring this fd for writing.
52 | // When both are false it means the fd is no longer needed and can be closed.
53 | typedef struct {
54 | bool want_read;
55 | bool want_write;
56 | } fd_status_t;
57 |
58 | // These constants make creating fd_status_t values less verbose.
59 | const fd_status_t fd_status_R = {.want_read = true, .want_write = false};
60 | const fd_status_t fd_status_W = {.want_read = false, .want_write = true};
61 | const fd_status_t fd_status_RW = {.want_read = true, .want_write = true};
62 | const fd_status_t fd_status_NORW = {.want_read = false, .want_write = false};
63 |
64 | fd_status_t on_peer_connected(int sockfd, const struct sockaddr_in* peer_addr,
65 | socklen_t peer_addr_len) {
66 | assert(sockfd < MAXFDS);
67 | report_peer_connected(peer_addr, peer_addr_len);
68 |
69 | // Initialize state to send back a '*' to the peer immediately.
70 | peer_state_t* peerstate = &global_state[sockfd];
71 | peerstate->state = INITIAL_ACK;
72 | peerstate->sendbuf[0] = '*';
73 | peerstate->sendptr = 0;
74 | peerstate->sendbuf_end = 1;
75 |
76 | // Signal that this socket is ready for writing now.
77 | return fd_status_W;
78 | }
79 |
80 | fd_status_t on_peer_ready_recv(int sockfd) {
81 | assert(sockfd < MAXFDS);
82 | peer_state_t* peerstate = &global_state[sockfd];
83 |
84 | if (peerstate->state == INITIAL_ACK ||
85 | peerstate->sendptr < peerstate->sendbuf_end) {
86 | // Until the initial ACK has been sent to the peer, there's nothing we
87 | // want to receive. Also, wait until all data staged for sending is sent to
88 | // receive more data.
89 | return fd_status_W;
90 | }
91 |
92 | uint8_t buf[1024];
93 | int nbytes = recv(sockfd, buf, sizeof buf, 0);
94 | if (nbytes == 0) {
95 | // The peer disconnected.
96 | return fd_status_NORW;
97 | } else if (nbytes < 0) {
98 | if (errno == EAGAIN || errno == EWOULDBLOCK) {
99 | // The socket is not *really* ready for recv; wait until it is.
100 | return fd_status_R;
101 | } else {
102 | perror_die("recv");
103 | }
104 | }
105 | bool ready_to_send = false;
106 | for (int i = 0; i < nbytes; ++i) {
107 | switch (peerstate->state) {
108 | case INITIAL_ACK:
109 | assert(0 && "can't reach here");
110 | break;
111 | case WAIT_FOR_MSG:
112 | if (buf[i] == '^') {
113 | peerstate->state = IN_MSG;
114 | }
115 | break;
116 | case IN_MSG:
117 | if (buf[i] == '$') {
118 | peerstate->state = WAIT_FOR_MSG;
119 | } else {
120 | assert(peerstate->sendbuf_end < SENDBUF_SIZE);
121 | peerstate->sendbuf[peerstate->sendbuf_end++] = buf[i] + 1;
122 | ready_to_send = true;
123 | }
124 | break;
125 | }
126 | }
127 | // Report reading readiness iff there's nothing to send to the peer as a
128 | // result of the latest recv.
129 | return (fd_status_t){.want_read = !ready_to_send,
130 | .want_write = ready_to_send};
131 | }
132 |
133 | fd_status_t on_peer_ready_send(int sockfd) {
134 | assert(sockfd < MAXFDS);
135 | peer_state_t* peerstate = &global_state[sockfd];
136 |
137 | if (peerstate->sendptr >= peerstate->sendbuf_end) {
138 | // Nothing to send.
139 | return fd_status_RW;
140 | }
141 | int sendlen = peerstate->sendbuf_end - peerstate->sendptr;
142 | int nsent = send(sockfd, &peerstate->sendbuf[peerstate->sendptr], sendlen, 0);
143 | if (nsent == -1) {
144 | if (errno == EAGAIN || errno == EWOULDBLOCK) {
145 | return fd_status_W;
146 | } else {
147 | perror_die("send");
148 | }
149 | }
150 | if (nsent < sendlen) {
151 | peerstate->sendptr += nsent;
152 | return fd_status_W;
153 | } else {
154 | // Everything was sent successfully; reset the send queue.
155 | peerstate->sendptr = 0;
156 | peerstate->sendbuf_end = 0;
157 |
158 | // Special-case state transition if we were in INITIAL_ACK until now.
159 | if (peerstate->state == INITIAL_ACK) {
160 | peerstate->state = WAIT_FOR_MSG;
161 | }
162 |
163 | return fd_status_R;
164 | }
165 | }
166 |
167 | int main(int argc, char** argv) {
168 | setvbuf(stdout, NULL, _IONBF, 0);
169 |
170 | int portnum = 9090;
171 | if (argc >= 2) {
172 | portnum = atoi(argv[1]);
173 | }
174 | printf("Serving on port %d\n", portnum);
175 |
176 | int listener_sockfd = listen_inet_socket(portnum);
177 |
178 | // The select() manpage warns that select() can return a read notification
179 | // for a socket that isn't actually readable. Thus using blocking I/O isn't
180 | // safe.
181 | make_socket_non_blocking(listener_sockfd);
182 |
183 | if (listener_sockfd >= FD_SETSIZE) {
184 | die("listener socket fd (%d) >= FD_SETSIZE (%d)", listener_sockfd,
185 | FD_SETSIZE);
186 | }
187 |
188 | // The "master" sets are owned by the loop, tracking which FDs we want to
189 | // monitor for reading and which FDs we want to monitor for writing.
190 | fd_set readfds_master;
191 | FD_ZERO(&readfds_master);
192 | fd_set writefds_master;
193 | FD_ZERO(&writefds_master);
194 |
195 | // The listening socket is always monitored for read, to detect when new
196 | // peer connections are incoming.
197 | FD_SET(listener_sockfd, &readfds_master);
198 |
199 | // For more efficiency, fdset_max tracks the maximal FD seen so far; this
200 | // makes it unnecessary for select to iterate all the way to FD_SETSIZE on
201 | // every call.
202 | int fdset_max = listener_sockfd;
203 |
204 | while (1) {
205 | // select() modifies the fd_sets passed to it, so we have to pass in copies.
206 | fd_set readfds = readfds_master;
207 | fd_set writefds = writefds_master;
208 |
209 | int nready = select(fdset_max + 1, &readfds, &writefds, NULL, NULL);
210 | if (nready < 0) {
211 | perror_die("select");
212 | }
213 |
214 | // nready tells us the total number of ready events; if one socket is both
215 | // readable and writable it will be 2. Therefore, it's decremented when
216 | // either a readable or a writable socket is encountered.
217 | for (int fd = 0; fd <= fdset_max && nready > 0; fd++) {
218 | // Check if this fd became readable.
219 | if (FD_ISSET(fd, &readfds)) {
220 | nready--;
221 |
222 | if (fd == listener_sockfd) {
223 | // The listening socket is ready; this means a new peer is connecting.
224 | struct sockaddr_in peer_addr;
225 | socklen_t peer_addr_len = sizeof(peer_addr);
226 | int newsockfd = accept(listener_sockfd, (struct sockaddr*)&peer_addr,
227 | &peer_addr_len);
228 | if (newsockfd < 0) {
229 | if (errno == EAGAIN || errno == EWOULDBLOCK) {
230 | // This can happen due to the nonblocking socket mode; in this
231 | // case don't do anything, but print a notice (since these events
232 | // are extremely rare and interesting to observe...)
233 | printf("accept returned EAGAIN or EWOULDBLOCK\n");
234 | } else {
235 | perror_die("accept");
236 | }
237 | } else {
238 | make_socket_non_blocking(newsockfd);
239 | if (newsockfd > fdset_max) {
240 | if (newsockfd >= FD_SETSIZE) {
241 | die("socket fd (%d) >= FD_SETSIZE (%d)", newsockfd, FD_SETSIZE);
242 | }
243 | fdset_max = newsockfd;
244 | }
245 |
246 | fd_status_t status =
247 | on_peer_connected(newsockfd, &peer_addr, peer_addr_len);
248 | if (status.want_read) {
249 | FD_SET(newsockfd, &readfds_master);
250 | } else {
251 | FD_CLR(newsockfd, &readfds_master);
252 | }
253 | if (status.want_write) {
254 | FD_SET(newsockfd, &writefds_master);
255 | } else {
256 | FD_CLR(newsockfd, &writefds_master);
257 | }
258 | }
259 | } else {
260 | fd_status_t status = on_peer_ready_recv(fd);
261 | if (status.want_read) {
262 | FD_SET(fd, &readfds_master);
263 | } else {
264 | FD_CLR(fd, &readfds_master);
265 | }
266 | if (status.want_write) {
267 | FD_SET(fd, &writefds_master);
268 | } else {
269 | FD_CLR(fd, &writefds_master);
270 | }
271 | if (!status.want_read && !status.want_write) {
272 | printf("socket %d closing\n", fd);
273 | close(fd);
274 | }
275 | }
276 | }
277 |
278 | // Check if this fd became writable.
279 | if (FD_ISSET(fd, &writefds)) {
280 | nready--;
281 | fd_status_t status = on_peer_ready_send(fd);
282 | if (status.want_read) {
283 | FD_SET(fd, &readfds_master);
284 | } else {
285 | FD_CLR(fd, &readfds_master);
286 | }
287 | if (status.want_write) {
288 | FD_SET(fd, &writefds_master);
289 | } else {
290 | FD_CLR(fd, &writefds_master);
291 | }
292 | if (!status.want_read && !status.want_write) {
293 | printf("socket %d closing\n", fd);
294 | close(fd);
295 | }
296 | }
297 | }
298 | }
299 |
300 | return 0;
301 | }
302 |
--------------------------------------------------------------------------------
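
Finally, a hedged sketch of a tiny concurrency smoke test that works against the servers in this directory that speak the '*'/'^'/'$' protocol (such as the select, epoll, and uv servers above): fork a handful of clients that each send one framed message and print the shifted reply. The host, port (127.0.0.1:9090) and the client count are assumptions; the repository's Python scripts (simple-client.py, server-test.py) presumably cover this ground more thoroughly.

// fork-clients-sketch.c -- hypothetical, illustrative only.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <unistd.h>

// One blocking client: read the '*' ack, send a framed message, print reply.
static void run_client(int id) {
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  struct sockaddr_in addr = {0};
  addr.sin_family = AF_INET;
  addr.sin_port = htons(9090);
  inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
  if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) < 0) {
    perror("connect");
    _exit(1);
  }

  char ack;
  recv(fd, &ack, 1, 0);

  char msg[32];
  snprintf(msg, sizeof(msg), "^client%d$", id);
  send(fd, msg, strlen(msg), 0);

  char reply[32] = {0};
  ssize_t n = recv(fd, reply, sizeof(reply) - 1, 0);
  printf("client %d got %zd bytes: %s\n", id, n, reply);
  close(fd);
  _exit(0);
}

int main(void) {
  enum { N_CLIENTS = 4 };
  for (int i = 0; i < N_CLIENTS; ++i) {
    if (fork() == 0) {
      run_client(i);   // child process never returns
    }
  }
  // Parent: wait for all children to finish.
  for (int i = 0; i < N_CLIENTS; ++i) {
    wait(NULL);
  }
  return 0;
}
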