├── log.h ├── config.xml ├── fdwrapper.h ├── Makefile ├── conn.h ├── mgr.h ├── fdwrapper.cpp ├── log.cpp ├── README.md ├── conn.cpp ├── main.cpp ├── mgr.cpp └── processpool.h /log.h: -------------------------------------------------------------------------------- 1 | #ifndef LOG_H 2 | #define LOG_H 3 | 4 | #include <syslog.h> 5 | #include <stdarg.h> 6 | 7 | void set_loglevel( int log_level = LOG_DEBUG ); 8 | void log( int log_level, const char* file_name, int line_num, const char* format, ... ); 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /config.xml: -------------------------------------------------------------------------------- 1 | Listen 127.0.0.1:8080 2 | 3 | <logical_host> 4 | <name>103.65.41.126</name> 5 | <port>80</port> 6 | <conns>2</conns> 7 | </logical_host> 8 | <logical_host> 9 | <name>103.65.41.125</name> 10 | <port>80</port> 11 | <conns>2</conns> 12 | </logical_host> 13 | -------------------------------------------------------------------------------- /fdwrapper.h: -------------------------------------------------------------------------------- 1 | #ifndef FDWRAPPER_H 2 | #define FDWRAPPER_H 3 | 4 | enum RET_CODE { OK = 0, NOTHING = 1, IOERR = -1, CLOSED = -2, BUFFER_FULL = -3, BUFFER_EMPTY = -4, TRY_AGAIN = -5 }; 5 | enum OP_TYPE { READ = 0, WRITE, ERROR }; 6 | int setnonblocking( int fd ); 7 | void add_read_fd( int epollfd, int fd ); 8 | void add_write_fd( int epollfd, int fd ); 9 | void removefd( int epollfd, int fd ); 10 | void closefd( int epollfd, int fd ); 11 | void modfd( int epollfd, int fd, int ev ); 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: log.o fdwrapper.o conn.o mgr.o springsnail 2 | 3 | log.o: log.cpp log.h 4 | g++ -c log.cpp -o log.o 5 | fdwrapper.o: fdwrapper.cpp fdwrapper.h 6 | g++ -c fdwrapper.cpp -o fdwrapper.o 7 | conn.o: conn.cpp conn.h 8 | g++ -c conn.cpp -o conn.o 9 | mgr.o: mgr.cpp mgr.h 10 | g++ -c mgr.cpp -o mgr.o 11 | springsnail: processpool.h main.cpp log.o fdwrapper.o conn.o mgr.o 12 | g++ main.cpp log.o fdwrapper.o conn.o mgr.o -o springsnail 13 | 14 | clean: 15 | rm -f *.o springsnail 16 | -------------------------------------------------------------------------------- /conn.h: -------------------------------------------------------------------------------- 1 | #ifndef CONN_H 2 | #define CONN_H 3 | 4 | #include <netinet/in.h> 5 | #include "fdwrapper.h" 6 | 7 | class conn 8 | { 9 | public: 10 | conn(); 11 | ~conn(); 12 | void init_clt( int sockfd, const sockaddr_in& client_addr ); 13 | void init_srv( int sockfd, const sockaddr_in& server_addr ); 14 | void reset(); 15 | RET_CODE read_clt(); 16 | RET_CODE write_clt(); 17 | RET_CODE read_srv(); 18 | RET_CODE write_srv(); 19 | 20 | public: 21 | static const int BUF_SIZE = 2048; 22 | 23 | char* m_clt_buf; 24 | int m_clt_read_idx; 25 | int m_clt_write_idx; 26 | sockaddr_in m_clt_address; 27 | int m_cltfd; 28 | 29 | char* m_srv_buf; 30 | int m_srv_read_idx; 31 | int m_srv_write_idx; 32 | sockaddr_in m_srv_address; 33 | int m_srvfd; 34 | 35 | bool m_srv_closed; 36 | }; 37 | 38 | #endif 39 | -------------------------------------------------------------------------------- /mgr.h: -------------------------------------------------------------------------------- 1 | #ifndef SRVMGR_H 2 | #define SRVMGR_H 3 | 4 | #include <map> 5 | #include <arpa/inet.h> 6 | #include "fdwrapper.h" 7 | #include "conn.h" 8 | 9 | using std::map; 10 | 11 | class host 12 | { 13 | public: 14 | char m_hostname[1024]; 15 | int m_port; 16 | int m_conncnt; 17 | }; 18 | 19 | class mgr 20 | { 21 | public: 22 | 
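/* mgr is the per-child connection-pool manager for one logical server: the constructor pre-builds m_conncnt connections with conn2srv(), pick_conn() binds an accepted client fd to an idle server connection, free_conn() closes both ends and parks the object in m_freed, and recycle_conns() re-establishes the broken server connections. */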
mgr( int epollfd, const host& srv ); 23 | ~mgr(); 24 | /* establish a new connection to the logical server */ 25 | int conn2srv( const sockaddr_in& address ); 26 | conn* pick_conn( int sockfd ); 27 | void free_conn( conn* connection ); 28 | int get_used_conn_cnt(); 29 | void recycle_conns(); 30 | RET_CODE process( int fd, OP_TYPE type ); 31 | 32 | private: 33 | static int m_epollfd; 34 | map< int, conn* > m_conns; 35 | map< int, conn* > m_used; 36 | map< int, conn* > m_freed; 37 | host m_logic_srv; 38 | }; 39 | 40 | #endif 41 | -------------------------------------------------------------------------------- /fdwrapper.cpp: -------------------------------------------------------------------------------- 1 | /* implementation of the fd/epoll helpers declared in fdwrapper.h */ 2 | 3 | 4 | #include <sys/epoll.h> 5 | #include <fcntl.h> 6 | #include <unistd.h> 7 | 8 | int setnonblocking( int fd ) 9 | { 10 | int old_option = fcntl( fd, F_GETFL ); 11 | int new_option = old_option | O_NONBLOCK; 12 | fcntl( fd, F_SETFL, new_option ); 13 | return old_option; 14 | } 15 | 16 | void add_read_fd( int epollfd, int fd ) 17 | { 18 | epoll_event event; 19 | event.data.fd = fd; 20 | event.events = EPOLLIN | EPOLLET; 21 | epoll_ctl( epollfd, EPOLL_CTL_ADD, fd, &event ); 22 | setnonblocking( fd ); 23 | } 24 | 25 | void add_write_fd( int epollfd, int fd ) 26 | { 27 | epoll_event event; 28 | event.data.fd = fd; 29 | event.events = EPOLLOUT | EPOLLET; 30 | epoll_ctl( epollfd, EPOLL_CTL_ADD, fd, &event ); 31 | setnonblocking( fd ); 32 | } 33 | 34 | void closefd( int epollfd, int fd ) 35 | { 36 | epoll_ctl( epollfd, EPOLL_CTL_DEL, fd, 0 ); 37 | close( fd ); 38 | } 39 | 40 | void removefd( int epollfd, int fd ) 41 | { 42 | epoll_ctl( epollfd, EPOLL_CTL_DEL, fd, 0 ); 43 | } 44 | 45 | void modfd( int epollfd, int fd, int ev ) 46 | { 47 | epoll_event event; 48 | event.data.fd = fd; 49 | event.events = ev | EPOLLET; 50 | epoll_ctl( epollfd, EPOLL_CTL_MOD, fd, &event ); 51 | } 52 | 53 | 54 | -------------------------------------------------------------------------------- /log.cpp: -------------------------------------------------------------------------------- 1 | #include <time.h> 2 | #include <string.h> 3 | #include <stdio.h> 4 | #include "log.h" 5 | 6 | static int level = LOG_INFO; 7 | static const int LOG_BUFFER_SIZE = 2048; 8 | static const char* loglevels[] = 9 | { 10 | "emerge!", "alert!", "critical!", "error!", "warn!", "notice:", "info:", "debug:" 11 | }; 12 | 13 | void set_loglevel( int log_level ) 14 | { 15 | level = log_level; 16 | } 17 | 18 | void log( int log_level, const char* file_name, int line_num, const char* format, ... ) 19 | { 20 | if ( log_level > level ) 21 | { 22 | return; 23 | } 24 | 25 | time_t tmp = time( NULL ); 26 | struct tm* cur_time = localtime( &tmp ); 27 | if ( ! 
cur_time ) 28 | { 29 | return; 30 | } 31 | 32 | char arg_buffer[ LOG_BUFFER_SIZE ]; 33 | memset( arg_buffer, '\0', LOG_BUFFER_SIZE ); 34 | strftime( arg_buffer, LOG_BUFFER_SIZE - 1, "[ %x %X ] ", cur_time ); 35 | printf( "%s", arg_buffer ); 36 | printf( "%s:%04d ", file_name, line_num ); 37 | printf( "%s ", loglevels[ log_level - LOG_EMERG ] ); 38 | 39 | va_list arg_list; 40 | va_start( arg_list, format ); 41 | memset( arg_buffer, '\0', LOG_BUFFER_SIZE ); 42 | vsnprintf( arg_buffer, LOG_BUFFER_SIZE - 1, format, arg_list ); 43 | printf( "%s\n", arg_buffer ); 44 | fflush( stdout ); 45 | va_end( arg_list ); 46 | } 47 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # springsnail 2 | A detailed walkthrough of the load-balancing server project that ships with the book *Linux High-Performance Server Programming* 3 | 4 | ## Installation and usage 5 | Download the source: 6 | ```shell 7 | git clone https://github.com/liu-jianhao/springsnail.git 8 | ``` 9 | Then enter the springsnail directory and run `make` to build the executable. 10 | 11 | Next, fill in the config file. The site I tested against is NetEase Cloud Music (music.163.com); first check which IP addresses its servers resolve to: 12 | ```shell 13 | $ nslookup music.163.com 14 | Server: 8.8.8.8 15 | Address: 8.8.8.8#53 16 | 17 | Non-authoritative answer: 18 | Name: music.163.com 19 | Address: 103.65.41.125 20 | Name: music.163.com 21 | Address: 103.65.41.126 22 | ``` 23 | The site resolves to two IP addresses, so those two addresses are used as the logical hosts in the config file. 24 | 25 | `config.xml`: 26 | ```xml 27 | Listen 127.0.0.1:8080 28 | 29 | <logical_host> 30 | <name>103.65.41.126</name> 31 | <port>80</port> 32 | <conns>2</conns> 33 | </logical_host> 34 | <logical_host> 35 | <name>103.65.41.125</name> 36 | <port>80</port> 37 | <conns>2</conns> 38 | </logical_host> 39 | ``` 40 | The first line is the address the load balancer itself listens on; the two blocks below are the real servers. `springsnail` only acts as a relay, forwarding each client connection to the least busy server. 41 | 42 | Now run the program: 43 | ```shell 44 | $ ./springsnail -f config.xml 45 | [ 09/13/18 21:49:56 ] mgr.cpp:0050 info: logcial srv host info: (103.65.41.126, 80) 46 | [ 09/13/18 21:49:56 ] mgr.cpp:0050 info: logcial srv host info: (103.65.41.125, 80) 47 | [ 09/13/18 21:49:58 ] mgr.cpp:0062 info: build connection 0 to server success 48 | [ 09/13/18 21:49:59 ] mgr.cpp:0062 info: build connection 0 to server success 49 | [ 09/13/18 21:49:59 ] mgr.cpp:0062 info: build connection 1 to server success 50 | [ 09/13/18 21:50:00 ] mgr.cpp:0062 info: build connection 1 to server success 51 | ``` 52 | If no errors appear, the server is running. Next, connect to the load balancer: 53 | ```shell 54 | $ nc localhost 8080 55 | GET / HTTP/1.1 56 | Host: music.163.com 57 | 58 | HTTP/1.1 302 Found 59 | Server: nginx 60 | Date: Thu, 13 Sep 2018 13:52:34 GMT 61 | Content-Length: 0 62 | Connection: keep-alive 63 | Cache-Control: no-store 64 | Pragrma: no-cache 65 | Expires: Thu, 01 Jan 1970 00:00:00 GMT 66 | Cache-Control: no-cache 67 | Location: https://music.163.com/ 68 | X-Via: MusicEdgeServer 69 | X-From-Src: 218.17.40.86 70 | ``` 71 | Meanwhile, the springsnail side prints output similar to: 72 | ```shell 73 | [ 09/13/18 21:52:24 ] processpool.h:0378 info: send request to child 0 74 | [ 09/13/18 21:52:24 ] mgr.cpp:0109 info: bind client sock 11 with server sock 9 75 | ``` 76 | I sent a hand-crafted HTTP request to the load balancer and the backend did return data. I also tried accessing it directly from a browser, but got a `403` status (forbidden); presumably the backend rejects this kind of relay for security reasons, so a browser client cannot get through (just a guess). 77 | 78 | You can also try other sites; each one behaves a little differently. 79 | 80 | ## What each file does 81 | 1. main.cpp: 82 | 1. set up command-line option handling 83 | 2. read the config file 84 | 3. parse the config file 85 | 4. start the process pool's main loop 86 | 87 | 2. processpool: the process pool, the engine of the whole project 88 | 3. fdwrapper: wrapper functions for fd and epoll operations 89 | 4. log: logging 90 | 5. conn: the connection object holding the client-side and server-side buffers 91 | 6. mgr: the framework that manages the network connections and performs the load balancing -------------------------------------------------------------------------------- /conn.cpp: -------------------------------------------------------------------------------- 1 | #include <exception> 2 | #include <errno.h> 3 | #include <string.h> 4 | #include "conn.h" 5 | #include "log.h" 6 | #include "fdwrapper.h" 7 | 8 | conn::conn() 9 | { 10 | m_srvfd = -1; 11 | m_clt_buf = new char[ BUF_SIZE ]; 12 | if( !m_clt_buf ) 13 | { 14 | throw std::exception(); 15 | } 16 | m_srv_buf = new char[ BUF_SIZE ]; 17 | if( !m_srv_buf ) 18 | { 19 | throw std::exception(); 20 | } 21 | reset(); 22 | } 23 | 24 | conn::~conn() 25 | { 26 | delete [] m_clt_buf; 27 | delete [] m_srv_buf; 28 | } 29 | 30 | void conn::init_clt( int sockfd, const sockaddr_in& client_addr ) 31 | { 32 | m_cltfd = sockfd; 33 | m_clt_address = client_addr; 34 | } 35 | 36 | void conn::init_srv( int sockfd, const sockaddr_in& server_addr ) 37 | { 38 | m_srvfd = sockfd; 39 | m_srv_address = server_addr; 40 | } 41 | 42 | void conn::reset() 43 | { 44 | m_clt_read_idx = 0; 45 | m_clt_write_idx = 0; 46 | m_srv_read_idx = 0; 47 | m_srv_write_idx = 0; 48 | m_srv_closed = false; 49 | m_cltfd = -1; 50 | memset( m_clt_buf, '\0', BUF_SIZE ); 51 | memset( m_srv_buf, '\0', BUF_SIZE ); 52 | } 53 | 54 | RET_CODE conn::read_clt() 55 | { 56 | int bytes_read = 0; 57 | while( true ) 58 | { 59 | if( m_clt_read_idx >= BUF_SIZE ) 60 | { 61 | log( LOG_ERR, __FILE__, __LINE__, "%s", "the client read buffer is full, let server write" ); 62 | return BUFFER_FULL; 63 | } 64 | 65 | bytes_read = recv( m_cltfd, m_clt_buf + m_clt_read_idx, BUF_SIZE - m_clt_read_idx, 0 ); 66 | if ( bytes_read == -1 ) 67 | { 68 | if( errno == EAGAIN || errno == EWOULDBLOCK ) 69 | { 70 | break; 71 | } 72 | return IOERR; 73 | } 74 | else if ( bytes_read == 0 ) 75 | { 76 | return CLOSED; 77 | } 78 | 79 | m_clt_read_idx += bytes_read; 80 | } 81 | return ( ( m_clt_read_idx - m_clt_write_idx ) > 0 ) ? OK : NOTHING; 82 | } 83 | 84 | RET_CODE conn::read_srv() 85 | { 86 | int bytes_read = 0; 87 | while( true ) 88 | { 89 | if( m_srv_read_idx >= BUF_SIZE ) 90 | { 91 | log( LOG_ERR, __FILE__, __LINE__, "%s", "the server read buffer is full, let client write" ); 92 | return BUFFER_FULL; 93 | } 94 | 95 | bytes_read = recv( m_srvfd, m_srv_buf + m_srv_read_idx, BUF_SIZE - m_srv_read_idx, 0 ); 96 | if ( bytes_read == -1 ) 97 | { 98 | if( errno == EAGAIN || errno == EWOULDBLOCK ) 99 | { 100 | break; 101 | } 102 | return IOERR; 103 | } 104 | else if ( bytes_read == 0 ) 105 | { 106 | log( LOG_ERR, __FILE__, __LINE__, "%s", "the server should not close the persist connection" ); 107 | return CLOSED; 108 | } 109 | 110 | m_srv_read_idx += bytes_read; 111 | } 112 | return ( ( m_srv_read_idx - m_srv_write_idx ) > 0 ) ? 
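/* OK: fresh data from the server is buffered for the client; NOTHING: recv() drained without new payload */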
OK : NOTHING; 113 | } 114 | 115 | RET_CODE conn::write_srv() 116 | { 117 | int bytes_write = 0; 118 | while( true ) 119 | { 120 | if( m_clt_read_idx <= m_clt_write_idx ) 121 | { 122 | m_clt_read_idx = 0; 123 | m_clt_write_idx = 0; 124 | return BUFFER_EMPTY; 125 | } 126 | 127 | bytes_write = send( m_srvfd, m_clt_buf + m_clt_write_idx, m_clt_read_idx - m_clt_write_idx, 0 ); 128 | if ( bytes_write == -1 ) 129 | { 130 | if( errno == EAGAIN || errno == EWOULDBLOCK ) 131 | { 132 | return TRY_AGAIN; 133 | } 134 | log( LOG_ERR, __FILE__, __LINE__, "write server socket failed, %s", strerror( errno ) ); 135 | return IOERR; 136 | } 137 | else if ( bytes_write == 0 ) 138 | { 139 | return CLOSED; 140 | } 141 | 142 | m_clt_write_idx += bytes_write; 143 | } 144 | } 145 | 146 | RET_CODE conn::write_clt() 147 | { 148 | int bytes_write = 0; 149 | while( true ) 150 | { 151 | if( m_srv_read_idx <= m_srv_write_idx ) 152 | { 153 | m_srv_read_idx = 0; 154 | m_srv_write_idx = 0; 155 | return BUFFER_EMPTY; 156 | } 157 | 158 | bytes_write = send( m_cltfd, m_srv_buf + m_srv_write_idx, m_srv_read_idx - m_srv_write_idx, 0 ); 159 | if ( bytes_write == -1 ) 160 | { 161 | if( errno == EAGAIN || errno == EWOULDBLOCK ) 162 | { 163 | return TRY_AGAIN; 164 | } 165 | log( LOG_ERR, __FILE__, __LINE__, "write client socket failed, %s", strerror( errno ) ); 166 | return IOERR; 167 | } 168 | else if ( bytes_write == 0 ) 169 | { 170 | return CLOSED; 171 | } 172 | 173 | m_srv_write_idx += bytes_write; 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | #include "log.h" 19 | #include "conn.h" 20 | #include "mgr.h" 21 | #include "processpool.h" 22 | 23 | using std::vector; 24 | 25 | static const char* version = "1.0"; 26 | 27 | static void usage( const char* prog ) 28 | { 29 | log( LOG_INFO, __FILE__, __LINE__, "usage: %s [-h] [-v] [-f config_file]", prog ); 30 | } 31 | 32 | int main( int argc, char* argv[] ) 33 | { 34 | char cfg_file[1024]; 35 | memset( cfg_file, '\0', 100 ); 36 | int option; 37 | // 命令参数处理设置 38 | while ( ( option = getopt( argc, argv, "f:xvh" ) ) != -1 ) 39 | { 40 | switch ( option ) 41 | { 42 | case 'x': 43 | { 44 | set_loglevel( LOG_DEBUG ); 45 | break; 46 | } 47 | case 'v': 48 | { 49 | log( LOG_INFO, __FILE__, __LINE__, "%s %s", argv[0], version ); 50 | return 0; 51 | } 52 | case 'h': 53 | { 54 | usage( basename( argv[ 0 ] ) ); 55 | return 0; 56 | } 57 | case 'f': 58 | { 59 | memcpy( cfg_file, optarg, strlen( optarg ) ); 60 | break; 61 | } 62 | case '?': 63 | { 64 | log( LOG_ERR, __FILE__, __LINE__, "un-recognized option %c", option ); 65 | usage( basename( argv[ 0 ] ) ); 66 | return 1; 67 | } 68 | } 69 | } 70 | 71 | // 读取配置文件 72 | if( cfg_file[0] == '\0' ) 73 | { 74 | log( LOG_ERR, __FILE__, __LINE__, "%s", "please specify the config file" ); 75 | return 1; 76 | } 77 | int cfg_fd = open( cfg_file, O_RDONLY ); 78 | if( !cfg_fd ) 79 | { 80 | log( LOG_ERR, __FILE__, __LINE__, "read config file met error: %s", strerror( errno ) ); 81 | return 1; 82 | } 83 | struct stat ret_stat; 84 | if( fstat( cfg_fd, &ret_stat ) < 0 ) 85 | { 86 | log( LOG_ERR, __FILE__, __LINE__, "read config file met error: %s", strerror( errno ) ); 87 | return 1; 88 | } 89 | char* 
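/* read the whole config file into one heap buffer sized from fstat(); it is parsed line by line below */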
buf = new char [ret_stat.st_size + 1]; 90 | memset( buf, '\0', ret_stat.st_size + 1 ); 91 | /* 把配置文件读入buf中 */ 92 | ssize_t read_sz = read( cfg_fd, buf, ret_stat.st_size ); 93 | if ( read_sz < 0 ) 94 | { 95 | log( LOG_ERR, __FILE__, __LINE__, "read config file met error: %s", strerror( errno ) ); 96 | return 1; 97 | } 98 | vector< host > balance_srv; // 本机地址 99 | vector< host > logical_srv; // 实际的要负载均衡的主机地址 100 | host tmp_host; 101 | memset( tmp_host.m_hostname, '\0', 1024 ); 102 | char* tmp_hostname; 103 | char* tmp_port; 104 | char* tmp_conncnt; 105 | bool opentag = false; 106 | char* tmp = buf; 107 | char* tmp2 = NULL; 108 | char* tmp3 = NULL; 109 | char* tmp4 = NULL; 110 | /* The strpbrk() function locates the first occurrence in the string s('\n') of any of */ 111 | /* the bytes in the string(tmp) accept. */ 112 | // 解析配置文件 113 | while( tmp2 = strpbrk( tmp, "\n" ) ) 114 | { 115 | *tmp2++ = '\0'; 116 | /* The strstr() function finds the first occurrence of the substring needle("") in the */ 117 | /* string haystack(tmp). The terminating null bytes ('\0') are not compared. */ 118 | if( strstr( tmp, "" ) ) 119 | { 120 | if( opentag ) 121 | { 122 | log( LOG_ERR, __FILE__, __LINE__, "%s", "parse config file failed" ); 123 | return 1; 124 | } 125 | opentag = true; 126 | } 127 | else if( strstr( tmp, "" ) ) 128 | { 129 | if( !opentag ) 130 | { 131 | log( LOG_ERR, __FILE__, __LINE__, "%s", "parse config file failed" ); 132 | return 1; 133 | } 134 | logical_srv.push_back( tmp_host ); 135 | memset( tmp_host.m_hostname, '\0', 1024 ); 136 | opentag = false; 137 | } 138 | else if( tmp3 = strstr( tmp, "" ) ) 139 | { 140 | tmp_hostname = tmp3 + 6; 141 | tmp4 = strstr( tmp_hostname, "" ); 142 | if( !tmp4 ) 143 | { 144 | log( LOG_ERR, __FILE__, __LINE__, "%s", "parse config file failed" ); 145 | return 1; 146 | } 147 | *tmp4 = '\0'; 148 | memcpy( tmp_host.m_hostname, tmp_hostname, strlen( tmp_hostname ) ); 149 | } 150 | else if( tmp3 = strstr( tmp, "" ) ) 151 | { 152 | tmp_port = tmp3 + 6; 153 | tmp4 = strstr( tmp_port, "" ); 154 | if( !tmp4 ) 155 | { 156 | log( LOG_ERR, __FILE__, __LINE__, "%s", "parse config file failed" ); 157 | return 1; 158 | } 159 | *tmp4 = '\0'; 160 | tmp_host.m_port = atoi( tmp_port ); 161 | } 162 | else if( tmp3 = strstr( tmp, "" ) ) 163 | { 164 | tmp_conncnt = tmp3 + 7; 165 | tmp4 = strstr( tmp_conncnt, "" ); 166 | if( !tmp4 ) 167 | { 168 | log( LOG_ERR, __FILE__, __LINE__, "%s", "parse config file failed" ); 169 | return 1; 170 | } 171 | *tmp4 = '\0'; 172 | tmp_host.m_conncnt = atoi( tmp_conncnt ); 173 | } 174 | else if( tmp3 = strstr( tmp, "Listen" ) ) 175 | { 176 | tmp_hostname = tmp3 + 6; 177 | tmp4 = strstr( tmp_hostname, ":" ); 178 | if( !tmp4 ) 179 | { 180 | log( LOG_ERR, __FILE__, __LINE__, "%s", "parse config file failed" ); 181 | return 1; 182 | } 183 | *tmp4++ = '\0'; 184 | tmp_host.m_port = atoi( tmp4 ); 185 | memcpy( tmp_host.m_hostname, tmp3, strlen( tmp3 ) ); 186 | balance_srv.push_back( tmp_host ); 187 | memset( tmp_host.m_hostname, '\0', 1024 ); 188 | } 189 | tmp = tmp2; 190 | } 191 | 192 | if( balance_srv.size() == 0 || logical_srv.size() == 0 ) 193 | { 194 | log( LOG_ERR, __FILE__, __LINE__, "%s", "parse config file failed" ); 195 | return 1; 196 | } 197 | const char* ip = balance_srv[0].m_hostname; 198 | int port = balance_srv[0].m_port; 199 | 200 | int listenfd = socket( PF_INET, SOCK_STREAM, 0 ); 201 | assert( listenfd >= 0 ); 202 | 203 | int ret = 0; 204 | struct sockaddr_in address; 205 | bzero( &address, sizeof( address ) ); 206 | address.sin_family = 
AF_INET; 207 | inet_pton( AF_INET, ip, &address.sin_addr ); 208 | address.sin_port = htons( port ); 209 | 210 | ret = bind( listenfd, ( struct sockaddr* )&address, sizeof( address ) ); 211 | assert( ret != -1 ); 212 | 213 | ret = listen( listenfd, 5 ); 214 | assert( ret != -1 ); 215 | 216 | //memset( cfg_host.m_hostname, '\0', 1024 ); 217 | //memcpy( cfg_host.m_hostname, "127.0.0.1", strlen( "127.0.0.1" ) ); 218 | //cfg_host.m_port = 54321; 219 | //cfg_host.m_conncnt = 5; 220 | processpool< conn, host, mgr >* pool = processpool< conn, host, mgr >::create( listenfd, logical_srv.size() ); 221 | if( pool ) 222 | { 223 | pool->run( logical_srv ); 224 | delete pool; 225 | } 226 | 227 | close( listenfd ); 228 | return 0; 229 | } 230 | -------------------------------------------------------------------------------- /mgr.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #include 18 | #include "log.h" 19 | #include "mgr.h" 20 | 21 | using std::pair; 22 | 23 | int mgr::m_epollfd = -1; 24 | int mgr::conn2srv( const sockaddr_in& address ) 25 | { 26 | int sockfd = socket( PF_INET, SOCK_STREAM, 0 ); 27 | if( sockfd < 0 ) 28 | { 29 | return -1; 30 | } 31 | 32 | if ( connect( sockfd, ( struct sockaddr* )&address, sizeof( address ) ) != 0 ) 33 | { 34 | log(LOG_INFO, __FILE__, __LINE__, "connect error"); 35 | close( sockfd ); 36 | return -1; 37 | } 38 | return sockfd; 39 | } 40 | 41 | mgr::mgr( int epollfd, const host& srv ) : m_logic_srv( srv ) 42 | { 43 | m_epollfd = epollfd; 44 | int ret = 0; 45 | struct sockaddr_in address; 46 | bzero( &address, sizeof( address ) ); 47 | address.sin_family = AF_INET; 48 | inet_pton( AF_INET, srv.m_hostname, &address.sin_addr ); 49 | address.sin_port = htons( srv.m_port ); 50 | log( LOG_INFO, __FILE__, __LINE__, "logcial srv host info: (%s, %d)", srv.m_hostname, srv.m_port ); 51 | 52 | for( int i = 0; i < srv.m_conncnt; ++i ) 53 | { 54 | sleep( 1 ); 55 | int sockfd = conn2srv( address ); 56 | if( sockfd < 0 ) 57 | { 58 | log( LOG_ERR, __FILE__, __LINE__, "build connection %d failed", i ); 59 | } 60 | else 61 | { 62 | log( LOG_INFO, __FILE__, __LINE__, "build connection %d to server success", i ); 63 | conn* tmp = NULL; 64 | try 65 | { 66 | tmp = new conn; 67 | } 68 | catch( ... 
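/* allocating the conn object failed: drop this socket and keep building the rest of the pool */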
) 69 | { 70 | close( sockfd ); 71 | continue; 72 | } 73 | tmp->init_srv( sockfd, address ); 74 | m_conns.insert( pair< int, conn* >( sockfd, tmp ) ); 75 | } 76 | } 77 | } 78 | 79 | mgr::~mgr() 80 | { 81 | } 82 | 83 | int mgr::get_used_conn_cnt() 84 | { 85 | return m_used.size(); 86 | } 87 | 88 | conn* mgr::pick_conn( int cltfd ) 89 | { 90 | if( m_conns.empty() ) 91 | { 92 | log( LOG_ERR, __FILE__, __LINE__, "%s", "not enough srv connections to server" ); 93 | return NULL; 94 | } 95 | 96 | map< int, conn* >::iterator iter = m_conns.begin(); 97 | int srvfd = iter->first; 98 | conn* tmp = iter->second; 99 | if( !tmp ) 100 | { 101 | log( LOG_ERR, __FILE__, __LINE__, "%s", "empty server connection object" ); 102 | return NULL; 103 | } 104 | m_conns.erase( iter ); 105 | m_used.insert( pair< int, conn* >( cltfd, tmp ) ); 106 | m_used.insert( pair< int, conn* >( srvfd, tmp ) ); 107 | add_read_fd( m_epollfd, cltfd ); 108 | add_read_fd( m_epollfd, srvfd ); 109 | log( LOG_INFO, __FILE__, __LINE__, "bind client sock %d with server sock %d", cltfd, srvfd ); 110 | return tmp; 111 | } 112 | 113 | void mgr::free_conn( conn* connection ) 114 | { 115 | int cltfd = connection->m_cltfd; 116 | int srvfd = connection->m_srvfd; 117 | closefd( m_epollfd, cltfd ); 118 | closefd( m_epollfd, srvfd ); 119 | m_used.erase( cltfd ); 120 | m_used.erase( srvfd ); 121 | connection->reset(); 122 | m_freed.insert( pair< int, conn* >( srvfd, connection ) ); 123 | } 124 | 125 | void mgr::recycle_conns() 126 | { 127 | if( m_freed.empty() ) 128 | { 129 | return; 130 | } 131 | for( map< int, conn* >::iterator iter = m_freed.begin(); iter != m_freed.end(); iter++ ) 132 | { 133 | sleep( 1 ); 134 | int srvfd = iter->first; 135 | conn* tmp = iter->second; 136 | srvfd = conn2srv( tmp->m_srv_address ); 137 | if( srvfd < 0 ) 138 | { 139 | log( LOG_ERR, __FILE__, __LINE__, "%s", "fix connection failed"); 140 | } 141 | else 142 | { 143 | log( LOG_INFO, __FILE__, __LINE__, "%s", "fix connection success" ); 144 | tmp->init_srv( srvfd, tmp->m_srv_address ); 145 | m_conns.insert( pair< int, conn* >( srvfd, tmp ) ); 146 | } 147 | } 148 | m_freed.clear(); 149 | } 150 | 151 | RET_CODE mgr::process( int fd, OP_TYPE type ) 152 | { 153 | conn* connection = m_used[ fd ]; 154 | if( !connection ) 155 | { 156 | return NOTHING; 157 | } 158 | if( connection->m_cltfd == fd ) 159 | { 160 | int srvfd = connection->m_srvfd; 161 | switch( type ) 162 | { 163 | case READ: 164 | { 165 | RET_CODE res = connection->read_clt(); 166 | switch( res ) 167 | { 168 | case OK: 169 | { 170 | log( LOG_DEBUG, __FILE__, __LINE__, "content read from client: %s", connection->m_clt_buf ); 171 | } 172 | case BUFFER_FULL: 173 | { 174 | modfd( m_epollfd, srvfd, EPOLLOUT ); 175 | break; 176 | } 177 | case IOERR: 178 | case CLOSED: 179 | { 180 | free_conn( connection ); 181 | return CLOSED; 182 | } 183 | default: 184 | break; 185 | } 186 | if( connection->m_srv_closed ) 187 | { 188 | free_conn( connection ); 189 | return CLOSED; 190 | } 191 | break; 192 | } 193 | case WRITE: 194 | { 195 | RET_CODE res = connection->write_clt(); 196 | switch( res ) 197 | { 198 | case TRY_AGAIN: 199 | { 200 | modfd( m_epollfd, fd, EPOLLOUT ); 201 | break; 202 | } 203 | case BUFFER_EMPTY: 204 | { 205 | modfd( m_epollfd, srvfd, EPOLLIN ); 206 | modfd( m_epollfd, fd, EPOLLIN ); 207 | break; 208 | } 209 | case IOERR: 210 | case CLOSED: 211 | { 212 | free_conn( connection ); 213 | return CLOSED; 214 | } 215 | default: 216 | break; 217 | } 218 | if( connection->m_srv_closed ) 219 | { 220 | free_conn( 
connection ); 221 | return CLOSED; 222 | } 223 | break; 224 | } 225 | default: 226 | { 227 | log( LOG_ERR, __FILE__, __LINE__, "%s", "other operation not support yet" ); 228 | break; 229 | } 230 | } 231 | } 232 | else if( connection->m_srvfd == fd ) 233 | { 234 | int cltfd = connection->m_cltfd; 235 | switch( type ) 236 | { 237 | case READ: 238 | { 239 | RET_CODE res = connection->read_srv(); 240 | switch( res ) 241 | { 242 | case OK: 243 | { 244 | log( LOG_DEBUG, __FILE__, __LINE__, "content read from server: %s", connection->m_srv_buf ); 245 | } 246 | case BUFFER_FULL: 247 | { 248 | modfd( m_epollfd, cltfd, EPOLLOUT ); 249 | break; 250 | } 251 | case IOERR: 252 | case CLOSED: 253 | { 254 | modfd( m_epollfd, cltfd, EPOLLOUT ); 255 | connection->m_srv_closed = true; 256 | break; 257 | } 258 | default: 259 | break; 260 | } 261 | break; 262 | } 263 | case WRITE: 264 | { 265 | RET_CODE res = connection->write_srv(); 266 | switch( res ) 267 | { 268 | case TRY_AGAIN: 269 | { 270 | modfd( m_epollfd, fd, EPOLLOUT ); 271 | break; 272 | } 273 | case BUFFER_EMPTY: 274 | { 275 | modfd( m_epollfd, cltfd, EPOLLIN ); 276 | modfd( m_epollfd, fd, EPOLLIN ); 277 | break; 278 | } 279 | case IOERR: 280 | case CLOSED: 281 | { 282 | /* 283 | if( connection->m_srv_write_idx == connection->m_srvread_idx ) 284 | { 285 | free_conn( connection ); 286 | } 287 | else 288 | { 289 | modfd( m_epollfd, cltfd, EPOLLOUT ); 290 | } 291 | */ 292 | modfd( m_epollfd, cltfd, EPOLLOUT ); 293 | connection->m_srv_closed = true; 294 | break; 295 | } 296 | default: 297 | break; 298 | } 299 | break; 300 | } 301 | default: 302 | { 303 | log( LOG_ERR, __FILE__, __LINE__, "%s", "other operation not support yet" ); 304 | break; 305 | } 306 | } 307 | } 308 | else 309 | { 310 | return NOTHING; 311 | } 312 | return OK; 313 | } 314 | -------------------------------------------------------------------------------- /processpool.h: -------------------------------------------------------------------------------- 1 | #ifndef PROCESSPOOL_H 2 | #define PROCESSPOOL_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include "log.h" 21 | #include "fdwrapper.h" 22 | 23 | using std::vector; 24 | 25 | class process 26 | { 27 | public: 28 | process() : m_pid( -1 ){} 29 | 30 | public: 31 | int m_busy_ratio; 32 | pid_t m_pid; 33 | int m_pipefd[2]; 34 | }; 35 | 36 | template< typename C, typename H, typename M > 37 | class processpool 38 | { 39 | private: 40 | processpool( int listenfd, int process_number = 8 ); 41 | public: 42 | // 返回一个线程池实例 43 | static processpool< C, H, M >* create( int listenfd, int process_number = 8 ) 44 | { 45 | if( !m_instance ) 46 | { 47 | m_instance = new processpool< C, H, M >( listenfd, process_number ); 48 | } 49 | return m_instance; 50 | } 51 | ~processpool() 52 | { 53 | delete [] m_sub_process; 54 | } 55 | void run( const vector& arg ); 56 | 57 | private: 58 | void notify_parent_busy_ratio( int pipefd, M* manager ); 59 | int get_most_free_srv(); 60 | void setup_sig_pipe(); 61 | void run_parent(); 62 | void run_child( const vector& arg ); 63 | 64 | private: 65 | static const int MAX_PROCESS_NUMBER = 16; 66 | static const int USER_PER_PROCESS = 65536; 67 | static const int MAX_EVENT_NUMBER = 10000; 68 | int m_process_number; 69 | int m_idx; 70 | int m_epollfd; 71 | int m_listenfd; 72 | int m_stop; 73 | process* m_sub_process; 74 | static processpool< C, 
H, M >* m_instance; 75 | }; 76 | template< typename C, typename H, typename M > 77 | processpool< C, H, M >* processpool< C, H, M >::m_instance = NULL; 78 | 79 | static int EPOLL_WAIT_TIME = 5000; 80 | static int sig_pipefd[2]; 81 | static void sig_handler( int sig ) 82 | { 83 | int save_errno = errno; 84 | int msg = sig; 85 | send( sig_pipefd[1], ( char* )&msg, 1, 0 ); 86 | errno = save_errno; 87 | } 88 | 89 | static void addsig( int sig, void( handler )(int), bool restart = true ) 90 | { 91 | struct sigaction sa; 92 | memset( &sa, '\0', sizeof( sa ) ); 93 | sa.sa_handler = handler; 94 | if( restart ) 95 | { 96 | sa.sa_flags |= SA_RESTART; 97 | } 98 | sigfillset( &sa.sa_mask ); 99 | assert( sigaction( sig, &sa, NULL ) != -1 ); 100 | } 101 | 102 | template< typename C, typename H, typename M > 103 | processpool< C, H, M >::processpool( int listenfd, int process_number ) 104 | : m_listenfd( listenfd ), m_process_number( process_number ), m_idx( -1 ), m_stop( false ) 105 | { 106 | assert( ( process_number > 0 ) && ( process_number <= MAX_PROCESS_NUMBER ) ); 107 | 108 | m_sub_process = new process[ process_number ]; 109 | assert( m_sub_process ); 110 | 111 | for( int i = 0; i < process_number; ++i ) 112 | { 113 | // Unix domain 114 | int ret = socketpair( PF_UNIX, SOCK_STREAM, 0, m_sub_process[i].m_pipefd ); 115 | assert( ret == 0 ); 116 | 117 | m_sub_process[i].m_pid = fork(); 118 | assert( m_sub_process[i].m_pid >= 0 ); 119 | // father 120 | if( m_sub_process[i].m_pid > 0 ) 121 | { 122 | close( m_sub_process[i].m_pipefd[1] ); 123 | m_sub_process[i].m_busy_ratio = 0; 124 | continue; 125 | } 126 | // child 127 | else 128 | { 129 | close( m_sub_process[i].m_pipefd[0] ); 130 | m_idx = i; 131 | break; 132 | } 133 | } 134 | } 135 | 136 | template< typename C, typename H, typename M > 137 | int processpool< C, H, M >::get_most_free_srv() 138 | { 139 | int ratio = m_sub_process[0].m_busy_ratio; 140 | int idx = 0; 141 | for( int i = 0; i < m_process_number; ++i ) 142 | { 143 | if( m_sub_process[i].m_busy_ratio < ratio ) 144 | { 145 | idx = i; 146 | ratio = m_sub_process[i].m_busy_ratio; 147 | } 148 | } 149 | return idx; 150 | } 151 | 152 | template< typename C, typename H, typename M > 153 | void processpool< C, H, M >::setup_sig_pipe() 154 | { 155 | m_epollfd = epoll_create( 5 ); 156 | assert( m_epollfd != -1 ); 157 | 158 | int ret = socketpair( PF_UNIX, SOCK_STREAM, 0, sig_pipefd ); 159 | assert( ret != -1 ); 160 | 161 | setnonblocking( sig_pipefd[1] ); 162 | add_read_fd( m_epollfd, sig_pipefd[0] ); 163 | 164 | addsig( SIGCHLD, sig_handler ); 165 | addsig( SIGTERM, sig_handler ); 166 | addsig( SIGINT, sig_handler ); 167 | addsig( SIGPIPE, SIG_IGN ); 168 | } 169 | 170 | template< typename C, typename H, typename M > 171 | void processpool< C, H, M >::run( const vector& arg ) 172 | { 173 | if( m_idx != -1 ) 174 | { 175 | run_child( arg ); 176 | return; 177 | } 178 | run_parent(); 179 | } 180 | 181 | template< typename C, typename H, typename M > 182 | void processpool< C, H, M >::notify_parent_busy_ratio( int pipefd, M* manager ) 183 | { 184 | int msg = manager->get_used_conn_cnt(); 185 | send( pipefd, ( char* )&msg, 1, 0 ); 186 | } 187 | 188 | template< typename C, typename H, typename M > 189 | void processpool< C, H, M >::run_child( const vector& arg ) 190 | { 191 | setup_sig_pipe(); 192 | 193 | int pipefd_read = m_sub_process[m_idx].m_pipefd[ 1 ]; 194 | add_read_fd( m_epollfd, pipefd_read ); 195 | 196 | epoll_event events[ MAX_EVENT_NUMBER ]; 197 | 198 | M* manager = new M( m_epollfd, arg[m_idx] 
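/* each child process owns one manager bound to its own logical server from the config */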
); 199 | assert( manager ); 200 | 201 | int number = 0; 202 | int ret = -1; 203 | 204 | while( ! m_stop ) 205 | { 206 | number = epoll_wait( m_epollfd, events, MAX_EVENT_NUMBER, EPOLL_WAIT_TIME ); 207 | if ( ( number < 0 ) && ( errno != EINTR ) ) 208 | { 209 | log( LOG_ERR, __FILE__, __LINE__, "%s", "epoll failure" ); 210 | break; 211 | } 212 | 213 | if( number == 0 ) 214 | { 215 | manager->recycle_conns(); 216 | continue; 217 | } 218 | 219 | for ( int i = 0; i < number; i++ ) 220 | { 221 | int sockfd = events[i].data.fd; 222 | if( ( sockfd == pipefd_read ) && ( events[i].events & EPOLLIN ) ) 223 | { 224 | int client = 0; 225 | ret = recv( sockfd, ( char* )&client, sizeof( client ), 0 ); 226 | if( ( ( ret < 0 ) && ( errno != EAGAIN ) ) || ret == 0 ) 227 | { 228 | continue; 229 | } 230 | else 231 | { 232 | struct sockaddr_in client_address; 233 | socklen_t client_addrlength = sizeof( client_address ); 234 | int connfd = accept( m_listenfd, ( struct sockaddr* )&client_address, &client_addrlength ); 235 | if ( connfd < 0 ) 236 | { 237 | log( LOG_ERR, __FILE__, __LINE__, "errno: %s", strerror( errno ) ); 238 | continue; 239 | } 240 | add_read_fd( m_epollfd, connfd ); 241 | C* conn = manager->pick_conn( connfd ); 242 | if( !conn ) 243 | { 244 | closefd( m_epollfd, connfd ); 245 | continue; 246 | } 247 | conn->init_clt( connfd, client_address ); 248 | notify_parent_busy_ratio( pipefd_read, manager ); 249 | } 250 | } 251 | else if( ( sockfd == sig_pipefd[0] ) && ( events[i].events & EPOLLIN ) ) 252 | { 253 | int sig; 254 | char signals[1024]; 255 | ret = recv( sig_pipefd[0], signals, sizeof( signals ), 0 ); 256 | if( ret <= 0 ) 257 | { 258 | continue; 259 | } 260 | else 261 | { 262 | for( int i = 0; i < ret; ++i ) 263 | { 264 | switch( signals[i] ) 265 | { 266 | case SIGCHLD: 267 | { 268 | pid_t pid; 269 | int stat; 270 | while ( ( pid = waitpid( -1, &stat, WNOHANG ) ) > 0 ) 271 | { 272 | continue; 273 | } 274 | break; 275 | } 276 | case SIGTERM: 277 | case SIGINT: 278 | { 279 | m_stop = true; 280 | break; 281 | } 282 | default: 283 | { 284 | break; 285 | } 286 | } 287 | } 288 | } 289 | } 290 | else if( events[i].events & EPOLLIN ) 291 | { 292 | RET_CODE result = manager->process( sockfd, READ ); 293 | switch( result ) 294 | { 295 | case CLOSED: 296 | { 297 | notify_parent_busy_ratio( pipefd_read, manager ); 298 | break; 299 | } 300 | default: 301 | break; 302 | } 303 | } 304 | else if( events[i].events & EPOLLOUT ) 305 | { 306 | RET_CODE result = manager->process( sockfd, WRITE ); 307 | switch( result ) 308 | { 309 | case CLOSED: 310 | { 311 | notify_parent_busy_ratio( pipefd_read, manager ); 312 | break; 313 | } 314 | default: 315 | break; 316 | } 317 | } 318 | else 319 | { 320 | continue; 321 | } 322 | } 323 | } 324 | 325 | close( pipefd_read ); 326 | close( m_epollfd ); 327 | } 328 | 329 | template< typename C, typename H, typename M > 330 | void processpool< C, H, M >::run_parent() 331 | { 332 | setup_sig_pipe(); 333 | 334 | for( int i = 0; i < m_process_number; ++i ) 335 | { 336 | add_read_fd( m_epollfd, m_sub_process[i].m_pipefd[ 0 ] ); 337 | } 338 | 339 | add_read_fd( m_epollfd, m_listenfd ); 340 | 341 | epoll_event events[ MAX_EVENT_NUMBER ]; 342 | int sub_process_counter = 0; 343 | int new_conn = 1; 344 | int number = 0; 345 | int ret = -1; 346 | 347 | while( ! 
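/* parent loop: forward each new connection to the least busy child and collect the children's load reports */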
m_stop ) 348 | { 349 | number = epoll_wait( m_epollfd, events, MAX_EVENT_NUMBER, EPOLL_WAIT_TIME ); 350 | if ( ( number < 0 ) && ( errno != EINTR ) ) 351 | { 352 | log( LOG_ERR, __FILE__, __LINE__, "%s", "epoll failure" ); 353 | break; 354 | } 355 | 356 | for ( int i = 0; i < number; i++ ) 357 | { 358 | int sockfd = events[i].data.fd; 359 | if( sockfd == m_listenfd ) 360 | { 361 | /* 362 | int i = sub_process_counter; 363 | do 364 | { 365 | if( m_sub_process[i].m_pid != -1 ) 366 | { 367 | break; 368 | } 369 | i = (i+1)%m_process_number; 370 | } 371 | while( i != sub_process_counter ); 372 | 373 | if( m_sub_process[i].m_pid == -1 ) 374 | { 375 | m_stop = true; 376 | break; 377 | } 378 | sub_process_counter = (i+1)%m_process_number; 379 | */ 380 | int idx = get_most_free_srv(); 381 | send( m_sub_process[idx].m_pipefd[0], ( char* )&new_conn, sizeof( new_conn ), 0 ); 382 | log( LOG_INFO, __FILE__, __LINE__, "send request to child %d", idx ); 383 | } 384 | else if( ( sockfd == sig_pipefd[0] ) && ( events[i].events & EPOLLIN ) ) 385 | { 386 | int sig; 387 | char signals[1024]; 388 | ret = recv( sig_pipefd[0], signals, sizeof( signals ), 0 ); 389 | if( ret <= 0 ) 390 | { 391 | continue; 392 | } 393 | else 394 | { 395 | for( int i = 0; i < ret; ++i ) 396 | { 397 | switch( signals[i] ) 398 | { 399 | case SIGCHLD: 400 | { 401 | pid_t pid; 402 | int stat; 403 | while ( ( pid = waitpid( -1, &stat, WNOHANG ) ) > 0 ) 404 | { 405 | for( int i = 0; i < m_process_number; ++i ) 406 | { 407 | if( m_sub_process[i].m_pid == pid ) 408 | { 409 | log( LOG_INFO, __FILE__, __LINE__, "child %d join", i ); 410 | close( m_sub_process[i].m_pipefd[0] ); 411 | m_sub_process[i].m_pid = -1; 412 | } 413 | } 414 | } 415 | m_stop = true; 416 | for( int i = 0; i < m_process_number; ++i ) 417 | { 418 | if( m_sub_process[i].m_pid != -1 ) 419 | { 420 | m_stop = false; 421 | } 422 | } 423 | break; 424 | } 425 | case SIGTERM: 426 | case SIGINT: 427 | { 428 | log( LOG_INFO, __FILE__, __LINE__, "%s", "kill all the clild now" ); 429 | for( int i = 0; i < m_process_number; ++i ) 430 | { 431 | int pid = m_sub_process[i].m_pid; 432 | if( pid != -1 ) 433 | { 434 | kill( pid, SIGTERM ); 435 | } 436 | } 437 | break; 438 | } 439 | default: 440 | { 441 | break; 442 | } 443 | } 444 | } 445 | } 446 | } 447 | else if( events[i].events & EPOLLIN ) 448 | { 449 | int busy_ratio = 0; 450 | ret = recv( sockfd, ( char* )&busy_ratio, sizeof( busy_ratio ), 0 ); 451 | if( ( ( ret < 0 ) && ( errno != EAGAIN ) ) || ret == 0 ) 452 | { 453 | continue; 454 | } 455 | for( int i = 0; i < m_process_number; ++i ) 456 | { 457 | if( sockfd == m_sub_process[i].m_pipefd[0] ) 458 | { 459 | m_sub_process[i].m_busy_ratio = busy_ratio; 460 | break; 461 | } 462 | } 463 | continue; 464 | } 465 | } 466 | } 467 | 468 | for( int i = 0; i < m_process_number; ++i ) 469 | { 470 | closefd( m_epollfd, m_sub_process[i].m_pipefd[ 0 ] ); 471 | } 472 | close( m_epollfd ); 473 | } 474 | 475 | #endif 476 | --------------------------------------------------------------------------------