├── Makefile ├── README.md ├── example └── echo │ ├── Makefile │ ├── client.cc │ └── server.cc ├── mnet.cc └── mnet.h /Makefile: -------------------------------------------------------------------------------- 1 | FAGS=-O3 2 | CC=g++ 3 | all: libmnet 4 | 5 | mnet: mnet.h mnet.cc 6 | $(CC) -c -g $(FLAGS) mnet.cc 7 | 8 | libmnet: mnet 9 | ar rcs libmnet.a mnet.o 10 | clean: 11 | rm -f *.o *a 12 | 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | An Ultimate Small Proactor Network Library in C++03 2 | ================================================== 3 | 4 | # Introduction 5 | MNet is a small proactor library that works only on Linux. It uses epoll edge trigger to achieve performance while reserving easy to use by implementing a proactor based library. 6 | 7 | The MNet library is extreamly small , however enough for most of the intranet or IPV4 TCP tasks. It only supports TCP protocol with IPV4. It is a C++ 03 compatible library , however user doesn't need to use inheritance to implement callback function. Actually MNet has a builtin callback library to enable user uses signature based way to implement callback function. 8 | 9 | MNet is also designed to achieve high performance. It utilize epoll function with edge trigger, also it uses scatter read to minimize system call at most. Lastly, since edge trigger has special property, which allows MNet to call epoll_ctl for each file descriptor at most twice . Unlike boost::asio, each time a event happen , 2 system call will have to be called. In MNet, it achieves nearly the same semantic with boost::asio which is regarded to be a easy to use library, however user will not need to pay so much on system call. 10 | 11 | Although it has so many features, it is extreamly easy to be used. 
Since MNet doesn't use inheritance, user will only need to know several objects' function and some function signature, then user are good to go. You can checkout example directory for more examples. 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /example/echo/Makefile: -------------------------------------------------------------------------------- 1 | all: server.cc client.cc 2 | g++ -g server.cc ../../mnet.h ../../mnet.cc -o server 3 | g++ -g client.cc ../../mnet.h ../../mnet.cc -o client 4 | 5 | .PHONY: clean 6 | 7 | clean: 8 | rm -r server client 9 | -------------------------------------------------------------------------------- /example/echo/client.cc: -------------------------------------------------------------------------------- 1 | #include "../../mnet.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | using namespace mnet; 9 | 10 | class Client { 11 | public: 12 | Client( const char* str , int num ) : 13 | io_manager_(), 14 | info_(str), 15 | client_socket_array_() 16 | { 17 | client_socket_array_.reserve( num ); 18 | for( int i = 0 ; i AsyncConnect( Endpoint("127.0.0.1:12345"),this ); 21 | client_socket_array_.push_back(s); 22 | } 23 | } 24 | 25 | 26 | void Run() { 27 | io_manager_.RunMainLoop(); 28 | } 29 | 30 | void OnConnect( Socket* socket , const NetState& state ); 31 | void OnWrite( Socket* socket , size_t size , const NetState& state ); 32 | void OnRead( Socket* socket , size_t size , const NetState& state ); 33 | 34 | private: 35 | IOManager io_manager_; 36 | std::string info_; 37 | std::vector< ClientSocket* > client_socket_array_; 38 | }; 39 | 40 | 41 | void Client::OnConnect( Socket* socket , const NetState& ok ) { 42 | if(!ok) { 43 | std::cerr<<"Cannot connect:"<write_buffer().Write( info_.c_str() , info_.size() ); 49 | socket->write_buffer().Write(&end,1); 50 | socket->AsyncWrite(this); 51 | } 52 | } 53 | 54 | void Client::OnWrite( Socket* socket, size_t size , 
const NetState& ok ) { 55 | if(!ok) { 56 | std::cerr<<"Cannot write:"<AsyncRead(this); 61 | } 62 | } 63 | 64 | void Client::OnRead( Socket* socket, size_t size , const NetState& ok ) { 65 | if(!ok) { 66 | std::cerr<<"Cannot read:"<read_buffer().readable_size(); 75 | void* mem = socket->read_buffer().Read(&read_sz); 76 | std::cout<<(char*)(mem)<AsyncRead(this); 78 | return; 79 | } 80 | } 81 | 82 | 83 | int main( int argc , char* argv[] ) { 84 | if( argc != 3 && argc != 4 ) { 85 | std::cerr<<"Usage: string/file"< ibeg(file) , iend; 99 | file>>std::noskipws; 100 | std::string buf; 101 | 102 | while( ibeg != iend ) { 103 | buf.push_back( *ibeg ); 104 | ++ibeg; 105 | } 106 | 107 | Client c(buf.c_str(),atoi(argv[3])); 108 | c.Run(); 109 | } 110 | return 0; 111 | } 112 | -------------------------------------------------------------------------------- /example/echo/server.cc: -------------------------------------------------------------------------------- 1 | #include "../../mnet.h" 2 | #include 3 | using namespace mnet; 4 | 5 | class Server { 6 | public: 7 | Server(); 8 | 9 | void OnAccept( Socket* new_socket , const NetState& ok ); 10 | void OnRead( Socket* socket , std::size_t size , const NetState& ok ); 11 | void OnWrite( Socket* socket , std::size_t size , const NetState& ok ); 12 | void Run() { 13 | io_manager_.RunMainLoop(); 14 | } 15 | void WakeUp() { 16 | io_manager_.Interrupt(); 17 | } 18 | 19 | private: 20 | int times_; // Times for each echo request 21 | int fd_fail_; 22 | ServerSocket server_; // ServerSocket 23 | IOManager io_manager_; // IOManager 24 | Socket* socket_; 25 | }; 26 | 27 | 28 | Server::Server(): 29 | times_(0), 30 | fd_fail_(0), 31 | server_(), 32 | io_manager_(), 33 | socket_( NULL ) 34 | { 35 | server_.Bind( Endpoint( "127.0.0.1:12345" ) ); 36 | server_.SetIOManager(&io_manager_); 37 | socket_ = new Socket( &io_manager_ ); 38 | server_.AsyncAccept( socket_ , this ); 39 | } 40 | 41 | void Server::OnAccept( Socket* new_socket , const NetState& 
ok ) { 42 | if(ok) { 43 | // Start to read 44 | new_socket->AsyncRead( this ); 45 | ++times_; 46 | server_.AsyncAccept( new Socket( &io_manager_ ) , this ); 47 | } else { 48 | delete new_socket; 49 | if( ok.error_code() != EMFILE && 50 | ok.error_code() != ENFILE ) { 51 | } else { 52 | std::cout<<"Run out FD!Force to shutdown!"<Close(); 64 | delete socket; 65 | } else { 66 | // Dump whatever we have here 67 | std::size_t read_sz = socket->read_buffer().readable_size(); 68 | void* buf = socket->read_buffer().Read(&read_sz); 69 | socket->write_buffer().Write( buf , read_sz ); 70 | socket->AsyncWrite( this ); 71 | socket->AsyncRead( this ); 72 | } 73 | } else { 74 | socket->Close(); 75 | delete socket; 76 | } 77 | } 78 | 79 | void Server::OnWrite( Socket* socket , std::size_t size , const NetState& ok ) { 80 | } 81 | 82 | Server* GServer; 83 | 84 | void onsignal( int val ) { 85 | GServer->WakeUp(); 86 | } 87 | 88 | int main() { 89 | Server s; 90 | signal(SIGTERM,onsignal); 91 | signal(SIGINT,onsignal); 92 | signal(SIGTSTP,onsignal); 93 | signal(SIGPIPE,SIG_IGN); 94 | GServer = &s; 95 | s.Run(); 96 | std::cout<<"Done!"< 3 | #include 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | // The DO_INVOKE macro is a trick to help resolve the problem of that 13 | // during the user callback it register a new handler. However if we 14 | // remove the current handler this will remove the newly registered 15 | // handl 16 | 17 | #define DO_INVOKE(X,T,...) 
\ 18 | do { \ 19 | assert(!(X).IsNull()); \ 20 | T cb((X).Release()); \ 21 | cb->Invoke(__VA_ARGS__); \ 22 | } while(false) 23 | 24 | 25 | #define VERIFY(cond) \ 26 | do { \ 27 | if(!(cond)) { \ 28 | fprintf(stderr,"Assertion failed:%s,(%d:%s)",#cond,errno,strerror(errno)); \ 29 | std::abort(); \ 30 | } \ 31 | } while(0) 32 | 33 | 34 | namespace mnet { 35 | namespace detail { 36 | namespace { 37 | 38 | // This function sets the file descriptors to has TCP attributes 39 | // TCP_NODELAY 40 | void SetTcpNoDelay( int fd ) { 41 | int tag = 1; 42 | VERIFY( ::setsockopt(fd, 43 | IPPROTO_TCP, 44 | TCP_NODELAY, 45 | reinterpret_cast(&tag), 46 | sizeof(int)) ==0 ); 47 | } 48 | 49 | // This function sets the file as REUSE the TCP address 50 | void SetReuseAddr( int fd ) { 51 | int tag = 1; 52 | VERIFY( ::setsockopt(fd, 53 | SOL_SOCKET, 54 | SO_REUSEADDR, 55 | reinterpret_cast(&tag), 56 | sizeof(int)) == 0 ); 57 | } 58 | 59 | // This function creates that file descriptors and set its FD has 60 | // 1. O_NONBLOCK 2. 
O_CLOEXEC 61 | int NewFileDescriptor() { 62 | int fd = socket(AF_INET,SOCK_STREAM,IPPROTO_TCP); 63 | if (UNLIKELY(fd <0)) 64 | return -1; 65 | // Setting up the FCNTL 66 | int flag = fcntl(fd,F_GETFL); 67 | flag |= O_NONBLOCK; 68 | flag |= O_CLOEXEC; 69 | fcntl(fd,F_SETFL,flag); 70 | return fd; 71 | } 72 | 73 | uint64_t GetCurrentTimeInMS() { 74 | struct timeval tv; 75 | VERIFY( ::gettimeofday(&tv,NULL) == 0 ); 76 | return tv.tv_sec * 1000 + static_cast(tv.tv_usec/1000); 77 | } 78 | }// namespace 79 | 80 | int CreateTcpFileDescriptor() { 81 | int fd = NewFileDescriptor(); 82 | if( UNLIKELY(fd < 0) ) 83 | return -1; 84 | SetTcpNoDelay(fd); 85 | SetReuseAddr(fd); 86 | return fd; 87 | } 88 | 89 | int CreateTcpServerSocketFileDescriptor() { 90 | int fd = NewFileDescriptor(); 91 | if( UNLIKELY(fd < 0) ) 92 | return -1; 93 | SetReuseAddr(fd); 94 | return fd; 95 | } 96 | 97 | }// namespace detail 98 | 99 | 100 | void Buffer::Grow( std::size_t cap ) { 101 | if( UNLIKELY(cap == 0) ) { 102 | return; 103 | } 104 | const std::size_t sz = cap + readable_size(); 105 | void* mem = malloc(sz); 106 | 107 | // Copy the readable portion of the data into the head of the 108 | // new memory buffer 109 | if( readable_size() > 0 ) 110 | memcpy(mem,static_cast(mem_)+read_ptr_,readable_size()); 111 | write_ptr_ = readable_size(); 112 | read_ptr_ = 0; 113 | capacity_ = sz; 114 | 115 | // Free the old buffer 116 | free(mem_); 117 | mem_ = mem; 118 | } 119 | 120 | void* Buffer::Read( std::size_t* size ) { 121 | // Calculate the possible read size for this buffer 122 | const std::size_t read_sz = 123 | std::min(*size, readable_size() ); 124 | void* mem = static_cast(mem_) + read_ptr_ ; 125 | 126 | // Update the read_ptr since Read is a mutator 127 | read_ptr_ += read_sz; 128 | 129 | RewindBuffer(); 130 | return mem; 131 | } 132 | 133 | bool Buffer::Write( const void* mem , std::size_t length ) { 134 | // Check if we have enough space to hold this memory 135 | if( UNLIKELY(writable_size() < 
length) ) { 136 | if( is_fixed_ ) 137 | return false; 138 | 139 | std::size_t ncap = length > capacity_ ? length : capacity_; 140 | ncap *= 2; 141 | // We cannot hold this buffer now, just grow the buffer here 142 | Grow( ncap ); 143 | } 144 | 145 | memcpy(static_cast(mem_)+write_ptr_,mem,length); 146 | write_ptr_ += length; 147 | return true; 148 | } 149 | 150 | std::size_t Buffer::Fill( const void* mem , std::size_t length ) { 151 | std::size_t sz = std::min(writable_size(),length); 152 | if( UNLIKELY(sz == 0) ) 153 | return 0; 154 | else { 155 | memcpy(static_cast(mem_)+write_ptr_,mem,sz); 156 | write_ptr_ += sz; 157 | return sz; 158 | } 159 | } 160 | 161 | bool Buffer::Inject( const void* mem , std::size_t length ) { 162 | if( UNLIKELY(writable_size() < length) ) { 163 | if( is_fixed_ ) 164 | return false; 165 | Grow(length); 166 | } 167 | memcpy(static_cast(mem_)+write_ptr_,mem,length); 168 | write_ptr_ += length; 169 | assert( write_ptr_ == capacity_ ); 170 | return true; 171 | } 172 | 173 | int Endpoint::Ipv4ToString( char* buf ) const { 174 | // Parsing the IPV4 into the string. The following code should 175 | // only work on little endian 176 | uint32_t c1,c2,c3,c4; 177 | 178 | c1 = ipv4_ &0xff; 179 | c2 = (ipv4_>>8)&0xff; 180 | c3 = (ipv4_>>16)&0xff; 181 | c4 = (ipv4_>>24)&0xff; 182 | 183 | return sprintf(buf,"%d.%d.%d.%d",c4,c3,c2,c1); 184 | } 185 | 186 | int Endpoint::PortToString( char* buf ) const { 187 | return sprintf(buf,"%d",port_); 188 | } 189 | 190 | int Endpoint::StringToIpv4( const char* buf ) { 191 | uint32_t c1,c2,c3,c4; 192 | int len = 0; 193 | char* pend; 194 | 195 | #define PARSE(c) \ 196 | do { \ 197 | errno = 0; \ 198 | (c) = static_cast(std::strtol(buf+len,&pend,10)); \ 199 | if( errno != 0 ) { \ 200 | goto fail; \ 201 | } \ 202 | if( c > 255 || c <0 ) { \ 203 | goto fail; \ 204 | } \ 205 | } while(false) 206 | 207 | #define CHECK() \ 208 | do { \ 209 | if( *pend != '.' 
) \ 210 | goto fail; \ 211 | len = pend-buf+1; \ 212 | } while(false) 213 | 214 | // Component 1 215 | PARSE(c1); 216 | CHECK(); 217 | 218 | // Component 2 219 | PARSE(c2); 220 | CHECK(); 221 | 222 | // Component 3 223 | PARSE(c3); 224 | CHECK(); 225 | 226 | // Component 4 227 | PARSE(c4); 228 | ipv4_ = (c4) | (c3<<8) | (c2<<16) | (c1<<24); 229 | return pend-buf; 230 | 231 | fail: 232 | port_ = kEndpointError; 233 | return -1; 234 | 235 | #undef PARSE 236 | #undef CHECK 237 | } 238 | 239 | int Endpoint::StringToPort( const char* buf ) { 240 | long p; 241 | char* pend; 242 | 243 | errno = 0 ; 244 | p = strtol(buf,&pend,10); 245 | 246 | if( UNLIKELY(errno != 0) ) { 247 | port_ = kEndpointError; 248 | return -1; 249 | } else { 250 | if( UNLIKELY(p > 65535 || p < 0) ) { 251 | port_ = kEndpointError; 252 | return -1; 253 | } else { 254 | port_ = static_cast(p); 255 | } 256 | } 257 | return (pend-buf); 258 | } 259 | 260 | void Socket::OnReadNotify( ) { 261 | set_can_read(true); 262 | // In order to not make the misbehavior program mess up our user space 263 | // memory. 
If we detect that the user has not registered any callback 264 | // function just leave the data inside of the kernel and put the states 265 | // of current Pollable to readable 266 | if( UNLIKELY(user_read_callback_.IsNull()) ) { 267 | return; 268 | } else { 269 | NetState state; 270 | std::size_t read_sz = DoRead(&state); 271 | if( LIKELY(state_ != CLOSING) ) { 272 | // Invoke the callback function 273 | DO_INVOKE( user_read_callback_ , 274 | detail::ScopePtr, 275 | this,read_sz,state); 276 | } else { 277 | if( LIKELY(state) ) { 278 | // We are in closing state, so it should be an asynchronous close 279 | // We may still receive data here, we need to notify the user to 280 | // consume the data here 281 | if( UNLIKELY(read_sz > 0) ) { 282 | user_close_callback_->InvokeData( read_sz ); 283 | } else { 284 | // Checking whether we hit eof during the last read 285 | if( eof_ ) { 286 | bool deleted = false; 287 | set_notify_flag( &deleted ); 288 | detail::ScopePtr cb( user_close_callback_.Release() ); 289 | cb->InvokeClose(NetState()); 290 | if( !deleted ) { 291 | Close(); 292 | state_ = CLOSED; 293 | } 294 | } 295 | } 296 | } else { 297 | bool deleted = false; 298 | set_notify_flag( &deleted ); 299 | // We failed here, so we just go straitforward to issue an 300 | // Close operation on the notifier and close the underlying socket 301 | user_close_callback_->InvokeClose(state); 302 | if( !deleted ) { 303 | Close(); 304 | state_ = CLOSED; 305 | } 306 | } 307 | } 308 | } 309 | } 310 | 311 | void Socket::OnWriteNotify( ) { 312 | // Set up the can write flag 313 | set_can_write(true); 314 | if( UNLIKELY(write_buffer().readable_size() == 0) ) { 315 | // We do nothing since we have nothing to write out 316 | return; 317 | } else { 318 | NetState write_state; 319 | std::size_t write_sz = DoWrite(&write_state); 320 | if( LIKELY(write_state) ) { 321 | // We don't have an error just check if we hit the buffer size 322 | if( write_buffer().readable_size() == 0 ) { 323 | // We 
have written all the data into the underlying socket 324 | DO_INVOKE(user_write_callback_, 325 | detail::ScopePtr, 326 | this, 327 | prev_write_size_ + write_sz , write_state ); 328 | } else { 329 | prev_write_size_ += write_sz; 330 | } 331 | } else { 332 | DO_INVOKE(user_write_callback_, 333 | detail::ScopePtr, 334 | this, 335 | prev_write_size_, write_state ); 336 | } 337 | } 338 | } 339 | 340 | void Socket::OnException( const NetState& state ) { 341 | assert( !state ); 342 | bool deleted = false; 343 | set_notify_flag( &deleted ); 344 | 345 | if( LIKELY(!user_read_callback_.IsNull()) ) { 346 | DO_INVOKE(user_read_callback_, 347 | detail::ScopePtr, 348 | this,0,state); 349 | } 350 | if( !deleted ) { 351 | if( LIKELY(!user_write_callback_.IsNull()) ) { 352 | DO_INVOKE(user_write_callback_, 353 | detail::ScopePtr, 354 | this,0,state); 355 | } 356 | } 357 | } 358 | 359 | std::size_t Socket::DoRead( NetState* ok ) { 360 | // Whenevenr this function gets called, we need to look into the 361 | // kernel since this means that user wants data. 362 | struct iovec buf[2]; 363 | std::size_t read_sz = 0; 364 | // Clear the NetState structure 365 | ok->Clear(); 366 | 367 | // When we are see eof, we will always spin on this states unless 368 | // user use Close/AsyncClose to move the socket state to correct one 369 | if( UNLIKELY(eof_) ) { 370 | return 0; 371 | } 372 | 373 | do { 374 | // Using a loop to force us run into the EAGAIN/EWOULDBLOCK 375 | 376 | // The iovec will contain following structure. The first component 377 | // of that buffer is pointed to the extra(free) buffer in our read 378 | // buffer. The second component is pointed to our stack buffer. In 379 | // most cases this one readv will read up all the data in the kernel 380 | // since the system call time is way less than the packet transfer 381 | // time. Assume epoll_wait will wake up once a fd recieve a packet. 
382 | 383 | Buffer::Accessor accessor = read_buffer().GetWriteAccessor(); 384 | 385 | // Setting up the first component which points to the extra write space 386 | buf[0].iov_base = accessor.address(); 387 | buf[0].iov_len = accessor.size(); 388 | 389 | // Setting up the stack iovec component 390 | buf[1].iov_base = io_manager_->swap_buffer_; 391 | buf[1].iov_len = io_manager_->swap_buffer_size_; 392 | 393 | // Start to read 394 | ssize_t sz = ::readv( fd() , buf , 2 ); 395 | 396 | if( sz < 0 ) { 397 | // Error happened 398 | if( LIKELY(errno == EAGAIN || errno == EWOULDBLOCK) ) { 399 | set_can_read(false); 400 | return read_sz; 401 | } else { 402 | if( errno == EINTR ) 403 | continue; 404 | // The current error is not recoverable, we just return with an error 405 | // states here 406 | ok->CheckPoint(state_category::kSystem,errno); 407 | return read_sz; 408 | } 409 | } else { 410 | if( UNLIKELY(sz == 0) ) { 411 | // We have seen an EOF flag, however for this user reading 412 | // operations, we will not being able to do this, we set 413 | // our socket flag to SEE_EOF , later on we can replay this 414 | // eof to user 415 | eof_ = true; 416 | return read_sz; 417 | } else { 418 | const std::size_t accessor_sz = accessor.size(); 419 | 420 | if( static_cast(sz) <= accessor_sz ) { 421 | accessor.set_committed_size( sz ); 422 | } else { 423 | // The kernel has written data into the second stack buffer 424 | // now we need to grow our buffer by using write operations 425 | accessor.set_committed_size( accessor_sz ); 426 | accessor.Commit(); 427 | 428 | // Inject the data into the buffer, this injection will not 429 | // cause buffer overhead since they just write the data without 430 | // preallocation 431 | if( !read_buffer().Inject( io_manager_->swap_buffer_ , sz-accessor_sz ) ) { 432 | *ok = NetState(state_category::kSystem,ENOBUFS); 433 | return read_sz; 434 | } 435 | } 436 | read_sz += sz; 437 | 438 | if( static_cast(sz) < io_manager_->swap_buffer_size_ + 
accessor_sz ) { 439 | set_can_read(false); 440 | return read_sz; 441 | } else { 442 | continue; 443 | } 444 | } 445 | } 446 | } while(true); 447 | } 448 | 449 | std::size_t Socket::DoWrite( NetState* ok ) { 450 | assert( write_buffer().readable_size() > 0 ); 451 | ok->Clear(); 452 | do { 453 | // Start to write the data 454 | Buffer::Accessor accessor = write_buffer().GetReadAccessor(); 455 | 456 | // Trying to send out the data to underlying TCP socket 457 | ssize_t sz = ::write(fd(),accessor.address(),accessor.size()); 458 | 459 | // Write can return zero which has same meaning with negative 460 | // value( I guess this is for historic reason ). What we gonna 461 | // do is that we will treat zero and -1 as same stuff and check 462 | // the errno value 463 | if( UNLIKELY(sz <= 0) ) { 464 | if( LIKELY(errno == EAGAIN || errno == EWOULDBLOCK) ) { 465 | // This is a partial operation, we need to wait until epoll_wait 466 | // to wake me up 467 | set_can_write(false); 468 | return 0; 469 | } else { 470 | if( errno == EINTR ) 471 | continue; 472 | // Set up the error object and record the error string 473 | ok->CheckPoint(state_category::kSystem,errno); 474 | // Return the size of the data has been sent to the kernel 475 | return prev_write_size_; 476 | } 477 | } else { 478 | // Set up the committed size 479 | if( static_cast(sz) < accessor.size() ) { 480 | set_can_write(false); 481 | } 482 | accessor.set_committed_size( static_cast(sz) ); 483 | return static_cast(sz); 484 | } 485 | } while(true); 486 | } 487 | 488 | void Socket::GetLocalEndpoint( Endpoint* endpoint ) { 489 | struct sockaddr_in ipv4; 490 | bzero(&ipv4,sizeof(ipv4)); 491 | ipv4.sin_family = AF_INET; 492 | socklen_t sz = sizeof(ipv4); 493 | 494 | VERIFY( ::getsockname(fd(), 495 | reinterpret_cast(&ipv4),&sz) == 0); 496 | 497 | // writing the data into the endpoint representation 498 | endpoint->set_port( ntohs(ipv4.sin_port) ); 499 | endpoint->set_ipv4( ntohl(ipv4.sin_addr.s_addr) ); 500 | } 501 | 502 | 
void Socket::GetPeerEndpoint( Endpoint* endpoint ) { 503 | struct sockaddr_in ipv4; 504 | bzero(&ipv4,sizeof(ipv4)); 505 | ipv4.sin_family = AF_INET; 506 | socklen_t sz = sizeof(ipv4); 507 | 508 | VERIFY( ::getpeername(fd(), 509 | reinterpret_cast(&ipv4),&sz) == 0); 510 | 511 | endpoint->set_port( ntohs(ipv4.sin_port) ); 512 | 513 | endpoint->set_ipv4( ntohl(ipv4.sin_addr.s_addr) ); 514 | } 515 | 516 | void ClientSocket::OnReadNotify( ) { 517 | if( LIKELY(state_ == CONNECTED) ) { 518 | Socket::OnReadNotify(); 519 | return; 520 | } else { 521 | switch( state_ ) { 522 | case DISCONNECTED: 523 | 524 | // When disconneted socket has any notifiaction 525 | // just ignore it, maybe register a log information 526 | return; 527 | case CONNECTING: 528 | // Read, for connecting, the read information is unrelated 529 | // even if we receive it( we should not ), we just ignore 530 | return; 531 | default: 532 | UNREACHABLE(return); 533 | } 534 | } 535 | } 536 | 537 | void ClientSocket::OnWriteNotify() { 538 | if( LIKELY(state_ == CONNECTED) ) { 539 | Socket::OnWriteNotify(); 540 | return; 541 | } else { 542 | if( state_ == CONNECTING ) { 543 | set_can_write(true); 544 | state_ = CONNECTED; 545 | DO_INVOKE(user_conn_callback_, 546 | detail::ScopePtr, 547 | this,NetState()); 548 | } 549 | } 550 | } 551 | 552 | void ClientSocket::OnException( const NetState& state ) { 553 | assert( !state ); 554 | if( LIKELY(state_ == CONNECTED) ) { 555 | Socket::OnException(state); 556 | } else { 557 | if( state_ == CONNECTING ) { 558 | state_ = DISCONNECTED; 559 | if( UNLIKELY(!user_conn_callback_.IsNull()) ) { 560 | DO_INVOKE(user_conn_callback_, 561 | detail::ScopePtr, 562 | this,state); 563 | } 564 | } 565 | } 566 | } 567 | 568 | bool ServerSocket::Bind( const Endpoint& endpoint ) { 569 | assert( is_bind_ == false ); 570 | // Setting up the listener file descriptors 571 | int sock_fd = detail::CreateTcpServerSocketFileDescriptor(); 572 | if( UNLIKELY(sock_fd < 0) ) { 573 | return false; 574 
| } 575 | set_fd( sock_fd ); 576 | 577 | // Set up the struct sockaddr_in 578 | struct sockaddr_in ipv4; 579 | bzero(&ipv4,sizeof(ipv4)); 580 | 581 | ipv4.sin_family = AF_INET; 582 | ipv4.sin_addr.s_addr = htonl(endpoint.ipv4()); 583 | ipv4.sin_port = htons(endpoint.port()); 584 | 585 | // Bind the address 586 | int ret = ::bind( fd(), 587 | reinterpret_cast(&ipv4) , sizeof(ipv4) ); 588 | if( UNLIKELY(ret != 0) ) { 589 | ::close(fd()); 590 | set_fd(-1); 591 | return false; 592 | } 593 | 594 | // Set the fd as listen fd 595 | ret = ::listen( fd() , SOMAXCONN ); 596 | if( UNLIKELY(ret != 0) ) { 597 | ::close(fd()); 598 | set_fd(-1); 599 | return false; 600 | } 601 | 602 | is_bind_ = true; 603 | return true; 604 | } 605 | 606 | void ServerSocket::HandleRunOutOfFD( int err ) { 607 | // Handling the run out of file descriptors error here, if we 608 | // don't kernel will not free any resource but continue bothering 609 | // us for this problem. 610 | switch( err ) { 611 | case EMFILE: 612 | case ENFILE: { 613 | int f; 614 | // Run out the file descriptors and gracefully shutdown the peer side 615 | VERIFY( ::close( dummy_fd_ ) == 0 ); 616 | f = ::accept(fd(),NULL,NULL); 617 | if( f > 0 ) 618 | VERIFY( ::close( f ) == 0 ); 619 | VERIFY( (dummy_fd_ = ::open("/dev/null",O_RDONLY)) >= 0 ); 620 | } 621 | default: return; 622 | } 623 | UNREACHABLE(return); 624 | } 625 | 626 | int ServerSocket::DoAccept( NetState* state ) { 627 | assert( can_read() ); 628 | do { 629 | int nfd = ::accept4( fd() , NULL , NULL , O_CLOEXEC | O_NONBLOCK ); 630 | if( UNLIKELY(nfd < 0) ) { 631 | if( LIKELY(errno == EAGAIN || errno == EWOULDBLOCK) ) { 632 | set_can_read(false); 633 | return -1; 634 | } else { 635 | if( errno == EINTR ) 636 | continue; 637 | int err = errno; 638 | HandleRunOutOfFD( err ); 639 | state->CheckPoint(state_category::kSystem,err); 640 | if( errno == EAGAIN || errno == EWOULDBLOCK ) { 641 | set_can_read(false); 642 | } 643 | return -1; 644 | } 645 | } else { 646 | return nfd; 
647 | } 648 | } while( true ); 649 | } 650 | 651 | void ServerSocket::OnReadNotify() { 652 | assert( is_bind_ ); 653 | set_can_read( true ); 654 | if( UNLIKELY(user_accept_callback_.IsNull()) ) 655 | return; 656 | else { 657 | NetState accept_state; 658 | 659 | int nfd = DoAccept(&accept_state); 660 | if( UNLIKELY(nfd < 0) ) { 661 | if( !accept_state ) { 662 | DO_INVOKE( user_accept_callback_ , 663 | detail::ScopePtr, 664 | new_accept_socket_,accept_state); 665 | new_accept_socket_ = NULL; 666 | } 667 | } else { 668 | detail::Pollable* p = static_cast(new_accept_socket_); 669 | p->set_fd( nfd ); 670 | 671 | // Temporarily store the new_accept_socket_ to enable user seting it during 672 | // the invocation of the user_accept_callback_ function 673 | 674 | Socket* s = new_accept_socket_; 675 | new_accept_socket_ = NULL ; 676 | 677 | DO_INVOKE( user_accept_callback_ , 678 | detail::ScopePtr, 679 | s, NetState()); 680 | } 681 | } 682 | } 683 | 684 | void ServerSocket::OnException( const NetState& state ) { 685 | HandleRunOutOfFD( state.error_code() ); 686 | // We have an exception on the listener socket file descriptor 687 | if( !user_accept_callback_.IsNull() ) { 688 | DO_INVOKE( user_accept_callback_ , 689 | detail::ScopePtr, 690 | new_accept_socket_,state); 691 | } 692 | } 693 | 694 | ServerSocket::ServerSocket() : 695 | new_accept_socket_(NULL), 696 | io_manager_(NULL), 697 | is_bind_( false ) 698 | { 699 | // Initialize the dummy_fd_ here 700 | dummy_fd_ = ::open("/dev/null", O_RDONLY ); 701 | VERIFY( dummy_fd_ >= 0 ); 702 | } 703 | 704 | ServerSocket::~ServerSocket() { 705 | // Closing the listen fd 706 | VERIFY( ::close(fd()) == 0 ); 707 | set_fd(-1); 708 | ::close( dummy_fd_ ); 709 | } 710 | 711 | IOManager::IOManager( std::size_t cap ) { 712 | epoll_fd_ = ::epoll_create1( EPOLL_CLOEXEC ); 713 | VERIFY( epoll_fd_ > 0 ); 714 | 715 | // Set up the control file descriptors. This file descriptor will 716 | // be set up as a udp socket just because it is simple. 
717 | int fd = socket( AF_INET , SOCK_DGRAM , 0 ); 718 | VERIFY(fd >0); 719 | 720 | // Set up the flag for this file descriptor 721 | int flag = ::fcntl(fd,F_GETFL); 722 | flag |= O_CLOEXEC; 723 | flag |= O_NONBLOCK; 724 | ::fcntl(fd,F_SETFL); 725 | 726 | // Setup the bind for the control file descriptor 727 | struct sockaddr_in ipv4; 728 | ipv4.sin_addr.s_addr = htonl(INADDR_LOOPBACK); 729 | ipv4.sin_family = AF_INET; 730 | ipv4.sin_port = htons(0); 731 | VERIFY( ::bind(fd,reinterpret_cast(&ipv4),sizeof(ipv4)) == 0 ); 732 | // Set this newly created file descriptor back to the CtrlFd object 733 | ctrl_fd_.fd_ = fd; 734 | 735 | // Now we need to watch the read operation for this UDP socket. 736 | WatchRead(&ctrl_fd_); 737 | 738 | if( cap == 0 ) { 739 | static const std::size_t kDefaultRecvBufferSize = 3495200; 740 | cap = kDefaultRecvBufferSize; 741 | } 742 | 743 | swap_buffer_ = malloc(cap); 744 | swap_buffer_size_ = cap; 745 | 746 | } 747 | 748 | IOManager::~IOManager() { 749 | if( epoll_fd_ > 0 ) { 750 | ::close(ctrl_fd_.fd_); 751 | ctrl_fd_.set_fd(-1); 752 | ::close(epoll_fd_); 753 | } 754 | // Check if we have timer queue problem 755 | for( std::size_t i = 0 ; i < timer_queue_.size() ; ++i ) { 756 | delete timer_queue_[i].callback; 757 | } 758 | 759 | free(swap_buffer_); 760 | } 761 | 762 | void IOManager::CtrlFd::OnReadNotify() { 763 | // We will ignore the error once our control file descriptor receive notification 764 | // since no matter is correct notification or not (it should in most case do not 765 | // have an error state), the IOManager needs to be waked up. 
766 | char buf[kDataLen]; 767 | // If this state is correct we read up the information inside of it and 768 | // hit the EAGAIN/EWOULDBLOCK 769 | ::recvfrom( fd() , buf , kDataLen , 0 , NULL , NULL ); 770 | is_wake_up_ = true; 771 | } 772 | 773 | void IOManager::Interrupt() { 774 | struct sockaddr_in ipv4; 775 | bzero(&ipv4,sizeof(ipv4)); 776 | ipv4.sin_family = AF_INET; 777 | socklen_t ipv4_len = sizeof(ipv4); 778 | 779 | int ret = ::getsockname( ctrl_fd_.fd(), 780 | reinterpret_cast(&ipv4) , &ipv4_len ); 781 | 782 | VERIFY(ret == 0); 783 | 784 | // Random bytes 785 | char buf[CtrlFd::kDataLen]; 786 | 787 | // Send the data to that UDP socket 788 | VERIFY( ::sendto(ctrl_fd_.fd(),buf,CtrlFd::kDataLen,0, 789 | reinterpret_cast(&ipv4),sizeof(ipv4)) == CtrlFd::kDataLen ); 790 | } 791 | 792 | 793 | // Through epoll watch the pollable descriptor's read/write operation 794 | // there's no extra space usage , the pointer for Pollable are stored 795 | // inside of the epoll_data structure per registeration 796 | 797 | void IOManager::WatchRead( detail::Pollable* pollable ) { 798 | assert( pollable->Valid() ); 799 | // We don't remove any file descriptors unless user explicitly require so 800 | if( LIKELY(pollable->is_epoll_read_) ) 801 | return; 802 | 803 | struct epoll_event ev; 804 | int op; 805 | 806 | ev.data.ptr = pollable; 807 | 808 | // Edge trigger for read 809 | ev.events = EPOLLIN | EPOLLET ; 810 | 811 | if( UNLIKELY(pollable->is_epoll_write_) ) { 812 | op = EPOLL_CTL_MOD; 813 | ev.events |= EPOLLOUT; 814 | } else { 815 | op = EPOLL_CTL_ADD; 816 | } 817 | 818 | VERIFY( ::epoll_ctl( epoll_fd_ , op , pollable->fd_ , &ev ) == 0 ); 819 | 820 | // Set up we gonna watch it 821 | pollable->is_epoll_read_ = true; 822 | 823 | } 824 | 825 | void IOManager::WatchWrite( detail::Pollable* pollable ) { 826 | assert( pollable->Valid() ); 827 | if( LIKELY(pollable->is_epoll_write_) ) 828 | return; 829 | 830 | struct epoll_event ev; 831 | int op; 832 | 833 | ev.data.ptr = 
pollable; 834 | ev.events = EPOLLOUT | EPOLLET ; 835 | 836 | if( UNLIKELY(pollable->is_epoll_read_) ) { 837 | op = EPOLL_CTL_MOD; 838 | ev.events |= EPOLLIN; 839 | } else { 840 | op = EPOLL_CTL_ADD; 841 | } 842 | 843 | VERIFY( ::epoll_ctl( epoll_fd_ , op , pollable->fd_ , &ev ) == 0 ); 844 | pollable->is_epoll_write_ = true; 845 | } 846 | 847 | void IOManager::WatchControlFd() { 848 | struct epoll_event ev; 849 | ev.data.ptr = &ctrl_fd_; 850 | ev.events = EPOLLIN; 851 | VERIFY( ::epoll_ctl( epoll_fd_ , EPOLL_CTL_ADD , ctrl_fd_.fd_ , &ev ) == 0 ); 852 | } 853 | 854 | void IOManager::DispatchLoop( const struct epoll_event* event_queue , std::size_t sz ) { 855 | for( std::size_t i = 0 ; i < sz ; ++i ) { 856 | detail::Pollable* p = static_cast(event_queue[i].data.ptr); 857 | int ev = event_queue[i].events; 858 | 859 | // Handling error 860 | if( UNLIKELY(ev & EPOLLERR) ) { 861 | // Get the per socket error here 862 | socklen_t len = sizeof(int); 863 | int err_no; 864 | VERIFY( ::getsockopt(p->fd_,SOL_SOCKET,SO_ERROR,&err_no,&len) == 0 ); 865 | if( err_no != 0 ) { 866 | p->OnException( NetState(state_category::kSystem,err_no) ); 867 | continue; 868 | } 869 | ev &= ~EPOLLERR; 870 | } 871 | 872 | if( UNLIKELY(event_queue[i].events & EPOLLHUP) ) { 873 | // Translate it into a read event 874 | p->OnReadNotify(); 875 | continue; 876 | } 877 | 878 | // IN/OUT events 879 | bool deleted = false; 880 | p->set_notify_flag( &deleted ); 881 | 882 | if( LIKELY(event_queue[i].events & EPOLLIN) ) { 883 | p->OnReadNotify(); 884 | ev &= ~EPOLLIN; 885 | } 886 | 887 | if( LIKELY(event_queue[i].events & EPOLLOUT) ) { 888 | if( !deleted ) 889 | p->OnWriteNotify(); 890 | ev &= ~EPOLLOUT; 891 | } 892 | // We may somehow have unwatched event here. 
893 | // We can log them for debuggin or other stuff 894 | VERIFY( ev == 0 ); 895 | } 896 | } 897 | 898 | uint64_t IOManager::UpdateTimer( std::size_t event_sz , uint64_t prev_time ) { 899 | static const int kMinDiff = 3; 900 | #define TIME_TRIGGER(diff,t) (std::abs((t)-diff) < kMinDiff) 901 | 902 | if( !timer_queue_.empty() ) { 903 | if( event_sz == 0 ) { 904 | uint64_t diff = timer_queue_.front().time; 905 | while( !timer_queue_.empty() ) { 906 | if( LIKELY(TIME_TRIGGER(diff,timer_queue_.front().time)) ) { 907 | detail::ScopePtr cb( 908 | timer_queue_.front().callback); 909 | 910 | cb->Invoke(timer_queue_.front().time); 911 | // Pop from the top element from the heap 912 | std::pop_heap( timer_queue_.begin() , timer_queue_.end() ); 913 | timer_queue_.pop_back(); 914 | 915 | } else { 916 | break; 917 | } 918 | } 919 | return detail::GetCurrentTimeInMS(); 920 | } else { 921 | int ret = detail::GetCurrentTimeInMS(); 922 | int diff = static_cast( ret - prev_time ); 923 | for( std::size_t i = 0 ; i < timer_queue_.size() ; ++i ) { 924 | timer_queue_[i].time -= diff; 925 | } 926 | return ret; 927 | } 928 | } 929 | #undef TIME_TRIGGER 930 | } 931 | 932 | void IOManager::ExecutePendingAccept() { 933 | while( !pending_accept_callback_.IsNull() ) { 934 | DO_INVOKE( pending_accept_callback_, 935 | detail::ScopePtr, 936 | new_accept_socket_,pending_accept_state_); 937 | } 938 | } 939 | 940 | NetState IOManager::RunMainLoop() { 941 | struct epoll_event event_queue[ IOManager::kEpollEventLength ]; 942 | uint64_t prev_time = detail::GetCurrentTimeInMS(); 943 | do { 944 | // 0. Execute pending accept 945 | ExecutePendingAccept(); 946 | // 1. Set up the parameter that we need for the epoll_wait , it is very simple 947 | int tm = timer_queue_.empty() ? 
-1 : timer_queue_.front().time ; 948 | 949 | repoll: 950 | int ret = ::epoll_wait( epoll_fd_ , event_queue , kEpollEventLength , tm ); 951 | 952 | if( UNLIKELY(ret < 0) ) { 953 | 954 | if( LIKELY(errno != EINTR) ) 955 | return NetState(state_category::kSystem,errno); 956 | else 957 | // We don't need to go to the begining of the loop since this will cause us 958 | // to reflush the timer there. Goto repoll label to start another epoll_wait 959 | // would be easiest way we can do 960 | goto repoll; 961 | } else { 962 | // Do dispatch for the event here 963 | DispatchLoop( event_queue , static_cast( ret ) ); 964 | // Update or invoke the timer event. 965 | prev_time = UpdateTimer( static_cast(ret) , prev_time ); 966 | // Checking whether we have been notified by interruption 967 | if( UNLIKELY(ctrl_fd_.is_wake_up()) ) { 968 | // We have been waken up by the caller, just return empty 969 | // NetState here 970 | return NetState(); 971 | } 972 | } 973 | } while( true ); 974 | } 975 | 976 | }// namespace mnet 977 | 978 | -------------------------------------------------------------------------------- /mnet.h: -------------------------------------------------------------------------------- 1 | #ifndef MNET_H_ 2 | #define MNET_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | // System related header 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | // Macros 28 | #define DISALLOW_COPY_AND_ASSIGN(x) \ 29 | void operator=( const x& ); \ 30 | x( const x& ) 31 | 32 | #define UNREACHABLE(x) \ 33 | do { \ 34 | assert(0&&"Unreachable!"); \ 35 | x; \ 36 | } while(0) 37 | 38 | 39 | #define LIKELY(x) __builtin_expect((x),1) 40 | #define UNLIKELY(x) __builtin_expect((x),0) 41 | 42 | // This directory is used to make gcc options -Weffc++ and -Wnon-virtual-destructor 43 | // happy. 
Since those option will force every class that has a virtual function needs 44 | // a non virtual destructor which is not very helpful in our cases. Anyway bearing 45 | // those rules here. You could define this directory to make our code pass these warnings. 46 | // #define FORCE_VIRTUAL_DESTRUCTOR 47 | 48 | // MNet is a small library that is designed to solve massive concurrent 49 | // tcp connection to server. It is a extreamly small C++ library that is 50 | // strictly compatible with C++03 standard. It has only 4 class needs to 51 | // know in order to work with it. It runs sololy on Linux and it is highly 52 | // optimized in the following rules : 53 | // 1. Minimize System Call 54 | // 2. Minimize Buffer Copy 55 | // 3. Minimize OOP overhead 56 | 57 | namespace mnet { 58 | class Buffer; 59 | class Endpoint; 60 | class NetState; 61 | 62 | class Socket; 63 | class ClientSocket; 64 | class ServerSocket; 65 | class IOManager; 66 | 67 | namespace detail { 68 | class Pollable; 69 | 70 | class ReadCallback { 71 | public: 72 | virtual void Invoke( Socket* socket , std::size_t size, const NetState& ok ) = 0; 73 | 74 | #ifdef FORCE_VIRTUAL_DESTRUCTOR 75 | virtual ~ReadCallback() {} 76 | #endif // FORCE_VIRTUAL_DESTRUCTOR 77 | 78 | }; 79 | 80 | class WriteCallback { 81 | public: 82 | virtual void Invoke( Socket* socket , std::size_t size, const NetState& ok ) = 0; 83 | 84 | #ifdef FORCE_VIRTUAL_DESTRUCTOR 85 | virtual ~WriteCallback() {} 86 | #endif // FORCE_VIRTUAL_DESTRUCTOR 87 | }; 88 | 89 | class ConnectCallback { 90 | public: 91 | virtual void Invoke( ClientSocket* socket , const NetState& ok ) =0; 92 | 93 | #ifdef FORCE_VIRTUAL_DESTRUCTOR 94 | virtual ~ConnectCallback() {} 95 | #endif // FORCE_VIRTUAL_DESTRUCTOR 96 | 97 | }; 98 | 99 | class AcceptCallback { 100 | public: 101 | virtual void Invoke( Socket* socket , const NetState& ok ) =0; 102 | 103 | #ifdef FORCE_VIRTUAL_DESTRUCTOR 104 | virtual ~AcceptCallback(){} 105 | #endif // FORCE_VIRTUAL_DESTRUCTOR 106 | 
107 | }; 108 | 109 | class TimeoutCallback { 110 | public: 111 | virtual void Invoke( int time ) =0; 112 | 113 | #ifdef FORCE_VIRTUAL_DESTRUCTOR 114 | virtual ~TimeoutCallback() {} 115 | #endif // FORCE_VIRTUAL_DESTRUCTOR 116 | 117 | }; 118 | 119 | class CloseCallback { 120 | public: 121 | virtual void InvokeClose( const NetState& ok ) = 0; 122 | virtual void InvokeData( std::size_t sz ) = 0; 123 | 124 | #ifdef FORCE_VIRTUAL_DESTRUCTOR 125 | virtual ~CloseCallback() {} 126 | #endif // FORCE_VIRTUAL_DESTRUCTOR 127 | 128 | }; 129 | 130 | namespace { 131 | 132 | template< typename N > struct ReadNotifier : public ReadCallback { 133 | virtual void Invoke( Socket* socket , std::size_t size , const NetState& ok ) { 134 | notifier->OnRead( socket , size , ok ); 135 | } 136 | N* notifier; 137 | ReadNotifier( N* n ) : notifier(n) {} 138 | }; 139 | 140 | template< typename N > struct WriteNotifier : public WriteCallback { 141 | virtual void Invoke( Socket* socket , std::size_t size , const NetState& ok ) { 142 | notifier->OnWrite( socket , size , ok ); 143 | } 144 | N* notifier; 145 | WriteNotifier( N* n ) : notifier(n) {} 146 | }; 147 | 148 | template< typename N > struct AcceptNotifier : public AcceptCallback { 149 | virtual void Invoke( Socket* socket , const NetState& ok ) { 150 | notifier->OnAccept(socket,ok); 151 | } 152 | N* notifier; 153 | AcceptNotifier( N* n ) : notifier(n) {} 154 | }; 155 | 156 | template< typename N > struct ConnectNotifier : public ConnectCallback { 157 | virtual void Invoke( ClientSocket* socket , const NetState& ok ) { 158 | notifier->OnConnect( socket , ok ); 159 | } 160 | N* notifier; 161 | ConnectNotifier( N* n ) :notifier(n) {} 162 | }; 163 | 164 | template< typename N > struct TimeoutNotifier : public TimeoutCallback { 165 | virtual void Invoke( int msec ) { 166 | notifier->OnTimeout(msec); 167 | } 168 | N* notifier; 169 | TimeoutNotifier( N* n ) :notifier(n) {} 170 | }; 171 | 172 | template< typename N > struct CloseNotifier_WithOnData : 
public CloseCallback { 173 | virtual void InvokeClose( const NetState& ok ) { 174 | notifier->OnClose( ok ); 175 | } 176 | virtual void InvokeData( std::size_t sz ) { 177 | notifier->OnData( sz ); 178 | } 179 | N* notifier; 180 | CloseNotifier_WithOnData( N* n ) : notifier(n) {} 181 | }; 182 | 183 | template< typename N > struct CloseNotifier_WithoutOnData : public CloseCallback { 184 | virtual void InvokeClose( const NetState& ok ) { 185 | notifier->OnClose( ok ); 186 | } 187 | virtual void InvokeData( std::size_t sz ) { 188 | sz = sz; 189 | } 190 | N* notifier; 191 | CloseNotifier_WithoutOnData( N* n ) : notifier(n) {} 192 | }; 193 | 194 | #define DECLARE_CONCEPT_CHECK(FN,SIG,SIGP)\ 195 | template< typename T > struct HasConcept_##FN { \ 196 | template< typename U , SIGP > struct Concept{ };\ 197 | template< typename U > \ 198 | static int Stub( ... ); \ 199 | template< typename U > \ 200 | static char Stub( Concept* ); \ 201 | static const bool result = sizeof( Stub(NULL) ) == sizeof(char); \ 202 | } 203 | 204 | DECLARE_CONCEPT_CHECK(OnRead,OnRead,void (T::*)(Socket*,std::size_t,const NetState&)); 205 | DECLARE_CONCEPT_CHECK(OnWrite,OnWrite,void (T::*)(Socket*,std::size_t,const NetState&)); 206 | DECLARE_CONCEPT_CHECK(OnAccept,OnAccept,void (T::*)(Socket*,const NetState&)); 207 | DECLARE_CONCEPT_CHECK(OnTimeout,OnTimeout,void (T::*)(int)); 208 | DECLARE_CONCEPT_CHECK(OnConnect,OnConnect,void (T::*)(Socket*,const NetState&)); 209 | DECLARE_CONCEPT_CHECK(OnClose_Data,OnData,void (T::*)(std::size_t)); 210 | DECLARE_CONCEPT_CHECK(OnClose_Close,OnClose,void (T::*)( const NetState& )); 211 | 212 | // On C++03 we don't have static assert 213 | template< bool V > struct static_assert_result; 214 | template<> struct static_assert_result{}; 215 | 216 | #define STATIC_ASSERT(C,M) \ 217 | do { \ 218 | static_assert_result< C > M; \ 219 | } while(0) 220 | 221 | } // namespace 222 | 223 | // Helper funtion to bind a any type T to a specific class and then we are able to 224 | 
// call its internal callback function inside of the WriteCallback function. 225 | 226 | template< typename T > 227 | ReadCallback* MakeReadCallback( T* n ) { 228 | STATIC_ASSERT( HasConcept_OnRead::result , No_On_Read_Is_Found ); 229 | return new ReadNotifier(n); 230 | } 231 | 232 | template< typename T > 233 | WriteCallback* MakeWriteCallback( T* n ) { 234 | STATIC_ASSERT( HasConcept_OnWrite::result , No_On_Write_Is_Found ); 235 | return new WriteNotifier(n); 236 | } 237 | 238 | template< typename T > 239 | AcceptCallback* MakeAcceptCallback( T* n ) { 240 | STATIC_ASSERT( HasConcept_OnAccept::result , No_On_Accept_Is_Found ); 241 | return new AcceptNotifier(n); 242 | } 243 | 244 | template< typename T > 245 | ConnectCallback* MakeConnectCallback( T* n ) { 246 | STATIC_ASSERT( HasConcept_OnConnect::result , No_On_Connect_Is_Found ); 247 | return new ConnectNotifier(n); 248 | } 249 | 250 | template< typename T > 251 | TimeoutCallback* MakeTimeoutCallback( T* n ) { 252 | STATIC_ASSERT( HasConcept_OnTimeout::result , No_On_Timeout_Is_Found ); 253 | return new TimeoutNotifier(n); 254 | } 255 | 256 | template< typename T > 257 | CloseCallback* MakeCloseCallback( T* n ) { 258 | STATIC_ASSERT( HasConcept_OnClose_Close::result , No_On_Close_Is_Found ); 259 | if( HasConcept_OnClose_Data::result ) { 260 | return new CloseNotifier_WithOnData(n); 261 | } else { 262 | return new CloseNotifier_WithoutOnData(n); 263 | } 264 | } 265 | 266 | // A very tiny and simple ScopePtr serves as the replacement of the std::unqiue_ptr 267 | // C++ 03 only has a std::auto_ptr which I don't want to use since it is deperacted 268 | template< typename T > 269 | class ScopePtr { 270 | public: 271 | ScopePtr() : 272 | ptr_(NULL) 273 | {} 274 | 275 | explicit ScopePtr( T* ptr ) : 276 | ptr_(ptr) 277 | {} 278 | 279 | ~ScopePtr() { 280 | delete ptr_; 281 | } 282 | 283 | // Reset the pointer by ptr typed with T. 
This function will clear the 284 | // ScopePtr when ptr == NULL 285 | void Reset( T* ptr ) { 286 | delete ptr_ ; // delete handle NULL properly 287 | ptr_ = ptr; 288 | } 289 | 290 | bool IsNull() const { 291 | return ptr_ == NULL; 292 | } 293 | 294 | bool operator == ( T* ptr ) const { 295 | return ptr_ == ptr; 296 | } 297 | 298 | bool operator != ( T* ptr ) const { 299 | return ptr_ != ptr; 300 | } 301 | 302 | void Swap( ScopePtr* ptr ) { 303 | T* p = ptr->Release(); 304 | ptr->ptr_ = ptr_; 305 | ptr_ = p; 306 | } 307 | 308 | public: 309 | 310 | T* get() const { 311 | return ptr_; 312 | } 313 | 314 | T* Release() { 315 | T* ret = ptr_; 316 | ptr_ = NULL; 317 | return ret; 318 | } 319 | 320 | T* operator->() const { 321 | assert( ptr_ != NULL ); 322 | return get(); 323 | } 324 | 325 | const T& operator *() const { 326 | assert( ptr_ != NULL ); 327 | return *get(); 328 | } 329 | 330 | T& operator *() { 331 | assert( ptr_ != NULL ); 332 | return *get(); 333 | } 334 | 335 | private: 336 | 337 | T* ptr_; 338 | 339 | DISALLOW_COPY_AND_ASSIGN(ScopePtr); 340 | }; 341 | 342 | template< typename T > 343 | bool operator == ( T* l , const ScopePtr& r ) { 344 | return l == r.get(); 345 | } 346 | 347 | template< typename T > 348 | bool operator != ( T* l , const ScopePtr& r ) { 349 | return l != r.get(); 350 | } 351 | 352 | // Create a tcp file descriptor and set up all its related attributes 353 | int CreateTcpFileDescriptor(); 354 | 355 | // Create a tcp server file descriptor. This creation will not set up communication 356 | // attributes like NO_DELAY 357 | int CreateTcpServerSocketFileDescriptor(); 358 | 359 | 360 | 361 | }// namespace detail 362 | 363 | class Buffer { 364 | private: 365 | 366 | // Rewind buffer if we need to do so. This includes 2 scenerios 367 | // 1) read_ptr_ hits the write_ptr_ , just rewind back and done 368 | // 2) write_ptr_ hits the capacity , we need to allocate more 369 | // spaces since we have no spaces to writing more data. 
370 | 371 | void RewindBuffer() { 372 | if( UNLIKELY(read_ptr_ == write_ptr_) ) { 373 | read_ptr_ = write_ptr_ = 0; 374 | } 375 | // For write_ptr_ is same with capacity, we do nothing since 376 | // we can delay this operation until we really need more extra 377 | // spaces. Until then we do the reallocation operation 378 | } 379 | 380 | public: 381 | // Accessor object. This object enables user to 382 | // access the internal raw buffer inside of this 383 | // object without cauzing pain in the copy. This 384 | // class can be used by user to achieve zero copy 385 | class Accessor { 386 | public: 387 | ~Accessor() { 388 | if( UNLIKELY(has_committed_) ) 389 | return; 390 | *ptr_ref_ += committed_size_; 391 | owned_buffer_->RewindBuffer(); 392 | } 393 | 394 | void* address() const { 395 | return address_; 396 | } 397 | std::size_t size() const { 398 | return size_; 399 | } 400 | std::size_t committed_size() const { 401 | return committed_size_; 402 | } 403 | 404 | void set_committed_size( std::size_t committed_size ) { 405 | assert( !has_committed_ ); 406 | assert( committed_size <= size_ ); 407 | committed_size_ = committed_size; 408 | } 409 | 410 | void Commit( ) { 411 | *ptr_ref_ += committed_size_; 412 | owned_buffer_->RewindBuffer(); 413 | has_committed_ = true; 414 | committed_size_ = 0; 415 | } 416 | 417 | 418 | private: 419 | Accessor( std::size_t size , 420 | void* address , 421 | std::size_t* ptr_ref, 422 | Buffer* owned_buffer): 423 | 424 | size_(size), 425 | address_(address), 426 | ptr_ref_(ptr_ref), 427 | committed_size_(0), 428 | owned_buffer_(owned_buffer), 429 | has_committed_(false) 430 | {} 431 | 432 | private: 433 | // Size of this slice inside of the whole buffer 434 | std::size_t size_; 435 | // Address for this slice 436 | void* address_; 437 | // Pointer ref for updating the size 438 | std::size_t* ptr_ref_; 439 | // Committed size 440 | std::size_t committed_size_; 441 | // Owned buffer 442 | Buffer* owned_buffer_; 443 | // Has committed 
444 | bool has_committed_; 445 | // For accessing its private constructor 446 | friend class Buffer; 447 | }; 448 | public: 449 | 450 | explicit Buffer( std::size_t capacity = 0 , bool fixed = false ) : 451 | read_ptr_(0), 452 | write_ptr_(0), 453 | capacity_(capacity), 454 | is_fixed_( false ), 455 | mem_(NULL) 456 | { Grow(capacity); } 457 | 458 | ~Buffer() { 459 | free(mem_); 460 | } 461 | 462 | Accessor GetReadAccessor() { 463 | return Accessor( readable_size() , 464 | static_cast(mem_)+read_ptr_, 465 | &read_ptr_, 466 | this); 467 | } 468 | 469 | Accessor GetWriteAccessor() { 470 | return Accessor( writable_size() , 471 | static_cast(mem_)+write_ptr_, 472 | &write_ptr_, 473 | this); 474 | } 475 | 476 | void* Read( std::size_t* size ) ; 477 | bool Write( const void* mem , std::size_t size ); 478 | std::size_t Fill( const void* mem , std::size_t size ); 479 | 480 | // Inject will not cause overhead for memory, after injecting, if it 481 | // requires to malloc new memory, there will not be any extra space 482 | bool Inject( const void* mem , std::size_t size ); 483 | 484 | std::size_t readable_size() const { 485 | return write_ptr_ - read_ptr_; 486 | } 487 | 488 | std::size_t writable_size() const { 489 | return capacity_ - write_ptr_; 490 | } 491 | 492 | std::size_t capacity() const { 493 | return capacity_; 494 | } 495 | 496 | void Clear() { 497 | write_ptr_ = read_ptr_ = 0; 498 | } 499 | 500 | bool Reserve( std::size_t capacity ) { 501 | if( is_fixed_ ) 502 | return false; 503 | if( writable_size() < capacity ) { 504 | Grow( capacity - writable_size() ); 505 | } 506 | return true; 507 | } 508 | 509 | private: 510 | 511 | void Grow( std::size_t capacity ); 512 | 513 | private: 514 | // ReadPtr, this pointer points to the first available readable character 515 | // if ReadPtr == WritePtr , then nothing is avaiable for reading 516 | std::size_t read_ptr_; 517 | 518 | // WritePtr, this pointer points to the first avaiable space for writing 519 | // the data. 
if WritePtr == Capacity, then no extra spaces can be found 520 | // for writing. 521 | std::size_t write_ptr_; 522 | 523 | // Capacity 524 | // Capacity represents how many actual bytes (in total) has been allocated 525 | // for the buffer object. 526 | std::size_t capacity_; 527 | 528 | // Fixed buffer. If this buffer is fixed, then no internal memory grow 529 | // operation will be used. Once the buffer runs out, it will just fail 530 | bool is_fixed_; 531 | 532 | // Pointer points to the memory buffer. 533 | void* mem_; 534 | 535 | friend class Accessor; 536 | 537 | DISALLOW_COPY_AND_ASSIGN(Buffer); 538 | }; 539 | 540 | // Endpoint is a class that is used to represent a tuple (ipv4,port). It is a convinient 541 | // class for user to 1) get endpoint from the string 2) convert this text representation 542 | // to the real struct inetaddr structure. 543 | class Endpoint { 544 | public: 545 | 546 | Endpoint( const std::string& address , uint16_t port ) { 547 | ParseFrom(address,port); 548 | } 549 | 550 | explicit Endpoint( const std::string& endpoint ) { 551 | ParseFrom(endpoint); 552 | } 553 | 554 | Endpoint() : 555 | port_( kEndpointError ) 556 | {} 557 | 558 | Endpoint( uint32_t ipv4 , uint16_t port ) : 559 | ipv4_(ipv4), 560 | port_(port) 561 | {} 562 | 563 | bool ParseFrom( const std::string& address , unsigned short port ) { 564 | if( StringToIpv4(address.c_str()) < 0 ) 565 | return false; 566 | port_ = port; 567 | return true; 568 | } 569 | 570 | bool ParseFrom( const std::string& address ) { 571 | int off; 572 | if( (off = StringToIpv4(address.c_str())) > 0 ) { 573 | // Checking for that \":\" here 574 | if( UNLIKELY(off > static_cast(address.size()) || address[off] != ':') ) { 575 | return false; 576 | } else { 577 | // Skip the Ip address part + ':' 578 | if( UNLIKELY((off = StringToPort(address.c_str()+off+1)) < 0) ) { 579 | return false; 580 | } else { 581 | return true; 582 | } 583 | } 584 | } 585 | return false; 586 | } 587 | 588 | bool HasError() 
const { 589 | return port_ == kEndpointError; 590 | } 591 | 592 | std::string IpV4ToString() const { 593 | char buf[1024]; 594 | Ipv4ToString(buf); 595 | return std::string(buf); 596 | } 597 | 598 | std::string PortToString() const { 599 | char buf[32]; 600 | PortToString(buf); 601 | return std::string(buf); 602 | } 603 | 604 | uint16_t port() const { 605 | assert( port_ != kEndpointError ); 606 | return static_cast(port_); 607 | } 608 | 609 | void set_port( uint16_t p ) { 610 | port_ = p; 611 | } 612 | 613 | uint32_t ipv4() const { 614 | return ipv4_; 615 | } 616 | 617 | void set_ipv4( uint32_t ipv4 ) { 618 | ipv4_ = ipv4; 619 | } 620 | 621 | std::string ToString() const { 622 | char addr[1024]; 623 | int length; 624 | length = Ipv4ToString(addr); 625 | addr[length]=':'; 626 | PortToString(addr+length+1); 627 | return std::string(addr); 628 | } 629 | 630 | private: 631 | // The input user should make sure that buffer has enough size 632 | int Ipv4ToString( char* buf ) const ; 633 | int PortToString( char* buf ) const ; 634 | 635 | int StringToIpv4( const char* buf ) ; 636 | int StringToPort( const char* buf ) ; 637 | 638 | // IPV4 compact representation. For future IPV6 supports, 639 | // just wrape this representation with a union. Problem is this 640 | // breaks users binary compatible. 641 | uint32_t ipv4_; 642 | 643 | // Port, in Linux endian( Big endian ) 644 | uint32_t port_; 645 | 646 | // This value cannot be a valid port , so just use it as a indicator 647 | // for parsing error. 648 | static const int kEndpointError = 1 << 24; 649 | }; 650 | 651 | // NetState 652 | // ===================================================================== 653 | // This class represents the error status for the related file descriptors. 654 | // User could use this fd to retrieve information about whether the fd has 655 | // error or not. It also provides function to get a readable text based 656 | // error description. 
657 | // ===================================================================== 658 | 659 | namespace state_category { 660 | static const int kDefault = 0; 661 | static const int kSystem = 1; 662 | }// namespace state_category 663 | 664 | class NetState { 665 | public: 666 | NetState( int cate, int err ) { 667 | CheckPoint(cate,err); 668 | } 669 | 670 | NetState() : 671 | category_(state_category::kDefault), 672 | error_code_(0) { 673 | } 674 | 675 | bool CheckPoint( int cate , int err ) { 676 | category_ = cate; 677 | error_code_ = err; 678 | return err != 0; 679 | } 680 | 681 | int error_code() const { 682 | return error_code_; 683 | } 684 | 685 | int category() const { 686 | return category_; 687 | } 688 | 689 | bool HasError() const { 690 | return error_code_ != 0 ; 691 | } 692 | 693 | operator bool () const { 694 | return !HasError(); 695 | } 696 | 697 | void Clear() { 698 | error_code_ = 0; 699 | } 700 | 701 | private: 702 | // Error category for this one 703 | int category_; 704 | // Error code for the NetState class 705 | int error_code_; 706 | }; 707 | 708 | namespace detail { 709 | 710 | // A pollable is ensentially an entity. The solo goal for this class is 711 | // to represent the states of a pollable device(file descriptor). 712 | class Pollable { 713 | public: 714 | Pollable() : 715 | fd_(-1), 716 | notify_flag_( NULL ) , 717 | is_epoll_read_( false ), 718 | is_epoll_write_( false ), 719 | can_read_( false ), 720 | can_write_( false ) 721 | {} 722 | 723 | virtual ~Pollable() { 724 | // When this pollable gets destructed, its internal 725 | // fd MUST be recalimed. 
It means the fd_ must be 726 | // already set to invalid socket handler value 727 | assert( fd_ < 0 ); 728 | 729 | // Using notify flag to tell the watcher that this 730 | // object has already gets deleted since its dtor 731 | // has been invoked now 732 | if( notify_flag_ != NULL ) 733 | *notify_flag_ = true; 734 | } 735 | 736 | // Accessor(readonly) for internal states of Socket 737 | bool is_epoll_read() const { 738 | return is_epoll_read_; 739 | } 740 | 741 | bool is_epoll_write()const { 742 | return is_epoll_write_; 743 | } 744 | 745 | int fd() const { 746 | return fd_; 747 | } 748 | 749 | bool Valid() const { 750 | return fd_ > 0 ; 751 | } 752 | 753 | operator bool() const { 754 | return Valid(); 755 | } 756 | 757 | public: 758 | // This function gets called when the IOManager find that a signal attach 759 | // to this pollable is issued for read. IOManager will do nothing but telling 760 | // you that you can read without blocking 761 | virtual void OnReadNotify( ) = 0; 762 | virtual void OnWriteNotify( ) = 0; 763 | virtual void OnException( const NetState& ) =0; 764 | 765 | protected: 766 | 767 | void set_fd( int fd ) { 768 | fd_ = fd; 769 | } 770 | 771 | bool can_write() const { 772 | return can_write_; 773 | } 774 | 775 | void set_can_write( bool c ) { 776 | can_write_ = c; 777 | } 778 | 779 | bool can_read() const { 780 | return can_read_; 781 | } 782 | 783 | void set_can_read( bool c ) { 784 | can_read_ = c; 785 | } 786 | 787 | void set_notify_flag( bool* flag ) { 788 | notify_flag_ = flag; 789 | } 790 | 791 | private: 792 | // File descriptors 793 | int fd_; 794 | 795 | // A Hack to get notification whether this object 796 | // gets deleted or not. 
This is a must since we 797 | // need to get information whether user has deleted 798 | // this object during the callback function or not 799 | bool* notify_flag_; 800 | 801 | // If this fd has been added to epoll as epoll_read 802 | bool is_epoll_read_ ; 803 | 804 | // If this fd has been added to epoll as epoll_write 805 | bool is_epoll_write_; 806 | 807 | // Can read. This flag is used when there're data in 808 | // the kernel for edge trigger 809 | bool can_read_; 810 | 811 | // Can write. This flag is must since we will use edge trigger 812 | bool can_write_; 813 | 814 | friend class ::mnet::IOManager; 815 | friend class ::mnet::ServerSocket; 816 | }; 817 | 818 | }// namespace detail 819 | 820 | // Socket represents a communication socket. It can be a socket that is accepted 821 | // or a socket that initialized by connect. However, for listening, the user should 822 | // use ServerSocket. This socket will be added into the epoll fd using edge trigger. 823 | 824 | class Socket : public detail::Pollable { 825 | public: 826 | explicit Socket( IOManager* io_manager ) : 827 | io_manager_(io_manager), 828 | state_( NORMAL ) , 829 | eof_(false) {} 830 | // This function serves for retrieving the Local address for the underlying 831 | // file descriptor. 832 | void GetLocalEndpoint( Endpoint* addr ); 833 | // This function retrieve the peer side end point address for underlying file 834 | // descriptor 835 | void GetPeerEndpoint( Endpoint* addr ); 836 | 837 | // Operation for user level read and write 838 | template< typename T > 839 | void AsyncRead( T* notifier ); 840 | 841 | template< typename T > 842 | void AsyncWrite( T* notifier ); 843 | 844 | template< typename T > 845 | void AsyncClose( T* notifier ); 846 | 847 | // Closing this socket at once. This operation is entirely relied on the OS 848 | // no graceful shutdown is performed on each socket. 
This is OK in most cases, 849 | // however, AsyncClose can guarantee the socket been shutdown properly ( with 850 | // EOF received by local side). 851 | void Close() { 852 | assert( state_ == NORMAL ); 853 | // Ignore the close return status 854 | ::close(fd()); 855 | // Setting the fd to invalid value 856 | set_fd(-1); 857 | } 858 | 859 | const Buffer& read_buffer() const { 860 | return read_buffer_; 861 | } 862 | 863 | Buffer& read_buffer() { 864 | return read_buffer_; 865 | } 866 | 867 | const Buffer& write_buffer() const { 868 | return write_buffer_; 869 | } 870 | 871 | Buffer& write_buffer() { 872 | return write_buffer_; 873 | } 874 | 875 | protected: 876 | // The following OnRead/OnWrite function is for IOManager private usage. 877 | // User should not call this function. 878 | 879 | virtual void OnReadNotify(); 880 | virtual void OnWriteNotify(); 881 | virtual void OnException( const NetState& state ); 882 | 883 | IOManager* io_manager() const { 884 | return io_manager_; 885 | } 886 | 887 | private: 888 | std::size_t DoRead( NetState* state ); 889 | std::size_t DoWrite( NetState* state ); 890 | 891 | private: 892 | // Callback function 893 | detail::ScopePtr user_read_callback_; 894 | detail::ScopePtr user_write_callback_; 895 | detail::ScopePtr user_close_callback_; 896 | 897 | std::size_t prev_write_size_; 898 | 899 | // User level buffer management , per socket per buffer. 900 | Buffer read_buffer_ ; 901 | Buffer write_buffer_; 902 | 903 | // IO Manager for this socket 904 | IOManager* io_manager_; 905 | 906 | enum { 907 | CLOSING, 908 | CLOSED, 909 | NORMAL // Initial state for the socket 910 | }; 911 | 912 | int state_; 913 | 914 | // Flag to indicate that whether a EOF has been seen 915 | bool eof_; 916 | 917 | DISALLOW_COPY_AND_ASSIGN(Socket); 918 | }; 919 | 920 | // ClientSocket, client socket represents a socket that could be initialize with 921 | // async connection operation. 
922 | class ClientSocket : public Socket { 923 | public: 924 | explicit ClientSocket( IOManager* io_manager ) : 925 | Socket( io_manager ) , 926 | state_( DISCONNECTED ) 927 | {} 928 | 929 | // This function is used to make this socket being connected to the peer. 930 | template< typename T> 931 | void AsyncConnect( const Endpoint& address , T* notifier ); 932 | 933 | private: 934 | 935 | virtual void OnReadNotify(); 936 | virtual void OnWriteNotify(); 937 | virtual void OnException( const NetState& state ); 938 | void DoConnect( NetState* state ); 939 | private: 940 | // Callback function for async connection operations 941 | detail::ScopePtr user_conn_callback_; 942 | 943 | // States for the ClientSocket 944 | enum { 945 | CONNECTING , 946 | DISCONNECTED, 947 | CONNECTED, 948 | }; 949 | 950 | // This state field indicates the status of this ClientSocket. The ClientSocket 951 | // is initialized with DISCONNECTED, then if the user specify the connect operation 952 | // it turns into the CONNECTING states, finally either an error happened which 953 | // makes the state_ be DISCONNECTED again or successfully connected. 954 | int state_; 955 | 956 | DISALLOW_COPY_AND_ASSIGN(ClientSocket); 957 | }; 958 | 959 | // ServerSocket class represents the class that is sololy for listening. This one will 960 | // be added into the epoll fd by level trigger. This is specifically needed if we 961 | // want to loop through different epoll set and allow level trigger just make code 962 | // simpler 963 | class ServerSocket : public detail::Pollable { 964 | public: 965 | ServerSocket(); 966 | 967 | ~ServerSocket(); 968 | 969 | // Set the underlying IOManager 970 | inline void SetIOManager( IOManager* io_manager ); 971 | 972 | // Binding the ServerSocket to a speicific end point and start 973 | // to listen. (This function is equavlent for bind + listen) 974 | bool Bind( const Endpoint& ep ); 975 | 976 | // Accept operations. 
Indeed this operation will not be held 977 | // by IOManager since IOManager only notify read/write operations. 978 | // It is for specific socket that has different states to interpret 979 | // this event notification. 980 | template< typename T > 981 | void AsyncAccept( Socket* socket , T* notifier ); 982 | 983 | IOManager* io_manager() const { 984 | return io_manager_; 985 | } 986 | 987 | private: 988 | 989 | virtual void OnReadNotify( ); 990 | virtual void OnWriteNotify( ) { 991 | // We will never register write notification for ServerSocket 992 | UNREACHABLE(return); 993 | } 994 | virtual void OnException( const NetState& state ); 995 | 996 | void set_io_manager( IOManager* io_manager ) { 997 | io_manager_ = io_manager; 998 | } 999 | 1000 | int DoAccept( NetState* state ); 1001 | 1002 | void HandleRunOutOfFD( int err ); 1003 | 1004 | private: 1005 | // User callback function 1006 | detail::ScopePtr user_accept_callback_; 1007 | Socket* new_accept_socket_; 1008 | 1009 | // The following fd is used to gracefully shutdown the remote the 1010 | // remote connection when we are run out the FD (EMFILE/ENFILE). 1011 | int dummy_fd_; 1012 | 1013 | // This field represents the manager that this listener has been added 1014 | // If it sets to zero, it means the listener has no attached IOManager 1015 | IOManager* io_manager_; 1016 | 1017 | // This flag is used to tell the state of the current listener 1018 | bool is_bind_; 1019 | 1020 | friend class IOManager; 1021 | DISALLOW_COPY_AND_ASSIGN(ServerSocket); 1022 | }; 1023 | 1024 | // IOManager class represents the reactor. It performs socket event notification 1025 | // and also timeout notification. This IOManager is a truely reactor, it spawn the 1026 | // notification when the IO event is ready ( performs the IO without blocking ). 1027 | // To achieve notification, the typical way is through callback function. In C++ 1028 | // 11 we can use std::function or we can use boost::function. 
// However, to make the
// library stay small and simple, we will not use these tools. Additionally, no
// drop-in replacement for std::function will be created here. We use a trick so
// that the user does not need to inherit any base class. This trick involves the
// overhead of a new/delete call; however, we assume the OS takes care of it. The
// overhead is as follows: for each event (read/write/accept/connect/timeout) a
// new/delete pair is invoked once. A simple memory pool could remove this
// overhead.

class IOManager {
public:
    IOManager( std::size_t cap=0 );

    ~IOManager();

    // Schedule a notifier that is to be invoked after msec milliseconds passed
    template< typename T >
    void Schedule( int msec , T* notifier );

    // Calling this function will BLOCK the caller inside the main event loop
    NetState RunMainLoop();

    // This function could be safely called from another thread. It will
    // wake up a blocked IOManager for that thread. Once called,
    // RunMainLoop will return with an empty NetState .
    void Interrupt();

private:

    // The following interface is privately used by Socket/ServerSocket/Connector class
    void WatchRead( detail::Pollable* pollable );
    void WatchWrite( detail::Pollable* pollable );

    // This function is used here to avoid potential stack overflow for the accept
    // path (see the comment above ExecutePendingAccept below).
    template< typename T >
    void SetPendingAccept( Socket* new_accept_socket, T* notifier , const NetState& state );

    // This function is used to watch the control fd; the reason why we need a
    // separate entry point is that for the control fd we use level trigger.
    void WatchControlFd();

private:

    // Dispatch the epoll events gathered by RunMainLoop to their Pollables.
    void DispatchLoop( const struct epoll_event* evnt , std::size_t sz );
    // Fire expired timers; returns the updated timer baseline for the next wait.
    uint64_t UpdateTimer( std::size_t event_sz , uint64_t prev_timer );

    // This function is actually a hack to avoid potential stack overflow. The
    // situation is as follows: if we invoke the user's notifier just when we find
    // that we can get a new fd from accept inside of function AsyncAccept, then
    // the user could call AsyncAccept again (which is always the case). It means
    // the user's notifier function can reenter AsyncAccept, and such a loop can
    // continue as long as accept yields more new fds. It can potentially lead to
    // stack overflow if too many concurrent connections are established. Putting
    // such function invocation into the main event loop breaks this call graph,
    // thus avoiding the potential stack overflow.

    void ExecutePendingAccept();

private:
    // The maximum size of the epoll_events buffer for epoll_wait on the stack
    static const std::size_t kEpollEventLength = 1024;

    // Control file descriptor, used by Interrupt() to wake a blocked epoll_wait.
    class CtrlFd : public detail::Pollable {
    public:
        CtrlFd() :
            is_wake_up_(false)
        {}

        virtual void OnReadNotify();
        virtual void OnWriteNotify( ) {}
        // NOTE(review): an exception on the control fd is treated as a wake-up
        // rather than an error -- confirm this is intentional.
        virtual void OnException( const NetState& state ){
            is_wake_up_ = true;
        }

        // Send only 1 byte of data to serve as a notification
        static const std::size_t kDataLen = 1;

        bool is_wake_up() const {
            return is_wake_up_;
        }

        void set_is_wake_up( bool b ) {
            is_wake_up_ = b;
        }

    private:
        bool is_wake_up_;
    };

    CtrlFd ctrl_fd_;

    // Epoll file descriptor.
    int epoll_fd_;

    // Safely transferring ownership of a pointer in STL is kind of a nightmare in
    // C++03. STL is designed for value semantics; for pointer semantics it is very
    // hard to keep the copy constructor and assignment operator happy without
    // using a smart pointer. For simplicity, the TimerStruct will _not_ own the
    // pointer. The deletion happens explicitly once the callback gets invoked.

    struct TimerStruct {
        int time;
        detail::TimeoutCallback* callback;
        // Inverted comparison: std::push_heap/std::pop_heap build a max-heap,
        // so defining '<' as '>' turns timer_queue_ into a min-heap on |time|.
        bool operator < ( const TimerStruct& rhs ) const {
            return time > rhs.time;
        }

        TimerStruct( int tm , detail::TimeoutCallback* cb ) :
            time(tm),
            callback(cb)
        {}
    };

    // A timer heap; the heap invariant is maintained via std::push_heap/pop_heap.
    // NOTE(review): the element type appears stripped by HTML escaping --
    // presumably std::vector<TimerStruct>; confirm against the original.
    std::vector timer_queue_;

    // The pending accept events are stored here. This allows us to avoid the
    // potential stack overflow described above. These fields are checked every
    // time we enter the loop; if a pending accept/error is there, we just invoke
    // it, otherwise we head to the epoll wait.

    Socket* new_accept_socket_;
    NetState pending_accept_state_;
    // NOTE(review): template argument stripped -- presumably
    // detail::ScopePtr<detail::AcceptCallback>.
    detail::ScopePtr pending_accept_callback_;


    // An internal user-level swap buffer. The default size for this buffer
    // is the kernel's TCP receive buffer size. This value decides how many
    // system calls we can save.

    void* swap_buffer_;
    std::size_t swap_buffer_size_;

    // Friend classes: these classes inherit from detail::Pollable and need
    // access to the private Watch* API for event registration.

    friend class Socket;
    friend class ServerSocket;
    friend class ClientSocket;

    DISALLOW_COPY_AND_ASSIGN(IOManager);
};
} // namespace mnet

// ----------------------------------------------------
// Inline function or template function definition
// ----------------------------------------------------
namespace mnet{

// Start an asynchronous read. If data (or EOF, or an error) is already
// available, the notifier is invoked synchronously; otherwise read interest
// is registered with the IOManager and the callback fires later.
// T must provide: void OnRead( Socket* , std::size_t , const NetState& ).
template< typename T >
void Socket::AsyncRead( T* notifier ) {
    assert( state_ != CLOSED );
    assert( user_read_callback_.IsNull() );
    if( UNLIKELY(can_read()) ) {
        if( UNLIKELY(eof_) ) {
            // This socket has been shut down before the previous DoRead
            // operation. We just call the user notifier here with size 0.
            notifier->OnRead( this, 0 , NetState(
                    state_category::kSystem , 0) );
            return;
        } else {
            // We can directly read data from the kernel space
            NetState state;
            std::size_t sz = DoRead(&state);
            // Checking whether the read process went smoothly or not
            if( UNLIKELY(state) ) {
                if( sz > 0 ) {
                    // Notify the user that we have something. A zero-size
                    // success falls through to re-arm the read watch instead.
                    notifier->OnRead( this , sz , NetState(
                        state_category::kSystem, 0) );
                    return;
                }
            } else {
                // The read failed; propagate the error state to the user.
                notifier->OnRead( this , sz , state );
                return;
            }
        }
    }
    // Setup the watch operation
    io_manager_->WatchRead(this);
    // Set up the read callback to fire once the fd becomes readable
    user_read_callback_.Reset( detail::MakeReadCallback(notifier) );
}

// Start an asynchronous write of everything in write_buffer(). The write is
// attempted synchronously first; only the unwritten remainder is deferred to
// the reactor.
// T must provide: void OnWrite( Socket* , std::size_t , const NetState& ).
template< typename T >
void Socket::AsyncWrite( T* notifier ) {
    assert( state_ != CLOSED );
    assert( write_buffer().readable_size() != 0 );
    prev_write_size_ = 0;

    if( UNLIKELY(can_write()) ) {
        NetState state;
        // It means we don't need to let epoll watch us
        // since we can do the write without blocking here
        prev_write_size_ = DoWrite( &state );
        if( UNLIKELY(!state) ) {
            // The write failed; report the error immediately.
            notifier->OnWrite( this, prev_write_size_ , state );
            return;
        }
        if( write_buffer().readable_size() == 0 ) {
            // We have already written all the data into the kernel
            // without blocking.
1231 | notifier->OnWrite( this, prev_write_size_ , NetState() ); 1232 | return; 1233 | } 1234 | } 1235 | // Watch the write operation in the reactor 1236 | io_manager_->WatchWrite(this); 1237 | // Set up the user callback function 1238 | user_write_callback_.Reset( detail::MakeWriteCallback(notifier) ); 1239 | } 1240 | 1241 | template< typename T > 1242 | void Socket::AsyncClose( T* notifier ) { 1243 | assert( state_ == NORMAL ); 1244 | // Issue the shutdown on the write pipe operations 1245 | ::shutdown( fd() , SHUT_WR ); 1246 | // Set the state to closing here 1247 | state_ = CLOSING; 1248 | 1249 | // Now we need to stuck on the read handler here 1250 | if( can_read() ) { 1251 | NetState state; 1252 | // Try to read the data from current fd 1253 | std::size_t sz = DoRead( &state ); 1254 | if( LIKELY(sz == 0 || !state) ) { 1255 | Close(); 1256 | notifier->OnClose(); 1257 | return; 1258 | } 1259 | } 1260 | // After shuting down, we are expecting for read here 1261 | io_manager_->WatchRead(this); 1262 | // Seting up the user close callback function 1263 | user_close_callback_.Reset( 1264 | detail::MakeCloseCallback(notifier)); 1265 | } 1266 | 1267 | template< typename T > 1268 | void ClientSocket::AsyncConnect( const Endpoint& endpoint , T* notifier ) { 1269 | assert( state_ == DISCONNECTED ); 1270 | int sock_fd = detail::CreateTcpFileDescriptor(); 1271 | if( UNLIKELY(sock_fd < 0) ) { 1272 | notifier->OnConnect( this , NetState(state_category::kSystem,errno) ); 1273 | return; 1274 | } 1275 | 1276 | set_fd( sock_fd ); 1277 | 1278 | struct sockaddr_in ipv4addr; 1279 | bzero(&ipv4addr,sizeof(ipv4addr)); 1280 | 1281 | // Initialize the sockaddr_in structure 1282 | ipv4addr.sin_family = AF_INET; // IpV4 1283 | ipv4addr.sin_port = htons(endpoint.port()); 1284 | ipv4addr.sin_addr.s_addr = htonl(endpoint.ipv4()); 1285 | 1286 | int ret = ::connect( fd() , 1287 | reinterpret_cast(&ipv4addr),sizeof(ipv4addr)); 1288 | 1289 | if( UNLIKELY(ret == 0) ) { 1290 | // Our connection 
is done here, this is possible when you 1291 | // connect to a local host then kernel just succeeded at once 1292 | // This typically happenes on FreeBSD. 1293 | // Now just call user's callback function directly 1294 | notifier->OnConnect( this , NetState( 1295 | state_category::kSystem, 0) ); 1296 | state_ = CONNECTED; 1297 | } else { 1298 | if ( UNLIKELY(errno != EINPROGRESS) ) { 1299 | // When the errno is not EINPROGRESS, this means that it is 1300 | // not a recoverable error. Just return from where we are 1301 | notifier->OnConnect( this , NetState(state_category::kSystem,errno) ); 1302 | return; 1303 | } 1304 | } 1305 | 1306 | // Now issue the connection on epoll. Epoll interpret this information 1307 | // as could write ( there're potential problem on *NIX system for this). 1308 | io_manager()->WatchWrite(this); 1309 | 1310 | // Setup the user callback function 1311 | user_conn_callback_.Reset( 1312 | detail::MakeConnectCallback(notifier)); 1313 | 1314 | state_ = CONNECTING; 1315 | } 1316 | 1317 | template< typename T > 1318 | void ServerSocket::AsyncAccept( Socket* socket , T* notifier ) { 1319 | assert( user_accept_callback_.IsNull() ); 1320 | assert( io_manager_ != NULL ); 1321 | 1322 | if( can_read() ) { 1323 | // Try to accept at first since for listen fd we use level trigger 1324 | // This cost you a tiny system call but may help save a epoll_wait 1325 | // wake up which will be much more costy than an accept 1326 | NetState state; 1327 | int nfd = DoAccept(&state); 1328 | if( UNLIKELY(nfd < 0) ) { 1329 | if( !state ) { 1330 | // We meet an error, just notify user about this situation 1331 | io_manager_->SetPendingAccept( socket, notifier,state ); 1332 | return; 1333 | } 1334 | } else { 1335 | socket->set_fd( nfd ); 1336 | io_manager_->SetPendingAccept( socket,notifier,state ); 1337 | return; 1338 | } 1339 | } 1340 | 1341 | // When we reach here, it means that we have no pending accepted fd 1342 | // in the kernel space. 
    // Now just issue the WatchRead on the listen
    // fd until we get hit.
    io_manager_->WatchRead(this);
    // Remember the user's callback and the socket that will receive the
    // accepted fd; ServerSocket::OnReadNotify consumes these later.
    user_accept_callback_.Reset(detail::MakeAcceptCallback(notifier));
    new_accept_socket_ = socket;

    return;
}

// Attach this listener to an IOManager and immediately register read
// interest so incoming connections wake up the main loop.
inline void ServerSocket::SetIOManager( mnet::IOManager* io_manager ) {
    io_manager_ = io_manager;
    io_manager->WatchRead( this );
}

// Schedule |notifier| to fire after msec milliseconds (dispatched through
// detail::MakeTimeoutCallback). The vector is kept as a min-heap --
// TimerStruct::operator< inverts the comparison -- so the soonest deadline
// sits at the front.
// NOTE(review): the raw relative msec is stored as TimerStruct::time;
// presumably UpdateTimer rebases it -- confirm that timers scheduled at
// different moments compare correctly.
template< typename T >
void IOManager::Schedule( int msec , T* notifier ) {
    timer_queue_.push_back( TimerStruct(msec, detail::MakeTimeoutCallback(notifier)) );
    std::push_heap(timer_queue_.begin(),timer_queue_.end());
}

// Record an accept completion to be delivered from the main loop instead of
// invoking the user callback recursively (avoids unbounded stack growth when
// many connections are ready; see ExecutePendingAccept).
// Only one pending accept may be outstanding at a time.
template< typename T >
void IOManager::SetPendingAccept( Socket* new_socket , T* notifier , const NetState& state ) {
    assert( pending_accept_callback_.IsNull() );
    new_accept_socket_ = new_socket;
    pending_accept_callback_.Reset( detail::MakeAcceptCallback(notifier) );
    pending_accept_state_ = state;
}

}// namespace mnet
#endif // MNET_H_