├── CMakeLists.txt
├── README.md
├── example
│   ├── CMakeLists.txt
│   ├── testserver
│   └── testserver.cc
├── include
│   ├── Acceptor.h
│   ├── Buffer.h
│   ├── Callbacks.h
│   ├── Channel.h
│   ├── CurrentThread.h
│   ├── EPollPoller.h
│   ├── EventLoop.h
│   ├── EventLoopThread.h
│   ├── EventLoopThreadPool.h
│   ├── InetAddress.h
│   ├── Logger.h
│   ├── Poller.h
│   ├── Socket.h
│   ├── TcpConnection.h
│   ├── TcpServer.h
│   ├── Thread.h
│   ├── Timestamp.h
│   └── noncopyable.h
└── src
    ├── Acceptor.cc
    ├── Buffer.cc
    ├── CMakeLists.txt
    ├── Channel.cc
    ├── CurrentThread.cc
    ├── DefaultPoller.cc
    ├── EPollPoller.cc
    ├── EventLoop.cc
    ├── EventLoopThread.cc
    ├── EventLoopThreadPool.cc
    ├── InetAddress.cc
    ├── Logger.cc
    ├── Poller.cc
    ├── Socket.cc
    ├── TcpConnection.cc
    ├── TcpServer.cc
    └── Timestamp.cc

--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------

# Minimum CMake version and project name
cmake_minimum_required(VERSION 3.0)
project(muduo-core) # anchors the project's working directory

# Global C++ standard
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED True)

# Output path for the muduo_core library built in src/CMakeLists.txt
set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib)
# Example: output path for a static library
# set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PROJECT_SOURCE_DIR}/lib)
# Libraries linked into every target
set(LIBS
    pthread
)

# Subdirectories
add_subdirectory(src)
add_subdirectory(example)

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

# muduo-core

【代码随想录知识星球】 project share: a reimplementation of the muduo core.

## Introduction

This project is a multi-threaded network library built on the multi-Reactor model, modeled after muduo. It is written in C++11 and removes muduo's dependency on Boost.

It implements the Channel module, the Poller module, the event-loop module, the logging module, the thread-pool module, and a consistent-hash polling algorithm.

## Development Environment

* Linux kernel 5.15.0-113-generic (Ubuntu 22.04.6)
* gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
* cmake 3.22

## Concurrency Model

![image.png](https://cdn.nlark.com/yuque/0/2022/png/26752078/1670853134528-c88d27f2-10a2-46d3-b308-48f7632a2f09.png?x-oss-process=image%2Fresize%2Cw_937%2Climit_0)

The project uses the master-worker multi-Reactor multi-threaded model. The MainReactor only listens for and dispatches new connections: it accepts each connection through the Acceptor and hands it to a SubReactor chosen by the polling algorithm; that SubReactor then handles all read and write events of the connection.

Calling TcpServer's start function creates a thread pool internally. Every thread runs its own event loop, i.e. a SubReactor. The MainReactor picks SubReactors from the pool in turn and dispatches new connections to them; the number of SubReactors handling I/O is usually equal to the number of CPU cores. The master-worker Reactor model has several advantages:

1. Fast response: the server is never blocked by a single synchronous event, even though each Reactor itself is synchronous;
2. It largely avoids complex multi-threading and synchronization problems, and avoids thread/process switching;
3. Good scalability: CPU resources can be fully used simply by adding Reactor instances;
4. Good reusability: the Reactor model itself is independent of the concrete event-handling logic.
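In code, wiring a server onto this model takes only a few lines. The sketch below is condensed from `example/testserver.cc` further down in this repository (the echo callbacks are omitted here):

```cpp
#include "TcpServer.h"

int main()
{
    EventLoop loop;                               // baseLoop, the MainReactor
    InetAddress addr(8080);
    TcpServer server(&loop, addr, "EchoServer");  // the Acceptor listens in the MainReactor
    server.setThreadNum(3);                       // three SubReactors
    server.start();                               // start the thread pool and begin listening
    loop.loop();                                  // run the MainReactor's event loop
    return 0;
}
```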
## Setup

Install the basic tools:

```shell
sudo apt-get update
sudo apt-get install -y wget cmake build-essential unzip git
```

## Building

Clone the project:

```shell
git clone https://github.com/youngyangyang04/muduo-core.git
```

Enter the muduo-core directory:

```shell
cd muduo-core
```

Create a build directory and enter it:

```shell
mkdir build && cd build
```

Generate the build files and compile:

```shell
cmake .. && make -j$(nproc)
```

Run the program: enter the example directory and execute the binary:

```shell
cd example && ./testserver
```

## Modules

- **Event polling and dispatching**: `EventLoop.*`, `Channel.*`, `Poller.*` and `EPollPoller.*` detect events by polling and dispatch them to handlers. `EventLoop` drives the `Poller`, whose underlying implementation is `EPollPoller`.
- **Thread/loop binding**: `Thread.*`, `EventLoopThread.*` and `EventLoopThreadPool.*` bind one thread to one event loop, completing the `one loop per thread` model.
- **Network connections**: `TcpServer.*`, `TcpConnection.*`, `Acceptor.*` and `Socket.*` let the `mainloop` respond to new connections and dispatch them to the `subloop`s.
- **Buffering**: `Buffer.*` provides an automatically growing buffer and guarantees data arrives in order.

## Highlights

1. **High-concurrency non-blocking network library**
   `muduo` combines the multi-Reactor model with multiple threads into a high-concurrency, non-blocking network library.

2. **Smart pointers against dangling pointers**
   `TcpConnection` inherits from `enable_shared_from_this` so the object can never be released from a place where it must stay alive, which would leave dangling pointers. In particular, a user deleting the object while an `OnMessage` event is being handled cannot corrupt it: `TcpConnection` is always released in a controlled way.

3. **Wakeup mechanism**
   `EventLoop` uses an `eventfd` in `wakeup()` so the `mainloop` can wake a `subloop` out of its blocking `epoll_wait`.

4. **Consistent-hash polling**
   The added `ConsistenHash` header implements a consistent-hash polling algorithm that distributes `EventLoop`s sensibly across `TcpConnection` objects. Custom hash functions are supported to meet high-concurrency needs; note that the number of virtual nodes must not be too small.
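   The `ConsistenHash` header itself is not part of this dump, so for orientation only, a minimal consistent-hash ring with virtual nodes could look like the sketch below. The class and member names, and the use of `std::hash`, are assumptions for illustration, not the project's actual implementation:

```cpp
#include <cstddef>
#include <functional>
#include <map>
#include <string>

class ConsistentHashRing
{
public:
    explicit ConsistentHashRing(int virtualNodes = 100) : virtualNodes_(virtualNodes) {}

    // Each real node is expanded into virtualNodes_ points on the ring;
    // too few virtual nodes skews the distribution (hence the warning above).
    void addNode(const std::string &node)
    {
        for (int i = 0; i < virtualNodes_; ++i)
        {
            ring_[hash_(node + "#" + std::to_string(i))] = node;
        }
    }

    // A key maps to the first virtual node clockwise from its hash.
    std::string getNode(const std::string &key) const
    {
        if (ring_.empty()) return std::string();
        auto it = ring_.lower_bound(hash_(key));
        if (it == ring_.end()) it = ring_.begin(); // wrap around the ring
        return it->second;
    }

private:
    int virtualNodes_;
    std::map<std::size_t, std::string> ring_; // ordered map acts as the hash ring
    std::hash<std::string> hash_;
};
```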
5. **Ordered thread creation**
   `Thread` uses a C++ lambda plus a semaphore to keep thread creation ordered, making sure the thread has been fully created before its thread function runs.

6. **Non-blocking core buffer**
   `Buffer.*` is the core of what makes this `muduo` non-blocking. When a write event fires, the kernel buffer may lack the space to send everything at once, leaving two options:
   - make the fd non-blocking, which risks busy-waiting on the CPU;
   - block until the kernel buffer has room again, which is inefficient.

   To avoid both, the `Buffer` module stores the unsent remainder in a user-space buffer and registers a write-event watch; when the event fires again, the leftover data is sent out.

7. **Flexible logging**
   `Logger` supports log levels. While debugging you can switch on `DEBUG` logging; on a running server, `DEBUG` output can be disabled to keep the logging overhead low.
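   Concretely, `LOG_DEBUG` in `include/Logger.h` expands to nothing unless the `MUDEBUG` macro is defined, so the debug path costs nothing in release builds. A minimal usage sketch (`onStart` is a hypothetical function; how you pass the flag is up to your build system):

```cpp
// build with -DMUDEBUG to keep LOG_DEBUG; omit it and the call compiles away
#include "Logger.h"

void onStart(int port)
{
    LOG_INFO("server listening on port %d", port); // always printed
    LOG_DEBUG("epoll backend selected");           // printed only with -DMUDEBUG
}
```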
## Roadmap

- Finish the memory pool, the asynchronous-logging buffer, timers and a connection pool.
- Add more test cases, such as HTTP and RPC.
- Consider introducing a coroutine library.

## Acknowledgements

- [Shangyizhou](https://github.com/Shangyizhou/A-Tiny-Network-Library/tree/main)
- [S1mpleBug](https://github.com/S1mpleBug/muduo_cpp11?tab=readme-ov-file)
- [chenshuo](https://github.com/chenshuo/muduo)
- 《Linux高性能服务器编程》
- 《Linux多线程服务端编程：使用muduo C++网络库》

--------------------------------------------------------------------------------
/example/CMakeLists.txt:
--------------------------------------------------------------------------------

# Collect all source files in this directory
file(GLOB EXAMPLE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/*.cc)

# Build the example executable
add_executable(testserver ${EXAMPLE_SRCS})

# Link the muduo_core library built in src/CMakeLists.txt plus the global libraries
target_link_libraries(testserver muduo_core ${LIBS})

# Compile options
target_compile_options(testserver PRIVATE -std=c++11 -Wall)

# Place the testserver binary next to its sources
set_target_properties(testserver PROPERTIES
    RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
)

--------------------------------------------------------------------------------
/example/testserver:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/youngyangyang04/muduo-core/d7e18b74c19671a1a45d2299458fc1f1c9a726d7/example/testserver

--------------------------------------------------------------------------------
/example/testserver.cc:
--------------------------------------------------------------------------------

#include <string>

#include "TcpServer.h"
#include "Logger.h"

class EchoServer
{
public:
    EchoServer(EventLoop *loop, const InetAddress &addr, const std::string &name)
        : server_(loop, addr, name)
        , loop_(loop)
    {
        // Register the callbacks
        server_.setConnectionCallback(
            std::bind(&EchoServer::onConnection, this, std::placeholders::_1));

        server_.setMessageCallback(
            std::bind(&EchoServer::onMessage, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));

        // Set a suitable number of subloop threads
        server_.setThreadNum(3);
    }
    void start()
    {
        server_.start();
    }

private:
    // Called when a connection is established or torn down
    void onConnection(const TcpConnectionPtr &conn)
    {
        if (conn->connected())
        {
            LOG_INFO("Connection UP : %s", conn->peerAddress().toIpPort().c_str());
        }
        else
        {
            LOG_INFO("Connection DOWN : %s", conn->peerAddress().toIpPort().c_str());
        }
    }

    // Read/write event callback
    void onMessage(const TcpConnectionPtr &conn, Buffer *buf, Timestamp time)
    {
        std::string msg = buf->retrieveAllAsString();
        conn->send(msg);
        // conn->shutdown(); // close the write end; epoll reports EPOLLHUP => closeCallback_ runs
    }

    TcpServer server_;
    EventLoop *loop_;
};

int main()
{
    EventLoop loop;
    InetAddress addr(8080);
    EchoServer server(&loop, addr, "EchoServer");
    server.start();
    loop.loop();
    return 0;
}

--------------------------------------------------------------------------------
/include/Acceptor.h:
--------------------------------------------------------------------------------

#pragma once

#include <functional>

#include "noncopyable.h"
#include "Socket.h"
#include "Channel.h"

class EventLoop;
class InetAddress;

class Acceptor : noncopyable
{
public:
    using NewConnectionCallback = std::function<void(int sockfd, const InetAddress &)>;

    Acceptor(EventLoop *loop, const InetAddress &listenAddr, bool reuseport);
    ~Acceptor();

    // Set the callback invoked for each new connection
    void setNewConnectionCallback(const NewConnectionCallback &cb) { NewConnectionCallback_ = cb; }
    // Whether we are listening
    bool listenning() const { return listenning_; }
    // Listen on the local port
    void listen();

private:
    void handleRead(); // handles a new client connection event

    EventLoop *loop_;     // the Acceptor uses the user-defined baseLoop, a.k.a. mainLoop
    Socket acceptSocket_; // socket dedicated to accepting new connections
    Channel acceptChannel_; // channel dedicated to watching for new connections
    NewConnectionCallback NewConnectionCallback_; // new-connection callback
    bool listenning_;     // listening flag
};

--------------------------------------------------------------------------------
/include/Buffer.h:
--------------------------------------------------------------------------------

#pragma once

#include <vector>
#include <string>
#include <algorithm>
#include <sys/types.h>

// The library's underlying buffer type
class Buffer
{
public:
    static const size_t kCheapPrepend = 8; // initially reserved prependable space
    static const size_t kInitialSize = 1024;

    explicit Buffer(size_t initalSize = kInitialSize)
        : buffer_(kCheapPrepend + initalSize)
        , readerIndex_(kCheapPrepend)
        , writerIndex_(kCheapPrepend)
    {
    }

    size_t readableBytes() const { return writerIndex_ - readerIndex_; }
    size_t writableBytes() const { return buffer_.size() - writerIndex_; }
    size_t prependableBytes() const { return readerIndex_; }

    // Start address of the readable data in the buffer
    const char *peek() const { return begin() + readerIndex_; }
    void retrieve(size_t len)
    {
        if (len < readableBytes())
        {
            readerIndex_ += len; // only len bytes were consumed; [readerIndex_ + len, writerIndex_) is still unread
        }
        else // len == readableBytes()
        {
            retrieveAll();
        }
    }
    void retrieveAll()
    {
        readerIndex_ = kCheapPrepend;
        writerIndex_ = kCheapPrepend;
    }

    // Turn the Buffer data reported to onMessage into a std::string and return it
    std::string retrieveAllAsString() { return retrieveAsString(readableBytes()); }
    std::string retrieveAsString(size_t len)
    {
        std::string result(peek(), len);
        retrieve(len); // the readable data was copied out above, so reset the buffer indices here
        return result;
    }

    // buffer_.size() - writerIndex_
    void ensureWritableBytes(size_t len)
    {
        if (writableBytes() < len)
        {
            makeSpace(len); // grow
        }
    }

    // Append the data in [data, data + len) to the writable part of the buffer
    void append(const char *data, size_t len)
    {
        ensureWritableBytes(len);
        std::copy(data, data + len, beginWrite());
        writerIndex_ += len;
    }
    char *beginWrite() { return begin() + writerIndex_; }
    const char *beginWrite() const { return begin() + writerIndex_; }

    // Read data from fd into the buffer
    ssize_t readFd(int fd, int *saveErrno);
    // Write the buffer's data out to fd
    ssize_t writeFd(int fd, int *saveErrno);

private:
    // Address of the first element of the underlying vector, i.e. the array start
    char *begin() { return &*buffer_.begin(); }
    const char *begin() const { return &*buffer_.begin(); }

    void makeSpace(size_t len)
    {
        /**
         * | kCheapPrepend |xxx| reader | writer |   // xxx marks the already-read part of reader
         * | kCheapPrepend | reader          | len |
         **/
        if (writableBytes() + prependableBytes() < len + kCheapPrepend) // len exceeds the space before reader plus the writer part
        {
            buffer_.resize(writerIndex_ + len);
        }
        else // len <= xxx + writer: move reader up to kCheapPrepend so the space behind it is contiguous
        {
            size_t readable = readableBytes(); // length of reader
            // Copy the data in [readerIndex_, writerIndex_) to position kCheapPrepend
            // to free up more writable space
            std::copy(begin() + readerIndex_,
                      begin() + writerIndex_,
                      begin() + kCheapPrepend);
            readerIndex_ = kCheapPrepend;
            writerIndex_ = readerIndex_ + readable;
        }
    }

    std::vector<char> buffer_;
    size_t readerIndex_;
    size_t writerIndex_;
};
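// A quick round-trip demo of the Buffer API (illustrative only, kept out of the
// build with #if 0 — the same trick src/InetAddress.cc uses for its ad-hoc test):
#if 0
#include <cassert>
int main()
{
    Buffer buf;
    buf.append("hello", 5);                    // data lands at writerIndex_
    assert(buf.readableBytes() == 5);
    std::string s = buf.retrieveAllAsString(); // copies the data out and resets both indices
    assert(s == "hello" && buf.readableBytes() == 0);
}
#endif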
--------------------------------------------------------------------------------
/include/Callbacks.h:
--------------------------------------------------------------------------------

#pragma once

#include <memory>
#include <functional>

class Buffer;
class TcpConnection;
class Timestamp;

using TcpConnectionPtr = std::shared_ptr<TcpConnection>;
using ConnectionCallback = std::function<void(const TcpConnectionPtr &)>;
using CloseCallback = std::function<void(const TcpConnectionPtr &)>;
using WriteCompleteCallback = std::function<void(const TcpConnectionPtr &)>;
using HighWaterMarkCallback = std::function<void(const TcpConnectionPtr &, size_t)>;

using MessageCallback = std::function<void(const TcpConnectionPtr &, Buffer *, Timestamp)>;

--------------------------------------------------------------------------------
/include/Channel.h:
--------------------------------------------------------------------------------

#pragma once

#include <functional>
#include <memory>

#include "noncopyable.h"
#include "Timestamp.h"

class EventLoop;

/**
 * Clarifies the relationship between EventLoop, Channel and Poller, which together form
 * the multiplexing event dispatcher of the Reactor model.
 * A Channel wraps a sockfd and the events it is interested in, such as EPOLLIN and EPOLLOUT,
 * and also carries the concrete events the poller reported back.
 **/
class Channel : noncopyable
{
public:
    using EventCallback = std::function<void()>; // muduo still uses typedef
    using ReadEventCallback = std::function<void(Timestamp)>;

    Channel(EventLoop *loop, int fd);
    ~Channel();

    // Handle events after the Poller has notified the fd; called from EventLoop::loop()
    void handleEvent(Timestamp receiveTime);

    // Set the callback function objects
    void setReadCallback(ReadEventCallback cb) { readCallback_ = std::move(cb); }
    void setWriteCallback(EventCallback cb) { writeCallback_ = std::move(cb); }
    void setCloseCallback(EventCallback cb) { closeCallback_ = std::move(cb); }
    void setErrorCallback(EventCallback cb) { errorCallback_ = std::move(cb); }

    // Prevent the channel from still running callbacks after it was removed manually
    void tie(const std::shared_ptr<void> &);

    int fd() const { return fd_; }
    int events() const { return events_; }
    void set_revents(int revt) { revents_ = revt; }

    // Change the fd's event interest; equivalent to epoll_ctl add/delete
    void enableReading() { events_ |= kReadEvent; update(); }
    void disableReading() { events_ &= ~kReadEvent; update(); }
    void enableWriting() { events_ |= kWriteEvent; update(); }
    void disableWriting() { events_ &= ~kWriteEvent; update(); }
    void disableAll() { events_ = kNoneEvent; update(); }

    // Current event interest of the fd
    bool isNoneEvent() const { return events_ == kNoneEvent; }
    bool isWriting() const { return events_ & kWriteEvent; }
    bool isReading() const { return events_ & kReadEvent; }

    int index() { return index_; }
    void set_index(int idx) { index_ = idx; }

    // one loop per thread
    EventLoop *ownerLoop() { return loop_; }
    void remove();

private:
    void update();
    void handleEventWithGuard(Timestamp receiveTime);

    static const int kNoneEvent;
    static const int kReadEvent;
    static const int kWriteEvent;

    EventLoop *loop_; // the owning event loop
    const int fd_;    // the fd the Poller watches
    int events_;      // events the fd is interested in
    int revents_;     // concrete events the Poller reported
    int index_;

    std::weak_ptr<void> tie_;
    bool tied_;

    // The channel knows the fd's concrete revents, so it dispatches to the matching callbacks
    ReadEventCallback readCallback_;
    EventCallback writeCallback_;
    EventCallback closeCallback_;
    EventCallback errorCallback_;
};
--------------------------------------------------------------------------------
/include/CurrentThread.h:
--------------------------------------------------------------------------------

#pragma once

#include <unistd.h>
#include <sys/syscall.h>

namespace CurrentThread
{
    extern __thread int t_cachedTid; // cached tid; the system call is expensive, so the tid is saved after the first fetch

    void cacheTid();

    inline int tid() // an inline function only takes effect in the current translation unit
    {
        if (__builtin_expect(t_cachedTid == 0, 0)) // __builtin_expect is a branch-prediction hint: if the tid is not cached yet, fetch it via the cacheTid() system call
        {
            cacheTid();
        }
        return t_cachedTid;
    }
}

--------------------------------------------------------------------------------
/include/EPollPoller.h:
--------------------------------------------------------------------------------

#pragma once

#include <vector>
#include <sys/epoll.h>

#include "Poller.h"
#include "Timestamp.h"

/**
 * How epoll is used:
 * 1. epoll_create
 * 2. epoll_ctl (add, mod, del)
 * 3. epoll_wait
 **/

class Channel;

class EPollPoller : public Poller
{
public:
    EPollPoller(EventLoop *loop);
    ~EPollPoller() override;

    // Overrides of the base class Poller's abstract methods
    Timestamp poll(int timeoutMs, ChannelList *activeChannels) override;
    void updateChannel(Channel *channel) override;
    void removeChannel(Channel *channel) override;

private:
    static const int kInitEventListSize = 16;

    // Fill in the active connections
    void fillActiveChannels(int numEvents, ChannelList *activeChannels) const;
    // Update a channel; in effect this calls epoll_ctl
    void update(int operation, Channel *channel);

    using EventList = std::vector<epoll_event>; // in C++ the struct keyword can be omitted: epoll_event is enough

    int epollfd_;      // fd returned by epoll_create
    EventList events_; // holds the ready-event descriptors returned by epoll_wait
};

--------------------------------------------------------------------------------
/include/EventLoop.h:
--------------------------------------------------------------------------------

#pragma once

#include <functional>
#include <vector>
#include <atomic>
#include <memory>
#include <mutex>

#include "noncopyable.h"
#include "Timestamp.h"
#include "CurrentThread.h"

class Channel;
class Poller;

// The event-loop class; it mainly contains two big modules: Channel and Poller (the epoll abstraction)
class EventLoop : noncopyable
{
public:
    using Functor = std::function<void()>;

    EventLoop();
    ~EventLoop();

    // Start the event loop
    void loop();
    // Quit the event loop
    void quit();

    Timestamp pollReturnTime() const { return pollRetureTime_; }

    // Run cb in the current loop
    void runInLoop(Functor cb);
    // Queue the upper layer's callback cb and wake the loop's thread to run it
    void queueInLoop(Functor cb);

    // Wake the loop's thread through the eventfd
    void wakeup();

    // EventLoop methods that forward to the Poller
    void updateChannel(Channel *channel);
    void removeChannel(Channel *channel);
    bool hasChannel(Channel *channel);

    // Whether this EventLoop object lives in the calling thread
    bool isInLoopThread() const { return threadId_ == CurrentThread::tid(); } // threadId_ is the creator thread's id; CurrentThread::tid() is the calling thread's id

private:
    void handleRead();        // read callback bound to wakeupFd_: wakeup() writes 8 bytes, handleRead() reads them, which unblocks the waiting epoll_wait
    void doPendingFunctors(); // run the queued upper-layer callbacks

    using ChannelList = std::vector<Channel *>;

    std::atomic_bool looping_; // atomic, implemented with CAS underneath
    std::atomic_bool quit_;    // flags loop exit

    const pid_t threadId_; // id of the thread that created this EventLoop, i.e. the loop's home thread

    Timestamp pollRetureTime_; // time at which the Poller returned the ready Channels
    std::unique_ptr<Poller> poller_;

    int wakeupFd_; // when mainLoop gets a new Channel, it picks a subLoop by polling and wakes it through this fd
    std::unique_ptr<Channel> wakeupChannel_;

    ChannelList activeChannels_; // all Channels the Poller reported as having events

    std::atomic_bool callingPendingFunctors_; // whether the loop is currently running queued callbacks
    std::vector<Functor> pendingFunctors_;    // all callbacks queued for this loop
    std::mutex mutex_;                        // protects the vector above
};
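// A minimal cross-thread dispatch demo (illustrative only, excluded with #if 0;
// std::thread stands in for the library's own Thread class here):
#if 0
#include <thread>
#include <cstdio>
int main()
{
    EventLoop loop;
    std::thread t([&loop]() {
        // called from a foreign thread: runInLoop falls back to queueInLoop + wakeup()
        loop.runInLoop([]() { std::printf("ran in the loop thread\n"); });
    });
    t.join();
    loop.loop(); // the queued functor runs here, in the loop's home thread
}
#endif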
--------------------------------------------------------------------------------
/include/EventLoopThread.h:
--------------------------------------------------------------------------------

#pragma once

#include <functional>
#include <mutex>
#include <condition_variable>
#include <string>

#include "noncopyable.h"
#include "Thread.h"

class EventLoop;

class EventLoopThread : noncopyable
{
public:
    using ThreadInitCallback = std::function<void(EventLoop *)>;

    EventLoopThread(const ThreadInitCallback &cb = ThreadInitCallback(),
                    const std::string &name = std::string());
    ~EventLoopThread();

    EventLoop *startLoop();

private:
    void threadFunc();

    EventLoop *loop_;
    bool exiting_;
    Thread thread_;
    std::mutex mutex_;             // mutex
    std::condition_variable cond_; // condition variable
    ThreadInitCallback callback_;
};

--------------------------------------------------------------------------------
/include/EventLoopThreadPool.h:
--------------------------------------------------------------------------------

#pragma once

#include <functional>
#include <string>
#include <vector>
#include <memory>

#include "noncopyable.h"

class EventLoop;
class EventLoopThread;

class EventLoopThreadPool : noncopyable
{
public:
    using ThreadInitCallback = std::function<void(EventLoop *)>;

    EventLoopThreadPool(EventLoop *baseLoop, const std::string &nameArg);
    ~EventLoopThreadPool();

    void setThreadNum(int numThreads) { numThreads_ = numThreads; }

    void start(const ThreadInitCallback &cb = ThreadInitCallback());

    // When running multi-threaded, baseLoop_ (mainLoop) assigns Channels to subLoops round-robin by default
    EventLoop *getNextLoop();

    std::vector<EventLoop *> getAllLoops(); // get all EventLoops

    bool started() const { return started_; }        // whether the pool has started
    const std::string name() const { return name_; } // pool name

private:
    EventLoop *baseLoop_; // the loop the user created with muduo; with one thread it is used directly, otherwise more EventLoops are created
    std::string name_;    // pool name, usually supplied by the user; EventLoopThread names in the pool derive from it
    bool started_;        // started flag
    int numThreads_;      // number of threads in the pool
    int next_;            // index of the EventLoop chosen for the next incoming connection
    std::vector<std::unique_ptr<EventLoopThread>> threads_; // list of IO threads
    std::vector<EventLoop *> loops_; // list of EventLoops in the pool, pointing at the EventLoop objects created by the EventLoopThread thread functions
};
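// Round-robin in action (illustrative only, excluded with #if 0): with three
// threads, getNextLoop() cycles loops_[0], loops_[1], loops_[2], 0, 1, 2, ...
// and never returns baseLoop_ once loops_ is non-empty.
#if 0
int main()
{
    EventLoop baseLoop;
    EventLoopThreadPool pool(&baseLoop, "pool");
    pool.setThreadNum(3);
    pool.start();
    for (int i = 0; i < 6; ++i)
    {
        EventLoop *sub = pool.getNextLoop(); // wraps back to loops_[0] after loops_[2]
        (void)sub;
    }
    baseLoop.loop(); // blocks; the demo ends here
}
#endif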
--------------------------------------------------------------------------------
/include/InetAddress.h:
--------------------------------------------------------------------------------

#pragma once

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string>

// Wraps a socket address
class InetAddress
{
public:
    explicit InetAddress(uint16_t port = 0, std::string ip = "127.0.0.1");
    explicit InetAddress(const sockaddr_in &addr)
        : addr_(addr)
    {
    }

    std::string toIp() const;
    std::string toIpPort() const;
    uint16_t toPort() const;

    const sockaddr_in *getSockAddr() const { return &addr_; }
    void setSockAddr(const sockaddr_in &addr) { addr_ = addr; }

private:
    sockaddr_in addr_;
};

--------------------------------------------------------------------------------
/include/Logger.h:
--------------------------------------------------------------------------------

#pragma once

#include <string>

#include "noncopyable.h"

// LOG_INFO("%s %d", arg1, arg2)
#define LOG_INFO(logmsgFormat, ...)                       \
    do                                                    \
    {                                                     \
        Logger &logger = Logger::instance();              \
        logger.setLogLevel(INFO);                         \
        char buf[1024] = {0};                             \
        snprintf(buf, 1024, logmsgFormat, ##__VA_ARGS__); \
        logger.log(buf);                                  \
    } while (0)

#define LOG_ERROR(logmsgFormat, ...)                      \
    do                                                    \
    {                                                     \
        Logger &logger = Logger::instance();              \
        logger.setLogLevel(ERROR);                        \
        char buf[1024] = {0};                             \
        snprintf(buf, 1024, logmsgFormat, ##__VA_ARGS__); \
        logger.log(buf);                                  \
    } while (0)

#define LOG_FATAL(logmsgFormat, ...)                      \
    do                                                    \
    {                                                     \
        Logger &logger = Logger::instance();              \
        logger.setLogLevel(FATAL);                        \
        char buf[1024] = {0};                             \
        snprintf(buf, 1024, logmsgFormat, ##__VA_ARGS__); \
        logger.log(buf);                                  \
        exit(-1);                                         \
    } while (0)

#ifdef MUDEBUG
#define LOG_DEBUG(logmsgFormat, ...)                      \
    do                                                    \
    {                                                     \
        Logger &logger = Logger::instance();              \
        logger.setLogLevel(DEBUG);                        \
        char buf[1024] = {0};                             \
        snprintf(buf, 1024, logmsgFormat, ##__VA_ARGS__); \
        logger.log(buf);                                  \
    } while (0)
#else
#define LOG_DEBUG(logmsgFormat, ...)
#endif

// Log levels: INFO ERROR FATAL DEBUG
enum LogLevel
{
    INFO,  // normal information
    ERROR, // error information
    FATAL, // core-dump information
    DEBUG, // debug information
};

// The logging class
class Logger : noncopyable
{
public:
    // Get the unique Logger instance (singleton)
    static Logger &instance();
    // Set the log level
    void setLogLevel(int level);
    // Write a log line
    void log(std::string msg);

private:
    int logLevel_;
};
--------------------------------------------------------------------------------
/include/Poller.h:
--------------------------------------------------------------------------------

#pragma once

#include <vector>
#include <unordered_map>

#include "noncopyable.h"
#include "Timestamp.h"

class Channel;
class EventLoop;

// The core IO-multiplexing module of muduo's event dispatcher
class Poller
{
public:
    using ChannelList = std::vector<Channel *>;

    Poller(EventLoop *loop);
    virtual ~Poller() = default;

    // Uniform interface kept for all IO-multiplexing backends
    virtual Timestamp poll(int timeoutMs, ChannelList *activeChannels) = 0;
    virtual void updateChannel(Channel *channel) = 0;
    virtual void removeChannel(Channel *channel) = 0;

    // Whether the given channel is registered in this Poller
    bool hasChannel(Channel *channel) const;

    // Lets EventLoop obtain the default concrete IO-multiplexing implementation
    static Poller *newDefaultPoller(EventLoop *loop);

protected:
    // map key: sockfd; value: the channel the sockfd belongs to
    using ChannelMap = std::unordered_map<int, Channel *>;
    ChannelMap channels_;

private:
    EventLoop *ownerLoop_; // the EventLoop this Poller belongs to
};

--------------------------------------------------------------------------------
/include/Socket.h:
--------------------------------------------------------------------------------

#pragma once

#include "noncopyable.h"

class InetAddress;

// Wraps a socket fd
class Socket : noncopyable
{
public:
    explicit Socket(int sockfd)
        : sockfd_(sockfd)
    {
    }
    ~Socket();

    int fd() const { return sockfd_; }
    void bindAddress(const InetAddress &localaddr);
    void listen();
    int accept(InetAddress *peeraddr);

    void shutdownWrite();

    void setTcpNoDelay(bool on);
    void setReuseAddr(bool on);
    void setReusePort(bool on);
    void setKeepAlive(bool on);

private:
    const int sockfd_;
};

--------------------------------------------------------------------------------
/include/TcpConnection.h:
--------------------------------------------------------------------------------

#pragma once

#include <memory>
#include <string>
#include <atomic>

#include "noncopyable.h"
#include "InetAddress.h"
#include "Callbacks.h"
#include "Buffer.h"
#include "Timestamp.h"

class Channel;
class EventLoop;
class Socket;

/**
 * TcpServer => Acceptor => a new client connects; accept returns connfd
 * => TcpConnection sets the callbacks => hands them to Channel => Poller => Channel runs the callbacks
 **/
class TcpConnection : noncopyable, public std::enable_shared_from_this<TcpConnection>
{
public:
    TcpConnection(EventLoop *loop,
                  const std::string &nameArg,
                  int sockfd,
                  const InetAddress &localAddr,
                  const InetAddress &peerAddr);
    ~TcpConnection();

    EventLoop *getLoop() const { return loop_; }
    const std::string &name() const { return name_; }
    const InetAddress &localAddress() const { return localAddr_; }
    const InetAddress &peerAddress() const { return peerAddr_; }

    bool connected() const { return state_ == kConnected; }

    // Send data
    void send(const std::string &buf);
    void sendFile(int fileDescriptor, off_t offset, size_t count);

    // Half-close: shut down the write side
    void shutdown();

    void setConnectionCallback(const ConnectionCallback &cb)
    { connectionCallback_ = cb; }
    void setMessageCallback(const MessageCallback &cb)
    { messageCallback_ = cb; }
    void setWriteCompleteCallback(const WriteCompleteCallback &cb)
    { writeCompleteCallback_ = cb; }
    void setCloseCallback(const CloseCallback &cb)
    { closeCallback_ = cb; }
    void setHighWaterMarkCallback(const HighWaterMarkCallback &cb, size_t highWaterMark)
    { highWaterMarkCallback_ = cb; highWaterMark_ = highWaterMark; }

    // Connection established
    void connectEstablished();
    // Connection destroyed
    void connectDestroyed();

private:
    enum StateE
    {
        kDisconnected,  // disconnected
        kConnecting,    // connecting
        kConnected,     // connected
        kDisconnecting  // disconnecting
    };
    void setState(StateE state) { state_ = state; }

    void handleRead(Timestamp receiveTime);
    void handleWrite(); // handle write events
    void handleClose();
    void handleError();

    void sendInLoop(const void *data, size_t len);
    void shutdownInLoop();
    void sendFileInLoop(int fileDescriptor, off_t offset, size_t count);

    EventLoop *loop_; // baseloop or subloop, depending on the thread count set in TcpServer: with multiple Reactors loop_ points at a subloop, with a single Reactor at the baseloop
    const std::string name_;
    std::atomic_int state_;
    bool reading_; // whether the connection is watching read events

    // Socket and Channel, analogous to the Acceptor: Acceptor => mainloop, TcpConnection => subloop
    std::unique_ptr<Socket> socket_;
    std::unique_ptr<Channel> channel_;

    const InetAddress localAddr_;
    const InetAddress peerAddr_;

    // TcpServer holds these callbacks too: the user registers them on TcpServer, TcpServer passes them to TcpConnection, which registers them on the Channel
    ConnectionCallback connectionCallback_;       // new-connection callback
    MessageCallback messageCallback_;             // read/write-message callback
    WriteCompleteCallback writeCompleteCallback_; // called once a send has fully completed
    HighWaterMarkCallback highWaterMarkCallback_; // high-water-mark callback
    CloseCallback closeCallback_;                 // close callback
    size_t highWaterMark_;                        // high-water-mark threshold

    // Data buffers
    Buffer inputBuffer_;  // receive buffer
    Buffer outputBuffer_; // send buffer; user sends go into outputBuffer_
};
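// How the high-water-mark callback is meant to be used (illustrative only,
// excluded with #if 0; onHighWaterMark is a hypothetical user function):
#if 0
void onHighWaterMark(const TcpConnectionPtr &conn, size_t len)
{
    // outputBuffer_ has buffered more than the threshold: the peer is reading
    // too slowly, so the sender should throttle itself here.
}

void onConnection(const TcpConnectionPtr &conn)
{
    if (conn->connected())
        conn->setHighWaterMarkCallback(onHighWaterMark, 64 * 1024 * 1024); // 64M, the constructor default
}
#endif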
--------------------------------------------------------------------------------
/include/TcpServer.h:
--------------------------------------------------------------------------------

#pragma once

/**
 * The entry point for writing servers with muduo
 **/

#include <functional>
#include <string>
#include <memory>
#include <atomic>
#include <unordered_map>

#include "EventLoop.h"
#include "Acceptor.h"
#include "InetAddress.h"
#include "noncopyable.h"
#include "EventLoopThreadPool.h"
#include "Callbacks.h"
#include "TcpConnection.h"
#include "Buffer.h"

// The class user code programs against
class TcpServer
{
public:
    using ThreadInitCallback = std::function<void(EventLoop *)>;

    enum Option
    {
        kNoReusePort, // do not reuse the local port
        kReusePort,   // reuse the local port
    };

    TcpServer(EventLoop *loop,
              const InetAddress &listenAddr,
              const std::string &nameArg,
              Option option = kNoReusePort);
    ~TcpServer();

    void setThreadInitCallback(const ThreadInitCallback &cb) { threadInitCallback_ = cb; }
    void setConnectionCallback(const ConnectionCallback &cb) { connectionCallback_ = cb; }
    void setMessageCallback(const MessageCallback &cb) { messageCallback_ = cb; }
    void setWriteCompleteCallback(const WriteCompleteCallback &cb) { writeCompleteCallback_ = cb; }

    // Set the number of underlying subloops
    void setThreadNum(int numThreads);
    /**
     * Starts the server (begins listening) if it is not listening yet.
     * Calling it multiple times has no side effects.
     * Thread safe.
     */
    void start();

private:
    void newConnection(int sockfd, const InetAddress &peerAddr);
    void removeConnection(const TcpConnectionPtr &conn);
    void removeConnectionInLoop(const TcpConnectionPtr &conn);

    using ConnectionMap = std::unordered_map<std::string, TcpConnectionPtr>;

    EventLoop *loop_; // the baseloop, defined by the user

    const std::string ipPort_;
    const std::string name_;

    std::unique_ptr<Acceptor> acceptor_; // runs in the mainloop; its sole job is to watch for new-connection events

    std::shared_ptr<EventLoopThreadPool> threadPool_; // one loop per thread

    ConnectionCallback connectionCallback_;       // new-connection callback
    MessageCallback messageCallback_;             // read/write-event callback
    WriteCompleteCallback writeCompleteCallback_; // send-complete callback

    ThreadInitCallback threadInitCallback_; // loop-thread initialization callback
    int numThreads_;                        // number of threads in the pool
    std::atomic_int started_;
    int nextConnId_;
    ConnectionMap connections_; // holds all live connections
};

--------------------------------------------------------------------------------
/include/Thread.h:
--------------------------------------------------------------------------------

#pragma once

#include <functional>
#include <thread>
#include <memory>
#include <string>
#include <atomic>
#include <unistd.h>

#include "noncopyable.h"

class Thread : noncopyable
{
public:
    using ThreadFunc = std::function<void()>;

    explicit Thread(ThreadFunc, const std::string &name = std::string());
    ~Thread();

    void start();
    void join();

    bool started() { return started_; }
    pid_t tid() const { return tid_; }
    const std::string &name() const { return name_; }

    static int numCreated() { return numCreated_; }

private:
    void setDefaultName();

    bool started_;
    bool joined_;
    std::shared_ptr<std::thread> thread_;
    pid_t tid_;       // bound only when the thread is actually created
    ThreadFunc func_; // the thread's callback function
    std::string name_;
    static std::atomic_int numCreated_;
};
--------------------------------------------------------------------------------
/include/Timestamp.h:
--------------------------------------------------------------------------------

#pragma once

#include <iostream>
#include <string>

class Timestamp
{
public:
    Timestamp();
    explicit Timestamp(int64_t microSecondsSinceEpoch);
    static Timestamp now();
    std::string toString() const;

private:
    int64_t microSecondsSinceEpoch_;
};

--------------------------------------------------------------------------------
/include/noncopyable.h:
--------------------------------------------------------------------------------

#pragma once // guards against repeated inclusion

/**
 * After inheriting from noncopyable, derived-class objects can still be constructed and
 * destroyed normally, but can no longer be copy-constructed or copy-assigned.
 **/
class noncopyable
{
public:
    noncopyable(const noncopyable &) = delete;
    noncopyable &operator=(const noncopyable &) = delete;
    // void operator=(const noncopyable &) = delete; // muduo makes the return type void, which is also perfectly defensible
protected:
    noncopyable() = default;
    ~noncopyable() = default;
};
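// What the deleted members buy us (illustrative only, excluded with #if 0):
#if 0
class Foo : noncopyable {};

int main()
{
    Foo a;           // fine: the base's default ctor is protected, Foo's is public
    // Foo b(a);     // error: the copy constructor is deleted
    // Foo c; c = a; // error: the copy assignment is deleted
}
#endif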
--------------------------------------------------------------------------------
/src/Acceptor.cc:
--------------------------------------------------------------------------------

#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>
#include <unistd.h>

#include "Acceptor.h"
#include "Logger.h"
#include "InetAddress.h"

static int createNonblocking()
{
    int sockfd = ::socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, IPPROTO_TCP);
    if (sockfd < 0)
    {
        LOG_FATAL("%s:%s:%d listen socket create err:%d\n", __FILE__, __FUNCTION__, __LINE__, errno);
    }
    return sockfd;
}

Acceptor::Acceptor(EventLoop *loop, const InetAddress &listenAddr, bool reuseport)
    : loop_(loop)
    , acceptSocket_(createNonblocking())
    , acceptChannel_(loop, acceptSocket_.fd())
    , listenning_(false)
{
    acceptSocket_.setReuseAddr(true);
    acceptSocket_.setReusePort(true);
    acceptSocket_.bindAddress(listenAddr);
    // TcpServer::start() => Acceptor.listen(); when a new client connects, a callback must run
    // (accept => connfd => wrap it in a Channel => wake a subloop)
    // When the baseloop sees an event on acceptChannel_ (the listenfd), this callback runs
    acceptChannel_.setReadCallback(
        std::bind(&Acceptor::handleRead, this));
}

Acceptor::~Acceptor()
{
    acceptChannel_.disableAll(); // remove all event interest from the Poller
    acceptChannel_.remove();     // EventLoop->removeChannel => Poller->removeChannel: erase the entry from the Poller's ChannelMap
}

void Acceptor::listen()
{
    listenning_ = true;
    acceptSocket_.listen();         // listen
    acceptChannel_.enableReading(); // register acceptChannel_ with the Poller -- important!
}

// The listenfd has an event: a new client has connected
void Acceptor::handleRead()
{
    InetAddress peerAddr;
    int connfd = acceptSocket_.accept(&peerAddr);
    if (connfd >= 0)
    {
        if (NewConnectionCallback_)
        {
            NewConnectionCallback_(connfd, peerAddr); // poll for a subLoop, wake it, and hand it the new client's Channel
        }
        else
        {
            ::close(connfd);
        }
    }
    else
    {
        LOG_ERROR("%s:%s:%d accept err:%d\n", __FILE__, __FUNCTION__, __LINE__, errno);
        if (errno == EMFILE)
        {
            LOG_ERROR("%s:%s:%d sockfd reached limit\n", __FILE__, __FUNCTION__, __LINE__);
        }
    }
}

--------------------------------------------------------------------------------
/src/Buffer.cc:
--------------------------------------------------------------------------------

#include <errno.h>
#include <sys/uio.h>
#include <unistd.h>

#include "Buffer.h"

/**
 * Read from fd; the Poller works in LT mode.
 * The Buffer has a finite size, but when reading from the fd we do not know how much TCP data is pending.
 *
 * @description: readv first reads into buffer_; if buffer_ runs out of room, the rest goes
 * into a 65536-byte scratch area on the stack, which is then appended to buffer_ via append.
 * This avoids extra system calls without limiting how much data can be received.
 **/
ssize_t Buffer::readFd(int fd, int *saveErrno)
{
    // Stack scratch space: holds the overflow while buffer_ is short on room; once buffer_
    // has been grown to a sufficient size, the data is handed over to it.
    char extrabuf[65536] = {0}; // 65536 / 1024 = 64KB on the stack

    /*
    struct iovec {
        void  *iov_base; // iov_base points at the buffer that receives readv's data or supplies writev's data
        size_t iov_len;  // iov_len is the maximum bytes to receive, or the actual bytes to write
    };
    */

    // Two consecutive buffers described with iovec
    struct iovec vec[2];
    const size_t writable = writableBytes(); // remaining writable room in buffer_; it may not hold everything read from the fd

    // First block: the writable space of buffer_
    vec[0].iov_base = begin() + writerIndex_;
    vec[0].iov_len = writable;
    // Second block: the stack scratch space
    vec[1].iov_base = extrabuf;
    vec[1].iov_len = sizeof(extrabuf);

    // when there is enough space in this buffer, don't read into extrabuf.
    // when extrabuf is used, we read 128k-1 bytes at most.
    // "At most 128k-1": if writable is 64k-1, both blocks are used: 64k-1 + 64k = 128k-1 at most.
    // If the first block is already >= 64k, only one block is used and extrabuf is skipped.
    const int iovcnt = (writable < sizeof(extrabuf)) ? 2 : 1;
    const ssize_t n = ::readv(fd, vec, iovcnt);

    if (n < 0)
    {
        *saveErrno = errno;
    }
    else if (static_cast<size_t>(n) <= writable) // buffer_'s writable space was enough to hold everything read
    {
        writerIndex_ += n;
    }
    else // extrabuf also received n - writable bytes
    {
        writerIndex_ = buffer_.size();
        append(extrabuf, n - writable); // grow buffer_ and append the part that landed in extrabuf
    }
    return n;
}

// inputBuffer_.readFd reads peer data into inputBuffer_, moving writerIndex_
// outputBuffer_.writeFd writes data out from outputBuffer_, starting at readerIndex_, up to readableBytes() bytes
ssize_t Buffer::writeFd(int fd, int *saveErrno)
{
    ssize_t n = ::write(fd, peek(), readableBytes());
    if (n < 0)
    {
        *saveErrno = errno;
    }
    return n;
}
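// Feeding readFd from a pipe shows the scatter-read path end to end
// (illustrative only, excluded with #if 0):
#if 0
#include <string.h>
int main()
{
    int fds[2];
    ::pipe(fds);
    char data[2048];
    ::memset(data, 'x', sizeof(data));
    ::write(fds[1], data, sizeof(data)); // 2048 bytes pending on the read end

    Buffer buf; // default writable space is 1024, so extrabuf takes the second half
    int savedErrno = 0;
    ssize_t n = buf.readFd(fds[0], &savedErrno);
    // n == 2048 && buf.readableBytes() == 2048: the overflow was appended from extrabuf
    ::close(fds[0]);
    ::close(fds[1]);
}
#endif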
--------------------------------------------------------------------------------
/src/CMakeLists.txt:
--------------------------------------------------------------------------------

# Collect all source files in this directory
file(GLOB SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.cc)

# Build the muduo_core shared library
add_library(muduo_core SHARED ${SRC_FILES})

# Header search path
target_include_directories(muduo_core PUBLIC ${CMAKE_SOURCE_DIR}/include)

--------------------------------------------------------------------------------
/src/Channel.cc:
--------------------------------------------------------------------------------

#include <sys/epoll.h>

#include "Channel.h"
#include "EventLoop.h"
#include "Logger.h"

const int Channel::kNoneEvent = 0;                  // no events
const int Channel::kReadEvent = EPOLLIN | EPOLLPRI; // read events
const int Channel::kWriteEvent = EPOLLOUT;          // write events

// EventLoop: ChannelList Poller
Channel::Channel(EventLoop *loop, int fd)
    : loop_(loop)
    , fd_(fd)
    , events_(0)
    , revents_(0)
    , index_(-1)
    , tied_(false)
{
}

Channel::~Channel()
{
}

// When is Channel::tie called? TcpConnection => channel
/**
 * The callbacks registered on a Channel are all member functions of a TcpConnection
 * object, so the TcpConnection must stay alive while the Channel is running a callback.
 * tie resolves this lifetime question: the Channel holds a weak_ptr to the TcpConnection
 * and promotes it before invoking callbacks, which keeps the TcpConnection from being
 * destroyed while the Channel still needs it.
 **/
void Channel::tie(const std::shared_ptr<void> &obj)
{
    tie_ = obj;
    tied_ = true;
}

// update and remove => EPollPoller: refresh this channel's state in the poller
/**
 * After changing the events this channel's fd cares about, update has the poller
 * apply the new interest to the fd via epoll_ctl
 **/
void Channel::update()
{
    // Through the owning EventLoop, call the poller's method to register the fd's events
    loop_->updateChannel(this);
}

// Remove this channel from its owning EventLoop
void Channel::remove()
{
    loop_->removeChannel(this);
}

void Channel::handleEvent(Timestamp receiveTime)
{
    if (tied_)
    {
        std::shared_ptr<void> guard = tie_.lock();
        if (guard)
        {
            handleEventWithGuard(receiveTime);
        }
        // If the promotion fails, do nothing: the Channel's TcpConnection no longer exists
    }
    else
    {
        handleEventWithGuard(receiveTime);
    }
}

void Channel::handleEventWithGuard(Timestamp receiveTime)
{
    LOG_INFO("channel handleEvent revents:%d\n", revents_);
    // close
    if ((revents_ & EPOLLHUP) && !(revents_ & EPOLLIN)) // when a TcpConnection's Channel shuts down its write end, epoll reports EPOLLHUP
    {
        if (closeCallback_)
        {
            closeCallback_();
        }
    }
    // error
    if (revents_ & EPOLLERR)
    {
        if (errorCallback_)
        {
            errorCallback_();
        }
    }
    // read
    if (revents_ & (EPOLLIN | EPOLLPRI))
    {
        if (readCallback_)
        {
            readCallback_(receiveTime);
        }
    }
    // write
    if (revents_ & EPOLLOUT)
    {
        if (writeCallback_)
        {
            writeCallback_();
        }
    }
}
--------------------------------------------------------------------------------
/src/CurrentThread.cc:
--------------------------------------------------------------------------------

#include "CurrentThread.h"

namespace CurrentThread
{
    __thread int t_cachedTid = 0;

    void cacheTid()
    {
        if (t_cachedTid == 0)
        {
            t_cachedTid = static_cast<pid_t>(::syscall(SYS_gettid));
        }
    }
}

--------------------------------------------------------------------------------
/src/DefaultPoller.cc:
--------------------------------------------------------------------------------

#include <stdlib.h>

#include "Poller.h"
#include "EPollPoller.h"

Poller *Poller::newDefaultPoller(EventLoop *loop)
{
    if (::getenv("MUDUO_USE_POLL"))
    {
        return nullptr; // would create a poll(2) instance
    }
    else
    {
        return new EPollPoller(loop); // create an epoll instance
    }
}

--------------------------------------------------------------------------------
/src/EPollPoller.cc:
--------------------------------------------------------------------------------

#include <errno.h>
#include <unistd.h>
#include <string.h>

#include "EPollPoller.h"
#include "Logger.h"
#include "Channel.h"

const int kNew = -1;    // the channel has not been added to the Poller yet; Channel's index_ starts at -1
const int kAdded = 1;   // the channel has been added to the Poller
const int kDeleted = 2; // the channel has been removed from the Poller

EPollPoller::EPollPoller(EventLoop *loop)
    : Poller(loop)
    , epollfd_(::epoll_create1(EPOLL_CLOEXEC))
    , events_(kInitEventListSize) // vector<epoll_event>(16)
{
    if (epollfd_ < 0)
    {
        LOG_FATAL("epoll_create error:%d \n", errno);
    }
}

EPollPoller::~EPollPoller()
{
    ::close(epollfd_);
}

Timestamp EPollPoller::poll(int timeoutMs, ChannelList *activeChannels)
{
    // poll is called very frequently, so LOG_DEBUG would actually be more appropriate here;
    // under heavy concurrency, disabling DEBUG logging avoids the overhead
    LOG_INFO("func=%s => fd total count:%lu\n", __FUNCTION__, channels_.size());

    int numEvents = ::epoll_wait(epollfd_, &*events_.begin(), static_cast<int>(events_.size()), timeoutMs);
    int saveErrno = errno;
    Timestamp now(Timestamp::now());

    if (numEvents > 0)
    {
        LOG_INFO("%d events happened\n", numEvents); // LOG_DEBUG would fit best
        fillActiveChannels(numEvents, activeChannels);
        if (numEvents == static_cast<int>(events_.size())) // every slot was used: grow for next time
        {
            events_.resize(events_.size() * 2);
        }
    }
    else if (numEvents == 0)
    {
        LOG_DEBUG("%s timeout!\n", __FUNCTION__);
    }
    else
    {
        if (saveErrno != EINTR)
        {
            errno = saveErrno;
            LOG_ERROR("EPollPoller::poll() error!");
        }
    }
    return now;
}

// channel update/remove => EventLoop updateChannel/removeChannel => Poller updateChannel/removeChannel
void EPollPoller::updateChannel(Channel *channel)
{
    const int index = channel->index();
    LOG_INFO("func=%s => fd=%d events=%d index=%d\n", __FUNCTION__, channel->fd(), channel->events(), index);

    if (index == kNew || index == kDeleted)
    {
        if (index == kNew)
        {
            int fd = channel->fd();
            channels_[fd] = channel;
        }
        else // index == kDeleted: already present in channels_, just re-add it to epoll
        {
        }
        channel->set_index(kAdded);
        update(EPOLL_CTL_ADD, channel);
    }
    else // the channel is already registered with the Poller
    {
        if (channel->isNoneEvent())
        {
            update(EPOLL_CTL_DEL, channel);
            channel->set_index(kDeleted);
        }
        else
        {
            update(EPOLL_CTL_MOD, channel);
        }
    }
}

// Remove the channel from the Poller
void EPollPoller::removeChannel(Channel *channel)
{
    int fd = channel->fd();
    channels_.erase(fd);

    LOG_INFO("func=%s => fd=%d\n", __FUNCTION__, fd);

    int index = channel->index();
    if (index == kAdded)
    {
        update(EPOLL_CTL_DEL, channel);
    }
    channel->set_index(kNew);
}

// Fill in the active connections
void EPollPoller::fillActiveChannels(int numEvents, ChannelList *activeChannels) const
{
    for (int i = 0; i < numEvents; ++i)
    {
        Channel *channel = static_cast<Channel *>(events_[i].data.ptr);
        channel->set_revents(events_[i].events);
        activeChannels->push_back(channel); // EventLoop now has the full list of channels its Poller reported ready
    }
}

// Update the channel: in effect epoll_ctl add/mod/del
void EPollPoller::update(int operation, Channel *channel)
{
    epoll_event event;
    ::memset(&event, 0, sizeof(event));

    int fd = channel->fd();

    event.events = channel->events();
    event.data.fd = fd;       // note: data is a union, so the ptr assignment below is the one that sticks
    event.data.ptr = channel;

    if (::epoll_ctl(epollfd_, operation, fd, &event) < 0)
    {
        if (operation == EPOLL_CTL_DEL)
        {
            LOG_ERROR("epoll_ctl del error:%d\n", errno);
        }
        else
        {
            LOG_FATAL("epoll_ctl add/mod error:%d\n", errno);
        }
    }
}
--------------------------------------------------------------------------------
/src/EventLoop.cc:
--------------------------------------------------------------------------------

#include <sys/eventfd.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <memory>

#include "EventLoop.h"
#include "Logger.h"
#include "Channel.h"
#include "Poller.h"

// Prevents a single thread from creating more than one EventLoop
__thread EventLoop *t_loopInThisThread = nullptr;

// Default timeout for the Poller's IO-multiplexing call
const int kPollTimeMs = 10000; // 10000 ms = 10 s

/* After a thread is created, whether the parent or the child thread runs first is undefined.
 * Passing data between threads through an eventfd lets multiple threads synchronize without locking.
 * eventfd requires Linux kernel >= 2.6.27; it also works on 2.6.26 and earlier, but flags must be 0 there.
 * Prototype:
 *     #include <sys/eventfd.h>
 *     int eventfd(unsigned int initval, int flags);
 * Parameters:
 *     initval: the counter's initial value.
 *     flags:   EFD_NONBLOCK sets the descriptor non-blocking.
 *              EFD_CLOEXEC sets close-on-exec, so the descriptor closes automatically across exec().
 * Use cases:
 *     eventfd can be used for communication between threads of the same process,
 *     and between related processes.
 *     For unrelated processes, the eventfd would have to live in shared memory (untested).
 */
// Create the wakeupfd used to notify and wake a subReactor so it handles a newly arrived channel
int createEventfd()
{
    int evtfd = ::eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    if (evtfd < 0)
    {
        LOG_FATAL("eventfd error:%d\n", errno);
    }
    return evtfd;
}

EventLoop::EventLoop()
    : looping_(false)
    , quit_(false)
    , callingPendingFunctors_(false)
    , threadId_(CurrentThread::tid())
    , poller_(Poller::newDefaultPoller(this))
    , wakeupFd_(createEventfd())
    , wakeupChannel_(new Channel(this, wakeupFd_))
{
    LOG_DEBUG("EventLoop created %p in thread %d\n", this, threadId_);
    if (t_loopInThisThread)
    {
        LOG_FATAL("Another EventLoop %p exists in this thread %d\n", t_loopInThisThread, threadId_);
    }
    else
    {
        t_loopInThisThread = this;
    }

    wakeupChannel_->setReadCallback(
        std::bind(&EventLoop::handleRead, this)); // set wakeupFd_'s event type and the callback that runs when it fires

    wakeupChannel_->enableReading(); // every EventLoop watches the EPOLL read event of its wakeupChannel_
}

EventLoop::~EventLoop()
{
    wakeupChannel_->disableAll(); // remove all of the Channel's event interest
    wakeupChannel_->remove();     // delete the Channel from the EventLoop
    ::close(wakeupFd_);
    t_loopInThisThread = nullptr;
}

// Start the event loop
void EventLoop::loop()
{
    looping_ = true;
    quit_ = false;

    LOG_INFO("EventLoop %p start looping\n", this);

    while (!quit_)
    {
        activeChannels_.clear();
        pollRetureTime_ = poller_->poll(kPollTimeMs, &activeChannels_);
        for (Channel *channel : activeChannels_)
        {
            // The Poller reports which channels had events; the EventLoop tells each channel to handle them
            channel->handleEvent(pollRetureTime_);
        }
        /**
         * Run the callbacks this EventLoop needs to process. With >= 2 threads, the IO thread's
         * mainloop (mainReactor) mainly: accepts connections => wraps the returned connfd in a
         * Channel => TcpServer::newConnection assigns the TcpConnection to a subloop by polling.
         *
         * The mainloop queues the callback with queueInLoop (the subloop is still blocked in
         * poller_->poll); queueInLoop wakes the subloop through wakeup().
         **/
        doPendingFunctors();
    }
    LOG_INFO("EventLoop %p stop looping.\n", this);
    looping_ = false;
}

/**
 * Quit the event loop.
 * 1. If quit is called in the loop's own thread, the thread has already returned from
 *    poller_->poll inside loop() and will exit on the next check.
 * 2. If quit is called from a thread other than the EventLoop's own, the loop's
 *    epoll_wait must be woken up.
 *
 * E.g. quitting the mainloop (IO) from a subloop (worker) requires waking the mainloop's
 * poller_->poll so its loop() can finish.
 *
 * !!! Note: normally the mainloop accepts connections and hands callbacks to subloops;
 * !!! a thread-safe producer/consumer queue could implement this, but muduo instead lets the
 * !!! mainloop and subloops communicate through the wakeup() mechanism built on the eventfd-based wakeupFd_.
 **/
void EventLoop::quit()
{
    quit_ = true;

    if (!isInLoopThread())
    {
        wakeup();
    }
}

// Run cb in the current loop
void EventLoop::runInLoop(Functor cb)
{
    if (isInLoopThread()) // run the callback directly in the current EventLoop
    {
        cb();
    }
    else // called from a foreign thread: wake the EventLoop's home thread to run cb
    {
        queueInLoop(cb);
    }
}

// Queue cb and wake the loop's home thread to run it
void EventLoop::queueInLoop(Functor cb)
{
    {
        std::unique_lock<std::mutex> lock(mutex_);
        pendingFunctors_.emplace_back(cb);
    }

    /**
     * The `|| callingPendingFunctors_` part means: the loop is currently running callbacks,
     * but new callbacks were queued into pendingFunctors_ meanwhile. A wakeup write is needed
     * so the next poller_->poll() in loop() does not block (blocking would delay the newly
     * queued callbacks) and the loop goes on to run the functions in pendingFunctors_.
     **/
    if (!isInLoopThread() || callingPendingFunctors_)
    {
        wakeup(); // wake the loop's home thread
    }
}

void EventLoop::handleRead()
{
    uint64_t one = 1;
    ssize_t n = read(wakeupFd_, &one, sizeof(one));
    if (n != sizeof(one))
    {
        LOG_ERROR("EventLoop::handleRead() reads %zd bytes instead of 8\n", n);
    }
}

// Wake the loop's thread: write to wakeupFd_ so wakeupChannel gets a read event and the loop thread wakes up
void EventLoop::wakeup()
{
    uint64_t one = 1;
    ssize_t n = write(wakeupFd_, &one, sizeof(one));
    if (n != sizeof(one))
    {
        LOG_ERROR("EventLoop::wakeup() writes %zd bytes instead of 8\n", n);
    }
}

// EventLoop methods that forward to the Poller
void EventLoop::updateChannel(Channel *channel)
{
    poller_->updateChannel(channel);
}

void EventLoop::removeChannel(Channel *channel)
{
    poller_->removeChannel(channel);
}

bool EventLoop::hasChannel(Channel *channel)
{
    return poller_->hasChannel(channel);
}

void EventLoop::doPendingFunctors()
{
    std::vector<Functor> functors;
    callingPendingFunctors_ = true;

    {
        std::unique_lock<std::mutex> lock(mutex_);
        functors.swap(pendingFunctors_); // swapping shrinks the locked critical section and avoids deadlock:
                                         // if functor() ran inside the lock and called queueInLoop(), it would deadlock
    }

    for (const Functor &functor : functors)
    {
        functor(); // run the callbacks this loop needs to execute
    }

    callingPendingFunctors_ = false;
}
--------------------------------------------------------------------------------
/src/EventLoopThread.cc:
--------------------------------------------------------------------------------

#include "EventLoopThread.h"
#include "EventLoop.h"

EventLoopThread::EventLoopThread(const ThreadInitCallback &cb,
                                 const std::string &name)
    : loop_(nullptr)
    , exiting_(false)
    , thread_(std::bind(&EventLoopThread::threadFunc, this), name)
    , mutex_()
    , cond_()
    , callback_(cb)
{
}

EventLoopThread::~EventLoopThread()
{
    exiting_ = true;
    if (loop_ != nullptr)
    {
        loop_->quit();
        thread_.join();
    }
}

EventLoop *EventLoopThread::startLoop()
{
    thread_.start(); // start the underlying thread created in the Thread object thread_'s start()

    EventLoop *loop = nullptr;
    {
        std::unique_lock<std::mutex> lock(mutex_);
        cond_.wait(lock, [this](){ return loop_ != nullptr; });
        loop = loop_;
    }
    return loop;
}

// This method runs in the separate, newly created thread
void EventLoopThread::threadFunc()
{
    EventLoop loop; // create an independent EventLoop object, paired one-to-one with this thread, i.e. one loop per thread

    if (callback_)
    {
        callback_(&loop);
    }

    {
        std::unique_lock<std::mutex> lock(mutex_);
        loop_ = &loop;
        cond_.notify_one();
    }
    loop.loop(); // run the EventLoop's loop(), which starts the underlying Poller's poll()
    std::unique_lock<std::mutex> lock(mutex_);
    loop_ = nullptr;
}
--------------------------------------------------------------------------------
/src/EventLoopThreadPool.cc:
--------------------------------------------------------------------------------

#include <memory>

#include "EventLoopThreadPool.h"
#include "EventLoopThread.h"
#include "Logger.h"

EventLoopThreadPool::EventLoopThreadPool(EventLoop *baseLoop, const std::string &nameArg)
    : baseLoop_(baseLoop), name_(nameArg), started_(false), numThreads_(0), next_(0)
{
}

EventLoopThreadPool::~EventLoopThreadPool()
{
    // Don't delete loop, it's a stack variable
}

void EventLoopThreadPool::start(const ThreadInitCallback &cb)
{
    started_ = true;

    for (int i = 0; i < numThreads_; ++i)
    {
        char buf[name_.size() + 32];
        snprintf(buf, sizeof buf, "%s%d", name_.c_str(), i);
        EventLoopThread *t = new EventLoopThread(cb, buf);
        threads_.push_back(std::unique_ptr<EventLoopThread>(t));
        loops_.push_back(t->startLoop()); // creates the thread underneath, binds a new EventLoop to it, and returns that loop's address
    }

    if (numThreads_ == 0 && cb) // the whole server runs in a single thread, the baseLoop
    {
        cb(baseLoop_);
    }
}

// When running multi-threaded, baseLoop_ (mainLoop) assigns Channels to subLoops round-robin by default
EventLoop *EventLoopThreadPool::getNextLoop()
{
    // With only one thread configured there is just a mainReactor and no subReactor,
    // so polling over a single loop returns the current baseLoop_ every time
    EventLoop *loop = baseLoop_;

    // Pick the next loop by round-robin; if no extra threads were configured,
    // the branch is skipped, which amounts to returning baseLoop directly
    if (!loops_.empty())
    {
        loop = loops_[next_];
        ++next_;
        // wrap around
        if (next_ >= static_cast<int>(loops_.size()))
        {
            next_ = 0;
        }
    }

    return loop;
}

std::vector<EventLoop *> EventLoopThreadPool::getAllLoops()
{
    if (loops_.empty())
    {
        return std::vector<EventLoop *>(1, baseLoop_);
    }
    else
    {
        return loops_;
    }
}

--------------------------------------------------------------------------------
/src/InetAddress.cc:
--------------------------------------------------------------------------------

#include <string.h>
#include <stdio.h>

#include "InetAddress.h"

InetAddress::InetAddress(uint16_t port, std::string ip)
{
    ::memset(&addr_, 0, sizeof(addr_));
    addr_.sin_family = AF_INET;
    addr_.sin_port = ::htons(port); // host byte order to network byte order
    addr_.sin_addr.s_addr = ::inet_addr(ip.c_str());
}

std::string InetAddress::toIp() const
{
    // read from addr_
    char buf[64] = {0};
    ::inet_ntop(AF_INET, &addr_.sin_addr, buf, sizeof buf);
    return buf;
}

std::string InetAddress::toIpPort() const
{
    // ip:port
    char buf[64] = {0};
    ::inet_ntop(AF_INET, &addr_.sin_addr, buf, sizeof buf);
    size_t end = ::strlen(buf);
    uint16_t port = ::ntohs(addr_.sin_port);
    sprintf(buf + end, ":%u", port);
    return buf;
}

uint16_t InetAddress::toPort() const
{
    return ::ntohs(addr_.sin_port);
}

#if 0
#include <iostream>
int main()
{
    InetAddress addr(8080);
    std::cout << addr.toIpPort() << std::endl;
}
#endif
/src/Logger.cc:
--------------------------------------------------------------------------------
#include <iostream>

#include "Logger.h"
#include "Timestamp.h"

// Get the unique Logger instance (singleton)
Logger &Logger::instance()
{
    static Logger logger;
    return logger;
}

// Set the log level
void Logger::setLogLevel(int level)
{
    logLevel_ = level;
}

// Write a log entry in the form: [level] time : msg
void Logger::log(std::string msg)
{
    std::string pre = "";
    switch (logLevel_)
    {
    case INFO:
        pre = "[INFO]";
        break;
    case ERROR:
        pre = "[ERROR]";
        break;
    case FATAL:
        pre = "[FATAL]";
        break;
    case DEBUG:
        pre = "[DEBUG]";
        break;
    default:
        break;
    }

    // print the timestamp and the message
    std::cout << pre + Timestamp::now().toString() << " : " << msg << std::endl;
}
--------------------------------------------------------------------------------
/src/Poller.cc:
--------------------------------------------------------------------------------
#include "Poller.h"
#include "Channel.h"

Poller::Poller(EventLoop *loop)
    : ownerLoop_(loop)
{
}

bool Poller::hasChannel(Channel *channel) const
{
    auto it = channels_.find(channel->fd());
    return it != channels_.end() && it->second == channel;
}
--------------------------------------------------------------------------------
/src/Socket.cc:
--------------------------------------------------------------------------------
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>

#include "Socket.h"
#include "Logger.h"
#include "InetAddress.h"

Socket::~Socket()
{
    ::close(sockfd_);
}

void Socket::bindAddress(const InetAddress &localaddr)
{
    if (0 != ::bind(sockfd_, (sockaddr *)localaddr.getSockAddr(), sizeof(sockaddr_in)))
    {
        LOG_FATAL("bind sockfd:%d fail\n", sockfd_);
    }
}

void Socket::listen()
{
    if (0 != ::listen(sockfd_, 1024))
    {
        LOG_FATAL("listen sockfd:%d fail\n", sockfd_);
    }
}

int Socket::accept(InetAddress *peeraddr)
{
    /**
     * Two earlier problems, both fixed here:
     * 1. the arguments originally passed to accept were invalid
     * 2. the returned connfd was not set to non-blocking
     * Reactor model: one loop per thread, poller + non-blocking IO
     **/
    sockaddr_in addr;
    socklen_t len = sizeof(addr);
    ::memset(&addr, 0, sizeof(addr));
    // fixed: int connfd = ::accept(sockfd_, (sockaddr *)&addr, &len);
    int connfd = ::accept4(sockfd_, (sockaddr *)&addr, &len, SOCK_NONBLOCK | SOCK_CLOEXEC);
    if (connfd >= 0)
    {
        peeraddr->setSockAddr(addr);
    }
    return connfd;
}

void Socket::shutdownWrite()
{
    if (::shutdown(sockfd_, SHUT_WR) < 0)
    {
        LOG_ERROR("shutdownWrite error");
    }
}

void Socket::setTcpNoDelay(bool on)
{
    // TCP_NODELAY disables Nagle's algorithm, which batches small packets to reduce
    // their number on the wire. Setting it to 1 lets small packets be sent immediately.
    int optval = on ? 1 : 0;
    ::setsockopt(sockfd_, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(optval));
}

void Socket::setReuseAddr(bool on)
{
    // SO_REUSEADDR lets a socket bind to a port still held by another socket.
    // Useful for servers that need to restart and bind to the same port.
    int optval = on ? 1 : 0;
    ::setsockopt(sockfd_, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
}

void Socket::setReusePort(bool on)
{
    // SO_REUSEPORT lets multiple sockets on the same host bind to the same port.
    // Useful for load-balancing incoming connections across threads or processes.
    int optval = on ? 1 : 0;
    ::setsockopt(sockfd_, SOL_SOCKET, SO_REUSEPORT, &optval, sizeof(optval));
}

void Socket::setKeepAlive(bool on)
{
    // SO_KEEPALIVE periodically probes the peer on an established connection.
    // If the peer stops responding, the connection is considered broken and closed.
    // Useful for detecting dead peers on the network.
    int optval = on ? 1 : 0;
    ::setsockopt(sockfd_, SOL_SOCKET, SO_KEEPALIVE, &optval, sizeof(optval));
}
--------------------------------------------------------------------------------
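All four option setters in `Socket` reduce to the same `setsockopt(2)` shape. A small standalone sketch of that pattern follows; the file descriptor and option choices below are examples, not library code:

```cpp
// Hypothetical snippet showing the setsockopt() pattern used by the Socket setters above
// (TCP_NODELAY / SO_REUSEADDR / SO_REUSEPORT / SO_KEEPALIVE all follow the same shape).
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>
#include <cstdio>

int main()
{
    int fd = ::socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
    int on = 1;
    // disable Nagle's algorithm so small packets are sent immediately
    ::setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
    // allow fast restart on the same port
    ::setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

    int got = 0;
    socklen_t len = sizeof(got);
    ::getsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &got, &len); // read the option back
    std::printf("TCP_NODELAY = %d\n", got);
    ::close(fd);
    return 0;
}
```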
/src/TcpConnection.cc:
--------------------------------------------------------------------------------
#include <functional>
#include <string>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <string.h>
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <fcntl.h>  // for open
#include <unistd.h> // for close

#include "TcpConnection.h"
#include "Logger.h"
#include "Socket.h"
#include "Channel.h"
#include "EventLoop.h"

static EventLoop *CheckLoopNotNull(EventLoop *loop)
{
    if (loop == nullptr)
    {
        LOG_FATAL("%s:%s:%d mainLoop is null!\n", __FILE__, __FUNCTION__, __LINE__);
    }
    return loop;
}

TcpConnection::TcpConnection(EventLoop *loop,
                             const std::string &nameArg,
                             int sockfd,
                             const InetAddress &localAddr,
                             const InetAddress &peerAddr)
    : loop_(CheckLoopNotNull(loop))
    , name_(nameArg)
    , state_(kConnecting)
    , reading_(true)
    , socket_(new Socket(sockfd))
    , channel_(new Channel(loop, sockfd))
    , localAddr_(localAddr)
    , peerAddr_(peerAddr)
    , highWaterMark_(64 * 1024 * 1024) // 64M
{
    // Register callbacks on the channel below. When the poller notifies the channel that
    // an event it is interested in occurred, the channel invokes the matching callback.
    channel_->setReadCallback(
        std::bind(&TcpConnection::handleRead, this, std::placeholders::_1));
    channel_->setWriteCallback(
        std::bind(&TcpConnection::handleWrite, this));
    channel_->setCloseCallback(
        std::bind(&TcpConnection::handleClose, this));
    channel_->setErrorCallback(
        std::bind(&TcpConnection::handleError, this));

    LOG_INFO("TcpConnection::ctor[%s] at fd=%d\n", name_.c_str(), sockfd);
    socket_->setKeepAlive(true);
}

TcpConnection::~TcpConnection()
{
    LOG_INFO("TcpConnection::dtor[%s] at fd=%d state=%d\n", name_.c_str(), channel_->fd(), (int)state_);
}

void TcpConnection::send(const std::string &buf)
{
    if (state_ == kConnected)
    {
        if (loop_->isInLoopThread()) // single-reactor case: the user calls conn->send on the loop's own thread
        {
            sendInLoop(buf.c_str(), buf.size());
        }
        else
        {
            loop_->runInLoop(
                std::bind(&TcpConnection::sendInLoop, this, buf.c_str(), buf.size()));
        }
    }
}

/**
 * Send data. The application may produce data faster than the kernel can drain the
 * socket, so pending data is stored in the output buffer, and a high-water-mark
 * callback is installed.
 **/
void TcpConnection::sendInLoop(const void *data, size_t len)
{
    ssize_t nwrote = 0;
    size_t remaining = len;
    bool faultError = false;

    if (state_ == kDisconnected) // shutdown was already called on this connection; no more sending
    {
        LOG_ERROR("disconnected, give up writing");
        return; // fixed: must bail out instead of writing to a disconnected connection
    }

    // channel_ is writing for the first time, or the buffer holds no pending data
    if (!channel_->isWriting() && outputBuffer_.readableBytes() == 0)
    {
        nwrote = ::write(channel_->fd(), data, len);
        if (nwrote >= 0)
        {
            remaining = len - nwrote;
            if (remaining == 0 && writeCompleteCallback_)
            {
                // everything was sent right here, so no need to register EPOLLOUT on the channel
                loop_->queueInLoop(
                    std::bind(writeCompleteCallback_, shared_from_this()));
            }
        }
        else // nwrote < 0
        {
            nwrote = 0;
            if (errno != EWOULDBLOCK) // EWOULDBLOCK (== EAGAIN) is the normal "would block" return for a non-blocking fd
            {
                LOG_ERROR("TcpConnection::sendInLoop");
                if (errno == EPIPE || errno == ECONNRESET) // SIGPIPE / RESET
                {
                    faultError = true;
                }
            }
        }
    }
    /**
     * This write did not push out all of the data; the remainder must be saved into the
     * output buffer. Then EPOLLOUT is registered on the channel: once the poller sees free
     * space in the TCP send buffer, it notifies the matching sock->channel, which calls
     * the writeCallback_ registered on it. The channel's writeCallback_ is in fact the
     * handleWrite callback set by TcpConnection, which finishes flushing outputBuffer_.
     **/
    if (!faultError && remaining > 0)
    {
        // length of the data already waiting in the output buffer
        size_t oldLen = outputBuffer_.readableBytes();
        if (oldLen + remaining >= highWaterMark_ && oldLen < highWaterMark_ && highWaterMarkCallback_)
        {
            loop_->queueInLoop(
                std::bind(highWaterMarkCallback_, shared_from_this(), oldLen + remaining));
        }
        outputBuffer_.append((char *)data + nwrote, remaining);
        if (!channel_->isWriting())
        {
            channel_->enableWriting(); // must register the channel's write event, otherwise the poller never delivers EPOLLOUT
        }
    }
}
void TcpConnection::shutdown()
{
    if (state_ == kConnected)
    {
        setState(kDisconnecting);
        loop_->runInLoop(
            std::bind(&TcpConnection::shutdownInLoop, this));
    }
}

void TcpConnection::shutdownInLoop()
{
    if (!channel_->isWriting()) // outputBuffer_ has been fully flushed
    {
        socket_->shutdownWrite();
    }
}

// Connection established
void TcpConnection::connectEstablished()
{
    setState(kConnected);
    channel_->tie(shared_from_this());
    channel_->enableReading(); // register the channel's EPOLLIN read event with the poller

    // new connection established: run the user's connection callback
    connectionCallback_(shared_from_this());
}

// Connection destroyed
void TcpConnection::connectDestroyed()
{
    if (state_ == kConnected)
    {
        setState(kDisconnected);
        channel_->disableAll(); // remove all of the channel's interested events from the poller
        connectionCallback_(shared_from_this());
    }
    channel_->remove(); // remove the channel itself from the poller
}

// "Read" is from the server's point of view: when the peer client has data arriving, the
// server sees EPOLLIN and this fd's callback fires; handleRead drains the peer's data
void TcpConnection::handleRead(Timestamp receiveTime)
{
    int savedErrno = 0;
    ssize_t n = inputBuffer_.readFd(channel_->fd(), &savedErrno);
    if (n > 0) // data arrived
    {
        // a readable event occurred on an established connection: invoke the user's
        // onMessage callback; shared_from_this yields a smart pointer to this TcpConnection
        messageCallback_(shared_from_this(), &inputBuffer_, receiveTime);
    }
    else if (n == 0) // the client closed the connection
    {
        handleClose();
    }
    else // an error occurred
    {
        errno = savedErrno;
        LOG_ERROR("TcpConnection::handleRead");
        handleError();
    }
}

void TcpConnection::handleWrite()
{
    if (channel_->isWriting())
    {
        int savedErrno = 0;
        ssize_t n = outputBuffer_.writeFd(channel_->fd(), &savedErrno);
        if (n > 0)
        {
            outputBuffer_.retrieve(n); // consume n bytes of the readable region, advancing the reader index
            if (outputBuffer_.readableBytes() == 0)
            {
                channel_->disableWriting();
                if (writeCompleteCallback_)
                {
                    // the TcpConnection lives in its subloop: enqueue the callback into pendingFunctors_
                    loop_->queueInLoop(
                        std::bind(writeCompleteCallback_, shared_from_this()));
                }
                if (state_ == kDisconnecting)
                {
                    shutdownInLoop(); // finish the pending shutdown in the owning loop
                }
            }
        }
        else
        {
            LOG_ERROR("TcpConnection::handleWrite");
        }
    }
    else
    {
        LOG_ERROR("TcpConnection fd=%d is down, no more writing", channel_->fd());
    }
}

void TcpConnection::handleClose()
{
    LOG_INFO("TcpConnection::handleClose fd=%d state=%d\n", channel_->fd(), (int)state_);
    setState(kDisconnected);
    channel_->disableAll();

    TcpConnectionPtr connPtr(shared_from_this());
    connectionCallback_(connPtr); // connection callback
    closeCallback_(connPtr);      // close callback, i.e. TcpServer::removeConnection // must be the last line
}

void TcpConnection::handleError()
{
    int optval;
    socklen_t optlen = sizeof optval;
    int err = 0;
    if (::getsockopt(channel_->fd(), SOL_SOCKET, SO_ERROR, &optval, &optlen) < 0)
    {
        err = errno;
    }
    else
    {
        err = optval;
    }
    LOG_ERROR("TcpConnection::handleError name:%s - SO_ERROR:%d\n", name_.c_str(), err);
}
// Newly added zero-copy send function
void TcpConnection::sendFile(int fileDescriptor, off_t offset, size_t count) {
    if (connected()) {
        if (loop_->isInLoopThread()) { // is the current thread this loop's thread?
            sendFileInLoop(fileDescriptor, offset, count);
        } else { // if not, wake the thread that runs this TcpConnection's loop and execute there
            loop_->runInLoop(
                std::bind(&TcpConnection::sendFileInLoop, shared_from_this(), fileDescriptor, offset, count));
        }
    } else {
        LOG_ERROR("TcpConnection::sendFile - not connected");
    }
}

// Run sendfile inside the event loop
void TcpConnection::sendFileInLoop(int fileDescriptor, off_t offset, size_t count) {
    ssize_t bytesSent = 0;    // bytes sent so far
    size_t remaining = count; // bytes still to send
    bool faultError = false;  // error flag

    if (state_ == kDisconnecting) { // the connection is going away; no more data should be sent
        LOG_ERROR("disconnected, give up writing");
        return;
    }

    // the channel is writing for the first time, or outputBuffer holds no pending data
    if (!channel_->isWriting() && outputBuffer_.readableBytes() == 0) {
        bytesSent = sendfile(socket_->fd(), fileDescriptor, &offset, remaining);
        if (bytesSent >= 0) {
            remaining -= bytesSent;
            if (remaining == 0 && writeCompleteCallback_) {
                // remaining == 0 means everything was sent; no need to watch for write events
                loop_->queueInLoop(std::bind(writeCompleteCallback_, shared_from_this()));
            }
        } else { // bytesSent < 0
            if (errno != EWOULDBLOCK) { // EWOULDBLOCK (== EAGAIN) is the normal non-blocking "try again"; anything else is a real error
                LOG_ERROR("TcpConnection::sendFileInLoop");
            }
            if (errno == EPIPE || errno == ECONNRESET) {
                faultError = true;
            }
        }
    }
    // handle the remaining data
    if (!faultError && remaining > 0) {
        // keep sending the rest
        loop_->queueInLoop(
            std::bind(&TcpConnection::sendFileInLoop, shared_from_this(), fileDescriptor, offset, remaining));
    }
}
--------------------------------------------------------------------------------
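`sendFileInLoop()` wraps `sendfile(2)`, which moves bytes from a file descriptor to a socket inside the kernel, skipping user-space buffers entirely. Here is a self-contained sketch of the call, using a Unix socketpair as a stand-in for a connected TCP socket; the file path and sizes are arbitrary example values:

```cpp
// Minimal standalone sendfile(2) sketch, illustrating the zero-copy call that
// TcpConnection::sendFileInLoop() wraps. On success, the kernel advances `offset`
// past the bytes sent, exactly as the loop above relies on.
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main()
{
    int file = ::open("/etc/hostname", O_RDONLY); // any readable file works
    int sv[2];
    ::socketpair(AF_UNIX, SOCK_STREAM, 0, sv);    // stand-in for a connected TCP socket

    off_t offset = 0;
    ssize_t sent = ::sendfile(sv[1], file, &offset, 4096); // kernel-to-kernel copy
    std::printf("sent %zd bytes, offset now %ld\n", sent, (long)offset);

    char buf[4096];
    ssize_t got = ::read(sv[0], buf, sizeof(buf)); // the peer end sees the file bytes
    std::printf("peer received %zd bytes\n", got);

    ::close(file);
    ::close(sv[0]);
    ::close(sv[1]);
    return 0;
}
```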
/src/TcpServer.cc:
--------------------------------------------------------------------------------
#include <string.h>
#include <sys/socket.h>

#include "TcpServer.h"
#include "Logger.h"
#include "TcpConnection.h"

static EventLoop *CheckLoopNotNull(EventLoop *loop)
{
    if (loop == nullptr)
    {
        LOG_FATAL("%s:%s:%d mainLoop is null!\n", __FILE__, __FUNCTION__, __LINE__);
    }
    return loop;
}

TcpServer::TcpServer(EventLoop *loop,
                     const InetAddress &listenAddr,
                     const std::string &nameArg,
                     Option option)
    : loop_(CheckLoopNotNull(loop))
    , ipPort_(listenAddr.toIpPort())
    , name_(nameArg)
    , acceptor_(new Acceptor(loop, listenAddr, option == kReusePort))
    , threadPool_(new EventLoopThreadPool(loop, name_))
    , connectionCallback_()
    , messageCallback_()
    , nextConnId_(1)
    , started_(0)
{
    // When a new client connects, the acceptChannel_ bound inside Acceptor sees a read
    // event; its handleRead() runs and invokes the TcpServer::newConnection callback
    acceptor_->setNewConnectionCallback(
        std::bind(&TcpServer::newConnection, this, std::placeholders::_1, std::placeholders::_2));
}

TcpServer::~TcpServer()
{
    for (auto &item : connections_)
    {
        TcpConnectionPtr conn(item.second);
        item.second.reset(); // reset the original shared_ptr so only the stack-local conn still
                             // points at the object; once conn leaves scope, the object can be released
        // destroy the connection
        conn->getLoop()->runInLoop(
            std::bind(&TcpConnection::connectDestroyed, conn));
    }
}

// Set the number of underlying subloops
void TcpServer::setThreadNum(int numThreads)
{
    threadPool_->setThreadNum(numThreads); // fixed: a local `int numThreads_` used to shadow the argument here
}

// Start the server listening
void TcpServer::start()
{
    if (started_.fetch_add(1) == 0) // prevents a TcpServer object from being started multiple times
    {
        threadPool_->start(threadInitCallback_); // start the underlying loop thread pool
        loop_->runInLoop(std::bind(&Acceptor::listen, acceptor_.get()));
    }
}

// Run by the acceptor when a new client connects: takes the connection received by the
// mainLoop (acceptChannel_ saw a read event) and hands it to a subLoop by round-robin
void TcpServer::newConnection(int sockfd, const InetAddress &peerAddr)
{
    // round-robin: pick a subLoop to manage the channel for connfd
    EventLoop *ioLoop = threadPool_->getNextLoop();
    char buf[64] = {0};
    snprintf(buf, sizeof buf, "-%s#%d", ipPort_.c_str(), nextConnId_);
    ++nextConnId_; // not atomic: it only runs in the mainloop, so there is no thread-safety issue
    std::string connName = name_ + buf;

    LOG_INFO("TcpServer::newConnection [%s] - new connection [%s] from %s\n",
             name_.c_str(), connName.c_str(), peerAddr.toIpPort().c_str());

    // fetch the local IP address and port bound to sockfd
    sockaddr_in local;
    ::memset(&local, 0, sizeof(local));
    socklen_t addrlen = sizeof(local);
    if (::getsockname(sockfd, (sockaddr *)&local, &addrlen) < 0)
    {
        LOG_ERROR("sockets::getLocalAddr");
    }

    InetAddress localAddr(local);
    TcpConnectionPtr conn(new TcpConnection(ioLoop,
                                            connName,
                                            sockfd,
                                            localAddr,
                                            peerAddr));
    connections_[connName] = conn;
    // The callbacks below were set by the user on TcpServer and are forwarded to
    // TcpConnection. The four bound to the Channel (handleRead, handleWrite, ...) were set
    // by TcpConnection itself; the callbacks below are invoked from those handleXxx functions
    conn->setConnectionCallback(connectionCallback_);
    conn->setMessageCallback(messageCallback_);
    conn->setWriteCompleteCallback(writeCompleteCallback_);

    // set the callback that closes a connection
    conn->setCloseCallback(
        std::bind(&TcpServer::removeConnection, this, std::placeholders::_1));

    ioLoop->runInLoop(
        std::bind(&TcpConnection::connectEstablished, conn));
}

void TcpServer::removeConnection(const TcpConnectionPtr &conn)
{
    loop_->runInLoop(
        std::bind(&TcpServer::removeConnectionInLoop, this, conn));
}

void TcpServer::removeConnectionInLoop(const TcpConnectionPtr &conn)
{
    LOG_INFO("TcpServer::removeConnectionInLoop [%s] - connection %s\n",
             name_.c_str(), conn->name().c_str());

    connections_.erase(conn->name());
    EventLoop *ioLoop = conn->getLoop();
    ioLoop->queueInLoop(
        std::bind(&TcpConnection::connectDestroyed, conn));
}
--------------------------------------------------------------------------------
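`TcpServer::start()` stays idempotent because `fetch_add` returns the previous value, so only the first caller observes 0. A tiny sketch of that start-once guard, with placeholder printouts instead of the real pool and listener:

```cpp
// Sketch of the start-once guard used in TcpServer::start():
// fetch_add returns the previous value, so only the first caller sees 0.
#include <atomic>
#include <cstdio>

std::atomic_int started(0);

void start()
{
    if (started.fetch_add(1) == 0)
        std::printf("starting thread pool and listener\n");
    else
        std::printf("already started, doing nothing\n");
}

int main()
{
    start(); // first call does the work
    start(); // second call is a no-op
    return 0;
}
```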
/src/Thread.cc:
--------------------------------------------------------------------------------
#include "Thread.h"
#include "CurrentThread.h"

#include <semaphore.h>

std::atomic_int Thread::numCreated_(0);

Thread::Thread(ThreadFunc func, const std::string &name)
    : started_(false)
    , joined_(false)
    , tid_(0)
    , func_(std::move(func))
    , name_(name)
{
    setDefaultName();
}

Thread::~Thread()
{
    if (started_ && !joined_)
    {
        thread_->detach(); // std::thread supports detaching; a detached thread cleans itself up when it finishes (non-blocking)
    }
}

void Thread::start() // one Thread object records the details of one new thread
{
    started_ = true;
    sem_t sem;
    sem_init(&sem, false, 0); // false: not shared between processes
    // launch the thread
    thread_ = std::shared_ptr<std::thread>(new std::thread([&]() {
        tid_ = CurrentThread::tid(); // fetch the thread's tid
        sem_post(&sem);
        func_(); // the new thread runs the thread function
    }));

    // must wait here until the newly created thread above has published its tid
    sem_wait(&sem);
    sem_destroy(&sem); // fixed: release the semaphore once the handshake is done
}

// The difference between join() and detach() in C++ std::thread:
// https://blog.nowcoder.net/n/8fcd9bb6e2e94d9596cf0a45c8e5858a
void Thread::join()
{
    joined_ = true;
    thread_->join();
}

void Thread::setDefaultName()
{
    int num = ++numCreated_;
    if (name_.empty())
    {
        char buf[32] = {0};
        snprintf(buf, sizeof buf, "Thread%d", num);
        name_ = buf;
    }
}
--------------------------------------------------------------------------------
/src/Timestamp.cc:
--------------------------------------------------------------------------------
#include <time.h>

#include "Timestamp.h"

Timestamp::Timestamp() : microSecondsSinceEpoch_(0)
{
}

Timestamp::Timestamp(int64_t microSecondsSinceEpoch)
    : microSecondsSinceEpoch_(microSecondsSinceEpoch)
{
}

Timestamp Timestamp::now()
{
    return Timestamp(time(NULL));
}

std::string Timestamp::toString() const
{
    char buf[128] = {0};
    // fixed: localtime expects a time_t*, not an int64_t*, so convert explicitly
    time_t seconds = static_cast<time_t>(microSecondsSinceEpoch_);
    tm *tm_time = localtime(&seconds);
    snprintf(buf, 128, "%4d/%02d/%02d %02d:%02d:%02d",
             tm_time->tm_year + 1900,
             tm_time->tm_mon + 1,
             tm_time->tm_mday,
             tm_time->tm_hour,
             tm_time->tm_min,
             tm_time->tm_sec);
    return buf;
}

// #include <iostream>
// int main() {
//     std::cout << Timestamp::now().toString() << std::endl;
//     return 0;
// }
--------------------------------------------------------------------------------
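`Thread::start()` above orders thread creation with a POSIX semaphore, so the caller never returns before `tid_` is valid. A standalone sketch of the same handshake; the tid value is a stand-in, not `CurrentThread::tid()`:

```cpp
// Standalone sketch of the semaphore handshake in Thread::start(): the creating
// thread blocks on sem_wait() until the new thread has recorded its tid.
#include <semaphore.h>
#include <thread>
#include <cstdio>

int main()
{
    sem_t sem;
    sem_init(&sem, 0, 0); // 0: not shared between processes, initial count 0

    long tid = 0;
    std::thread t([&]() {
        tid = 12345;    // stand-in for tid_ = CurrentThread::tid()
        sem_post(&sem); // signal: the tid is now valid
        // ... the real thread would run func_() here ...
    });

    sem_wait(&sem); // start() does not return until tid_ is filled in
    std::printf("new thread tid = %ld\n", tid);

    t.join();
    sem_destroy(&sem);
    return 0;
}
```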