├── .gitignore ├── .travis.yml ├── Cargo.toml ├── LICENSE ├── README.md ├── doc └── getting_started.md ├── src ├── channel.rs ├── epoll.rs ├── event.rs ├── frame_reader.rs ├── frame_writer.rs ├── kqueue.rs ├── lib.rs ├── line_reader.rs ├── notification.rs ├── poller.rs ├── registrar.rs ├── timer.rs ├── timer_heap.rs ├── timerfd.rs └── user_event.rs └── tests ├── channel_test.rs ├── edge-trigger-test.rs ├── multithread-example.rs └── timer_test.rs /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | *.swp 3 | Cargo.lock 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: 3 | - stable 4 | - beta 5 | - nightly 6 | matrix: 7 | include: 8 | - os: osx 9 | rust: stable 10 | allow_failures: 11 | - rust: nightly 12 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "amy" 3 | version = "0.10.0" 4 | authors = ["Andrew J. Stone "] 5 | description = "Polling and Registration abstractions around kqueue and epoll for multithreaded async network programming" 6 | repository = "https://github.com/andrewjstone/amy" 7 | keywords = ["async", "epoll", "kqueue", "eventloop", "timer"] 8 | license = "Apache-2.0" 9 | 10 | [features] 11 | # On linux, don't use timerfd. Instead store timers in a binary heap and utilize the epoll timeout. 12 | no_timerfd = [] 13 | 14 | [dependencies] 15 | libc = "0.2" 16 | 17 | [dependencies.nix] 18 | version = "0.10" 19 | 20 | [dev-dependencies] 21 | assert_matches = "1.0" 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build 2 | Status](https://travis-ci.org/andrewjstone/amy.svg?branch=master)](https://travis-ci.org/andrewjstone/amy) 3 | 4 | [API Documentation](https://docs.rs/amy) 5 | 6 | ### Usage 7 | 8 | Add the following to your `Cargo.toml` 9 | 10 | ```toml 11 | [dependencies] 12 | amy = "0.10" 13 | ``` 14 | 15 | Add this to your crate root 16 | 17 | ```rust 18 | extern crate amy; 19 | ``` 20 | 21 | ### Introduction 22 | 23 | Amy is a Rust library supporting asynchronous I/O by abstracting over the kernel pollers 24 | [kqueue](https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2) and 25 | [epoll](http://man7.org/linux/man-pages/man7/epoll.7.html). Amy has the following goals behind its 26 | design: 27 | 28 | * Clean I/O Abstractions - Don't require users to understand anything about the internals of kernel 29 | pollers such as triggering modes, filters, file descriptors, etc... 30 | 31 | * Minimal Configuration - In line with clean abstractions, choices of polling 32 | modes are made inside the library in order to provide the desired semantics 33 | without the user having to understand the low-level details 34 | 35 | * Performance - Amy performs zero run-time allocations in the poller and registrar after initial 36 | startup and limits system calls via non-blocking algorithms where possible 37 | 38 | * Minimal implementation - Amy implements just enough code to get the job done. There isn't an 39 | abundance of types or traits. The code should also be readable in a linear fashion without 40 | jumping from file to file. This should make auditing for both correctness and performance 41 | easier. 42 | 43 | * Small and consistent API - There are only a few concepts to understand, and a few functions to 44 | use. 45 | 46 | * Reuse of Rust standard types and traits - Instead of creating a wrapper around every pollable 47 | I/O type such as TcpStreams, the library can use the standard library types directly. The 48 | only requirement is that these types implement the `AsRawFd` trait and are pollable by the 49 | kernel mechanism. 50 | 51 | The best way to get started writing your code with Amy is to take a look at the [Getting Started 52 | Guide](https://github.com/andrewjstone/amy/blob/master/doc/getting_started.md). 53 | 54 | ### How is this different from Mio? 55 | 56 | [Mio](https://github.com/carllerche/mio/) is a fantastic project from which Amy has cribbed many 57 | ideas. However, the two are distinct in a few specific areas. The core difference is that Mio is 58 | inherently single-threaded: registrations must be made on the same thread as the poller, and the 59 | poll loop must be woken up in order to add registrations. In contrast, Amy allows registrations to be 60 | made on a separate thread from the poller without waking it. Amy also provides a smaller code base 61 | dedicated to async network programming. It does not allow arbitrary registration of events with the 62 | kernel poller, although this could be easily provided. Like Mio, Amy is a building block and the 63 | choice of whether to use one or the other is simply one of preference. 64 | 65 | The choice to use Mio or Amy is not necessarily clear, so a short list of features is given below, 66 | along with some (subjective) use cases showing when to choose one or the other.
67 | 68 | Choose Mio if you: 69 | * Need Windows support 70 | * Are writing a single-threaded server 71 | * Want to use the canonical Rust library for async I/O 72 | 73 | Choose Amy if you: 74 | * Only need `*nix` support 75 | * Are writing a multi-threaded server requiring cross-thread registration 76 | * Want a small, easily auditable library, with little unsafe code 77 | * Are comfortable using something newer and less proven 78 | 79 | ### Limitations 80 | * Only works on systems that implement epoll and kqueue (Linux, BSDs, macOS, etc...) 81 | * Doesn't work on Windows, although I believe it's possible to implement Poller and Registrar 82 | types on top of IOCP 83 | -------------------------------------------------------------------------------- /doc/getting_started.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | Amy is an opinionated library that wraps kqueue and epoll to provide platform independent polling 4 | and registration abstractions useful for multithreaded asynchronous network programming. The main 5 | goal of Amy is to allow polling and registration of non-blocking async I/O types (sockets) to take 6 | place on separate threads. Therefore, Amy is not an event loop, and is most useful when used in a 7 | multi-threaded application. 8 | 9 | On registration of a socket, Amy will return a unique id. This id is to be used along with the socket 10 | when re-registering for new events. Ownership of the socket is never transferred to either the 11 | registrar or the poller. Only the raw file descriptor is copied from the socket when registering. On 12 | notification, the unique id and event type (Read, Write, or Both) will be returned from the Poller. 13 | 14 | A benefit of decoupling the Poller and Registrar is that the Poller thread never has to perform any 15 | serialization or reading/writing to the socket. It only has to poll for events and notify the owner 16 | of the socket that an event is ready. This can be done via a channel or other means. Any work 17 | needing to be done can be load balanced among threads by transferring ownership of the socket 18 | temporarily then re-registering as necessary. An additional benefit is that there is no need to wake 19 | up the polling thread when registering a socket, because the polling thread isn't intended to 20 | perform registrations. 21 | 22 | Additionally, Amy provides support for polling on native timers and mpsc channel receivers. This 23 | allows a complete solution for building async network-based applications. 24 | 25 | The rest of this guide will detail the core types and concepts and show examples of how Amy should 26 | be used. 27 | 28 | # Poller and Registrar 29 | 30 | There are 2 core abstractions in Amy: the 31 | [Poller](https://github.com/andrewjstone/amy/blob/master/src/poller.rs) and the 32 | [Registrar](https://github.com/andrewjstone/amy/blob/master/src/registrar.rs). These two 33 | abstractions are coupled via a unique instance of the kernel polling mechanism. A poller waits for 34 | events that are registered with a registrar. Since the registrar and poller are coupled via a kernel 35 | mechanism, registrations for events can take place on a different thread from the poller, although 36 | this is not required. The following example shows an instantiation of a poller on one thread and the 37 | registering of TCP sockets to implement a server on another thread.
Polling returns 38 | [Notifications](https://github.com/andrewjstone/amy/blob/master/src/notification.rs) which contain 39 | the unique id of a registered object and whether the object is readable, writable or both. In this 40 | example the notifications are forwarded to the registrar thread so that they can be acted upon, 41 | since the registrar thread owns the sockets. 42 | 43 | ```Rust 44 | extern crate amy; 45 | 46 | use std::net::{TcpListener, TcpStream}; 47 | use std::thread; 48 | use std::collections::HashMap; use std::sync::mpsc::channel; 49 | use amy::{Poller, Event}; 50 | 51 | const IP: &'static str = "127.0.0.1:10002"; 52 | 53 | let mut poller = Poller::new().unwrap(); 54 | 55 | // The registrar is coupled to the specific poller instance 56 | let registrar = poller.get_registrar(); 57 | 58 | // We need a channel to send poll events from the poller thread to the registrar/worker thread 59 | let (tx, rx) = channel(); 60 | 61 | let handle = thread::spawn(move || { 62 | // We need to configure the listener in non-blocking mode or we're going to have a bad time 63 | let listener = TcpListener::bind(IP).unwrap(); 64 | listener.set_nonblocking(true).unwrap(); 65 | 66 | // register the listener for Read events and get back the unique id for the listener 67 | let listener_id = registrar.register(&listener, Event::Read).unwrap(); 68 | 69 | // Store a map of connections by unique id 70 | let mut connections = HashMap::new(); 71 | 72 | loop { 73 | let notification = rx.recv().unwrap(); 74 | if notification.id == listener_id { 75 | // We have a new connection that we need to accept. Let's do that and make it non-blocking 76 | let (mut socket, _) = listener.accept().unwrap(); 77 | socket.set_nonblocking(true).unwrap(); 78 | 79 | // Let's register the socket for Read + Write Events 80 | let socket_id = registrar.register(&socket, Event::Both).unwrap(); 81 | 82 | // Store the socket in the hashmap so we know which connection to read from or write to. 83 | // In practice we'd also have to maintain other state such as whether the socket is currently 84 | // readable or writable, what data has already been read or written, and the position in the 85 | // stream. Helpers will be shown in the section 'Reading from and Writing to non-blocking 86 | // sockets' 87 | connections.insert(socket_id, socket); 88 | } else { 89 | if let Some(conn) = connections.get_mut(&notification.id) { 90 | // Read from and/or write to the socket 91 | } 92 | } 93 | } 94 | }); 95 | 96 | loop { 97 | // Poll for kernel events. Timeout after 5 seconds. 98 | let notifications = poller.wait(5000).unwrap(); 99 | 100 | // Send notifications to the registrar thread 101 | for n in notifications { 102 | tx.send(n).unwrap(); 103 | } 104 | } 105 | 106 | // Don't forget to join your threads 107 | handle.join().unwrap(); 108 | 109 | ``` 110 | 111 | # Reading from and writing to sockets 112 | To simplify introduction of the Poller and Registrar types, reading from and writing to non-blocking 113 | sockets was elided from the previous example. Since non-blocking sockets can return partial data in 114 | reads, or only allow writing some of the output intended at a time, they can be difficult to work 115 | with. For reads, the user must keep track of prior data read in a buffer and wait for the socket to 116 | become ready again so it can read the rest of a message. During writes, the user must maintain a 117 | cursor of the current position in the data to be written. Furthermore, writes may occur from other 118 | user code when the socket is not ready to be written.
These writes must be queued for sending. 119 | 120 | There are, of course, a myriad of ways to handle these issues. However, to simplify the user experience 121 | and implementation, Amy provides a few helper types to manage reader and writer state. Two of these 122 | are the [FrameReader](https://github.com/andrewjstone/amy/blob/master/src/frame_reader.rs) and 123 | [FrameWriter](https://github.com/andrewjstone/amy/blob/master/src/frame_writer.rs) types, which 124 | allow reading and writing messages framed by 4 byte size headers. The messages themselves can be 125 | encoded using any format, but for this example we will demonstrate reading and writing using plain 126 | old ASCII data. Note that in order to be useful, the readers and writers should be bundled with the 127 | socket in a structure that can be retrieved when a notification with the matching id arrives from 128 | the poller. 129 | 130 | ```Rust 131 | use std::collections::HashMap; use std::io; use std::net::TcpStream; use std::str; 132 | use amy::{FrameReader, FrameWriter, Notification, Event, Registrar}; 133 | 134 | struct Conn { 135 | sock: TcpStream, 136 | reader: FrameReader, 137 | writer: FrameWriter 138 | } 139 | 140 | // Assume only TcpStream notifications for now. Error handling is done by the (elided) caller. 141 | fn handle_poll_notification(notification: Notification, 142 | registrar: &Registrar, 143 | connections: &mut HashMap<usize, Conn>) -> io::Result<()> { 144 | if let Some(conn) = connections.get_mut(&notification.id) { 145 | match notification.event { 146 | Event::Read => { 147 | // Try to read the data from the connection sock. Ignore the amount of bytes read. 148 | let _ = try!(conn.reader.read(&mut conn.sock)); 149 | 150 | // Iterate through all available complete messages. Note that this iterator is mutable 151 | // in a non-traditional sense. It returns each complete message only once and removes it 152 | // from the reader. 153 | for msg in conn.reader.iter_mut() { 154 | println!("Received a complete message: {:?}", str::from_utf8(&msg).unwrap()); 155 | } 156 | }, 157 | Event::Write => { 158 | // Attempt to write *all* existing data queued for writing. `None` as the second 159 | // parameter means no new data. 160 | let _ = try!(conn.writer.write(&mut conn.sock, None)); 161 | }, 162 | Event::Both => { 163 | // Do a combination of the above clauses :) 164 | ... 165 | } 166 | } 167 | } 168 | Ok(()) 169 | } 170 | 171 | // Write data to some socket if possible 172 | // This is called directly from client code and does not get called after poll notifications 173 | fn user_write(id: usize, 174 | registrar: &Registrar, 175 | connections: &mut HashMap<usize, Conn>, 176 | data: Vec<u8>) -> io::Result<()> { 177 | if let Some(conn) = connections.get_mut(&id) { 178 | try!(conn.writer.write(&mut conn.sock, Some(data))); 179 | } 180 | Ok(()) 181 | } 182 | 183 | ``` 184 | 185 | # Error handling 186 | When a socket encounters an error, it needs to be deregistered from the event loop. Assuming an 187 | error is returned to the caller, the caller can simply do the following, using the same connection structure 188 | as above: 189 | 190 | ```Rust 191 | if let Some(conn) = connections.remove(&id) { 192 | registrar.deregister(conn.sock); 193 | } 194 | ``` 195 | 196 | # Timers 197 | In most network code it's useful to be able to periodically send messages or decide when a 198 | connection is idle.
For these use cases, Amy provides support for native timers using 199 | [TimerFd](http://man7.org/linux/man-pages/man2/timerfd_create.2.html) registered with epoll on Linux 200 | and Android, and the built-in timer support in kqueue on other systems. Note that while these native 201 | timers are all you need to get started, they are somewhat heavyweight in that they require both system 202 | calls and waking up the poller in order to be used. For most applications, only a single (or a 203 | few) native timers should be used as scheduled ticks, with a higher level timing wheel used to 204 | provide application timeouts. Timing wheels are not provided in Amy, as the variety is wide and 205 | application-dependent. Luckily there are existing implementations out there and implementing a 206 | simple one on your own is not hard. Example usage of native timers via Amy is below: 207 | 208 | ```Rust 209 | use amy::Poller; 210 | 211 | const TIMER_TIMEOUT: usize = 50; // ms 212 | const POLL_TIMEOUT: usize = 5000; // ms 213 | 214 | let mut poller = Poller::new().unwrap(); 215 | let registrar = poller.get_registrar(); 216 | 217 | // A single use timer. `set_timeout` returns the unique id of the timer. 218 | let timer_id = registrar.set_timeout(TIMER_TIMEOUT).unwrap(); 219 | 220 | // Wait for the single notification from the timer timeout 221 | let notifications = poller.wait(POLL_TIMEOUT).unwrap(); 222 | assert_eq!(timer_id, notifications[0].id); 223 | 224 | // Set a recurring timer 225 | let interval_id = registrar.set_interval(TIMER_TIMEOUT).unwrap(); 226 | 227 | for _ in 1..5 { 228 | let notifications = poller.wait(POLL_TIMEOUT).unwrap(); 229 | // Interval timers are re-armed by the poller itself, so each wakeup delivers a new notification 230 | assert_eq!(interval_id, notifications[0].id); 231 | ... 232 | } 233 | 234 | 235 | // Only interval timers must be cancelled if not needed anymore 236 | registrar.cancel_timeout(interval_id).unwrap(); 237 | ``` 238 | 239 | # Channels 240 | Sometimes you want to use Amy in a single-threaded manner. Other times you may need to notify the 241 | polling thread of some information. For both of these cases, using a channel to receive information from 242 | other threads may be necessary. To prevent unnecessary delays it'd be nice to wake up the poller when 243 | there is a message waiting to be received. For these use cases Amy provides a wrapper around mpsc 244 | sync and async channels that automatically registers the channel receiver with the kernel poller in 245 | order to allow information to be delivered to the poll thread. Below is a minimal example showing 246 | async channel usage. Note that the return type of `channel()` is different in Amy than in the 247 | standard library. This is because the call can fail, since it has to register with the kernel 248 | poller. 249 | 250 | For more details see the [channel test](https://github.com/andrewjstone/amy/blob/master/tests/channel_test.rs).
251 | 252 | ```Rust 253 | let mut poller = Poller::new().unwrap(); 254 | let registrar = poller.get_registrar(); 255 | let (tx, rx) = registrar.channel().unwrap(); 256 | 257 | // Spawn a thread external to the poller to send on the channel 258 | thread::spawn(move || { 259 | // Send causes the poller to wake up 260 | tx.send("a").unwrap(); 261 | }); 262 | 263 | let notifications = poller.wait(5000).unwrap(); 264 | assert_eq!(rx.get_id(), notifications[0].id); 265 | assert_eq!("a", rx.try_recv().unwrap()); 266 | ``` 267 | -------------------------------------------------------------------------------- /src/channel.rs: -------------------------------------------------------------------------------- 1 | use std::sync::{mpsc, Arc}; 2 | use std::io; 3 | use std::sync::atomic::{AtomicUsize, Ordering}; 4 | use user_event::UserEvent; 5 | 6 | #[cfg(any(target_os = "linux", target_os = "android"))] 7 | use epoll::KernelRegistrar; 8 | 9 | #[cfg(any(target_os = "bitrig", target_os = "dragonfly", 10 | target_os = "freebsd", target_os = "ios", target_os = "macos", 11 | target_os = "netbsd", target_os = "openbsd"))] 12 | pub use kqueue::KernelRegistrar; 13 | 14 | pub fn channel<T>(registrar: &mut KernelRegistrar) -> io::Result<(Sender<T>, Receiver<T>)> { 15 | let (tx, rx) = mpsc::channel(); 16 | let pending = Arc::new(AtomicUsize::new(0)); 17 | let user_event = Arc::new(registrar.register_user_event().map_err(|e| io::Error::from(e))?); 18 | 19 | let tx = Sender { 20 | tx: tx, 21 | user_event: user_event.clone(), 22 | pending: pending.clone() 23 | }; 24 | 25 | let rx = Receiver { 26 | rx: rx, 27 | user_event: user_event, 28 | pending: pending 29 | }; 30 | 31 | Ok((tx, rx)) 32 | } 33 | 34 | pub fn sync_channel<T>(registrar: &mut KernelRegistrar, 35 | bound: usize) -> io::Result<(SyncSender<T>, Receiver<T>)> { 36 | let (tx, rx) = mpsc::sync_channel(bound); 37 | let pending = Arc::new(AtomicUsize::new(0)); 38 | let user_event = Arc::new(registrar.register_user_event().map_err(|e| io::Error::from(e))?); 39 | 40 | let tx = SyncSender { 41 | tx: tx, 42 | user_event: user_event.clone(), 43 | pending: pending.clone() 44 | }; 45 | 46 | let rx = Receiver { 47 | rx: rx, 48 | user_event: user_event, 49 | pending: pending 50 | }; 51 | 52 | Ok((tx, rx)) 53 | } 54 | 55 | 56 | #[derive(Debug)] 57 | pub struct Sender<T> { 58 | tx: mpsc::Sender<T>, 59 | user_event: Arc<UserEvent>, 60 | pending: Arc<AtomicUsize> 61 | } 62 | 63 | impl<T> Clone for Sender<T> { 64 | fn clone(&self) -> Sender<T> { 65 | Sender { 66 | tx: self.tx.clone(), 67 | user_event: self.user_event.clone(), 68 | pending: self.pending.clone() 69 | } 70 | } 71 | } 72 | 73 | impl<T> Sender<T> { 74 | pub fn send(&self, msg: T) -> Result<(), ChannelError<T>> { 75 | try!(self.tx.send(msg)); 76 | if self.pending.fetch_add(1, Ordering::SeqCst) == 0 { 77 | // Notify the kernel poller that a read is ready 78 | try!(self.user_event.trigger()); 79 | } 80 | Ok(()) 81 | } 82 | 83 | // Return the poll id for the channel 84 | pub fn get_id(&self) -> usize { 85 | self.user_event.get_id() 86 | } 87 | } 88 | 89 | #[derive(Debug, Clone)] 90 | pub struct SyncSender<T> { 91 | tx: mpsc::SyncSender<T>, 92 | user_event: Arc<UserEvent>, 93 | pending: Arc<AtomicUsize> 94 | } 95 | 96 | impl<T> SyncSender<T> { 97 | pub fn send(&self, msg: T) -> Result<(), ChannelError<T>> { 98 | try!(self.tx.send(msg)); 99 | if self.pending.fetch_add(1, Ordering::SeqCst) == 0 { 100 | // Notify the kernel poller that a read is ready 101 | try!(self.user_event.trigger()); 102 | } 103 | Ok(()) 104 | } 105 | 106 | pub fn try_send(&self, msg: T) -> Result<(), ChannelError<T>> { 107 | try!(self.tx.try_send(msg));
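// As with `send` above, only the 0 -> 1 transition of `pending` triggers the user event, so the poller is woken at most once for a batch of queued messages.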
108 | if self.pending.fetch_add(1, Ordering::SeqCst) == 0 { 109 | // Notify the kernel poller that a read is ready 110 | try!(self.user_event.trigger()); 111 | } 112 | Ok(()) 113 | } 114 | 115 | // Return the poll id for the channel 116 | pub fn get_id(&self) -> usize { 117 | self.user_event.get_id() 118 | } 119 | } 120 | 121 | pub struct Receiver<T> { 122 | rx: mpsc::Receiver<T>, 123 | user_event: Arc<UserEvent>, 124 | pending: Arc<AtomicUsize> 125 | } 126 | 127 | impl<T> Receiver<T> { 128 | pub fn try_recv(&self) -> Result<T, ChannelError<T>> { 129 | if self.pending.load(Ordering::SeqCst) == 0 { 130 | // Clear the kernel event and prepare for edge triggering 131 | try!(self.user_event.clear()); 132 | 133 | // Try one last check to prevent a race condition where the sender puts a value on the 134 | // channel and writes the event after our pending.load check, but before we did the 135 | // read. If we just did a read this would result in a value remaining on the channel and 136 | // a poller that would never wake up. 137 | if self.pending.load(Ordering::SeqCst) == 0 { 138 | return Err(ChannelError::TryRecvError(mpsc::TryRecvError::Empty)); 139 | } 140 | // We still have pending events, re-activate the user event so the poller will wake up 141 | try!(self.user_event.trigger()); 142 | } 143 | 144 | self.pending.fetch_sub(1, Ordering::SeqCst); 145 | self.rx.try_recv().map_err(|e| ChannelError::from(e)) 146 | } 147 | 148 | pub fn get_id(&self) -> usize { 149 | self.user_event.id 150 | } 151 | } 152 | 153 | #[cfg(not(any(target_os = "linux", target_os = "android")))] 154 | impl<T> Drop for Receiver<T> { 155 | fn drop(&mut self) { 156 | let _ = self.user_event.deregister(); 157 | } 158 | } 159 | 160 | #[derive(Debug)] 161 | pub enum ChannelError<T> { 162 | SendError(mpsc::SendError<T>), 163 | TrySendError(mpsc::TrySendError<T>), 164 | TryRecvError(mpsc::TryRecvError), 165 | Io(io::Error) 166 | } 167 | 168 | impl<T> From<io::Error> for ChannelError<T> { 169 | fn from(e: io::Error) -> ChannelError<T> { 170 | ChannelError::Io(e) 171 | } 172 | } 173 | 174 | impl<T> From<mpsc::SendError<T>> for ChannelError<T> { 175 | fn from(e: mpsc::SendError<T>) -> ChannelError<T> { 176 | ChannelError::SendError(e) 177 | } 178 | } 179 | 180 | impl<T> From<mpsc::TrySendError<T>> for ChannelError<T> { 181 | fn from(e: mpsc::TrySendError<T>) -> ChannelError<T> { 182 | ChannelError::TrySendError(e) 183 | } 184 | } 185 | 186 | impl<T> From<mpsc::TryRecvError> for ChannelError<T> { 187 | fn from(e: mpsc::TryRecvError) -> ChannelError<T> { 188 | ChannelError::TryRecvError(e) 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /src/epoll.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(feature = "no_timerfd"))] 2 | use std::os::unix::io::IntoRawFd; 3 | #[cfg(not(feature = "no_timerfd"))] 4 | use timer::Timer; 5 | #[cfg(not(feature = "no_timerfd"))] 6 | use timerfd::TimerFd; 7 | #[cfg(not(feature = "no_timerfd"))] 8 | use std::collections::HashMap; 9 | 10 | use std::os::unix::io::{RawFd, AsRawFd}; 11 | use std::slice; 12 | use std::sync::Arc; 13 | use std::sync::atomic::{AtomicUsize, Ordering}; 14 | use nix::sys::epoll::*; 15 | use nix::sys::epoll::EpollFlags; 16 | use nix::sys::eventfd::{eventfd, EfdFlags}; 17 | use libc; 18 | use std::io::{Result, Error, ErrorKind}; 19 | use event::Event; 20 | use notification::Notification; 21 | use user_event::UserEvent; 22 | use channel::{channel, Sender, Receiver}; 23 | use nix_err_to_io_err; 24 | 25 | #[cfg(feature = "no_timerfd")] 26 | use timer_heap::{TimerHeap, TimerEntry}; 27 | 28 | static EPOLL_EVENT_SIZE: usize = 1024; 29 | 30 | #[derive(Debug, Clone)] 31 |
pub enum TimerMsg { 32 | StartTimer {id: usize, timeout_ms: usize}, 33 | StartInterval {id: usize, timeout_ms: usize}, 34 | Cancel {id: usize} 35 | } 36 | 37 | pub struct KernelPoller { 38 | epfd: RawFd, 39 | registrar: KernelRegistrar, 40 | events: Vec<EpollEvent>, 41 | timer_rx: Receiver<TimerMsg>, 42 | 43 | #[cfg(not(feature = "no_timerfd"))] 44 | timers: HashMap<usize, Timer>, 45 | 46 | #[cfg(feature = "no_timerfd")] 47 | timers: TimerHeap 48 | } 49 | 50 | impl KernelPoller { 51 | 52 | #[cfg(not(feature = "no_timerfd"))] 53 | pub fn new() -> Result<KernelPoller> { 54 | let epfd = epoll_create().map_err(nix_err_to_io_err)?; 55 | let registrations = Arc::new(AtomicUsize::new(0)); 56 | let mut registrar = KernelRegistrar::new(epfd, registrations); 57 | let (tx, rx) = channel(&mut registrar)?; 58 | registrar.timer_tx = Some(tx); 59 | Ok(KernelPoller { 60 | epfd: epfd, 61 | registrar: registrar, 62 | events: Vec::with_capacity(EPOLL_EVENT_SIZE), 63 | timer_rx: rx, 64 | timers: HashMap::new() 65 | }) 66 | } 67 | 68 | #[cfg(feature = "no_timerfd")] 69 | pub fn new() -> Result<KernelPoller> { 70 | let epfd = epoll_create().map_err(nix_err_to_io_err)?; 71 | let registrations = Arc::new(AtomicUsize::new(0)); 72 | let mut registrar = KernelRegistrar::new(epfd, registrations); 73 | let (tx, rx) = channel(&mut registrar)?; 74 | registrar.timer_tx = Some(tx); 75 | Ok(KernelPoller { 76 | epfd: epfd, 77 | registrar: registrar, 78 | events: Vec::with_capacity(EPOLL_EVENT_SIZE), 79 | timer_rx: rx, 80 | timers: TimerHeap::new() 81 | }) 82 | } 83 | 84 | pub fn get_registrar(&self) -> KernelRegistrar { 85 | self.registrar.clone() 86 | } 87 | 88 | /// Wait for epoll events. Return a list of notifications. Notifications contain user data 89 | /// registered with epoll_ctl which is extracted from the data member returned from epoll_wait. 90 | #[cfg(not(feature = "no_timerfd"))] 91 | pub fn wait(&mut self, timeout_ms: usize) -> Result<Vec<Notification>> { 92 | 93 | // We may have gotten a timer registration while awake, don't bother sleeping just to 94 | // immediately wake up again. 95 | self.receive_timer_messages()?; 96 | 97 | // Create a buffer to read events into 98 | let dst = unsafe { 99 | slice::from_raw_parts_mut(self.events.as_mut_ptr(), self.events.capacity()) 100 | }; 101 | 102 | let count = epoll_wait(self.epfd, dst, timeout_ms as isize).map_err(nix_err_to_io_err)?; 103 | 104 | // Set the length of the vector to what was filled in by the call to epoll_wait 105 | unsafe { self.events.set_len(count); } 106 | 107 | let mut timer_rx_notification = false; 108 | let mut notifications = Vec::with_capacity(count); 109 | let mut timer_ids = Vec::new(); 110 | for e in self.events.iter() { 111 | let id = e.data() as usize; 112 | if id == self.timer_rx.get_id() { 113 | timer_rx_notification = true; 114 | } else { 115 | if self.timers.contains_key(&id) { 116 | timer_ids.push(id); 117 | } 118 | notifications.push(Notification { 119 | id: id, 120 | event: event_from_flags(e.events()) 121 | }); 122 | } 123 | } 124 | if timer_rx_notification { 125 | self.receive_timer_messages()?; 126 | } 127 | 128 | self.handle_timer_notifications(timer_ids)?; 129 | 130 | Ok(notifications) 131 | } 132 | 133 | /// Wait for epoll events 134 | /// If timers are in use, the `timeout_ms` parameter may be ignored, as the epoll timeout 135 | /// becomes the minimum of the remaining time on the earliest timer scheduled to fire and 136 | /// `timeout_ms`.
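/// For example: with a `timeout_ms` of 5000 and an interval timer due to fire in 200 ms, epoll is waited on with a 200 ms timeout, and any timers that have expired by wakeup are appended to the returned notifications.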
137 | #[cfg(feature = "no_timerfd")] 138 | pub fn wait(&mut self, timeout_ms: usize) -> Result<Vec<Notification>> { 139 | 140 | // We may have gotten a timer registration while awake, don't bother sleeping just to 141 | // immediately wake up again. 142 | self.receive_timer_messages(); 143 | 144 | // Create a buffer to read events into 145 | let dst = unsafe { 146 | slice::from_raw_parts_mut(self.events.as_mut_ptr(), self.events.capacity()) 147 | }; 148 | 149 | let expired = self.timers.expired(); 150 | if !expired.is_empty() { 151 | return Ok(expired); 152 | } 153 | 154 | let timeout = self.timers.earliest_timeout(timeout_ms); 155 | let count = epoll_wait(self.epfd, dst, timeout as isize).map_err(nix_err_to_io_err)?; 156 | 157 | // Set the length of the vector to what was filled in by the call to epoll_wait 158 | unsafe { self.events.set_len(count); } 159 | 160 | let mut timer_rx_notification = false; 161 | let mut notifications = Vec::with_capacity(count); 162 | for e in self.events.iter() { 163 | let id = e.data() as usize; 164 | if id == self.timer_rx.get_id() { 165 | timer_rx_notification = true; 166 | } else { 167 | notifications.push(Notification { 168 | id: id, 169 | event: event_from_flags(e.events()) 170 | }); 171 | } 172 | } 173 | if timer_rx_notification { 174 | self.receive_timer_messages(); 175 | } 176 | 177 | let expired = self.timers.expired(); 178 | notifications.extend(expired); 179 | 180 | Ok(notifications) 181 | } 182 | 183 | #[cfg(not(feature = "no_timerfd"))] 184 | fn receive_timer_messages(&mut self) -> Result<()> { 185 | while let Ok(msg) = self.timer_rx.try_recv() { 186 | match msg { 187 | TimerMsg::StartTimer {id, timeout_ms} => { 188 | let timer = self.set_timer(id, timeout_ms, false)?; 189 | self.timers.insert(id, timer); 190 | }, 191 | TimerMsg::StartInterval {id, timeout_ms} => { 192 | let timer = self.set_timer(id, timeout_ms, true)?; 193 | self.timers.insert(id, timer); 194 | }, 195 | TimerMsg::Cancel {id} => { 196 | // Removing the timer from the map will cause it to be dropped, which closes its fd 197 | // and subsequently removes it from epoll. 198 | self.timers.remove(&id); 199 | } 200 | } 201 | } 202 | Ok(()) 203 | } 204 | 205 | #[cfg(feature = "no_timerfd")] 206 | fn receive_timer_messages(&mut self) { 207 | while let Ok(msg) = self.timer_rx.try_recv() { 208 | match msg { 209 | TimerMsg::StartTimer {id, timeout_ms} => { 210 | let timer = TimerEntry::new(id, timeout_ms as u64, false); 211 | self.timers.insert(timer); 212 | }, 213 | TimerMsg::StartInterval {id, timeout_ms} => { 214 | let timer = TimerEntry::new(id, timeout_ms as u64, true); 215 | self.timers.insert(timer); 216 | }, 217 | TimerMsg::Cancel {id} => { 218 | // Remove the timer from the heap so it can no longer fire or be returned 219 | // as expired.
220 | self.timers.remove(id); 221 | } 222 | } 223 | } 224 | } 225 | 226 | #[cfg(not(feature = "no_timerfd"))] 227 | fn handle_timer_notifications(&mut self, ids: Vec<usize>) -> Result<()> { 228 | for id in ids { 229 | let mut interval = false; 230 | if let Some(timer) = self.timers.get(&id) { 231 | if timer.interval { 232 | interval = true; 233 | timer.arm()?; 234 | } 235 | } 236 | if !interval { 237 | self.timers.remove(&id); 238 | } 239 | } 240 | return Ok(()) 241 | } 242 | 243 | #[cfg(not(feature = "no_timerfd"))] 244 | fn set_timer(&self, id: usize, timeout: usize, recurring: bool) -> Result<Timer> { 245 | let timer_fd = TimerFd::new(timeout, recurring).map_err(nix_err_to_io_err)?; 246 | let mut info = EpollEvent::new(flags_from_event(Event::Read), id as u64); 247 | let fd = timer_fd.into_raw_fd(); 248 | match epoll_ctl(self.epfd, EpollOp::EpollCtlAdd, fd, &mut info) { 249 | Ok(_) => Ok(Timer {fd: fd, interval: recurring}), 250 | Err(e) => { 251 | let _ = unsafe { libc::close(fd) }; 252 | Err(nix_err_to_io_err(e)) 253 | } 254 | } 255 | } 256 | } 257 | 258 | #[derive(Debug, Clone)] 259 | pub struct KernelRegistrar { 260 | epfd: RawFd, 261 | total_registrations: Arc<AtomicUsize>, 262 | 263 | // This sender is strictly used to send timer registrations and cancellations to the poller 264 | // 265 | // Since initializing a channel requires an existing KernelRegistrar, we must first 266 | // create the channel with a registrar that has a `None` value for timer_tx. This is fine, as 267 | // channels have no need to create timers. 268 | timer_tx: Option<Sender<TimerMsg>> 269 | } 270 | 271 | impl KernelRegistrar { 272 | fn new(epfd: RawFd, registrations: Arc<AtomicUsize>) -> KernelRegistrar { 273 | KernelRegistrar { 274 | epfd: epfd, 275 | total_registrations: registrations, 276 | timer_tx: None 277 | } 278 | } 279 | 280 | pub fn register<T: AsRawFd>(&self, sock: &T, event: Event) -> Result<usize> { 281 | let sock_fd = sock.as_raw_fd(); 282 | let id = self.total_registrations.fetch_add(1, Ordering::SeqCst); 283 | let mut info = EpollEvent::new(flags_from_event(event), id as u64); 284 | 285 | epoll_ctl(self.epfd, EpollOp::EpollCtlAdd, sock_fd, &mut info).map_err(nix_err_to_io_err)?; 286 | Ok(id) 287 | } 288 | 289 | pub fn reregister<T: AsRawFd>(&self, id: usize, sock: &T, event: Event) -> Result<()> { 290 | let sock_fd = sock.as_raw_fd(); 291 | let mut info = EpollEvent::new(flags_from_event(event), id as u64); 292 | Ok(epoll_ctl(self.epfd, EpollOp::EpollCtlMod, sock_fd, &mut info).map_err(nix_err_to_io_err)?) 293 | } 294 | 295 | pub fn deregister<T: AsRawFd>(&self, sock: &T) -> Result<()> { 296 | // info is unused by epoll on delete operations 297 | let mut info = EpollEvent::empty(); 298 | let sock_fd = sock.as_raw_fd(); 299 | Ok(epoll_ctl(self.epfd, EpollOp::EpollCtlDel, sock_fd, &mut info).map_err(nix_err_to_io_err)?)
300 | } 301 | 302 | pub fn register_user_event(&mut self) -> Result<UserEvent> { 303 | let fd = eventfd(0, EfdFlags::EFD_CLOEXEC | EfdFlags::EFD_NONBLOCK).map_err(nix_err_to_io_err)?; 304 | let id = self.total_registrations.fetch_add(1, Ordering::SeqCst); 305 | let mut info = EpollEvent::new(flags_from_event(Event::Read), id as u64); 306 | match epoll_ctl(self.epfd, EpollOp::EpollCtlAdd, fd, &mut info) { 307 | Ok(_) => Ok(UserEvent {id: id, fd: fd}), 308 | Err(e) => { 309 | let _ = unsafe { libc::close(fd) }; 310 | Err(nix_err_to_io_err(e)) 311 | } 312 | } 313 | } 314 | 315 | pub fn deregister_user_event(&mut self, event: &UserEvent) -> Result<()> { 316 | self.deregister(event) 317 | } 318 | 319 | pub fn set_timeout(&self, timeout: usize) -> Result<usize> { 320 | let id = self.total_registrations.fetch_add(1, Ordering::SeqCst); 321 | self.timer_tx.as_ref().unwrap().send(TimerMsg::StartTimer {id: id, timeout_ms: timeout}) 322 | .map_err(|_| Error::new(ErrorKind::BrokenPipe, "Channel receiver dropped"))?; 323 | Ok(id) 324 | } 325 | 326 | pub fn set_interval(&self, timeout: usize) -> Result<usize> { 327 | let id = self.total_registrations.fetch_add(1, Ordering::SeqCst); 328 | self.timer_tx.as_ref().unwrap().send(TimerMsg::StartInterval {id: id, timeout_ms: timeout}) 329 | .map_err(|_| Error::new(ErrorKind::BrokenPipe, "Channel receiver dropped"))?; 330 | Ok(id) 331 | } 332 | 333 | pub fn cancel_timeout(&self, timer_id: usize) -> Result<()> { 334 | self.timer_tx.as_ref().unwrap().send(TimerMsg::Cancel {id: timer_id}) 335 | .map_err(|_| Error::new(ErrorKind::BrokenPipe, "Channel receiver dropped"))?; 336 | Ok(()) 337 | } 338 | } 339 | 340 | fn event_from_flags(flags: EpollFlags) -> Event { 341 | let mut event = Event::Read; 342 | if flags.contains(EpollFlags::EPOLLIN) && flags.contains(EpollFlags::EPOLLOUT) { 343 | event = Event::Both; 344 | } else if flags.contains(EpollFlags::EPOLLOUT) { 345 | event = Event::Write; 346 | } 347 | event 348 | } 349 | 350 | fn flags_from_event(event: Event) -> EpollFlags { 351 | let mut flags = EpollFlags::empty(); 352 | match event { 353 | Event::Read => { 354 | flags.insert(EpollFlags::EPOLLIN); 355 | }, 356 | Event::Write => { 357 | flags.insert(EpollFlags::EPOLLOUT); 358 | }, 359 | Event::Both => { 360 | flags.insert(EpollFlags::EPOLLIN); 361 | flags.insert(EpollFlags::EPOLLOUT); 362 | } 363 | } 364 | // All events are edge triggered 365 | flags.insert(EpollFlags::EPOLLET); 366 | flags 367 | } 368 | -------------------------------------------------------------------------------- /src/event.rs: -------------------------------------------------------------------------------- 1 | #[derive(Clone, Debug, Eq, PartialEq)] 2 | pub enum Event { 3 | Read, 4 | Write, 5 | Both 6 | } 7 | 8 | impl Event { 9 | pub fn readable(&self) -> bool { 10 | match *self { 11 | Event::Read | Event::Both => true, 12 | _ => false 13 | } 14 | } 15 | 16 | pub fn writable(&self) -> bool { 17 | match *self { 18 | Event::Write | Event::Both => true, 19 | _ => false 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /src/frame_reader.rs: -------------------------------------------------------------------------------- 1 | //! This reader composes frames of bytes started with a 4 byte frame header indicating the size of 2 | //! the buffer. An exact size buffer will be allocated once the 4 byte frame header is received.
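//! //! For example (taken from the tests below): the 11 byte message b"Hello World" travels on the wire as the big-endian length header [0, 0, 0, 11] followed by the 11 payload bytes.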
3 | 4 | use std::io::{self, Read, Error, ErrorKind}; 5 | use std::collections::VecDeque; 6 | use std::mem; 7 | 8 | #[derive(Debug)] 9 | pub struct FrameReader { 10 | frames: Frames 11 | } 12 | 13 | impl FrameReader { 14 | pub fn new(max_frame_size: u32) -> FrameReader { 15 | FrameReader { 16 | frames: Frames::new(max_frame_size) 17 | } 18 | } 19 | 20 | pub fn read<T: Read>(&mut self, reader: &mut T) -> io::Result<usize> { 21 | self.frames.read(reader) 22 | } 23 | 24 | pub fn iter_mut(&mut self) -> Iter { 25 | Iter { 26 | frames: &mut self.frames 27 | } 28 | } 29 | } 30 | 31 | pub struct Iter<'a> { 32 | frames: &'a mut Frames 33 | } 34 | 35 | impl<'a> Iterator for Iter<'a> { 36 | type Item = Vec<u8>; 37 | 38 | fn next(&mut self) -> Option<Vec<u8>> { 39 | self.frames.completed_frames.pop_front() 40 | } 41 | } 42 | 43 | #[derive(Debug)] 44 | struct Frames { 45 | max_frame_size: u32, 46 | bytes_read: usize, 47 | header: [u8; 4], 48 | reading_header: bool, 49 | current: Vec<u8>, 50 | completed_frames: VecDeque<Vec<u8>> 51 | } 52 | 53 | impl Frames { 54 | pub fn new(max_frame_size: u32) -> Frames { 55 | Frames { 56 | max_frame_size: max_frame_size, 57 | bytes_read: 0, 58 | header: [0; 4], 59 | reading_header: true, 60 | current: Vec::with_capacity(0), 61 | completed_frames: VecDeque::new() 62 | } 63 | } 64 | 65 | /// Will read as much data as possible and build up frames to be retrieved from the iterator. 66 | /// 67 | /// Will stop reading when 0 bytes are retrieved from the latest call to `do_read` or the error 68 | /// kind is io::ErrorKind::WouldBlock. 69 | /// 70 | /// Returns an error or the total amount of bytes read. 71 | fn read<T: Read>(&mut self, reader: &mut T) -> io::Result<usize> { 72 | let mut total_bytes_read = 0; 73 | loop { 74 | match self.do_read(reader) { 75 | Ok(0) => { 76 | if total_bytes_read == 0 { 77 | return Err(Error::new(ErrorKind::UnexpectedEof, "Read 0 bytes")); 78 | } 79 | return Ok(total_bytes_read); 80 | }, 81 | Ok(bytes_read) => { 82 | total_bytes_read += bytes_read; 83 | }, 84 | Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(total_bytes_read), 85 | Err(e) => return Err(e) 86 | } 87 | } 88 | } 89 | 90 | fn do_read<T: Read>(&mut self, reader: &mut T) -> io::Result<usize> { 91 | if self.reading_header { 92 | self.read_header(reader) 93 | } else { 94 | self.read_value(reader) 95 | } 96 | } 97 | 98 | // TODO: Return an error if size is greater than max_frame_size 99 | fn read_header<T: Read>(&mut self, reader: &mut T) -> io::Result<usize> { 100 | let bytes_read = try!(reader.read(&mut self.header[self.bytes_read..])); 101 | self.bytes_read += bytes_read; 102 | if self.bytes_read == 4 { 103 | let len = unsafe { u32::from_be(mem::transmute(self.header)) }; 104 | self.bytes_read = 0; 105 | self.reading_header = false; 106 | self.current = Vec::with_capacity(len as usize); 107 | unsafe { self.current.set_len(len as usize); } 108 | } 109 | Ok(bytes_read) 110 | } 111 | 112 | fn read_value<T: Read>(&mut self, reader: &mut T) -> io::Result<usize> { 113 | let bytes_read = try!(reader.read(&mut self.current[self.bytes_read..])); 114 | self.bytes_read += bytes_read; 115 | if self.bytes_read == self.current.len() { 116 | self.completed_frames.push_back(mem::replace(&mut self.current, Vec::new())); 117 | self.bytes_read = 0; 118 | self.reading_header = true; 119 | } 120 | Ok(bytes_read) 121 | } 122 | } 123 | 124 | #[cfg(test)] 125 | mod tests { 126 | use std::{mem, thread}; 127 | use std::io::Cursor; 128 | use std::io::Write; 129 | use std::net::{TcpListener, TcpStream}; 130 | use super::FrameReader; 131 | 132 | #[test] 133 | fn partial_and_complete_reads() {
134 | let buf1 = String::from("Hello World").into_bytes(); 135 | let buf2 = String::from("Hi.").into_bytes(); 136 | let header1: [u8; 4] = unsafe { mem::transmute((buf1.len() as u32).to_be()) }; 137 | let header2: [u8; 4] = unsafe { mem::transmute((buf2.len() as u32).to_be()) }; 138 | 139 | let mut reader = FrameReader::new(1024); 140 | 141 | // Write a partial header 142 | let mut header = Cursor::new(&header1[0..2]); 143 | let bytes_read = reader.read(&mut header).unwrap(); 144 | assert_eq!(2, bytes_read); 145 | assert_eq!(None, reader.iter_mut().next()); 146 | 147 | // Complete writing just the header 148 | let mut header = Cursor::new(&header1[2..]); 149 | let bytes_read = reader.read(&mut header).unwrap(); 150 | assert_eq!(2, bytes_read); 151 | assert_eq!(None, reader.iter_mut().next()); 152 | 153 | // Write a partial value 154 | let mut data = Cursor::new(&buf1[0..5]); 155 | let bytes_read = reader.read(&mut data).unwrap(); 156 | assert_eq!(5, bytes_read); 157 | assert_eq!(None, reader.iter_mut().next()); 158 | 159 | // Complete writing the first value 160 | let mut data = Cursor::new(&buf1[5..]); 161 | let bytes_read = reader.read(&mut data).unwrap(); 162 | assert_eq!(6, bytes_read); 163 | let val = reader.iter_mut().next().unwrap(); 164 | assert_eq!(buf1, val); 165 | 166 | // Write an entire header and value 167 | let mut data = Cursor::new(Vec::with_capacity(7)); 168 | assert_eq!(4, data.write(&header2).unwrap()); 169 | assert_eq!(3, data.write(&buf2).unwrap()); 170 | data.set_position(0); 171 | let bytes_read = reader.read(&mut data).unwrap(); 172 | assert_eq!(7, bytes_read); 173 | assert_eq!(buf2, reader.iter_mut().next().unwrap()); 174 | } 175 | 176 | const IP: &'static str = "127.0.0.1:5003"; 177 | /// Test that we never get an io error, but instead get Ok(0) when the call to read would block 178 | #[test] 179 | fn would_block() { 180 | let listener = TcpListener::bind(IP).unwrap(); 181 | let h = thread::spawn(move || { 182 | for stream in listener.incoming() { 183 | if let Ok(mut conn) = stream { 184 | conn.set_nonblocking(true).unwrap(); 185 | let mut reader = FrameReader::new(1024); 186 | let result = reader.read(&mut conn); 187 | assert_matches!(result, Ok(0)); 188 | return; 189 | } 190 | } 191 | }); 192 | // Assign to a variable so the sock isn't dropped early 193 | // Name it with a preceding underscore so we don't get an unused variable warning 194 | let _sock = TcpStream::connect(IP).unwrap(); 195 | h.join().unwrap(); 196 | 197 | } 198 | } 199 | -------------------------------------------------------------------------------- /src/frame_writer.rs: -------------------------------------------------------------------------------- 1 | use std::io::{self, Write}; 2 | use std::collections::LinkedList as List; 3 | use std::mem; 4 | 5 | /// Abstraction for writing frame buffered data to non-blocking sockets. 6 | /// 7 | /// Note that an individual frame writer should only write to one socket, since it tracks whether or 8 | /// not the socket is writable directly. 9 | pub struct FrameWriter { 10 | is_empty: bool, 11 | is_writable: bool, 12 | current: Vec<u8>, 13 | written: usize, 14 | pending: List<Vec<u8>> 15 | } 16 | 17 | impl FrameWriter { 18 | pub fn new() -> FrameWriter { 19 | FrameWriter { 20 | is_empty: true, 21 | is_writable: true, 22 | current: Vec::new(), 23 | written: 0, 24 | pending: List::new() 25 | } 26 | } 27 | 28 | /// Write as much stored data as possible to the writer, along with optional new data.
29 | /// 30 | /// If data is not `None`, compute its header and push the header followed by the data onto the 31 | /// back of the pending list. If the writer is empty, the header instead becomes the current 32 | /// buffer and only the data is pushed onto the pending list. 33 | /// 34 | /// Returns `Ok(true)` if the socket is still writable, and `Ok(false)` if it's not writable and 35 | /// needs to be re-registered. Returns any io::Error except for `EAGAIN` or `EWOULDBLOCK` which 36 | /// results in `Ok(false)`. 37 | pub fn write<T: Write>(&mut self, writer: &mut T, data: Option<Vec<u8>>) -> io::Result<bool> { 38 | if let Some(frame) = data { 39 | self.append_frame(frame); 40 | } 41 | if self.is_empty { 42 | return Ok(self.is_writable); 43 | } 44 | if !self.is_writable { 45 | return Ok(false); 46 | } 47 | self.write_as_much_as_possible(writer) 48 | } 49 | 50 | /// Tell the frame writer that the corresponding writer is writable again. 51 | pub fn writable(&mut self) { 52 | self.is_writable = true; 53 | } 54 | 55 | pub fn is_writable(&self) -> bool { 56 | self.is_writable 57 | } 58 | 59 | pub fn is_empty(&self) -> bool { 60 | self.is_empty 61 | } 62 | 63 | fn append_frame(&mut self, frame: Vec<u8>) { 64 | let header = u32_to_vec(frame.len() as u32); 65 | if self.is_empty { 66 | self.current = header; 67 | self.pending.push_back(frame); 68 | self.is_empty = false; 69 | } else { 70 | self.pending.push_back(header); 71 | self.pending.push_back(frame); 72 | } 73 | } 74 | 75 | fn write_as_much_as_possible<T: Write>(&mut self, writer: &mut T) -> io::Result<bool> { 76 | loop { 77 | match writer.write(&self.current[self.written..]) { 78 | Ok(0) => { 79 | self.is_writable = false; 80 | return Ok(false); 81 | }, 82 | Ok(n) => { 83 | self.written += n; 84 | if self.written == self.current.len() { 85 | match self.pending.pop_front() { 86 | None => { 87 | self.written = 0; 88 | self.current = Vec::new(); 89 | self.is_empty = true; 90 | return Ok(true); 91 | }, 92 | Some(data) => { 93 | self.written = 0; 94 | self.current = data; 95 | } 96 | } 97 | } 98 | }, 99 | Err(e) => { 100 | if e.kind() == io::ErrorKind::WouldBlock { 101 | self.is_writable = false; 102 | return Ok(false); 103 | } 104 | return Err(e); 105 | } 106 | } 107 | } 108 | } 109 | 110 | } 111 | 112 | /// Convert a u32 in native order to a 4 byte vec in big endian 113 | pub fn u32_to_vec(n: u32) -> Vec<u8> { 114 | unsafe { 115 | let bytes: [u8; 4] = mem::transmute(n.to_be()); 116 | bytes.to_vec() 117 | } 118 | } 119 | 120 | #[cfg(test)] 121 | mod tests { 122 | use std::io::Cursor; 123 | use super::FrameWriter; 124 | 125 | #[test] 126 | fn call_write_on_empty_frame_writer() { 127 | let mut frame_writer = FrameWriter::new(); 128 | let mut buf = vec![0; 10]; 129 | assert_eq!(true, frame_writer.write(&mut buf, None).unwrap()); 130 | assert_eq!(true, frame_writer.is_empty); 131 | } 132 | 133 | #[test] 134 | fn call_write_on_empty_frame_writer_but_fill_writer_exactly() { 135 | let mut frame_writer = FrameWriter::new(); 136 | // Leave space for the 4 byte header 137 | let mut buf = vec![0; 14]; 138 | // We use a cursor wrapped around a slice instead of a vec because we want a fixed buffer 139 | // size. If we used a vec writes would always succeed since the vec would grow.
140 |         let mut writer = Cursor::new(&mut buf[..]);
141 |         let frame = vec![0; 10];
142 |         assert_eq!(true, frame_writer.write(&mut writer, Some(frame)).unwrap());
143 |         assert_eq!(true, frame_writer.is_empty);
144 |         assert_eq!(false, frame_writer.write(&mut writer, Some(vec![0;1])).unwrap());
145 |     }
146 | 
147 |     #[test]
148 |     fn write_until_full_reset_and_write_some_more() {
149 |         let mut frame_writer = FrameWriter::new();
150 |         let mut buf = vec![0; 14];
151 |         let mut writer = Cursor::new(&mut buf[..]);
152 |         let frame = vec![0; 11];
153 |         assert_eq!(false, frame_writer.write(&mut writer, Some(frame)).unwrap());
154 |         assert_eq!(false, frame_writer.is_empty);
155 |         // At this point there is 1 more byte stored in the frame writer waiting to be written
156 |         assert_eq!(10, frame_writer.written);
157 |         assert_eq!(true, frame_writer.pending.is_empty());
158 | 
159 |         // Try to write the last byte, but the buffer is full
160 |         assert_eq!(false, frame_writer.write(&mut writer, None).unwrap());
161 | 
162 |         // Make the buffer writable and the buffer size 14 bytes again.
163 |         frame_writer.writable();
164 |         writer.set_position(0);
165 |         assert_eq!(true, frame_writer.is_writable);
166 |         // Write the last byte remaining, plus a new 9 byte frame and its 4 byte header.
167 |         assert_eq!(true, frame_writer.write(&mut writer, Some(vec![0;9])).unwrap());
168 |         // Ensure that the frame writer was reset because there is no more data to write
169 |         assert_eq!(true, frame_writer.is_empty);
170 |         assert_eq!(0, frame_writer.written);
171 |         assert_eq!(0, frame_writer.current.len());
172 |     }
173 | }
174 | 
175 | 
176 | 
--------------------------------------------------------------------------------
/src/kqueue.rs:
--------------------------------------------------------------------------------
1 | use std::os::unix::io::RawFd;
2 | use std::collections::HashMap;
3 | use std::slice;
4 | use std::sync::Arc;
5 | use std::sync::atomic::{AtomicUsize, Ordering};
6 | use std::os::unix::io::AsRawFd;
7 | use nix::sys::event::{ev_set, kqueue, kevent, KEvent, EventFilter, EventFlag, FilterFlag};
8 | use libc::{uintptr_t, intptr_t};
9 | use std::io::Result;
10 | use event::Event;
11 | use notification::Notification;
12 | use user_event::UserEvent;
13 | use nix_err_to_io_err;
14 | 
15 | type UserData = intptr_t;
16 | 
17 | static KQUEUE_EVENT_SIZE: usize = 1024;
18 | 
19 | pub struct KernelPoller {
20 |     kqueue: RawFd,
21 |     registrar: KernelRegistrar,
22 |     eventlist: Vec<KEvent>,
23 |     notifications: HashMap<RawFd, Notification>
24 | }
25 | 
26 | impl KernelPoller {
27 |     pub fn new() -> Result<KernelPoller> {
28 |         let kq = kqueue().map_err(nix_err_to_io_err)?;
29 |         let registrations = Arc::new(AtomicUsize::new(1));
30 |         Ok(KernelPoller {
31 |             kqueue: kq,
32 |             registrar: KernelRegistrar::new(kq, registrations),
33 |             eventlist: Vec::with_capacity(KQUEUE_EVENT_SIZE),
34 |             notifications: HashMap::with_capacity(KQUEUE_EVENT_SIZE)
35 |         })
36 |     }
37 | 
38 |     // This will always succeed. We implement it this way for api compatibility with epoll.
39 |     pub fn get_registrar(&self) -> KernelRegistrar {
40 |         self.registrar.clone()
41 |     }
42 | 
43 |     // Wait for kevents and return a list of Notifications. Coalesce reads and writes for the same
44 |     // socket into a single notification. If only a read or a write event for a given socket is
45 |     // present in the eventlist, check the registration to see if there is another kevent registered
46 |     // and remove it if so. We do this removal to prevent aliasing a pointer to the same
47 |     // registration structure.
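    //
    // A sketch of the intended call pattern (illustrative only; `handle` is a
    // hypothetical dispatch function, not part of this crate):
    //
    //     let mut poller = KernelPoller::new()?;
    //     loop {
    //         for notification in poller.wait(1000)? {
    //             handle(notification.id, notification.event);
    //         }
    //     }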
48 |     pub fn wait(&mut self, timeout_ms: usize) -> Result<Vec<Notification>> {
49 | 
50 |         // Create a buffer to read events into
51 |         let dst = unsafe {
52 |             slice::from_raw_parts_mut(self.eventlist.as_mut_ptr(), self.eventlist.capacity())
53 |         };
54 | 
55 |         let count = kevent(self.kqueue, &[], dst, timeout_ms).map_err(nix_err_to_io_err)?;
56 | 
57 |         // Set the length of the vector to the number of events that was returned by kevent
58 |         unsafe { self.eventlist.set_len(count); }
59 | 
60 |         self.coalesce_events();
61 |         Ok(self.notifications.drain().map(|(_, v)| v).collect())
62 |     }
63 | 
64 |     // Combine read and write events for the same socket into a single notification.
65 |     fn coalesce_events(&mut self) {
66 |         for e in self.eventlist.drain(..) {
67 |             let event = event_from_filter(e.filter());
68 |             let new_notification = Notification {
69 |                 id: e.udata() as usize,
70 |                 event: event.clone()
71 |             };
72 | 
73 |             let mut notification = self.notifications.entry(e.ident() as RawFd)
74 |                 .or_insert(new_notification);
75 |             if notification.event != event {
76 |                 notification.event = Event::Both
77 |             }
78 |         }
79 |     }
80 | }
81 | 
82 | 
83 | #[derive(Debug, Clone)]
84 | pub struct KernelRegistrar {
85 |     kqueue: RawFd,
86 |     total_registrations: Arc<AtomicUsize>
87 | }
88 | 
89 | impl KernelRegistrar {
90 |     // Explicitly not public. KernelRegistrars are tied to KernelPollers and are retrieved via
91 |     // calls to poller.get_registrar().
92 |     fn new(kq: RawFd, registrations: Arc<AtomicUsize>) -> KernelRegistrar {
93 |         KernelRegistrar {
94 |             kqueue: kq,
95 |             total_registrations: registrations
96 |         }
97 |     }
98 | 
99 |     pub fn register<T: AsRawFd>(&self, sock: &T, event: Event) -> Result<usize> {
100 |         let sock_fd = sock.as_raw_fd();
101 |         let id = self.total_registrations.fetch_add(1, Ordering::SeqCst);
102 |         let changes = make_changelist(sock_fd, event, id as UserData);
103 |         kevent(self.kqueue, &changes, &mut[], 0).map_err(nix_err_to_io_err)?;
104 |         Ok(id)
105 |     }
106 | 
107 |     pub fn reregister<T: AsRawFd>(&self, id: usize, sock: &T, event: Event) -> Result<()> {
108 |         let sock_fd = sock.as_raw_fd();
109 |         let changes = make_changelist(sock_fd, event, id as UserData);
110 |         kevent(self.kqueue, &changes, &mut[], 0).map_err(nix_err_to_io_err)?;
111 |         Ok(())
112 |     }
113 | 
114 |     pub fn deregister<T: AsRawFd>(&self, sock: &T) -> Result<()> {
115 |         let sock_fd = sock.as_raw_fd();
116 |         let mut changes = make_changelist(sock_fd, Event::Both, 0);
117 |         for e in changes.iter_mut() {
118 |             set_flags(e, EventFlag::EV_DELETE);
119 |         }
120 |         // Ignore errors here, because one of the two events may not actually be registered,
121 |         // and the deregister signature doesn't expose that distinction. At this point
122 |         // ownership of the socket is taken, so it's irrelevant anyway.
123 |         let _ = kevent(self.kqueue, &changes, &mut[], 0);
124 |         Ok(())
125 |     }
126 | 
127 |     pub fn register_user_event(&mut self) -> Result<UserEvent> {
128 |         let id = self.total_registrations.fetch_add(1, Ordering::SeqCst);
129 |         let changes = vec![make_user_event(id)];
130 |         kevent(self.kqueue, &changes, &mut[], 0).map_err(nix_err_to_io_err)?;
131 |         Ok(UserEvent {id: id, registrar: self.clone()})
132 |     }
133 | 
134 |     pub fn trigger_user_event(&self, event: &UserEvent) -> Result<()> {
135 |         let mut e = make_user_event(event.get_id());
136 |         set_flags(&mut e, EventFlag::EV_ENABLE);
137 |         set_fflags(&mut e, FilterFlag::NOTE_TRIGGER);
138 |         kevent(self.kqueue, &vec![e], &mut[], 0).map_err(nix_err_to_io_err)?;
139 |         Ok(())
140 |     }
141 | 
142 |     pub fn clear_user_event(&self, event: &UserEvent) -> Result<()> {
143 |         let mut user_event = make_user_event(event.get_id());
144 |         set_flags(&mut user_event, EventFlag::EV_DISABLE);
145 |         kevent(self.kqueue, &vec![user_event], &mut[], 0).map_err(nix_err_to_io_err)?;
146 |         Ok(())
147 |     }
148 | 
149 |     pub fn deregister_user_event(&self, event_id: usize) -> Result<()> {
150 |         let mut user_event = make_user_event(event_id);
151 |         set_flags(&mut user_event, EventFlag::EV_DELETE);
152 |         kevent(self.kqueue, &vec![user_event], &mut[], 0).map_err(nix_err_to_io_err)?;
153 |         Ok(())
154 |     }
155 | 
156 |     pub fn set_timeout(&self, timeout: usize) -> Result<usize> {
157 |         self.set_timer(timeout, false)
158 |     }
159 | 
160 |     pub fn set_interval(&self, timeout: usize) -> Result<usize> {
161 |         self.set_timer(timeout, true)
162 |     }
163 | 
164 |     pub fn cancel_timeout(&self, timer_id: usize) -> Result<()> {
165 |         let mut e = make_timer(timer_id, 0, false);
166 |         set_flags(&mut e, EventFlag::EV_DELETE);
167 |         kevent(self.kqueue, &vec![e], &mut[], 0).map_err(nix_err_to_io_err)?;
168 |         Ok(())
169 |     }
170 | 
171 |     fn set_timer(&self, timeout: usize, recurring: bool) -> Result<usize> {
172 |         let id = self.total_registrations.fetch_add(1, Ordering::SeqCst);
173 |         let changes = vec![make_timer(id, timeout, recurring)];
174 |         kevent(self.kqueue, &changes, &mut[], 0).map_err(nix_err_to_io_err)?;
175 |         Ok(id)
176 |     }
177 | }
178 | 
179 | fn event_from_filter(filter: EventFilter) -> Event {
180 |     // TODO: Change Event to allow returning TIMER events instead of marking them READ
181 |     if filter == EventFilter::EVFILT_READ ||
182 |        filter == EventFilter::EVFILT_TIMER ||
183 |        filter == EventFilter::EVFILT_USER {
184 |         Event::Read
185 |     } else {
186 |         Event::Write
187 |     }
188 | }
189 | 
190 | // Each event in kqueue must have its own filter. In other words, there are separate events for
191 | // reads and writes on the same socket. We create the proper number of KEvents based on the enum
192 | // variant in the `event` parameter.
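//
// For example (values illustrative), registering fd 7 for Event::Both with id 42
// yields two kevents that differ only in their filter:
//
//     [KEvent(ident: 7, EVFILT_READ,  EV_ADD | EV_CLEAR, udata: 42),
//      KEvent(ident: 7, EVFILT_WRITE, EV_ADD | EV_CLEAR, udata: 42)]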
193 | fn make_changelist(sock_fd: RawFd, event: Event, user_data: UserData) -> Vec<KEvent> {
194 |     let mut ev = KEvent::new(
195 |         sock_fd as uintptr_t,
196 |         EventFilter::EVFILT_READ,
197 |         EventFlag::EV_ADD | EventFlag::EV_CLEAR,
198 |         FilterFlag::empty(),
199 |         0,
200 |         user_data
201 |     );
202 | 
203 |     match event {
204 |         Event::Read => vec![ev],
205 |         Event::Write => {
206 |             set_filter(&mut ev, EventFilter::EVFILT_WRITE);
207 |             vec![ev]
208 |         },
209 |         Event::Both => vec![ev, KEvent::new(ev.ident(), EventFilter::EVFILT_WRITE, ev.flags(), ev.fflags(), ev.data(), ev.udata())]
210 |     }
211 | }
212 | 
213 | fn make_user_event(id: usize) -> KEvent {
214 |     KEvent::new(
215 |         id as uintptr_t,
216 |         EventFilter::EVFILT_USER,
217 |         EventFlag::EV_ADD | EventFlag::EV_CLEAR | EventFlag::EV_ENABLE,
218 |         FilterFlag::empty(),
219 |         0,
220 |         id as UserData
221 |     )
222 | }
223 | 
224 | fn set_filter(e: &mut KEvent, filter: EventFilter) {
225 |     let ident = e.ident();
226 |     let flags = e.flags();
227 |     let fflags = e.fflags();
228 |     let udata = e.udata();
229 |     ev_set(e, ident, filter, flags, fflags, udata);
230 | }
231 | 
232 | fn set_flags(e: &mut KEvent, flags: EventFlag) {
233 |     let ident = e.ident();
234 |     let filter = e.filter();
235 |     let fflags = e.fflags();
236 |     let udata = e.udata();
237 |     ev_set(e, ident, filter, flags, fflags, udata);
238 | }
239 | 
240 | fn set_fflags(e: &mut KEvent, fflags: FilterFlag) {
241 |     let ident = e.ident();
242 |     let filter = e.filter();
243 |     let flags = e.flags();
244 |     let udata = e.udata();
245 |     ev_set(e, ident, filter, flags, fflags, udata);
246 | }
247 | 
248 | fn make_timer(id: usize, timeout: usize, recurring: bool) -> KEvent {
249 |     let mut flags = EventFlag::EV_ADD;
250 |     if !recurring {
251 |         flags |= EventFlag::EV_ONESHOT;
252 |     }
253 |     let ev = KEvent::new(
254 |         id as uintptr_t,
255 |         EventFilter::EVFILT_TIMER,
256 |         flags,
257 |         FilterFlag::empty(), // timeouts are in ms by default
258 |         timeout as intptr_t,
259 |         id as UserData
260 |     );
261 |     ev
262 | }
263 | 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | extern crate libc;
2 | extern crate nix;
3 | 
4 | #[cfg(test)]
5 | #[macro_use]
6 | extern crate assert_matches;
7 | 
8 | mod event;
9 | mod notification;
10 | mod line_reader;
11 | mod frame_reader;
12 | mod frame_writer;
13 | mod poller;
14 | mod registrar;
15 | mod timer;
16 | mod user_event;
17 | mod channel;
18 | 
19 | #[cfg(any(target_os = "linux", target_os = "android"))]
20 | mod epoll;
21 | #[cfg(any(target_os = "linux", target_os = "android"))]
22 | #[cfg(not(feature = "no_timerfd"))]
23 | mod timerfd;
24 | 
25 | #[cfg(any(target_os = "linux", target_os = "android"))]
26 | #[cfg(feature = "no_timerfd")]
27 | mod timer_heap;
28 | 
29 | #[cfg(any(target_os = "bitrig", target_os = "dragonfly",
30 |           target_os = "freebsd", target_os = "ios", target_os = "macos",
31 |           target_os = "netbsd", target_os = "openbsd"))]
32 | mod kqueue;
33 | 
34 | pub use poller::Poller;
35 | pub use registrar::Registrar;
36 | pub use event::Event;
37 | pub use notification::Notification;
38 | pub use line_reader::LineReader;
39 | pub use frame_reader::FrameReader;
40 | pub use frame_writer::FrameWriter;
41 | pub use timer::Timer;
42 | pub use channel::{channel, Sender, Receiver, ChannelError};
43 | 
44 | use std::io::{self, ErrorKind};
45 | use nix::Error::Sys;
46 | 
47 | fn nix_err_to_io_err(err: nix::Error) -> io::Error {
48 |     match err {
49 |         Sys(errno) => {
50 |             io::Error::from(errno)
51 |         }
52 |         _ => {
53 |             io::Error::new(ErrorKind::InvalidData, err)
54 |         }
55 |     }
56 | }
57 | 
--------------------------------------------------------------------------------
/src/line_reader.rs:
--------------------------------------------------------------------------------
1 | use std::io::{self, Read};
2 | use std::str::{self, Utf8Error};
3 | 
4 | #[derive(Debug)]
5 | struct ReaderBuf {
6 |     vec: Vec<u8>,
7 |     msg_start: usize,
8 |     write_loc: usize
9 | }
10 | 
11 | impl ReaderBuf {
12 |     pub fn new(buffer_size: usize) -> ReaderBuf {
13 |         let mut vec = Vec::with_capacity(buffer_size);
14 |         unsafe { vec.set_len(buffer_size); }
15 |         ReaderBuf {
16 |             vec: vec,
17 |             msg_start: 0,
18 |             write_loc: 0
19 |         }
20 |     }
21 | 
22 |     /// Take any unread bytes and move them to the beginning of the buffer.
23 |     /// Reset msg_start to 0 and write_loc to the number of unread bytes that were moved.
24 |     /// This is useful when there is an incomplete message, and we want to continue
25 |     /// reading new bytes so that we can eventually complete a message. Note that while
26 |     /// this is somewhat inefficient, most decoders expect contiguous slices, so we can't
27 |     /// use a ring buffer without some copying anyway. Additionally, this lets us re-use
28 |     /// the buffer without an allocation, and is a simpler implementation.
29 |     ///
30 |     /// TODO: This can almost certainly be made faster with unsafe code.
31 |     pub fn reset(&mut self) {
32 |         let mut write_index = 0;
33 |         for i in self.msg_start..self.write_loc {
34 |             self.vec[write_index] = self.vec[i];
35 |             write_index = write_index + 1;
36 |         }
37 |         self.write_loc = write_index;
38 |         self.msg_start = 0;
39 |     }
40 | 
41 |     pub fn is_empty(&self) -> bool {
42 |         self.write_loc == 0
43 |     }
44 | }
45 | 
46 | /// An iterator over lines available from the current spot in the buffer
47 | pub struct Iter<'a> {
48 |     buf: &'a mut ReaderBuf
49 | }
50 | 
51 | impl<'a> Iterator for Iter<'a> {
52 |     type Item = Result<String, Utf8Error>;
53 | 
54 |     fn next(&mut self) -> Option<Result<String, Utf8Error>> {
55 |         if self.buf.msg_start == self.buf.write_loc {
56 |             self.buf.reset();
57 |             return None;
58 |         }
59 | 
60 |         let slice = &self.buf.vec[self.buf.msg_start..self.buf.write_loc];
61 |         match slice.iter().position(|&c| c == '\n' as u8) {
62 |             Some(index) => {
63 |                 self.buf.msg_start = self.buf.msg_start + index + 1;
64 |                 Some(str::from_utf8(&slice[0..index+1]).map(|s| s.to_string()))
65 |             },
66 |             None => None
67 |         }
68 |     }
69 | }
70 | 
71 | /// Read and compose lines of text
72 | #[derive(Debug)]
73 | pub struct LineReader {
74 |     buf: ReaderBuf
75 | }
76 | 
77 | impl LineReader {
78 |     pub fn new(buffer_size: usize) -> LineReader {
79 |         LineReader {
80 |             buf: ReaderBuf::new(buffer_size)
81 |         }
82 |     }
83 | 
84 |     pub fn read<T: Read>(&mut self, reader: &mut T) -> io::Result<usize> {
85 |         let bytes_read = try!(reader.read(&mut self.buf.vec[self.buf.write_loc..]));
86 |         self.buf.write_loc += bytes_read;
87 |         Ok(bytes_read)
88 |     }
89 | 
90 |     pub fn iter_mut(&mut self) -> Iter {
91 |         Iter {
92 |             buf: &mut self.buf
93 |         }
94 |     }
95 | 
96 |     pub fn is_empty(&self) -> bool {
97 |         self.buf.is_empty()
98 |     }
99 | }
100 | 
101 | 
102 | #[cfg(test)]
103 | mod tests {
104 | 
105 |     use std::io::Cursor;
106 |     use super::LineReader;
107 | 
108 |     const TEXT: &'static str = "hello\nworld\nhow's\nit\ngoing?\n";
109 | 
110 |     #[test]
111 |     fn static_buffer_single_read() {
112 |         let mut data = Cursor::new(TEXT);
113 |         let mut line_reader = LineReader::new(1024);
114 |         let bytes_read = line_reader.read(&mut data).unwrap();
115 |         assert_eq!(false, line_reader.is_empty());
116 |         assert_eq!(TEXT.len(), bytes_read);
117 |         assert_eq!(5, line_reader.iter_mut().count());
118 |         assert_eq!(None, line_reader.iter_mut().next());
119 |         assert_eq!(true, line_reader.is_empty());
120 |     }
121 | 
122 |     #[test]
123 |     fn static_buffer_partial_read_follow_by_complete_read() {
124 |         let mut string = TEXT.to_string();
125 |         string.push_str("ok");
126 |         let mut data = Cursor::new(&string);
127 |         let mut line_reader = LineReader::new(1024);
128 |         let bytes_read = line_reader.read(&mut data).unwrap();
129 |         assert_eq!(false, line_reader.is_empty());
130 |         assert_eq!(string.len(), bytes_read);
131 |         assert_eq!(5, line_reader.iter_mut().count());
132 |         assert_eq!(None, line_reader.iter_mut().next());
133 |         assert_eq!(false, line_reader.is_empty());
134 |         assert_eq!(1, line_reader.read(&mut Cursor::new("\n")).unwrap());
135 |         assert_eq!("ok\n".to_string(), line_reader.iter_mut().next().unwrap().unwrap());
136 |         assert_eq!(None, line_reader.iter_mut().next());
137 |         assert_eq!(true, line_reader.is_empty());
138 |     }
139 | 
140 | }
141 | 
--------------------------------------------------------------------------------
/src/notification.rs:
--------------------------------------------------------------------------------
1 | use event::Event;
2 | 
3 | #[derive(Clone, Debug, Eq, PartialEq)]
4 | pub struct Notification {
5 |     // The unique identifier for a given socket. File descriptors can be re-used, Ids cannot.
6 |     pub id: usize,
7 |     pub event: Event
8 | }
9 | 
--------------------------------------------------------------------------------
/src/poller.rs:
--------------------------------------------------------------------------------
1 | use std::io::Result;
2 | 
3 | use registrar::Registrar;
4 | use notification::Notification;
5 | 
6 | #[cfg(any(target_os = "linux", target_os = "android"))]
7 | use epoll::KernelPoller;
8 | 
9 | #[cfg(any(target_os = "bitrig", target_os = "dragonfly",
10 |           target_os = "freebsd", target_os = "ios", target_os = "macos",
11 |           target_os = "netbsd", target_os = "openbsd"))]
12 | pub use kqueue::KernelPoller;
13 | 
14 | /// A Poller is an abstraction around a kernel I/O poller. Kernel pollers are platform specific.
15 | ///
16 | /// A Poller is tied to a Registrar of the same type. The registrar allows registering file
17 | /// descriptors with the poller, while the poller waits for read or write events on those file
18 | /// descriptors.
19 | pub struct Poller {
20 |     registrar: Registrar,
21 |     inner: KernelPoller
22 | }
23 | 
24 | impl Poller {
25 |     pub fn new() -> Result<Poller> {
26 |         let inner = KernelPoller::new()?;
27 |         Ok(Poller {
28 |             registrar: Registrar::new(inner.get_registrar()),
29 |             inner: inner
30 |         })
31 |     }
32 | 
33 |     /// Return a Registrar that can be used to register Sockets with a Poller.
34 |     ///
35 |     /// Registrars are cloneable and can be used on a different thread from the Poller.
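    ///
    /// A minimal sketch of the intended flow (error handling elided; the address is
    /// illustrative):
    ///
    /// ```no_run
    /// use amy::{Poller, Event};
    /// use std::net::TcpListener;
    ///
    /// let mut poller = Poller::new().unwrap();
    /// let registrar = poller.get_registrar();
    /// let listener = TcpListener::bind("127.0.0.1:0").unwrap();
    /// let id = registrar.register(&listener, Event::Read).unwrap();
    /// let notifications = poller.wait(1000).unwrap();
    /// ```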
36 |     pub fn get_registrar(&self) -> Registrar {
37 |         self.registrar.clone()
38 |     }
39 | 
40 |     /// Wait for notifications from the Poller
41 |     pub fn wait(&mut self, timeout_ms: usize) -> Result<Vec<Notification>> {
42 |         self.inner.wait(timeout_ms)
43 |     }
44 | }
45 | 
--------------------------------------------------------------------------------
/src/registrar.rs:
--------------------------------------------------------------------------------
1 | use std::os::unix::io::AsRawFd;
2 | use std::io::Result;
3 | use std::fmt::Debug;
4 | use event::Event;
5 | use channel::{channel, sync_channel, Sender, SyncSender, Receiver};
6 | 
7 | #[cfg(any(target_os = "linux", target_os = "android"))]
8 | use epoll::KernelRegistrar;
9 | 
10 | #[cfg(any(target_os = "bitrig", target_os = "dragonfly",
11 |           target_os = "freebsd", target_os = "ios", target_os = "macos",
12 |           target_os = "netbsd", target_os = "openbsd"))]
13 | pub use kqueue::KernelRegistrar;
14 | 
15 | /// An abstraction for registering file descriptors with a kernel poller
16 | ///
17 | /// A Registrar is tied to a Poller of the same type, and registers sockets and unique IDs for those
18 | /// sockets as userdata that can be waited on by the poller. A Registrar should only be retrieved via
19 | /// a call to Poller::get_registrar(&self), and not created on its own.
20 | #[derive(Debug, Clone)]
21 | pub struct Registrar {
22 |     inner: KernelRegistrar
23 | }
24 | 
25 | impl Registrar {
26 |     /// This method is public only so it can be used directly by the Poller. Do not use it.
27 |     #[doc(hidden)]
28 |     pub fn new(inner: KernelRegistrar) -> Registrar {
29 |         Registrar {
30 |             inner: inner
31 |         }
32 |     }
33 | 
34 |     /// Register a socket for a given event type with a Poller and return its unique ID
35 |     ///
36 |     /// Note that if the sock type is not pollable, then an error will be returned.
37 |     pub fn register<T: AsRawFd + Debug>(&self, sock: &T, event: Event) -> Result<usize> {
38 |         self.inner.register(sock, event)
39 |     }
40 | 
41 |     /// Reregister a socket with a Poller
42 |     pub fn reregister<T: AsRawFd + Debug>(&self, id: usize, sock: &T, event: Event) -> Result<()> {
43 |         self.inner.reregister(id, sock, event)
44 |     }
45 | 
46 |     /// Remove a socket from a Poller
47 |     ///
48 |     /// Will return an error if the socket is not present in the poller when using epoll. Returns no
49 |     /// error with kqueue.
50 |     pub fn deregister<T: AsRawFd + Debug>(&self, sock: &T) -> Result<()> {
51 |         self.inner.deregister(sock)
52 |     }
53 | 
54 |     /// Set a timeout in ms that fires once
55 |     ///
56 |     /// Note that this timeout may be delivered late due to the time taken between the calls to
57 |     /// `Poller::wait()` exceeding the timeout, but it will never be delivered early.
58 |     ///
59 |     /// Note that an error will be returned if the maximum number of file descriptors is already
60 |     /// registered with the kernel poller.
61 |     pub fn set_timeout(&self, timeout: usize) -> Result<usize> {
62 |         self.inner.set_timeout(timeout)
63 |     }
64 | 
65 |     /// Set a recurring timeout in ms
66 |     ///
67 |     /// A notification with the returned id will be sent at the given interval. The timeout can be
68 |     /// cancelled with a call to `cancel_timeout()`
69 |     ///
70 |     /// Note that if `Poller::wait()` is not called in a loop, these timeouts, as well as other
71 |     /// notifications, will not be delivered. Timeouts may be delivered late, due to the time taken
72 |     /// between calls to `Poller::wait()` exceeding the timeout, but will never be delivered early.
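    ///
    /// A minimal sketch, mirroring the crate's interval test (values illustrative):
    ///
    /// ```no_run
    /// use amy::Poller;
    ///
    /// let mut poller = Poller::new().unwrap();
    /// let registrar = poller.get_registrar();
    /// let id = registrar.set_interval(100).unwrap();
    /// // Each call to wait() yields a notification carrying `id` roughly every 100ms
    /// let _notifications = poller.wait(1000).unwrap();
    /// registrar.cancel_timeout(id).unwrap();
    /// ```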
73 |     ///
74 |     /// Note that an error will be returned if the maximum number of file descriptors is already
75 |     /// registered with the kernel poller.
76 |     pub fn set_interval(&self, interval: usize) -> Result<usize> {
77 |         self.inner.set_interval(interval)
78 |     }
79 | 
80 |     /// Cancel a recurring timeout.
81 |     ///
82 |     /// Note that there may be timeouts in flight already that were not yet cancelled, so it is
83 |     /// possible that you may receive notifications after the timeout was cancelled. This can be
84 |     /// mitigated by keeping track of live timers and only processing timeout events for known live
85 |     /// timers.
86 |     ///
87 |     /// An error will be returned if the timer is not registered with the kernel poller.
88 |     pub fn cancel_timeout(&self, timer_id: usize) -> Result<()> {
89 |         self.inner.cancel_timeout(timer_id)
90 |     }
91 | 
92 |     /// Create an asynchronous mpsc channel where the Receiver is registered with the kernel poller.
93 |     ///
94 |     /// Each new Receiver gets registered using a user space event mechanism (either eventfd or
95 |     /// kevent depending upon OS). When a send occurs the kernel notification mechanism (a syscall on
96 |     /// a file descriptor) will be issued to alert the kernel poller to wakeup and issue a
97 |     /// notification for the Receiver. However, since syscalls are expensive, an optimization is
98 |     /// made where if the kernel poller is already set to awaken, or currently processing events, a
99 |     /// new syscall will not be made.
100 |     ///
101 |     /// Standard rust mpsc channels are used internally and have non-blocking semantics. Note that
102 |     /// the return type is different since the Receiver is being registered with the kernel poller
103 |     /// and this can fail.
104 |     ///
105 |     /// When a Receiver is dropped it will become unregistered.
106 |     pub fn channel<T>(&mut self) -> Result<(Sender<T>, Receiver<T>)> {
107 |         channel(&mut self.inner)
108 |     }
109 | 
110 |     /// Create a synchronous mpsc channel where the Receiver is registered with the kernel poller.
111 |     ///
112 |     /// Each new Receiver gets registered using a user space event mechanism (either eventfd or
113 |     /// kevent depending upon OS). When a send occurs the kernel notification mechanism (a syscall on
114 |     /// a file descriptor) will be issued to alert the kernel poller to wakeup and issue a
115 |     /// notification for the Receiver. However, since syscalls are expensive, an optimization is
116 |     /// made where if the kernel poller is already set to awaken, or currently processing events, a
117 |     /// new syscall will not be made.
118 |     ///
119 |     /// Standard rust synchronous mpsc channels are used internally and block when the queue is
120 |     /// full, as given by the bound in the constructor. Note that the return type is different
121 |     /// since the Receiver is being registered with the kernel poller and this can fail.
122 |     ///
123 |     /// When a Receiver is dropped it will become unregistered.
124 |     pub fn sync_channel<T>(&mut self, bound: usize) -> Result<(SyncSender<T>, Receiver<T>)> {
125 |         sync_channel(&mut self.inner, bound)
126 |     }
127 | }
128 | 
--------------------------------------------------------------------------------
/src/timer.rs:
--------------------------------------------------------------------------------
1 | use std::os::unix::io::{RawFd, AsRawFd};
2 | 
3 | use std::io::{Error, Result};
4 | use std::mem;
5 | use libc;
6 | 
7 | /// An opaque handle to a kernel timer instance.
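///
/// A sketch of the re-arm pattern described on `arm()` below, assuming a
/// hypothetical `timers` map from notification ids to `Timer`s:
///
/// ```ignore
/// if let Some(timer) = timers.get(&notification.id) {
///     timer.arm().unwrap();
/// }
/// ```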
8 | ///
9 | /// On Linux this contains a file descriptor created with
10 | /// [timerfd_create()](http://man7.org/linux/man-pages/man2/timerfd_create.2.html)
11 | /// On systems using kqueue, a file descriptor is not needed, so it is set to 0.
12 | #[derive(Debug)]
13 | pub struct Timer {
14 |     #[doc(hidden)]
15 |     pub fd: RawFd,
16 | 
17 |     #[doc(hidden)]
18 |     pub interval: bool
19 | }
20 | 
21 | 
22 | impl Timer {
23 |     /// Re-arm a recurring timer.
24 |     ///
25 |     /// This method must be called when an interval timer notification is received. If not called,
26 |     /// the next timer notification will not be received.
27 |     ///
28 |     /// The pattern is to store the timers in a hashmap keyed by their IDs. When a timer id is received
29 |     /// and the timer looked up, the user should call arm().
30 |     ///
31 |     /// This method doesn't actually change the timing of the recurring timer. An interval timer
32 |     /// will fire exactly at the interval specified originally. This just allows the kernel poller
33 |     /// to receive the timer event and send a notification. On epoll based systems, if a timer has
34 |     /// already fired because the timer period has elapsed, the kernel poller will be woken up
35 |     /// immediately after this call and a notification will be sent. This should not be a problem in
36 |     /// practice, as timers should be re-armed before the next timer fires. Otherwise the timer
37 |     /// interval is too short to be useful.
38 |     ///
39 |     /// On Linux timers are file descriptors registered with epoll. Since we use edge triggering we
40 |     /// need to read the file descriptors to change their state. Note that even if we used level
41 |     /// triggering we'd still need to do this, but for a different reason. The timer would fire
42 |     /// indefinitely as ready in the level triggered case, rather than never firing again as in the
43 |     /// edge triggered case.
44 |     ///
45 |     pub fn arm(&self) -> Result<()> {
46 |         let buf: u64 = 0;
47 |         unsafe {
48 |             let ptr: *mut libc::c_void = mem::transmute(&buf);
49 |             if libc::read(self.fd, ptr, 8) < 0 {
50 |                 return Err(Error::last_os_error());
51 |             }
52 |             Ok(())
53 |         }
54 |     }
55 | }
56 | 
57 | impl Drop for Timer {
58 |     fn drop(&mut self) {
59 |         unsafe {
60 |             libc::close(self.fd);
61 |         }
62 |     }
63 | }
64 | 
65 | impl AsRawFd for Timer {
66 |     fn as_raw_fd(&self) -> RawFd {
67 |         self.fd
68 |     }
69 | }
70 | 
--------------------------------------------------------------------------------
/src/timer_heap.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BinaryHeap;
2 | use std::cmp::{Ordering, Ord, PartialOrd, PartialEq};
3 | use std::time::{Instant, Duration};
4 | use event::Event;
5 | use notification::Notification;
6 | 
7 | /// Store timers in a binary heap. Keep them sorted by which timer is going to expire first.
8 | pub struct TimerHeap {
9 |     timers: BinaryHeap<TimerEntry>
10 | }
11 | 
12 | impl TimerHeap {
13 |     /// Create a new TimerHeap
14 |     pub fn new() -> TimerHeap {
15 |         TimerHeap {
16 |             timers: BinaryHeap::new()
17 |         }
18 |     }
19 | 
20 |     /// Return the number of timers in the heap
21 |     #[allow(dead_code)] // only used in tests right now
22 |     pub fn len(&self) -> usize {
23 |         self.timers.len()
24 |     }
25 | 
26 |     /// Insert a TimerEntry into the heap
27 |     pub fn insert(&mut self, entry: TimerEntry) {
28 |         self.timers.push(entry);
29 |     }
30 | 
31 |     /// Remove a TimerEntry by Id
32 |     ///
33 |     /// Return the entry if it exists, None otherwise
34 |     ///
35 |     /// Note, in a large heap this is probably expensive.
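    /// (It pops entries until it finds the matching id and then pushes the survivors
    /// back, so a miss costs O(n log n) pushes and pops.)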
36 |     pub fn remove(&mut self, id: usize) -> Option<TimerEntry> {
37 |         let mut popped = Vec::with_capacity(self.timers.len());
38 |         while let Some(entry) = self.timers.pop() {
39 |             if entry.id == id {
40 |                 self.timers.extend(popped);
41 |                 return Some(entry);
42 |             } else {
43 |                 popped.push(entry);
44 |             }
45 |         }
46 |         self.timers.extend(popped);
47 |         None
48 |     }
49 | 
50 |     /// Return the amount of time remaining (in ms) for the earliest expiring timer
51 |     /// Return `None` if there are no timers in the heap
52 |     pub fn time_remaining(&self) -> Option<u64> {
53 |         self._time_remaining(Instant::now())
54 |     }
55 | 
56 |     /// A deterministically testable version of `time_remaining()`
57 |     fn _time_remaining(&self, now: Instant) -> Option<u64> {
58 |         self.timers.peek().map(|e| {
59 |             if now > e.expires_at {
60 |                 return 0;
61 |             }
62 |             let duration = e.expires_at - now;
63 |             // We round up to the next millisecond if the duration has a fractional number
64 |             // of milliseconds, so that we never fire early.
65 |             let nanos = duration.subsec_nanos() as u64;
66 |             // TODO: This can almost certainly be done faster
67 |             let subsec_ms = nanos / 1000000;
68 |             let mut remaining = duration.as_secs()*1000 + subsec_ms;
69 |             if subsec_ms * 1000000 < nanos {
70 |                 remaining += 1;
71 |             }
72 |             remaining
73 |         })
74 |     }
75 | 
76 |     /// Return the earliest timeout based on a user timeout and the least remaining time in the
77 |     /// next timer to fire.
78 |     pub fn earliest_timeout(&self, user_timeout_ms: usize) -> usize {
79 |         if let Some(remaining) = self.time_remaining() {
80 |             if user_timeout_ms < remaining as usize {
81 |                 user_timeout_ms
82 |             } else {
83 |                 remaining as usize
84 |             }
85 |         } else {
86 |             user_timeout_ms
87 |         }
88 |     }
89 | 
90 |     /// Return all expired timer ids as Read notifications
91 |     ///
92 |     /// Any recurring timers will be re-added to the heap in the correct spot
93 |     pub fn expired(&mut self) -> Vec<Notification> {
94 |         self._expired(Instant::now())
95 |     }
96 | 
97 |     /// A deterministically testable version of `expired()`
98 |     pub fn _expired(&mut self, now: Instant) -> Vec<Notification> {
99 |         let mut expired = Vec::new();
100 |         while let Some(mut popped) = self.timers.pop() {
101 |             if popped.expires_at <= now {
102 |                 expired.push(Notification {id: popped.id, event: Event::Read});
103 |                 if popped.recurring {
104 |                     // We use the expires_at time so we don't keep skewing later and later
105 |                     // by adding the duration to the current time.
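                    // e.g. a 100ms interval due at t=1000 that is only processed at
                    // t=1003 is re-queued for t=1100 (1000 + 100), not t=1103.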
106 |                     popped.expires_at += popped.duration;
107 |                     self.timers.push(popped);
108 |                 }
109 |             } else {
110 |                 self.timers.push(popped);
111 |                 return expired;
112 |             }
113 |         }
114 |         expired
115 |     }
116 | }
117 | 
118 | #[derive(Eq, Debug)]
119 | pub struct TimerEntry {
120 |     recurring: bool,
121 |     duration: Duration,
122 |     expires_at: Instant,
123 |     id: usize
124 | }
125 | 
126 | impl TimerEntry {
127 |     pub fn new(id: usize, duration_ms: u64, recurring: bool) -> TimerEntry {
128 |         let duration = Duration::from_millis(duration_ms);
129 |         TimerEntry {
130 |             recurring: recurring,
131 |             duration: duration,
132 |             expires_at: Instant::now() + duration,
133 |             id: id
134 |         }
135 |     }
136 | }
137 | 
138 | impl Ord for TimerEntry {
139 |     // Order the times backwards because we are sorting them via a max heap
140 |     fn cmp(&self, other: &TimerEntry) -> Ordering {
141 |         if self.expires_at > other.expires_at {
142 |             return Ordering::Less;
143 |         }
144 |         if self.expires_at < other.expires_at {
145 |             return Ordering::Greater;
146 |         }
147 |         Ordering::Equal
148 |     }
149 | }
150 | 
151 | impl PartialOrd for TimerEntry {
152 |     fn partial_cmp(&self, other: &TimerEntry) -> Option<Ordering> {
153 |         Some(self.cmp(other))
154 |     }
155 | }
156 | 
157 | impl PartialEq for TimerEntry {
158 |     fn eq(&self, other: &TimerEntry) -> bool {
159 |         self.expires_at == other.expires_at
160 |     }
161 | }
162 | 
163 | #[cfg(test)]
164 | mod tests {
165 |     use super::{TimerHeap, TimerEntry};
166 |     use std::time::{Instant, Duration};
167 | 
168 |     #[test]
169 |     fn time_remaining() {
170 |         let mut heap = TimerHeap::new();
171 |         let now = Instant::now();
172 |         let duration = Duration::from_millis(500);
173 |         let entry = TimerEntry {
174 |             id: 1,
175 |             recurring: false,
176 |             duration: duration,
177 |             expires_at: now + duration
178 |         };
179 |         heap.insert(entry);
180 |         assert_matches!(heap._time_remaining(now), Some(500));
181 |         assert_matches!(heap._time_remaining(now + duration), Some(0));
182 |         assert_matches!(heap._time_remaining(now + duration + Duration::from_millis(100)),
183 |                         Some(0));
184 |         assert_matches!(heap.remove(2), None);
185 |         let entry = heap.remove(1).unwrap();
186 |         assert_eq!(entry.id, 1);
187 |         assert_matches!(heap._time_remaining(now), None);
188 |     }
189 | 
190 |     #[test]
191 |     fn expired_non_recurring() {
192 |         let mut heap = TimerHeap::new();
193 |         let now = Instant::now();
194 |         let duration = Duration::from_millis(500);
195 |         let entry = TimerEntry {
196 |             id: 1,
197 |             recurring: false,
198 |             duration: duration,
199 |             expires_at: now + duration
200 |         };
201 |         heap.insert(entry);
202 |         assert_eq!(heap._expired(now), vec![]);
203 |         let v = heap._expired(now + duration);
204 |         assert_eq!(v.len(), 1);
205 |         assert_eq!(heap.len(), 0);
206 |         assert_eq!(heap._expired(now + duration), vec![]);
207 |     }
208 | 
209 |     #[test]
210 |     fn expired_recurring() {
211 |         let mut heap = TimerHeap::new();
212 |         let now = Instant::now();
213 |         let duration = Duration::from_millis(500);
214 |         let entry = TimerEntry {
215 |             id: 1,
216 |             recurring: true,
217 |             duration: duration,
218 |             expires_at: now + duration
219 |         };
220 |         heap.insert(entry);
221 |         assert_eq!(heap._expired(now), vec![]);
222 |         let v = heap._expired(now + duration);
223 |         assert_eq!(v.len(), 1);
224 |         assert_eq!(heap.len(), 1);
225 |         assert_eq!(heap._expired(now + duration + Duration::from_millis(1)), vec![]);
226 |         let v = heap._expired(now + duration + duration);
227 |         assert_eq!(v.len(), 1);
228 |         assert_eq!(heap.len(), 1);
229 |         assert_eq!(heap._expired(now + duration + duration), vec![]);
230 |     }
231 | }
232 | 
--------------------------------------------------------------------------------
/src/timerfd.rs:
--------------------------------------------------------------------------------
1 | /// An interface to timerfd in Linux
2 | ///
3 | /// This interface is specific to the use of Amy and not general enough to be in its own crate.
4 | 
5 | use std::ptr;
6 | use std::mem;
7 | use std::os::unix::io::{IntoRawFd, RawFd};
8 | use nix::{Error, Result};
9 | use nix::errno::Errno;
10 | use libc::{self, c_int, CLOCK_MONOTONIC, O_NONBLOCK, O_CLOEXEC, timespec, time_t};
11 | 
12 | static TFD_NONBLOCK: c_int = O_NONBLOCK;
13 | static TFD_CLOEXEC: c_int = O_CLOEXEC;
14 | 
15 | mod ffi {
16 |     use libc::{c_int, timespec};
17 | 
18 |     #[repr(C)]
19 |     pub struct Itimerspec {
20 |         pub it_interval: timespec,
21 |         pub it_value: timespec
22 |     }
23 | 
24 |     extern {
25 |         pub fn timerfd_create(clockid: c_int, flags: c_int) -> c_int;
26 |         pub fn timerfd_settime(fd: c_int,
27 |                                flags: c_int,
28 |                                new: *const Itimerspec,
29 |                                old: *mut Itimerspec) -> c_int;
30 |     }
31 | }
32 | 
33 | pub struct TimerFd {
34 |     fd: c_int
35 | }
36 | 
37 | impl TimerFd {
38 |     pub fn new(timeout_in_ms: usize, recurring: bool) -> Result<TimerFd> {
39 |         let fd = unsafe { ffi::timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC) };
40 |         if fd < 0 {
41 |             return Err(Error::Sys(Errno::last()));
42 |         }
43 |         let timer_fd = TimerFd {fd: fd};
44 |         try!(arm_timer(fd, timeout_in_ms, recurring));
45 | 
46 |         Ok(timer_fd)
47 |     }
48 | 
49 | }
50 | 
51 | impl Drop for TimerFd {
52 |     fn drop(&mut self) {
53 |         unsafe {
54 |             libc::close(self.fd);
55 |         }
56 |     }
57 | }
58 | 
59 | impl IntoRawFd for TimerFd {
60 |     fn into_raw_fd(self) -> RawFd {
61 |         let fd = self.fd;
62 |         // Don't run drop on self when it goes out of scope because the purpose of this function is
63 |         // to take ownership of the fd
64 |         mem::forget(self);
65 |         fd
66 |     }
67 | }
68 | 
69 | fn arm_timer(fd: c_int, timeout: usize, recurring: bool) -> Result<()> {
70 |     let it_value = ms_to_timespec(timeout as time_t);
71 |     let it_interval = if recurring {
72 |         it_value.clone()
73 |     } else {
74 |         timespec {
75 |             tv_sec: 0,
76 |             tv_nsec: 0
77 |         }
78 |     };
79 |     let itimerspec = ffi::Itimerspec {
80 |         it_interval: it_interval,
81 |         it_value: it_value
82 |     };
83 | 
84 |     let res = unsafe { ffi::timerfd_settime(fd, 0, &itimerspec, ptr::null_mut()) };
85 |     if res < 0 {
86 |         return Err(Error::Sys(Errno::last()));
87 |     }
88 | 
89 |     Ok(())
90 | }
91 | 
92 | fn ms_to_timespec(timeout_in_ms: time_t) -> timespec {
93 |     timespec {
94 |         tv_sec: timeout_in_ms / 1000,
95 |         tv_nsec: (timeout_in_ms % 1000) * 1000 * 1000
96 |     }
97 | }
98 | 
--------------------------------------------------------------------------------
/src/user_event.rs:
--------------------------------------------------------------------------------
1 | use std::os::unix::io::{RawFd, AsRawFd};
2 | use std::io::Result;
3 | 
4 | #[cfg(any(target_os = "linux", target_os = "android"))]
5 | use std::io::Error;
6 | 
7 | #[cfg(any(target_os = "linux", target_os = "android"))]
8 | use std::mem;
9 | #[cfg(any(target_os = "linux", target_os = "android"))]
10 | use libc;
11 | 
12 | /// An opaque handle to a user level event.
13 | ///
14 | /// On Linux this contains a file descriptor created with
15 | /// [eventfd()](http://man7.org/linux/man-pages/man2/eventfd.2.html)
16 | #[cfg(any(target_os = "linux", target_os = "android"))]
17 | #[derive(Debug)]
18 | pub struct UserEvent {
19 |     #[doc(hidden)]
20 |     pub id: usize,
21 | 
22 |     #[doc(hidden)]
23 |     pub fd: RawFd
24 | }
25 | 
26 | #[cfg(any(target_os = "linux", target_os = "android"))]
27 | impl UserEvent {
28 |     pub fn get_id(&self) -> usize {
29 |         self.id
30 |     }
31 | 
32 |     pub fn clear(&self) -> Result<()> {
33 |         let buf: u64 = 0;
34 |         unsafe {
35 |             let ptr: *mut libc::c_void = mem::transmute(&buf);
36 |             if libc::read(self.fd, ptr, 8) < 0 {
37 |                 return Err(Error::last_os_error());
38 |             }
39 |             Ok(())
40 |         }
41 |     }
42 | 
43 |     pub fn trigger(&self) -> Result<()> {
44 |         let buf: u64 = 1;
45 |         unsafe {
46 |             let ptr: *const libc::c_void = mem::transmute(&buf);
47 |             if libc::write(self.fd, ptr, 8) < 0 {
48 |                 return Err(Error::last_os_error());
49 |             }
50 |             Ok(())
51 |         }
52 |     }
53 | }
54 | 
55 | #[cfg(any(target_os = "linux", target_os = "android"))]
56 | impl Drop for UserEvent {
57 |     fn drop(&mut self) {
58 |         unsafe {
59 |             libc::close(self.fd);
60 |         }
61 |     }
62 | }
63 | 
64 | #[cfg(any(target_os = "linux", target_os = "android"))]
65 | impl AsRawFd for UserEvent {
66 |     fn as_raw_fd(&self) -> RawFd {
67 |         self.fd
68 |     }
69 | }
70 | 
71 | /* NON LINUX SYSTEMS */
72 | 
73 | #[cfg(not(any(target_os = "linux", target_os = "android")))]
74 | pub use kqueue::KernelRegistrar;
75 | 
76 | /// An opaque handle to a user level event.
77 | ///
78 | /// On kqueue based systems there is no file descriptor, so the event is
79 | /// cloneable.
80 | #[cfg(not(any(target_os = "linux", target_os = "android")))]
81 | #[derive(Debug, Clone)]
82 | pub struct UserEvent {
83 |     #[doc(hidden)]
84 |     pub id: usize,
85 | 
86 |     #[doc(hidden)]
87 |     pub registrar: KernelRegistrar
88 | }
89 | 
90 | #[cfg(not(any(target_os = "linux", target_os = "android")))]
91 | impl UserEvent {
92 |     pub fn get_id(&self) -> usize {
93 |         self.id
94 |     }
95 | 
96 |     pub fn clear(&self) -> Result<()> {
97 |         self.registrar.clear_user_event(&self)
98 |     }
99 | 
100 |     pub fn trigger(&self) -> Result<()> {
101 |         self.registrar.trigger_user_event(&self)
102 |     }
103 | 
104 |     pub fn deregister(&self) -> Result<()> {
105 |         self.registrar.deregister_user_event(self.id)
106 |     }
107 | }
108 | 
109 | // We don't actually need a RawFd for kqueue and don't want to shrink the id size from usize to i32
110 | #[cfg(not(any(target_os = "linux", target_os = "android")))]
111 | impl AsRawFd for UserEvent {
112 |     fn as_raw_fd(&self) -> RawFd {
113 |         0
114 |     }
115 | }
116 | 
--------------------------------------------------------------------------------
/tests/channel_test.rs:
--------------------------------------------------------------------------------
1 | /// Test Channels where the receiver is pollable
2 | 
3 | extern crate amy;
4 | 
5 | use amy::{Poller, Event};
6 | 
7 | #[test]
8 | fn send_wakes_poller() {
9 |     let mut poller = Poller::new().unwrap();
10 |     let mut registrar = poller.get_registrar();
11 |     let (tx, rx) = registrar.channel().unwrap();
12 | 
13 |     // no notifications if nothing is registered
14 |     let notifications = poller.wait(50).unwrap();
15 |     assert_eq!(0, notifications.len());
16 | 
17 |     // Send causes the poller to wakeup
18 |     tx.send("a").unwrap();
19 | 
20 |     // We poll and get a notification that there is something to receive and receive the sent value
21 |     let notifications = poller.wait(5000).unwrap();
22 |     assert_eq!(1, notifications.len());
23 |     assert_eq!(rx.get_id(), notifications[0].id);
24 |     assert_eq!(Event::Read, notifications[0].event);
25 |     assert_eq!("a", rx.try_recv().unwrap());
26 |     assert!(rx.try_recv().is_err());
27 | }
28 | 
29 | #[test]
30 | fn multiple_sends_wake_poller_once() {
31 |     let mut poller = Poller::new().unwrap();
32 |     let mut registrar = poller.get_registrar();
33 |     let (tx, rx) = registrar.channel().unwrap();
34 | 
35 |     tx.send("a").unwrap();
36 |     tx.send("b").unwrap();
37 | 
38 |     let notifications = poller.wait(5000).unwrap();
39 |     assert_eq!(1, notifications.len());
40 |     assert_eq!(rx.get_id(), notifications[0].id);
41 |     assert_eq!("a", rx.try_recv().unwrap());
42 |     assert_eq!("b", rx.try_recv().unwrap());
43 | 
44 |     let notifications = poller.wait(50).unwrap();
45 |     assert_eq!(0, notifications.len());
46 | }
47 | 
48 | #[test]
49 | fn send_before_poll_and_after_poll_but_before_recv_only_wakes_poller_once() {
50 |     let mut poller = Poller::new().unwrap();
51 |     let mut registrar = poller.get_registrar();
52 |     let (tx, rx) = registrar.channel().unwrap();
53 | 
54 |     tx.send("a").unwrap();
55 | 
56 |     let notifications = poller.wait(5000).unwrap();
57 |     assert_eq!(1, notifications.len());
58 |     assert_eq!(rx.get_id(), notifications[0].id);
59 | 
60 |     // We haven't done a receive yet, so this just increments the atomic counter instead of
61 |     // triggering the poll wakeup. This is an optimization since atomic counters are much cheaper
62 |     // than syscalls.
63 |     tx.send("b").unwrap();
64 | 
65 |     assert_eq!("a", rx.try_recv().unwrap());
66 |     assert_eq!("b", rx.try_recv().unwrap());
67 | 
68 |     let notifications = poller.wait(50).unwrap();
69 |     assert_eq!(0, notifications.len());
70 | }
71 | 
72 | #[test]
73 | fn send_after_receive_after_poll_followed_by_recv_wakes_poller_again() {
74 |     let mut poller = Poller::new().unwrap();
75 |     let mut registrar = poller.get_registrar();
76 |     let (tx, rx) = registrar.channel().unwrap();
77 | 
78 |     tx.send("a").unwrap();
79 | 
80 |     let notifications = poller.wait(5000).unwrap();
81 |     assert_eq!(1, notifications.len());
82 |     assert_eq!(rx.get_id(), notifications[0].id);
83 |     assert_eq!("a", rx.try_recv().unwrap());
84 | 
85 |     // At this point we did a receive so the atomic pending counter is reduced to zero again. Any
86 |     // new send will trigger a state change on the file descriptor. This will cause the poller to
87 |     // wakeup. A single recv will just decrement the counter, but not call a subsequent clear,
88 |     // so the receiver will remain readable and wake the poller. Clear is only called when
89 |     // rx.try_recv() is called one more time.
90 | 
91 |     tx.send("b").unwrap();
92 |     assert_eq!("b", rx.try_recv().unwrap());
93 |     let notifications = poller.wait(1000).unwrap();
94 |     assert_eq!(1, notifications.len());
95 |     assert_eq!(rx.get_id(), notifications[0].id);
96 |     assert!(rx.try_recv().is_err());
97 | }
98 | 
99 | #[test]
100 | fn send_after_receive_after_poll_followed_by_recv_until_err_doesnt_wake_poller_again() {
101 |     let mut poller = Poller::new().unwrap();
102 |     let mut registrar = poller.get_registrar();
103 |     let (tx, rx) = registrar.channel().unwrap();
104 | 
105 |     tx.send("a").unwrap();
106 | 
107 |     let notifications = poller.wait(5000).unwrap();
108 |     assert_eq!(1, notifications.len());
109 |     assert_eq!(rx.get_id(), notifications[0].id);
110 |     assert_eq!("a", rx.try_recv().unwrap());
111 | 
112 |     // At this point we did a receive so the atomic pending counter is reduced to zero again. Any
113 |     // new send will trigger a state change on the file descriptor. This will cause the poller to
114 |     // wakeup. However, if we do another receive, there will be nothing to receive since the
115 |     // counter is 0. This will result in a clear on the file descriptor which will make it no
116 |     // longer readable and therefore not wake the poller.
117 | 
118 |     tx.send("b").unwrap();
119 |     assert_eq!("b", rx.try_recv().unwrap());
120 |     assert!(rx.try_recv().is_err());
121 |     let notifications = poller.wait(50).unwrap();
122 |     assert_eq!(0, notifications.len());
123 | }
124 | 
125 | #[test]
126 | /// Ensure that when the user event is cleared that retriggering it wakes the poller
127 | fn send_poll_receive_twice_then_send_poll_receive_once() {
128 |     let mut poller = Poller::new().unwrap();
129 |     let mut registrar = poller.get_registrar();
130 |     let (tx, rx) = registrar.channel().unwrap();
131 | 
132 |     tx.send("a").unwrap();
133 | 
134 |     let notifications = poller.wait(5000).unwrap();
135 |     assert_eq!(1, notifications.len());
136 |     assert_eq!(rx.get_id(), notifications[0].id);
137 |     assert_eq!("a", rx.try_recv().unwrap());
138 |     assert!(rx.try_recv().is_err());
139 | 
140 |     tx.send("b").unwrap();
141 | 
142 |     let notifications = poller.wait(5000).unwrap();
143 |     assert_eq!(1, notifications.len());
144 |     assert_eq!(rx.get_id(), notifications[0].id);
145 |     assert_eq!("b", rx.try_recv().unwrap());
146 | }
147 | 
148 | #[test]
149 | fn simple_sync_channel_test() {
150 |     let mut poller = Poller::new().unwrap();
151 |     let mut registrar = poller.get_registrar();
152 |     let (tx, rx) = registrar.sync_channel(1).unwrap();
153 | 
154 |     // no notifications if nothing is registered
155 |     let notifications = poller.wait(50).unwrap();
156 |     assert_eq!(0, notifications.len());
157 | 
158 |     // Send causes the poller to wakeup
159 |     tx.send("a").unwrap();
160 | 
161 |     // We poll and get a notification that there is something to receive and receive the sent value
162 |     let notifications = poller.wait(5000).unwrap();
163 |     assert_eq!(1, notifications.len());
164 |     assert_eq!(rx.get_id(), notifications[0].id);
165 |     assert_eq!(Event::Read, notifications[0].event);
166 | 
167 |     // Send should fail because the buffer is of size 1
168 |     assert!(tx.try_send("b").is_err());
169 | 
170 |     assert_eq!("a", rx.try_recv().unwrap());
171 |     assert!(rx.try_recv().is_err());
172 | 
173 |     // Send should succeed because we received the previous value
174 |     tx.try_send("b").unwrap();
175 |     assert_eq!("b", rx.try_recv().unwrap());
176 | }
177 | 
--------------------------------------------------------------------------------
/tests/edge-trigger-test.rs:
--------------------------------------------------------------------------------
1 | /// Ensure all sockets operate in edge trigger mode.
2 | 
3 | extern crate amy;
4 | 
5 | use std::net::{TcpListener, TcpStream};
6 | use std::thread;
7 | use std::str;
8 | use std::io::{Read, Write};
9 | 
10 | use amy::{
11 |     Poller,
12 |     Event,
13 | };
14 | 
15 | const IP: &'static str = "127.0.0.1:10008";
16 | 
17 | /// This test ensures that only one write event is received, even if no data is written. On a level
18 | /// triggered system, write events would come on every poll.
19 | #[test]
20 | fn edge_trigger() {
21 | 
22 |     // Spawn a listening thread and accept one connection
23 |     thread::spawn(|| {
24 |         let listener = TcpListener::bind(IP).unwrap();
25 |         let (mut sock, _) = listener.accept().unwrap();
26 |         // When the test completes, the client will send a "stop" message to shut down the server.
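        // read_to_string only returns once the client side is closed and EOF is seen.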
27 |         let mut buf = String::new();
28 |         sock.read_to_string(&mut buf).unwrap();
29 |     });
30 | 
31 |     // Setup a client socket in non-blocking mode
32 |     // Loop until we connect because the listener needs to start first
33 |     let mut sock;
34 |     loop {
35 |         if let Ok(s) = TcpStream::connect(IP) {
36 |             sock = s;
37 |             break;
38 |         }
39 |     }
40 |     sock.set_nonblocking(true).unwrap();
41 | 
42 |     // Create the poller and registrar
43 |     let mut poller = Poller::new().unwrap();
44 |     let registrar = poller.get_registrar();
45 | 
46 |     // The socket should become writable once it's connected
47 |     let id = registrar.register(&sock, Event::Write).unwrap();
48 |     let notifications = poller.wait(250).unwrap();
49 |     assert_eq!(1, notifications.len());
50 |     assert_eq!(id, notifications[0].id);
51 |     assert_eq!(Event::Write, notifications[0].event);
52 | 
53 |     // Poll a second time. There should be no notification, since the socket is edge triggered.
54 |     let notifications = poller.wait(250).unwrap();
55 |     assert_eq!(0, notifications.len());
56 | 
57 |     // Tell the listening thread to stop itself
58 |     sock.write("stop".as_bytes()).unwrap();
59 | }
60 | 
--------------------------------------------------------------------------------
/tests/multithread-example.rs:
--------------------------------------------------------------------------------
1 | /// This example shows the primary use case for Amy. Registering and handling events is done on a
2 | /// separate thread from polling for events. This prevents having to wake up the poller to register
3 | /// a new event, as is done with typical event loops. Both epoll and kqueue support operations
4 | /// across threads, so this wakeup strategy is unnecessary. The channel from registrar to poller
5 | /// becomes the kernel instead of a user-space channel. When an event is ready and the poller
6 | /// returns, the event can be sent to a separate thread/thread pool for decoding and state
7 | /// management. When a new registration is required, that thread or another can simply register
8 | /// again.
9 | 
10 | extern crate amy;
11 | 
12 | use amy::{
13 |     Poller,
14 |     Registrar,
15 |     Event,
16 |     Notification,
17 |     LineReader
18 | };
19 | 
20 | use std::net::{TcpListener, TcpStream};
21 | use std::thread;
22 | use std::sync::mpsc::{channel, Sender, Receiver};
23 | use std::str;
24 | use std::io::{ErrorKind, Read, Write};
25 | 
26 | const IP: &'static str = "127.0.0.1:10002";
27 | const DATA: &'static str = "Hello, World!\n";
28 | 
29 | #[test]
30 | fn primary_example() {
31 |     let poller = Poller::new().unwrap();
32 |     let registrar = poller.get_registrar();
33 |     let (worker_tx, worker_rx) = channel();
34 |     let (client_tx, client_rx) = channel();
35 |     let (poller_tx, poller_rx) = channel();
36 | 
37 |     // Setup a listen socket in non-blocking mode
38 |     let listener = TcpListener::bind(IP).unwrap();
39 |     listener.set_nonblocking(true).unwrap();
40 | 
41 |     let client_tx2 = client_tx.clone();
42 |     let h1 = thread::spawn(move || {
43 |         run_worker(registrar, worker_rx, listener, client_tx2, poller_tx);
44 |     });
45 | 
46 |     let h2 = thread::spawn(move || {
47 |         run_poller(poller, worker_tx, poller_rx, client_tx);
48 |     });
49 | 
50 |     let h3 = thread::spawn(|| {
51 |         run_client(client_rx);
52 |     });
53 | 
54 |     for h in vec![h1, h2, h3] {
55 |         h.join().unwrap();
56 |     }
57 | }
58 | 
59 | // Create a tcp client that writes some data and expects to receive it back.
60 | // This client uses standard blocking sockets, and doesn't use the poller/registrar at all.
61 | //
62 | // This client drives the flow of the test. Lines of the client will be numbered to correspond with
63 | // sections of the poller and worker threads to describe what is happening. This should allow
64 | // understanding the asserts and flow of the test in context.
65 | fn run_client(rx: Receiver<()>) {
66 |     // 1) Connect to the non-blocking listening socket registered with the poller by the worker.
67 |     let mut sock = TcpStream::connect(IP).unwrap();
68 | 
69 |     // Wait for the worker to signal that it accepted the connection
70 |     let _ = rx.recv().unwrap();
71 | 
72 |     // 2 + 3) Write a single line of data. This data causes a read event in the poller, which gets
73 |     // forwarded to the worker who will read it. The worker will then register the socket again with
74 |     // a write event, so that it can echo the data back. This write event gets forwarded to the
75 |     // worker and it writes the data on the socket.
76 |     sock.write_all(DATA.as_bytes()).unwrap();
77 | 
78 |     // 4) At this point the poller has received the write event and forwarded it to the worker,
79 |     // which has written the line of data. This data is received and it is checked that it is indeed
80 |     // an echo of the original data that was sent.
81 |     let mut buf = vec![0; DATA.len()];
82 |     sock.read_exact(&mut buf).unwrap();
83 |     assert_eq!(DATA, str::from_utf8(&buf).unwrap());
84 | 
85 |     // Wait for the poller to signal that it is done
86 |     let _ = rx.recv().unwrap();
87 | }
88 | 
89 | /// This thread runs the poller and forwards notifications to a worker thread.
90 | fn run_poller(mut poller: Poller,
91 |               worker_tx: Sender<Notification>,
92 |               rx: Receiver<()>,
93 |               client_tx: Sender<()>) {
94 | 
95 |     // 1) Wait for a connection, and ensure we get one. We started listening in the worker thread.
96 |     // The client has connected so we only get a single read event. Forward the notification to the
97 |     // worker.
98 |     let mut notifications = poller.wait(5000).unwrap();
99 |     assert_eq!(1, notifications.len());
100 |     let notification = notifications.pop().unwrap();
101 |     assert_eq!(Event::Read, notification.event);
102 |     assert_eq!(1, notification.id);
103 | 
104 |     worker_tx.send(notification).unwrap();
105 | 
106 |     // Wait for the worker to accept the socket so we don't get another notification for it.
107 |     // This is only needed to make the test deterministic.
108 |     let _ = rx.recv().unwrap();
109 | 
110 |     // 2) Wait for a read event signalling data from the client. Only one line of data from a single
111 |     // client was sent, so there is only one read notification. There is some user data registered
112 |     // and the notification will be forwarded to the worker.
113 |     let mut notifications = poller.wait(5000).unwrap();
114 |     assert_eq!(1, notifications.len());
115 |     let notification = notifications.pop().unwrap();
116 |     assert_eq!(Event::Read, notification.event);
117 |     assert_eq!(2, notification.id);
118 | 
119 |     // Forward the notification to the worker
120 |     worker_tx.send(notification).unwrap();
121 | 
122 |     // Wait for the worker to do the read
123 |     let _ = rx.recv().unwrap();
124 | 
125 |     // 3) The worker will read the data off the socket after it receives the read notification, and
126 |     // register a write event on the same socket. The write socket buffer is empty because it's the
127 |     // first time writing to the stream, so a write event becomes available immediately after
128 |     // polling, and forwarded to the worker who writes data on the socket.
129 |     let mut notifications = poller.wait(5000).unwrap();
130 |     assert_eq!(1, notifications.len());
131 |     let notification = notifications.pop().unwrap();
132 |     assert_eq!(Event::Write, notification.event);
133 |     assert_eq!(2, notification.id);
134 | 
135 |     // Forward the notification to the worker
136 |     worker_tx.send(notification).unwrap();
137 | 
138 |     // Wait for the worker to do the write
139 |     let _ = rx.recv().unwrap();
140 | 
141 |     // 4) We should be done here. So poll and wait for a timeout.
142 |     let notifications = poller.wait(1000).unwrap();
143 |     assert_eq!(0, notifications.len());
144 | 
145 |     // Signal the client to exit.
146 |     // This will cause one more notification for the closed connection.
147 |     client_tx.send(()).unwrap();
148 | }
149 | 
150 | // This thread registers sockets and receives notifications from the poller when they are ready
151 | fn run_worker(registrar: Registrar,
152 |               rx: Receiver<Notification>,
153 |               listener: TcpListener,
154 |               client_tx: Sender<()>,
155 |               poller_tx: Sender<()>) {
156 | 
157 |     let listener_id = registrar.register(&listener, Event::Read).unwrap();
158 |     // This is the first registered socket, so its id is 1. 0 is used by a channel internal to the poller.
159 |     assert_eq!(1, listener_id);
160 | 
161 |     // 1) Wait for a connection from the client to be noticed by the poller against the registered
162 |     // listening socket. Then accept the connection and register it.
163 |     let notification = rx.recv().unwrap();
164 |     assert_eq!(notification.event, Event::Read);
165 |     assert_eq!(notification.id, listener_id);
166 | 
167 |     // Accept the socket and register it
168 |     let (mut socket, _) = listener.accept().unwrap();
169 |     socket.set_nonblocking(true).unwrap();
170 |     let socket_id = registrar.register(&socket, Event::Read).unwrap();
171 |     // This is the second registration of a socket, so its id is 2.
172 |     assert_eq!(2, socket_id);
173 | 
174 |     // Ensure that when we accept again from the listener we get an ewouldblock
175 |     if let Err(e) = listener.accept() {
176 |         assert_eq!(ErrorKind::WouldBlock, e.kind());
177 |     }
178 | 
179 | 
180 |     // Signal the client that the connection notification was received.
181 |     // Note this isn't necessary in production, it's just here to make the test deterministic.
182 |     client_tx.send(()).unwrap();
183 | 
184 |     // Signal the poller that the connection was accepted.
185 |     // This also is only here to make the test deterministic.
186 |     poller_tx.send(()).unwrap();
187 | 
188 |     // 2) Data was received on the socket from the client, the read event was handled by the poller
189 |     // and forwarded to this worker.
190 |     //
191 |     // Receive notification that there is data to be read, then read and decode the data.
192 |     // Note that the data is small: it won't fill our buffers and will arrive in a single read.
193 |     let notification = rx.recv().unwrap();
194 |     assert_eq!(notification.event, Event::Read);
195 |     assert_eq!(notification.id, socket_id);
196 | 
197 |     let mut line_reader = LineReader::new(1024);
198 |     let bytes_read = line_reader.read(&mut socket).unwrap();
199 |     assert_eq!(bytes_read, DATA.len());
200 | 
201 |     // Get a complete message from the line reader
202 |     let text = line_reader.iter_mut().next().unwrap().unwrap();
203 |     assert_eq!(DATA.to_string(), text);
204 | 
205 |     // Signal the poller that the data was read
206 |     poller_tx.send(()).unwrap();
207 | 
208 |     // Re-register the socket for writing so we can echo the data back to the client
209 |     registrar.reregister(socket_id, &socket, Event::Write).unwrap();
210 | 
211 |     // 3) The socket was available for writing, and the notification was forwarded from the poller.
212 |     // This worker receives the notification and proceeds to echo back the data it read.
213 |     let notification = rx.recv().unwrap();
214 |     assert_eq!(notification.event, Event::Write);
215 |     assert_eq!(notification.id, socket_id);
216 | 
217 |     let bytes_written = socket.write(text.as_bytes()).unwrap();
218 |     // Assume the outgoing socket buffer has room to take the whole write at once.
219 |     // That's plausible in this test. Don't do this in production!
220 |     assert_eq!(text.len(), bytes_written);
221 | 
222 |     // Signal the poller that the data was written
223 |     poller_tx.send(()).unwrap();
224 | 
225 |     // 4) The data was sent, and we are done here.
226 | }
227 | 
--------------------------------------------------------------------------------
/tests/timer_test.rs:
--------------------------------------------------------------------------------
1 | //! Test the timer interface
2 | 
3 | extern crate amy;
4 | 
5 | use std::time::{Instant, Duration};
6 | 
7 | use amy::Poller;
8 | 
9 | const TIMEOUT: usize = 50; // ms
10 | const POLL_TIMEOUT: usize = 5000; // ms
11 | 
12 | // Use a shorter timeout for the final poll, because we don't want to wait a whole 5 seconds.
13 | // It's still much longer than the timer interval, so a stray timer fire would be caught. We
14 | // use two poll timeouts so that the earlier waits don't fail on ridiculously slow machines.
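// To summarize the relationship between the three constants (values from this file):
//
//     TIMEOUT (50ms)  <  FINAL_POLL_TIMEOUT (250ms)  <  POLL_TIMEOUT (5000ms)
//
// The generous POLL_TIMEOUT bounds waits that are expected to return early with a
// notification, while FINAL_POLL_TIMEOUT bounds the one wait that is expected to elapse in
// full.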
15 | const FINAL_POLL_TIMEOUT: usize = 250; // ms
16 | 
17 | #[test]
18 | fn test_set_timeout() {
19 |     let mut poller = Poller::new().unwrap();
20 |     let registrar = poller.get_registrar();
21 |     let now = Instant::now();
22 |     let timer_id = registrar.set_timeout(TIMEOUT).unwrap();
23 |     // The timer should fire exactly once, after TIMEOUT ms but well before POLL_TIMEOUT
24 |     let notifications = poller.wait(POLL_TIMEOUT).unwrap();
25 |     let elapsed = now.elapsed();
26 |     assert_eq!(1, notifications.len());
27 |     assert_eq!(timer_id, notifications[0].id);
28 |     assert!(elapsed > Duration::from_millis(TIMEOUT as u64));
29 |     assert!(elapsed < Duration::from_millis(POLL_TIMEOUT as u64));
30 | }
31 | 
32 | #[test]
33 | fn test_set_interval() {
34 |     let mut poller = Poller::new().unwrap();
35 |     let registrar = poller.get_registrar();
36 |     let timer_id = registrar.set_interval(TIMEOUT).unwrap();
37 |     let now = Instant::now();
38 |     // Each iteration should see exactly one interval fire, roughly i * TIMEOUT after the start
39 |     for i in 1..5 {
40 |         let notifications = poller.wait(POLL_TIMEOUT).unwrap();
41 |         let elapsed = now.elapsed();
42 |         assert_eq!(1, notifications.len());
43 |         assert_eq!(timer_id, notifications[0].id);
44 |         assert!(elapsed > Duration::from_millis(i * (TIMEOUT as u64)));
45 |         assert!(elapsed < Duration::from_millis(POLL_TIMEOUT as u64));
46 |     }
47 |     // Cancel the interval, then poll again and expect the full timeout to elapse with no fires
48 |     assert!(registrar.cancel_timeout(timer_id).is_ok());
49 |     let now = Instant::now();
50 |     let notifications = poller.wait(FINAL_POLL_TIMEOUT).unwrap();
51 |     assert!(now.elapsed() > Duration::from_millis(FINAL_POLL_TIMEOUT as u64));
52 |     assert_eq!(0, notifications.len());
53 | }
--------------------------------------------------------------------------------