├── .cargo └── config ├── .github └── dependabot.yml ├── .gitignore ├── .gitmodules ├── CHANGELOG.md ├── CODEOWNERS ├── Cargo.toml ├── LICENSE ├── README.md ├── coverage_config_x86_64.json ├── src ├── backend.rs ├── event_loop.rs ├── handler.rs ├── lib.rs └── vring.rs └── tests └── vhost-user-server.rs /.cargo/config: -------------------------------------------------------------------------------- 1 | [target.aarch64-unknown-linux-musl] 2 | rustflags = [ "-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"] 3 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: "/" 5 | schedule: 6 | interval: weekly 7 | open-pull-requests-limit: 3 8 | allow: 9 | - dependency-type: direct 10 | - dependency-type: indirect 11 | - package-ecosystem: gitsubmodule 12 | directory: "/" 13 | schedule: 14 | interval: weekly 15 | open-pull-requests-limit: 10 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | Cargo.lock 2 | target 3 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "rust-vmm-ci"] 2 | path = rust-vmm-ci 3 | url = https://github.com/rust-vmm/rust-vmm-ci.git 4 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | ## [Unreleased] 3 | 4 | ### Added 5 | 6 | ### Changed 7 | 8 | ### Fixed 9 | 10 | ### Deprecated 11 | 12 | ## v0.7.0 13 | 14 | ### Changed 15 | 16 | - Started using caret dependencies 17 | - Updated dependency nix 0.24 -> 0.25 18 | - Updated dependency log 0.4.6 -> 0.4.17 19 | - Updated dependency vhost 0.4 -> 0.5 20 | - Updated dependency virtio-queue 0.5.0 -> 0.6 21 | - Updated dependency vm-memory 0.7 -> 0.9 22 | 23 | ## v0.6.0 24 | 25 | ### Changed 26 | 27 | - Moved to rust-vmm/virtio-queue v0.5.0 28 | 29 | ### Fixed 30 | 31 | - Fixed vring initialization logic 32 | 33 | ## v0.5.1 34 | 35 | ### Changed 36 | - Moved to rust-vmm/vmm-sys-util 0.10.0 37 | 38 | ## v0.5.0 39 | 40 | ### Changed 41 | 42 | - Moved to rust-vmm/virtio-queue v0.4.0 43 | 44 | ## v0.4.0 45 | 46 | ### Changed 47 | 48 | - Moved to rust-vmm/virtio-queue v0.3.0 49 | - Relaxed rust-vmm/vm-memory dependency to require ">=0.7" 50 | 51 | ## v0.3.0 52 | 53 | ### Changed 54 | 55 | - Moved to rust-vmm/vhost v0.4.0 56 | 57 | ## v0.2.0 58 | 59 | ### Added 60 | 61 | - Ability to run the daemon as a client 62 | - VringEpollHandler implements AsRawFd 63 | 64 | ## v0.1.0 65 | 66 | First release 67 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Add the list of code owners here (using their GitHub username) 2 | * @jiangliu @sboeuf @slp @stefano-garzarella 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "vhost-user-backend" 3 | version = "0.7.0" 4 | authors = ["The Cloud Hypervisor Authors"] 5 | keywords = ["vhost-user", "virtio"]
6 | description = "A framework to build vhost-user backend service daemon" 7 | edition = "2018" 8 | license = "Apache-2.0" 9 | 10 | [badges] 11 | maintenance = { status = "deprecated" } 12 | 13 | [dependencies] 14 | libc = "0.2.39" 15 | log = "0.4.17" 16 | vhost = { version = "0.5", features = ["vhost-user-slave"] } 17 | virtio-bindings = "0.1" 18 | virtio-queue = "0.6" 19 | vm-memory = { version = "0.9", features = ["backend-mmap", "backend-atomic"] } 20 | vmm-sys-util = "0.10" 21 | 22 | [dev-dependencies] 23 | nix = "0.25" 24 | vhost = { version = "0.5", features = ["vhost-user-master", "vhost-user-slave"] } 25 | vm-memory = { version = "0.9", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] } 26 | tempfile = "3.2.0" 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # vhost-user-backend 2 | 3 | The source code for this crate has been moved to 4 | [vhost-user-backend](https://github.com/rust-vmm/vhost/).
5 | -------------------------------------------------------------------------------- /coverage_config_x86_64.json: -------------------------------------------------------------------------------- 1 | { 2 | "coverage_score": 85.0, 3 | "exclude_path": "", 4 | "crate_features": "" 5 | } 6 | -------------------------------------------------------------------------------- /src/backend.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Intel Corporation. All Rights Reserved. 2 | // Copyright 2019-2021 Alibaba Cloud. All rights reserved. 3 | // 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | //! Traits for vhost user backend servers to implement virtio data plane services. 7 | //! 8 | //! Define two traits for vhost user backend servers to implement virtio data plane services. 9 | //! The only difference between the two traits is mutability. The [VhostUserBackend] trait is 10 | //! designed with interior mutability, so the implementor may choose the suitable way to protect 11 | //! itself from concurrent accesses. The [VhostUserBackendMut] trait is designed without interior 12 | //! mutability, and an implementation of: 13 | //! ```ignore 14 | //! impl VhostUserBackend for RwLock { } 15 | //! ``` 16 | //! is provided for convenience. 17 | //! 18 | //! [VhostUserBackend]: trait.VhostUserBackend.html 19 | //! [VhostUserBackendMut]: trait.VhostUserBackendMut.html 20 | 21 | use std::io::Result; 22 | use std::ops::Deref; 23 | use std::sync::{Arc, Mutex, RwLock}; 24 | 25 | use vhost::vhost_user::message::VhostUserProtocolFeatures; 26 | use vhost::vhost_user::SlaveFsCacheReq; 27 | use vm_memory::bitmap::Bitmap; 28 | use vmm_sys_util::epoll::EventSet; 29 | use vmm_sys_util::eventfd::EventFd; 30 | 31 | use super::vring::VringT; 32 | use super::GM; 33 | 34 | /// Trait with interior mutability for vhost user backend servers to implement concrete services. 35 | /// 36 | /// To support multi-threading and asynchronous IO, we enforce the `Send + Sync` bound. 37 | pub trait VhostUserBackend: Send + Sync 38 | where 39 | V: VringT>, 40 | B: Bitmap + 'static, 41 | { 42 | /// Get number of queues supported. 43 | fn num_queues(&self) -> usize; 44 | 45 | /// Get maximum queue size supported. 46 | fn max_queue_size(&self) -> usize; 47 | 48 | /// Get available virtio features. 49 | fn features(&self) -> u64; 50 | 51 | /// Set acknowledged virtio features. 52 | fn acked_features(&self, _features: u64) {} 53 | 54 | /// Get available vhost protocol features. 55 | fn protocol_features(&self) -> VhostUserProtocolFeatures; 56 | 57 | /// Enable or disable the virtio EVENT_IDX feature. 58 | fn set_event_idx(&self, enabled: bool); 59 | 60 | /// Get virtio device configuration. 61 | /// 62 | /// A default implementation is provided as we cannot expect all backends to implement this 63 | /// function. 64 | fn get_config(&self, _offset: u32, _size: u32) -> Vec { 65 | Vec::new() 66 | } 67 | 68 | /// Set virtio device configuration. 69 | /// 70 | /// A default implementation is provided as we cannot expect all backends to implement this 71 | /// function. 72 | fn set_config(&self, _offset: u32, _buf: &[u8]) -> Result<()> { 73 | Ok(()) 74 | } 75 | 76 | /// Update guest memory regions. 77 | fn update_memory(&self, mem: GM) -> Result<()>; 78 | 79 | /// Set handler for communicating with the master over the slave communication channel. 80 | /// 81 | /// A default implementation is provided as we cannot expect all backends to implement this 82 | /// function.
83 | /// 84 | /// TODO: this interface is designed only for vhost-user-fs and should be refined. 85 | fn set_slave_req_fd(&self, _vu_req: SlaveFsCacheReq) {} 86 | 87 | /// Get the mapping from virtqueues to worker threads. 88 | /// 89 | /// Each value in the returned vector is a bitmask of queue indexes handled by the matching 90 | /// worker thread. For example, a return value of [0x3, 0x4] means queues 0 and 1 are handled 91 | /// by worker thread 0, while queue 2 is handled by worker thread 1. 92 | fn queues_per_thread(&self) -> Vec { 93 | vec![0xffff_ffff] 94 | } 95 | 96 | /// Provide an optional exit EventFd for the specified worker thread. 97 | /// 98 | /// If an `EventFd` is returned, it will be monitored for IO events by using epoll. When the 99 | /// returned `EventFd` is written to, the worker thread associated with `_thread_index` will 100 | /// exit. 101 | fn exit_event(&self, _thread_index: usize) -> Option { 102 | None 103 | } 104 | 105 | /// Handle IO events for backend-registered file descriptors. 106 | /// 107 | /// This function gets called if the backend registered some additional listeners onto specific 108 | /// file descriptors. The library can handle virtqueues on its own, but does not know what to 109 | /// do with events happening on custom listeners. 110 | fn handle_event( 111 | &self, 112 | device_event: u16, 113 | evset: EventSet, 114 | vrings: &[V], 115 | thread_id: usize, 116 | ) -> Result; 117 | } 118 | 119 | /// Trait without interior mutability for vhost user backend servers to implement concrete services. 120 | pub trait VhostUserBackendMut: Send + Sync 121 | where 122 | V: VringT>, 123 | B: Bitmap + 'static, 124 | { 125 | /// Get number of queues supported. 126 | fn num_queues(&self) -> usize; 127 | 128 | /// Get maximum queue size supported. 129 | fn max_queue_size(&self) -> usize; 130 | 131 | /// Get available virtio features. 132 | fn features(&self) -> u64; 133 | 134 | /// Set acknowledged virtio features. 135 | fn acked_features(&mut self, _features: u64) {} 136 | 137 | /// Get available vhost protocol features. 138 | fn protocol_features(&self) -> VhostUserProtocolFeatures; 139 | 140 | /// Enable or disable the virtio EVENT_IDX feature. 141 | fn set_event_idx(&mut self, enabled: bool); 142 | 143 | /// Get virtio device configuration. 144 | /// 145 | /// A default implementation is provided as we cannot expect all backends to implement this 146 | /// function. 147 | fn get_config(&self, _offset: u32, _size: u32) -> Vec { 148 | Vec::new() 149 | } 150 | 151 | /// Set virtio device configuration. 152 | /// 153 | /// A default implementation is provided as we cannot expect all backends to implement this 154 | /// function. 155 | fn set_config(&mut self, _offset: u32, _buf: &[u8]) -> Result<()> { 156 | Ok(()) 157 | } 158 | 159 | /// Update guest memory regions. 160 | fn update_memory(&mut self, mem: GM) -> Result<()>; 161 | 162 | /// Set handler for communicating with the master over the slave communication channel. 163 | /// 164 | /// A default implementation is provided as we cannot expect all backends to implement this 165 | /// function. 166 | /// 167 | /// TODO: this interface is designed only for vhost-user-fs and should be refined. 168 | fn set_slave_req_fd(&mut self, _vu_req: SlaveFsCacheReq) {} 169 | 170 | /// Get the mapping from virtqueues to worker threads.
171 | /// 172 | /// Each value in the returned vector is a bitmask of queue indexes handled by the matching 173 | /// worker thread. For example, a return value of [0x3, 0x4] means queues 0 and 1 are handled 174 | /// by worker thread 0, while queue 2 is handled by worker thread 1. 175 | fn queues_per_thread(&self) -> Vec { 176 | vec![0xffff_ffff] 177 | } 178 | 179 | /// Provide an optional exit EventFd for the specified worker thread. 180 | /// 181 | /// If an `EventFd` is returned, it will be monitored for IO events by using epoll. When the 182 | /// returned `EventFd` is written to, the worker thread associated with `_thread_index` will 183 | /// exit. 184 | fn exit_event(&self, _thread_index: usize) -> Option { 185 | None 186 | } 187 | 188 | /// Handle IO events for backend-registered file descriptors. 189 | /// 190 | /// This function gets called if the backend registered some additional listeners onto specific 191 | /// file descriptors. The library can handle virtqueues on its own, but does not know what to 192 | /// do with events happening on custom listeners. 193 | fn handle_event( 194 | &mut self, 195 | device_event: u16, 196 | evset: EventSet, 197 | vrings: &[V], 198 | thread_id: usize, 199 | ) -> Result; 200 | } 201 | 202 | impl, V, B> VhostUserBackend for Arc 203 | where 204 | V: VringT>, 205 | B: Bitmap + 'static, 206 | { 207 | fn num_queues(&self) -> usize { 208 | self.deref().num_queues() 209 | } 210 | 211 | fn max_queue_size(&self) -> usize { 212 | self.deref().max_queue_size() 213 | } 214 | 215 | fn features(&self) -> u64 { 216 | self.deref().features() 217 | } 218 | 219 | fn acked_features(&self, features: u64) { 220 | self.deref().acked_features(features) 221 | } 222 | 223 | fn protocol_features(&self) -> VhostUserProtocolFeatures { 224 | self.deref().protocol_features() 225 | } 226 | 227 | fn set_event_idx(&self, enabled: bool) { 228 | self.deref().set_event_idx(enabled) 229 | } 230 | 231 | fn get_config(&self, offset: u32, size: u32) -> Vec { 232 | self.deref().get_config(offset, size) 233 | } 234 | 235 | fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> { 236 | self.deref().set_config(offset, buf) 237 | } 238 | 239 | fn update_memory(&self, mem: GM) -> Result<()> { 240 | self.deref().update_memory(mem) 241 | } 242 | 243 | fn set_slave_req_fd(&self, vu_req: SlaveFsCacheReq) { 244 | self.deref().set_slave_req_fd(vu_req) 245 | } 246 | 247 | fn queues_per_thread(&self) -> Vec { 248 | self.deref().queues_per_thread() 249 | } 250 | 251 | fn exit_event(&self, thread_index: usize) -> Option { 252 | self.deref().exit_event(thread_index) 253 | } 254 | 255 | fn handle_event( 256 | &self, 257 | device_event: u16, 258 | evset: EventSet, 259 | vrings: &[V], 260 | thread_id: usize, 261 | ) -> Result { 262 | self.deref() 263 | .handle_event(device_event, evset, vrings, thread_id) 264 | } 265 | } 266 | 267 | impl, V, B> VhostUserBackend for Mutex 268 | where 269 | V: VringT>, 270 | B: Bitmap + 'static, 271 | { 272 | fn num_queues(&self) -> usize { 273 | self.lock().unwrap().num_queues() 274 | } 275 | 276 | fn max_queue_size(&self) -> usize { 277 | self.lock().unwrap().max_queue_size() 278 | } 279 | 280 | fn features(&self) -> u64 { 281 | self.lock().unwrap().features() 282 | } 283 | 284 | fn acked_features(&self, features: u64) { 285 | self.lock().unwrap().acked_features(features) 286 | } 287 | 288 | fn protocol_features(&self) -> VhostUserProtocolFeatures { 289 | self.lock().unwrap().protocol_features() 290 | } 291 | 292 | fn set_event_idx(&self, enabled: bool) { 293 |
self.lock().unwrap().set_event_idx(enabled) 294 | } 295 | 296 | fn get_config(&self, offset: u32, size: u32) -> Vec { 297 | self.lock().unwrap().get_config(offset, size) 298 | } 299 | 300 | fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> { 301 | self.lock().unwrap().set_config(offset, buf) 302 | } 303 | 304 | fn update_memory(&self, mem: GM) -> Result<()> { 305 | self.lock().unwrap().update_memory(mem) 306 | } 307 | 308 | fn set_slave_req_fd(&self, vu_req: SlaveFsCacheReq) { 309 | self.lock().unwrap().set_slave_req_fd(vu_req) 310 | } 311 | 312 | fn queues_per_thread(&self) -> Vec { 313 | self.lock().unwrap().queues_per_thread() 314 | } 315 | 316 | fn exit_event(&self, thread_index: usize) -> Option { 317 | self.lock().unwrap().exit_event(thread_index) 318 | } 319 | 320 | fn handle_event( 321 | &self, 322 | device_event: u16, 323 | evset: EventSet, 324 | vrings: &[V], 325 | thread_id: usize, 326 | ) -> Result { 327 | self.lock() 328 | .unwrap() 329 | .handle_event(device_event, evset, vrings, thread_id) 330 | } 331 | } 332 | 333 | impl, V, B> VhostUserBackend for RwLock 334 | where 335 | V: VringT>, 336 | B: Bitmap + 'static, 337 | { 338 | fn num_queues(&self) -> usize { 339 | self.read().unwrap().num_queues() 340 | } 341 | 342 | fn max_queue_size(&self) -> usize { 343 | self.read().unwrap().max_queue_size() 344 | } 345 | 346 | fn features(&self) -> u64 { 347 | self.read().unwrap().features() 348 | } 349 | 350 | fn acked_features(&self, features: u64) { 351 | self.write().unwrap().acked_features(features) 352 | } 353 | 354 | fn protocol_features(&self) -> VhostUserProtocolFeatures { 355 | self.read().unwrap().protocol_features() 356 | } 357 | 358 | fn set_event_idx(&self, enabled: bool) { 359 | self.write().unwrap().set_event_idx(enabled) 360 | } 361 | 362 | fn get_config(&self, offset: u32, size: u32) -> Vec { 363 | self.read().unwrap().get_config(offset, size) 364 | } 365 | 366 | fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> { 367 | self.write().unwrap().set_config(offset, buf) 368 | } 369 | 370 | fn update_memory(&self, mem: GM) -> Result<()> { 371 | self.write().unwrap().update_memory(mem) 372 | } 373 | 374 | fn set_slave_req_fd(&self, vu_req: SlaveFsCacheReq) { 375 | self.write().unwrap().set_slave_req_fd(vu_req) 376 | } 377 | 378 | fn queues_per_thread(&self) -> Vec { 379 | self.read().unwrap().queues_per_thread() 380 | } 381 | 382 | fn exit_event(&self, thread_index: usize) -> Option { 383 | self.read().unwrap().exit_event(thread_index) 384 | } 385 | 386 | fn handle_event( 387 | &self, 388 | device_event: u16, 389 | evset: EventSet, 390 | vrings: &[V], 391 | thread_id: usize, 392 | ) -> Result { 393 | self.write() 394 | .unwrap() 395 | .handle_event(device_event, evset, vrings, thread_id) 396 | } 397 | } 398 | 399 | #[cfg(test)] 400 | pub mod tests { 401 | use super::*; 402 | use crate::VringRwLock; 403 | use std::sync::Mutex; 404 | use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap}; 405 | 406 | pub struct MockVhostBackend { 407 | events: u64, 408 | event_idx: bool, 409 | acked_features: u64, 410 | } 411 | 412 | impl MockVhostBackend { 413 | pub fn new() -> Self { 414 | MockVhostBackend { 415 | events: 0, 416 | event_idx: false, 417 | acked_features: 0, 418 | } 419 | } 420 | } 421 | 422 | impl VhostUserBackendMut for MockVhostBackend { 423 | fn num_queues(&self) -> usize { 424 | 2 425 | } 426 | 427 | fn max_queue_size(&self) -> usize { 428 | 256 429 | } 430 | 431 | fn features(&self) -> u64 { 432 | 0xffff_ffff_ffff_ffff 433 | } 434 | 435 | fn 
acked_features(&mut self, features: u64) { 436 | self.acked_features = features; 437 | } 438 | 439 | fn protocol_features(&self) -> VhostUserProtocolFeatures { 440 | VhostUserProtocolFeatures::all() 441 | } 442 | 443 | fn set_event_idx(&mut self, enabled: bool) { 444 | self.event_idx = enabled; 445 | } 446 | 447 | fn get_config(&self, offset: u32, size: u32) -> Vec { 448 | assert_eq!(offset, 0x200); 449 | assert_eq!(size, 8); 450 | 451 | vec![0xa5u8; 8] 452 | } 453 | 454 | fn set_config(&mut self, offset: u32, buf: &[u8]) -> Result<()> { 455 | assert_eq!(offset, 0x200); 456 | assert_eq!(buf.len(), 8); 457 | assert_eq!(buf, &[0xa5u8; 8]); 458 | 459 | Ok(()) 460 | } 461 | 462 | fn update_memory(&mut self, _atomic_mem: GuestMemoryAtomic) -> Result<()> { 463 | Ok(()) 464 | } 465 | 466 | fn set_slave_req_fd(&mut self, _vu_req: SlaveFsCacheReq) {} 467 | 468 | fn queues_per_thread(&self) -> Vec { 469 | vec![1, 1] 470 | } 471 | 472 | fn exit_event(&self, _thread_index: usize) -> Option { 473 | let event_fd = EventFd::new(0).unwrap(); 474 | 475 | Some(event_fd) 476 | } 477 | 478 | fn handle_event( 479 | &mut self, 480 | _device_event: u16, 481 | _evset: EventSet, 482 | _vrings: &[VringRwLock], 483 | _thread_id: usize, 484 | ) -> Result { 485 | self.events += 1; 486 | 487 | Ok(false) 488 | } 489 | } 490 | 491 | #[test] 492 | fn test_new_mock_backend_mutex() { 493 | let backend = Arc::new(Mutex::new(MockVhostBackend::new())); 494 | 495 | assert_eq!(backend.num_queues(), 2); 496 | assert_eq!(backend.max_queue_size(), 256); 497 | assert_eq!(backend.features(), 0xffff_ffff_ffff_ffff); 498 | assert_eq!( 499 | backend.protocol_features(), 500 | VhostUserProtocolFeatures::all() 501 | ); 502 | assert_eq!(backend.queues_per_thread(), [1, 1]); 503 | 504 | assert_eq!(backend.get_config(0x200, 8), vec![0xa5; 8]); 505 | backend.set_config(0x200, &[0xa5; 8]).unwrap(); 506 | 507 | backend.acked_features(0xffff); 508 | assert_eq!(backend.lock().unwrap().acked_features, 0xffff); 509 | 510 | backend.set_event_idx(true); 511 | assert!(backend.lock().unwrap().event_idx); 512 | 513 | let _ = backend.exit_event(0).unwrap(); 514 | 515 | let mem = GuestMemoryAtomic::new( 516 | GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(), 517 | ); 518 | backend.update_memory(mem).unwrap(); 519 | } 520 | 521 | #[test] 522 | fn test_new_mock_backend_rwlock() { 523 | let backend = Arc::new(RwLock::new(MockVhostBackend::new())); 524 | 525 | assert_eq!(backend.num_queues(), 2); 526 | assert_eq!(backend.max_queue_size(), 256); 527 | assert_eq!(backend.features(), 0xffff_ffff_ffff_ffff); 528 | assert_eq!( 529 | backend.protocol_features(), 530 | VhostUserProtocolFeatures::all() 531 | ); 532 | assert_eq!(backend.queues_per_thread(), [1, 1]); 533 | 534 | assert_eq!(backend.get_config(0x200, 8), vec![0xa5; 8]); 535 | backend.set_config(0x200, &[0xa5; 8]).unwrap(); 536 | 537 | backend.acked_features(0xffff); 538 | assert_eq!(backend.read().unwrap().acked_features, 0xffff); 539 | 540 | backend.set_event_idx(true); 541 | assert!(backend.read().unwrap().event_idx); 542 | 543 | let _ = backend.exit_event(0).unwrap(); 544 | 545 | let mem = GuestMemoryAtomic::new( 546 | GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(), 547 | ); 548 | backend.update_memory(mem.clone()).unwrap(); 549 | 550 | let vring = VringRwLock::new(mem, 0x1000).unwrap(); 551 | backend 552 | .handle_event(0x1, EventSet::IN, &[vring], 0) 553 | .unwrap(); 554 | } 555 | } 556 | 
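The two traits above, together with the blanket implementations for `Arc`, `Mutex`, and `RwLock`, mean a concrete device usually implements only `VhostUserBackendMut` and wraps itself for sharing, exactly as the `MockVhostBackend` tests do. The following is a minimal sketch of an out-of-tree backend, not code from this repository: it assumes the v0.7 signatures listed above instantiated with `V = VringRwLock` and `B = ()` (the combination the tests use), and `NoopBackend` is an illustrative name.

```rust
// Minimal sketch: only the methods without default implementations are
// provided; get_config, queues_per_thread, exit_event, etc. fall back to
// the defaults shown above. Assumes vhost-user-backend 0.7 with
// V = VringRwLock and B = (), mirroring MockVhostBackend.
use std::io::Result;
use std::sync::{Arc, RwLock};

use vhost::vhost_user::message::VhostUserProtocolFeatures;
use vhost_user_backend::{VhostUserBackendMut, VringRwLock};
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};
use vmm_sys_util::epoll::EventSet;

pub struct NoopBackend;

impl VhostUserBackendMut<VringRwLock, ()> for NoopBackend {
    fn num_queues(&self) -> usize {
        1
    }

    fn max_queue_size(&self) -> usize {
        256
    }

    fn features(&self) -> u64 {
        // A real device would advertise its virtio feature bits here.
        0
    }

    fn protocol_features(&self) -> VhostUserProtocolFeatures {
        VhostUserProtocolFeatures::empty()
    }

    fn set_event_idx(&mut self, _enabled: bool) {}

    fn update_memory(&mut self, _mem: GuestMemoryAtomic<GuestMemoryMmap>) -> Result<()> {
        Ok(())
    }

    fn handle_event(
        &mut self,
        _device_event: u16,
        _evset: EventSet,
        _vrings: &[VringRwLock],
        _thread_id: usize,
    ) -> Result<bool> {
        // Returning false keeps the worker thread's event loop running;
        // true tells the loop to exit.
        Ok(false)
    }
}

/// The blanket impl of VhostUserBackend for RwLock then provides the
/// interior-mutability trait for the shared handle.
pub fn shared_backend() -> Arc<RwLock<NoopBackend>> {
    Arc::new(RwLock::new(NoopBackend))
}
```

Sharing through `Arc<RwLock<_>>` is the path the tests above exercise; `Mutex` works the same way but serializes the read-mostly accessor methods as well.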
-------------------------------------------------------------------------------- /src/event_loop.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Intel Corporation. All Rights Reserved. 2 | // Copyright 2019-2021 Alibaba Cloud. All rights reserved. 3 | // 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | use std::fmt::{Display, Formatter}; 7 | use std::io::{self, Result}; 8 | use std::marker::PhantomData; 9 | use std::os::unix::io::{AsRawFd, RawFd}; 10 | 11 | use vm_memory::bitmap::Bitmap; 12 | use vmm_sys_util::epoll::{ControlOperation, Epoll, EpollEvent, EventSet}; 13 | use vmm_sys_util::eventfd::EventFd; 14 | 15 | use super::backend::VhostUserBackend; 16 | use super::vring::VringT; 17 | use super::GM; 18 | 19 | /// Errors related to vring epoll event handling. 20 | #[derive(Debug)] 21 | pub enum VringEpollError { 22 | /// Failed to create epoll file descriptor. 23 | EpollCreateFd(io::Error), 24 | /// Failed while waiting for events. 25 | EpollWait(io::Error), 26 | /// Could not register the exit event. 27 | RegisterExitEvent(io::Error), 28 | /// Failed to read the event from kick EventFd. 29 | HandleEventReadKick(io::Error), 30 | /// Failed to handle the event from the backend. 31 | HandleEventBackendHandling(io::Error), 32 | } 33 | 34 | impl Display for VringEpollError { 35 | fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { 36 | match self { 37 | VringEpollError::EpollCreateFd(e) => write!(f, "cannot create epoll fd: {}", e), 38 | VringEpollError::EpollWait(e) => write!(f, "failed to wait for epoll event: {}", e), 39 | VringEpollError::RegisterExitEvent(e) => write!(f, "cannot register exit event: {}", e), 40 | VringEpollError::HandleEventReadKick(e) => { 41 | write!(f, "cannot read vring kick event: {}", e) 42 | } 43 | VringEpollError::HandleEventBackendHandling(e) => { 44 | write!(f, "failed to handle epoll event: {}", e) 45 | } 46 | } 47 | } 48 | } 49 | 50 | impl std::error::Error for VringEpollError {} 51 | 52 | /// Result of vring epoll operations. 53 | pub type VringEpollResult = std::result::Result; 54 | 55 | /// Epoll event handler to manage and process epoll events for registered file descriptors. 56 | /// 57 | /// The `VringEpollHandler` structure provides interfaces to: 58 | /// - add file descriptors to be monitored by the epoll fd 59 | /// - remove registered file descriptors from the epoll fd 60 | /// - run the event loop to handle pending events on the epoll fd 61 | pub struct VringEpollHandler { 62 | epoll: Epoll, 63 | backend: S, 64 | vrings: Vec, 65 | thread_id: usize, 66 | exit_event_fd: Option, 67 | phantom: PhantomData, 68 | } 69 | 70 | impl VringEpollHandler { 71 | /// Send `exit event` to break the event loop. 72 | pub fn send_exit_event(&self) { 73 | if let Some(eventfd) = self.exit_event_fd.as_ref() { 74 | let _ = eventfd.write(1); 75 | } 76 | } 77 | } 78 | 79 | impl VringEpollHandler 80 | where 81 | S: VhostUserBackend, 82 | V: VringT>, 83 | B: Bitmap + 'static, 84 | { 85 | /// Create a `VringEpollHandler` instance.
86 | pub(crate) fn new(backend: S, vrings: Vec, thread_id: usize) -> VringEpollResult { 87 | let epoll = Epoll::new().map_err(VringEpollError::EpollCreateFd)?; 88 | 89 | let handler = match backend.exit_event(thread_id) { 90 | Some(exit_event_fd) => { 91 | let id = backend.num_queues(); 92 | epoll 93 | .ctl( 94 | ControlOperation::Add, 95 | exit_event_fd.as_raw_fd(), 96 | EpollEvent::new(EventSet::IN, id as u64), 97 | ) 98 | .map_err(VringEpollError::RegisterExitEvent)?; 99 | 100 | VringEpollHandler { 101 | epoll, 102 | backend, 103 | vrings, 104 | thread_id, 105 | exit_event_fd: Some(exit_event_fd), 106 | phantom: PhantomData, 107 | } 108 | } 109 | None => VringEpollHandler { 110 | epoll, 111 | backend, 112 | vrings, 113 | thread_id, 114 | exit_event_fd: None, 115 | phantom: PhantomData, 116 | }, 117 | }; 118 | 119 | Ok(handler) 120 | } 121 | 122 | /// Register an event with the epoll fd. 123 | /// 124 | /// When this event is later triggered, the backend implementation of `handle_event` will be 125 | /// called. 126 | pub fn register_listener(&self, fd: RawFd, ev_type: EventSet, data: u64) -> Result<()> { 127 | // `data` range [0...num_queues] is reserved for queues and the exit event. 128 | if data <= self.backend.num_queues() as u64 { 129 | Err(io::Error::from_raw_os_error(libc::EINVAL)) 130 | } else { 131 | self.register_event(fd, ev_type, data) 132 | } 133 | } 134 | 135 | /// Unregister an event from the epoll fd. 136 | /// 137 | /// If the event is triggered after this function has been called, the event will be silently 138 | /// dropped. 139 | pub fn unregister_listener(&self, fd: RawFd, ev_type: EventSet, data: u64) -> Result<()> { 140 | // `data` range [0...num_queues] is reserved for queues and the exit event. 141 | if data <= self.backend.num_queues() as u64 { 142 | Err(io::Error::from_raw_os_error(libc::EINVAL)) 143 | } else { 144 | self.unregister_event(fd, ev_type, data) 145 | } 146 | } 147 | 148 | pub(crate) fn register_event(&self, fd: RawFd, ev_type: EventSet, data: u64) -> Result<()> { 149 | self.epoll 150 | .ctl(ControlOperation::Add, fd, EpollEvent::new(ev_type, data)) 151 | } 152 | 153 | pub(crate) fn unregister_event(&self, fd: RawFd, ev_type: EventSet, data: u64) -> Result<()> { 154 | self.epoll 155 | .ctl(ControlOperation::Delete, fd, EpollEvent::new(ev_type, data)) 156 | } 157 | 158 | /// Run the event poll loop to handle all pending events on registered fds. 159 | /// 160 | /// The event loop will be terminated once an event is received from the `exit event fd` 161 | /// associated with the backend. 162 | pub(crate) fn run(&self) -> VringEpollResult<()> { 163 | const EPOLL_EVENTS_LEN: usize = 100; 164 | let mut events = vec![EpollEvent::new(EventSet::empty(), 0); EPOLL_EVENTS_LEN]; 165 | 166 | 'epoll: loop { 167 | let num_events = match self.epoll.wait(-1, &mut events[..]) { 168 | Ok(res) => res, 169 | Err(e) => { 170 | if e.kind() == io::ErrorKind::Interrupted { 171 | // It's well defined in the epoll_wait() syscall 172 | // documentation that the epoll loop can be interrupted 173 | // before any of the requested events occurred or the 174 | // timeout expired. In both those cases, epoll_wait() 175 | // returns an error of type EINTR, but this should not 176 | // be considered a regular error. Instead it is more 177 | // appropriate to retry by calling epoll_wait() again.
178 | continue; 179 | } 180 | return Err(VringEpollError::EpollWait(e)); 181 | } 182 | }; 183 | 184 | for event in events.iter().take(num_events) { 185 | let evset = match EventSet::from_bits(event.events) { 186 | Some(evset) => evset, 187 | None => { 188 | let evbits = event.events; 189 | println!("epoll: ignoring unknown event set: 0x{:x}", evbits); 190 | continue; 191 | } 192 | }; 193 | 194 | let ev_type = event.data() as u16; 195 | 196 | // handle_event() returns true if an event is received from the exit event fd. 197 | if self.handle_event(ev_type, evset)? { 198 | break 'epoll; 199 | } 200 | } 201 | } 202 | 203 | Ok(()) 204 | } 205 | 206 | fn handle_event(&self, device_event: u16, evset: EventSet) -> VringEpollResult { 207 | if self.exit_event_fd.is_some() && device_event as usize == self.backend.num_queues() { 208 | return Ok(true); 209 | } 210 | 211 | if (device_event as usize) < self.vrings.len() { 212 | let vring = &self.vrings[device_event as usize]; 213 | let enabled = vring 214 | .read_kick() 215 | .map_err(VringEpollError::HandleEventReadKick)?; 216 | 217 | // If the vring is not enabled, it should not be processed. 218 | if !enabled { 219 | return Ok(false); 220 | } 221 | } 222 | 223 | self.backend 224 | .handle_event(device_event, evset, &self.vrings, self.thread_id) 225 | .map_err(VringEpollError::HandleEventBackendHandling) 226 | } 227 | } 228 | 229 | impl AsRawFd for VringEpollHandler { 230 | fn as_raw_fd(&self) -> RawFd { 231 | self.epoll.as_raw_fd() 232 | } 233 | } 234 | 235 | #[cfg(test)] 236 | mod tests { 237 | use super::super::backend::tests::MockVhostBackend; 238 | use super::super::vring::VringRwLock; 239 | use super::*; 240 | use std::sync::{Arc, Mutex}; 241 | use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap}; 242 | use vmm_sys_util::eventfd::EventFd; 243 | 244 | #[test] 245 | fn test_vring_epoll_handler() { 246 | let mem = GuestMemoryAtomic::new( 247 | GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(), 248 | ); 249 | let vring = VringRwLock::new(mem, 0x1000).unwrap(); 250 | let backend = Arc::new(Mutex::new(MockVhostBackend::new())); 251 | 252 | let handler = VringEpollHandler::new(backend, vec![vring], 0x1).unwrap(); 253 | 254 | let eventfd = EventFd::new(0).unwrap(); 255 | handler 256 | .register_listener(eventfd.as_raw_fd(), EventSet::IN, 3) 257 | .unwrap(); 258 | // Register an already registered fd. 259 | handler 260 | .register_listener(eventfd.as_raw_fd(), EventSet::IN, 3) 261 | .unwrap_err(); 262 | // Register with invalid data. 263 | handler 264 | .register_listener(eventfd.as_raw_fd(), EventSet::IN, 1) 265 | .unwrap_err(); 266 | 267 | handler 268 | .unregister_listener(eventfd.as_raw_fd(), EventSet::IN, 3) 269 | .unwrap(); 270 | // Unregister an already unregistered fd. 271 | handler 272 | .unregister_listener(eventfd.as_raw_fd(), EventSet::IN, 3) 273 | .unwrap_err(); 274 | // Unregister with invalid data. 275 | handler 276 | .unregister_listener(eventfd.as_raw_fd(), EventSet::IN, 1) 277 | .unwrap_err(); 278 | // Check we retrieve the correct file descriptor. 279 | assert_eq!(handler.as_raw_fd(), handler.epoll.as_raw_fd()); 280 | } 281 | } 282 | -------------------------------------------------------------------------------- /src/handler.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Intel Corporation. All Rights Reserved. 2 | // Copyright 2019-2021 Alibaba Cloud. All rights reserved.
3 | // 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | use std::error; 7 | use std::fs::File; 8 | use std::io; 9 | use std::os::unix::io::AsRawFd; 10 | use std::sync::Arc; 11 | use std::thread; 12 | 13 | use vhost::vhost_user::message::{ 14 | VhostUserConfigFlags, VhostUserMemoryRegion, VhostUserProtocolFeatures, 15 | VhostUserSingleMemoryRegion, VhostUserVirtioFeatures, VhostUserVringAddrFlags, 16 | VhostUserVringState, 17 | }; 18 | use vhost::vhost_user::{ 19 | Error as VhostUserError, Result as VhostUserResult, SlaveFsCacheReq, 20 | VhostUserSlaveReqHandlerMut, 21 | }; 22 | use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX; 23 | use virtio_queue::{Error as VirtQueError, QueueT}; 24 | use vm_memory::bitmap::Bitmap; 25 | use vm_memory::mmap::NewBitmap; 26 | use vm_memory::{ 27 | FileOffset, GuestAddress, GuestAddressSpace, GuestMemoryMmap, GuestRegionMmap, MmapRegion, 28 | }; 29 | use vmm_sys_util::epoll::EventSet; 30 | 31 | use super::backend::VhostUserBackend; 32 | use super::event_loop::VringEpollHandler; 33 | use super::event_loop::{VringEpollError, VringEpollResult}; 34 | use super::vring::VringT; 35 | use super::GM; 36 | 37 | const MAX_MEM_SLOTS: u64 = 32; 38 | 39 | #[derive(Debug)] 40 | /// Errors related to vhost-user handler. 41 | pub enum VhostUserHandlerError { 42 | /// Failed to create a `Vring`. 43 | CreateVring(VirtQueError), 44 | /// Failed to create vring worker. 45 | CreateEpollHandler(VringEpollError), 46 | /// Failed to spawn vring worker. 47 | SpawnVringWorker(io::Error), 48 | /// Could not find the mapping from memory regions. 49 | MissingMemoryMapping, 50 | } 51 | 52 | impl std::fmt::Display for VhostUserHandlerError { 53 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 54 | match self { 55 | VhostUserHandlerError::CreateVring(e) => { 56 | write!(f, "failed to create vring: {}", e) 57 | } 58 | VhostUserHandlerError::CreateEpollHandler(e) => { 59 | write!(f, "failed to create vring epoll handler: {}", e) 60 | } 61 | VhostUserHandlerError::SpawnVringWorker(e) => { 62 | write!(f, "failed spawning the vring worker: {}", e) 63 | } 64 | VhostUserHandlerError::MissingMemoryMapping => write!(f, "Missing memory mapping"), 65 | } 66 | } 67 | } 68 | 69 | impl error::Error for VhostUserHandlerError {} 70 | 71 | /// Result of vhost-user handler operations. 72 | pub type VhostUserHandlerResult = std::result::Result; 73 | 74 | struct AddrMapping { 75 | vmm_addr: u64, 76 | size: u64, 77 | gpa_base: u64, 78 | } 79 | 80 | pub struct VhostUserHandler { 81 | backend: S, 82 | handlers: Vec>>, 83 | owned: bool, 84 | features_acked: bool, 85 | acked_features: u64, 86 | acked_protocol_features: u64, 87 | num_queues: usize, 88 | max_queue_size: usize, 89 | queues_per_thread: Vec, 90 | mappings: Vec, 91 | atomic_mem: GM, 92 | vrings: Vec, 93 | worker_threads: Vec>>, 94 | } 95 | 96 | // Ensure VhostUserHandler: Clone + Send + Sync + 'static. 
97 | impl VhostUserHandler 98 | where 99 | S: VhostUserBackend + Clone + 'static, 100 | V: VringT> + Clone + Send + Sync + 'static, 101 | B: Bitmap + Clone + Send + Sync + 'static, 102 | { 103 | pub(crate) fn new(backend: S, atomic_mem: GM) -> VhostUserHandlerResult { 104 | let num_queues = backend.num_queues(); 105 | let max_queue_size = backend.max_queue_size(); 106 | let queues_per_thread = backend.queues_per_thread(); 107 | 108 | let mut vrings = Vec::new(); 109 | for _ in 0..num_queues { 110 | let vring = V::new(atomic_mem.clone(), max_queue_size as u16) 111 | .map_err(VhostUserHandlerError::CreateVring)?; 112 | vrings.push(vring); 113 | } 114 | 115 | let mut handlers = Vec::new(); 116 | let mut worker_threads = Vec::new(); 117 | for (thread_id, queues_mask) in queues_per_thread.iter().enumerate() { 118 | let mut thread_vrings = Vec::new(); 119 | for (index, vring) in vrings.iter().enumerate() { 120 | if (queues_mask >> index) & 1u64 == 1u64 { 121 | thread_vrings.push(vring.clone()); 122 | } 123 | } 124 | 125 | let handler = Arc::new( 126 | VringEpollHandler::new(backend.clone(), thread_vrings, thread_id) 127 | .map_err(VhostUserHandlerError::CreateEpollHandler)?, 128 | ); 129 | let handler2 = handler.clone(); 130 | let worker_thread = thread::Builder::new() 131 | .name("vring_worker".to_string()) 132 | .spawn(move || handler2.run()) 133 | .map_err(VhostUserHandlerError::SpawnVringWorker)?; 134 | 135 | handlers.push(handler); 136 | worker_threads.push(worker_thread); 137 | } 138 | 139 | Ok(VhostUserHandler { 140 | backend, 141 | handlers, 142 | owned: false, 143 | features_acked: false, 144 | acked_features: 0, 145 | acked_protocol_features: 0, 146 | num_queues, 147 | max_queue_size, 148 | queues_per_thread, 149 | mappings: Vec::new(), 150 | atomic_mem, 151 | vrings, 152 | worker_threads, 153 | }) 154 | } 155 | } 156 | 157 | impl VhostUserHandler { 158 | pub(crate) fn send_exit_event(&self) { 159 | for handler in self.handlers.iter() { 160 | handler.send_exit_event(); 161 | } 162 | } 163 | 164 | fn vmm_va_to_gpa(&self, vmm_va: u64) -> VhostUserHandlerResult { 165 | for mapping in self.mappings.iter() { 166 | if vmm_va >= mapping.vmm_addr && vmm_va < mapping.vmm_addr + mapping.size { 167 | return Ok(vmm_va - mapping.vmm_addr + mapping.gpa_base); 168 | } 169 | } 170 | 171 | Err(VhostUserHandlerError::MissingMemoryMapping) 172 | } 173 | } 174 | 175 | impl VhostUserHandler 176 | where 177 | S: VhostUserBackend, 178 | V: VringT>, 179 | B: Bitmap, 180 | { 181 | pub(crate) fn get_epoll_handlers(&self) -> Vec>> { 182 | self.handlers.clone() 183 | } 184 | 185 | fn vring_needs_init(&self, vring: &V) -> bool { 186 | let vring_state = vring.get_ref(); 187 | 188 | // If the vring wasn't initialized and we already have an EventFd for 189 | // VRING_KICK, initialize it now. 
190 | !vring_state.get_queue().ready() && vring_state.get_kick().is_some() 191 | } 192 | 193 | fn initialize_vring(&self, vring: &V, index: u8) -> VhostUserResult<()> { 194 | assert!(vring.get_ref().get_kick().is_some()); 195 | 196 | if let Some(fd) = vring.get_ref().get_kick() { 197 | for (thread_index, queues_mask) in self.queues_per_thread.iter().enumerate() { 198 | let shifted_queues_mask = queues_mask >> index; 199 | if shifted_queues_mask & 1u64 == 1u64 { 200 | let evt_idx = queues_mask.count_ones() - shifted_queues_mask.count_ones(); 201 | self.handlers[thread_index] 202 | .register_event(fd.as_raw_fd(), EventSet::IN, u64::from(evt_idx)) 203 | .map_err(VhostUserError::ReqHandlerError)?; 204 | break; 205 | } 206 | } 207 | } 208 | 209 | self.vrings[index as usize].set_queue_ready(true); 210 | 211 | Ok(()) 212 | } 213 | } 214 | 215 | impl VhostUserSlaveReqHandlerMut for VhostUserHandler 216 | where 217 | S: VhostUserBackend, 218 | V: VringT>, 219 | B: NewBitmap + Clone, 220 | { 221 | fn set_owner(&mut self) -> VhostUserResult<()> { 222 | if self.owned { 223 | return Err(VhostUserError::InvalidOperation("already claimed")); 224 | } 225 | self.owned = true; 226 | Ok(()) 227 | } 228 | 229 | fn reset_owner(&mut self) -> VhostUserResult<()> { 230 | self.owned = false; 231 | self.features_acked = false; 232 | self.acked_features = 0; 233 | self.acked_protocol_features = 0; 234 | Ok(()) 235 | } 236 | 237 | fn get_features(&mut self) -> VhostUserResult { 238 | Ok(self.backend.features()) 239 | } 240 | 241 | fn set_features(&mut self, features: u64) -> VhostUserResult<()> { 242 | if (features & !self.backend.features()) != 0 { 243 | return Err(VhostUserError::InvalidParam); 244 | } 245 | 246 | self.acked_features = features; 247 | self.features_acked = true; 248 | 249 | // If VHOST_USER_F_PROTOCOL_FEATURES has not been negotiated, 250 | // the ring is initialized in an enabled state. 251 | // If VHOST_USER_F_PROTOCOL_FEATURES has been negotiated, 252 | // the ring is initialized in a disabled state. Client must not 253 | // pass data to/from the backend until ring is enabled by 254 | // VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has 255 | // been disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0. 256 | let vring_enabled = 257 | self.acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0; 258 | for vring in self.vrings.iter_mut() { 259 | vring.set_enabled(vring_enabled); 260 | } 261 | 262 | self.backend.acked_features(self.acked_features); 263 | 264 | Ok(()) 265 | } 266 | 267 | fn set_mem_table( 268 | &mut self, 269 | ctx: &[VhostUserMemoryRegion], 270 | files: Vec, 271 | ) -> VhostUserResult<()> { 272 | // We need to create tuple of ranges from the list of VhostUserMemoryRegion 273 | // that we get from the caller. 
274 | let mut regions: Vec<(GuestAddress, usize, Option)> = Vec::new(); 275 | let mut mappings: Vec = Vec::new(); 276 | 277 | for (region, file) in ctx.iter().zip(files) { 278 | let g_addr = GuestAddress(region.guest_phys_addr); 279 | let len = region.memory_size as usize; 280 | let f_off = FileOffset::new(file, region.mmap_offset); 281 | 282 | regions.push((g_addr, len, Some(f_off))); 283 | mappings.push(AddrMapping { 284 | vmm_addr: region.user_addr, 285 | size: region.memory_size, 286 | gpa_base: region.guest_phys_addr, 287 | }); 288 | } 289 | 290 | let mem = GuestMemoryMmap::from_ranges_with_files(regions).map_err(|e| { 291 | VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e)) 292 | })?; 293 | 294 | // Updating the inner GuestMemory object here will cause all our vrings to 295 | // see the new one the next time they call to `atomic_mem.memory()`. 296 | self.atomic_mem.lock().unwrap().replace(mem); 297 | 298 | self.backend 299 | .update_memory(self.atomic_mem.clone()) 300 | .map_err(|e| { 301 | VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e)) 302 | })?; 303 | self.mappings = mappings; 304 | 305 | Ok(()) 306 | } 307 | 308 | fn set_vring_num(&mut self, index: u32, num: u32) -> VhostUserResult<()> { 309 | if index as usize >= self.num_queues || num == 0 || num as usize > self.max_queue_size { 310 | return Err(VhostUserError::InvalidParam); 311 | } 312 | self.vrings[index as usize].set_queue_size(num as u16); 313 | Ok(()) 314 | } 315 | 316 | fn set_vring_addr( 317 | &mut self, 318 | index: u32, 319 | _flags: VhostUserVringAddrFlags, 320 | descriptor: u64, 321 | used: u64, 322 | available: u64, 323 | _log: u64, 324 | ) -> VhostUserResult<()> { 325 | if index as usize >= self.num_queues { 326 | return Err(VhostUserError::InvalidParam); 327 | } 328 | 329 | if !self.mappings.is_empty() { 330 | let desc_table = self.vmm_va_to_gpa(descriptor).map_err(|e| { 331 | VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e)) 332 | })?; 333 | let avail_ring = self.vmm_va_to_gpa(available).map_err(|e| { 334 | VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e)) 335 | })?; 336 | let used_ring = self.vmm_va_to_gpa(used).map_err(|e| { 337 | VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e)) 338 | })?; 339 | self.vrings[index as usize] 340 | .set_queue_info(desc_table, avail_ring, used_ring) 341 | .map_err(|_| VhostUserError::InvalidParam)?; 342 | Ok(()) 343 | } else { 344 | Err(VhostUserError::InvalidParam) 345 | } 346 | } 347 | 348 | fn set_vring_base(&mut self, index: u32, base: u32) -> VhostUserResult<()> { 349 | let event_idx: bool = (self.acked_features & (1 << VIRTIO_RING_F_EVENT_IDX)) != 0; 350 | 351 | self.vrings[index as usize].set_queue_next_avail(base as u16); 352 | self.vrings[index as usize].set_queue_event_idx(event_idx); 353 | self.backend.set_event_idx(event_idx); 354 | 355 | Ok(()) 356 | } 357 | 358 | fn get_vring_base(&mut self, index: u32) -> VhostUserResult { 359 | if index as usize >= self.num_queues { 360 | return Err(VhostUserError::InvalidParam); 361 | } 362 | // Quote from vhost-user specification: 363 | // Client must start ring upon receiving a kick (that is, detecting 364 | // that file descriptor is readable) on the descriptor specified by 365 | // VHOST_USER_SET_VRING_KICK, and stop ring upon receiving 366 | // VHOST_USER_GET_VRING_BASE. 
367 | self.vrings[index as usize].set_queue_ready(false); 368 | if let Some(fd) = self.vrings[index as usize].get_ref().get_kick() { 369 | for (thread_index, queues_mask) in self.queues_per_thread.iter().enumerate() { 370 | let shifted_queues_mask = queues_mask >> index; 371 | if shifted_queues_mask & 1u64 == 1u64 { 372 | let evt_idx = queues_mask.count_ones() - shifted_queues_mask.count_ones(); 373 | self.handlers[thread_index] 374 | .unregister_event(fd.as_raw_fd(), EventSet::IN, u64::from(evt_idx)) 375 | .map_err(VhostUserError::ReqHandlerError)?; 376 | break; 377 | } 378 | } 379 | } 380 | 381 | self.vrings[index as usize].set_kick(None); 382 | self.vrings[index as usize].set_call(None); 383 | 384 | // Strictly speaking, we should do this upon receiving the first kick, 385 | // but it's actually easier to just do it here so we're ready in case 386 | // the vring gets re-initialized by the guest. 387 | self.vrings[index as usize] 388 | .get_mut() 389 | .get_queue_mut() 390 | .reset(); 391 | 392 | let next_avail = self.vrings[index as usize].queue_next_avail(); 393 | 394 | Ok(VhostUserVringState::new(index, u32::from(next_avail))) 395 | } 396 | 397 | fn set_vring_kick(&mut self, index: u8, file: Option) -> VhostUserResult<()> { 398 | if index as usize >= self.num_queues { 399 | return Err(VhostUserError::InvalidParam); 400 | } 401 | 402 | // SAFETY: EventFd requires that it has sole ownership of its fd. So 403 | // does File, so this is safe. 404 | // Ideally, we'd have a generic way to refer to a uniquely-owned fd, 405 | // such as that proposed by Rust RFC #3128. 406 | self.vrings[index as usize].set_kick(file); 407 | 408 | if self.vring_needs_init(&self.vrings[index as usize]) { 409 | self.initialize_vring(&self.vrings[index as usize], index)?; 410 | } 411 | 412 | Ok(()) 413 | } 414 | 415 | fn set_vring_call(&mut self, index: u8, file: Option) -> VhostUserResult<()> { 416 | if index as usize >= self.num_queues { 417 | return Err(VhostUserError::InvalidParam); 418 | } 419 | 420 | self.vrings[index as usize].set_call(file); 421 | 422 | if self.vring_needs_init(&self.vrings[index as usize]) { 423 | self.initialize_vring(&self.vrings[index as usize], index)?; 424 | } 425 | 426 | Ok(()) 427 | } 428 | 429 | fn set_vring_err(&mut self, index: u8, file: Option) -> VhostUserResult<()> { 430 | if index as usize >= self.num_queues { 431 | return Err(VhostUserError::InvalidParam); 432 | } 433 | 434 | self.vrings[index as usize].set_err(file); 435 | 436 | Ok(()) 437 | } 438 | 439 | fn get_protocol_features(&mut self) -> VhostUserResult { 440 | Ok(self.backend.protocol_features()) 441 | } 442 | 443 | fn set_protocol_features(&mut self, features: u64) -> VhostUserResult<()> { 444 | // Note: slave that reported VHOST_USER_F_PROTOCOL_FEATURES must 445 | // support this message even before VHOST_USER_SET_FEATURES was 446 | // called. 447 | self.acked_protocol_features = features; 448 | Ok(()) 449 | } 450 | 451 | fn get_queue_num(&mut self) -> VhostUserResult { 452 | Ok(self.num_queues as u64) 453 | } 454 | 455 | fn set_vring_enable(&mut self, index: u32, enable: bool) -> VhostUserResult<()> { 456 | // This request should be handled only when VHOST_USER_F_PROTOCOL_FEATURES 457 | // has been negotiated. 
458 |         if self.acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0 {
459 |             return Err(VhostUserError::InvalidOperation(
460 |                 "protocol features not set",
461 |             ));
462 |         } else if index as usize >= self.num_queues {
463 |             return Err(VhostUserError::InvalidParam);
464 |         }
465 | 
466 |         // Slave must not pass data to/from the backend until ring is
467 |         // enabled by VHOST_USER_SET_VRING_ENABLE with parameter 1,
468 |         // or after it has been disabled by VHOST_USER_SET_VRING_ENABLE
469 |         // with parameter 0.
470 |         self.vrings[index as usize].set_enabled(enable);
471 | 
472 |         Ok(())
473 |     }
474 | 
475 |     fn get_config(
476 |         &mut self,
477 |         offset: u32,
478 |         size: u32,
479 |         _flags: VhostUserConfigFlags,
480 |     ) -> VhostUserResult<Vec<u8>> {
481 |         Ok(self.backend.get_config(offset, size))
482 |     }
483 | 
484 |     fn set_config(
485 |         &mut self,
486 |         offset: u32,
487 |         buf: &[u8],
488 |         _flags: VhostUserConfigFlags,
489 |     ) -> VhostUserResult<()> {
490 |         self.backend
491 |             .set_config(offset, buf)
492 |             .map_err(VhostUserError::ReqHandlerError)
493 |     }
494 | 
495 |     fn set_slave_req_fd(&mut self, vu_req: SlaveFsCacheReq) {
496 |         if self.acked_protocol_features & VhostUserProtocolFeatures::REPLY_ACK.bits() != 0 {
497 |             vu_req.set_reply_ack_flag(true);
498 |         }
499 | 
500 |         self.backend.set_slave_req_fd(vu_req);
501 |     }
502 | 
503 |     fn get_inflight_fd(
504 |         &mut self,
505 |         _inflight: &vhost::vhost_user::message::VhostUserInflight,
506 |     ) -> VhostUserResult<(vhost::vhost_user::message::VhostUserInflight, File)> {
507 |         // Assume the backend hasn't negotiated the inflight feature; it
508 |         // wouldn't be correct for the backend to do so, as we don't (yet)
509 |         // provide a way for it to handle such requests.
510 |         Err(VhostUserError::InvalidOperation("not supported"))
511 |     }
512 | 
513 |     fn set_inflight_fd(
514 |         &mut self,
515 |         _inflight: &vhost::vhost_user::message::VhostUserInflight,
516 |         _file: File,
517 |     ) -> VhostUserResult<()> {
518 |         Err(VhostUserError::InvalidOperation("not supported"))
519 |     }
520 | 
521 |     fn get_max_mem_slots(&mut self) -> VhostUserResult<u64> {
522 |         Ok(MAX_MEM_SLOTS)
523 |     }
524 | 
525 |     fn add_mem_region(
526 |         &mut self,
527 |         region: &VhostUserSingleMemoryRegion,
528 |         file: File,
529 |     ) -> VhostUserResult<()> {
530 |         let mmap_region = MmapRegion::from_file(
531 |             FileOffset::new(file, region.mmap_offset),
532 |             region.memory_size as usize,
533 |         )
534 |         .map_err(|e| VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e)))?;
535 |         let guest_region = Arc::new(
536 |             GuestRegionMmap::new(mmap_region, GuestAddress(region.guest_phys_addr)).map_err(
537 |                 |e| VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e)),
538 |             )?,
539 |         );
540 | 
541 |         let mem = self
542 |             .atomic_mem
543 |             .memory()
544 |             .insert_region(guest_region)
545 |             .map_err(|e| {
546 |                 VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
547 |             })?;
548 | 
549 |         self.atomic_mem.lock().unwrap().replace(mem);
550 | 
551 |         self.backend
552 |             .update_memory(self.atomic_mem.clone())
553 |             .map_err(|e| {
554 |                 VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
555 |             })?;
556 | 
557 |         self.mappings.push(AddrMapping {
558 |             vmm_addr: region.user_addr,
559 |             size: region.memory_size,
560 |             gpa_base: region.guest_phys_addr,
561 |         });
562 | 
563 |         Ok(())
564 |     }
565 | 
566 |     fn remove_mem_region(&mut self, region: &VhostUserSingleMemoryRegion) -> VhostUserResult<()> {
567 |         let (mem, _) = self
568 |             .atomic_mem
569 |             .memory()
570 |             .remove_region(GuestAddress(region.guest_phys_addr), region.memory_size)
571 |             .map_err(|e| {
572 |                 VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
573 |             })?;
574 | 
575 |         self.atomic_mem.lock().unwrap().replace(mem);
576 | 
577 |         self.backend
578 |             .update_memory(self.atomic_mem.clone())
579 |             .map_err(|e| {
580 |                 VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
581 |             })?;
582 | 
583 |         self.mappings
584 |             .retain(|mapping| mapping.gpa_base != region.guest_phys_addr);
585 | 
586 |         Ok(())
587 |     }
588 | }
589 | 
590 | impl<S, V, B: Bitmap + 'static> Drop for VhostUserHandler<S, V, B> {
591 |     fn drop(&mut self) {
592 |         // Signal all working threads to exit.
593 |         self.send_exit_event();
594 | 
595 |         for thread in self.worker_threads.drain(..) {
596 |             if let Err(e) = thread.join() {
597 |                 error!("Error in vring worker: {:?}", e);
598 |             }
599 |         }
600 |     }
601 | }
602 | 
--------------------------------------------------------------------------------
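The `set_vring_addr` path above leans on the handler's `vmm_va_to_gpa()` helper (defined earlier in `handler.rs`, not shown here) to turn the VMM's userspace ring addresses into guest-physical ones, using the `AddrMapping` entries recorded by `set_mem_table()`/`add_mem_region()`. A minimal standalone sketch of that translation, built only from the three mapping fields visible above; the free function and `String` error are illustrative, not the crate's actual helper:

```rust
struct AddrMapping {
    vmm_addr: u64, // address of the region in the VMM's address space
    size: u64,     // region size in bytes
    gpa_base: u64, // guest-physical base address of the region
}

/// Translate a VMM virtual address into a guest-physical address by scanning
/// the regions recorded when the memory table was installed.
fn vmm_va_to_gpa(mappings: &[AddrMapping], vmm_va: u64) -> Result<u64, String> {
    for mapping in mappings {
        if vmm_va >= mapping.vmm_addr && vmm_va < mapping.vmm_addr + mapping.size {
            // The offset within a region is preserved by the translation.
            return Ok(vmm_va - mapping.vmm_addr + mapping.gpa_base);
        }
    }
    Err(format!("no mapping covers VMM address {:#x}", vmm_va))
}
```

This is also why `set_vring_addr` rejects the request with `InvalidParam` when `self.mappings` is empty: without a memory table there is nothing to translate against.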
/src/lib.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Intel Corporation. All Rights Reserved.
2 | // Copyright 2019-2021 Alibaba Cloud Computing. All rights reserved.
3 | //
4 | // SPDX-License-Identifier: Apache-2.0
5 | 
6 | //! A simple framework to run a vhost-user backend service.
7 | 
8 | #[macro_use]
9 | extern crate log;
10 | 
11 | use std::fmt::{Display, Formatter};
12 | use std::sync::{Arc, Mutex};
13 | use std::thread;
14 | 
15 | use vhost::vhost_user::{Error as VhostUserError, Listener, SlaveListener, SlaveReqHandler};
16 | use vm_memory::bitmap::Bitmap;
17 | use vm_memory::mmap::NewBitmap;
18 | use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};
19 | 
20 | use self::handler::VhostUserHandler;
21 | 
22 | mod backend;
23 | pub use self::backend::{VhostUserBackend, VhostUserBackendMut};
24 | 
25 | mod event_loop;
26 | pub use self::event_loop::VringEpollHandler;
27 | 
28 | mod handler;
29 | pub use self::handler::VhostUserHandlerError;
30 | 
31 | mod vring;
32 | pub use self::vring::{
33 |     VringMutex, VringRwLock, VringState, VringStateGuard, VringStateMutGuard, VringT,
34 | };
35 | 
36 | /// An alias for `GuestMemoryAtomic<GuestMemoryMmap<B>>` to simplify code.
37 | type GM<B> = GuestMemoryAtomic<GuestMemoryMmap<B>>;
38 | 
39 | #[derive(Debug)]
40 | /// Errors related to vhost-user daemon.
41 | pub enum Error {
42 |     /// Failed to create a new vhost-user handler.
43 |     NewVhostUserHandler(VhostUserHandlerError),
44 |     /// Failed creating vhost-user slave listener.
45 |     CreateSlaveListener(VhostUserError),
46 |     /// Failed creating vhost-user slave handler.
47 |     CreateSlaveReqHandler(VhostUserError),
48 |     /// Failed starting daemon thread.
49 |     StartDaemon(std::io::Error),
50 |     /// Failed waiting for daemon thread.
51 |     WaitDaemon(std::boxed::Box<dyn std::any::Any + std::marker::Send>),
52 |     /// Failed handling a vhost-user request.
53 |     HandleRequest(VhostUserError),
54 | }
55 | 
56 | impl Display for Error {
57 |     fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
58 |         match self {
59 |             Error::NewVhostUserHandler(e) => write!(f, "cannot create vhost user handler: {}", e),
60 |             Error::CreateSlaveListener(e) => write!(f, "cannot create slave listener: {}", e),
61 |             Error::CreateSlaveReqHandler(e) => write!(f, "cannot create slave req handler: {}", e),
62 |             Error::StartDaemon(e) => write!(f, "failed to start daemon: {}", e),
63 |             Error::WaitDaemon(_e) => write!(f, "failed to wait for daemon exit"),
64 |             Error::HandleRequest(e) => write!(f, "failed to handle request: {}", e),
65 |         }
66 |     }
67 | }
68 | 
69 | /// Result of vhost-user daemon operations.
70 | pub type Result<T> = std::result::Result<T, Error>;
71 | 
72 | /// Implement a simple framework to run a vhost-user service daemon.
73 | ///
74 | /// This structure is the public API the backend is allowed to interact with in order to run
75 | /// a fully functional vhost-user daemon.
76 | pub struct VhostUserDaemon<S, V, B: Bitmap + 'static = ()> {
77 |     name: String,
78 |     handler: Arc<Mutex<VhostUserHandler<S, V, B>>>,
79 |     main_thread: Option<thread::JoinHandle<Result<()>>>,
80 | }
81 | 
82 | impl<S, V, B> VhostUserDaemon<S, V, B>
83 | where
84 |     S: VhostUserBackend<V, B> + Clone + 'static,
85 |     V: VringT<GM<B>> + Clone + Send + Sync + 'static,
86 |     B: NewBitmap + Clone + Send + Sync,
87 | {
88 |     /// Create the daemon instance, providing the backend implementation of `VhostUserBackend`.
89 |     ///
90 |     /// Under the hood, this will start a dedicated thread responsible for listening to
91 |     /// registered events. Those events can be vring events or custom events from the backend,
92 |     /// though they are registered later in the initialization sequence.
93 |     pub fn new(
94 |         name: String,
95 |         backend: S,
96 |         atomic_mem: GuestMemoryAtomic<GuestMemoryMmap<B>>,
97 |     ) -> Result<Self> {
98 |         let handler = Arc::new(Mutex::new(
99 |             VhostUserHandler::new(backend, atomic_mem).map_err(Error::NewVhostUserHandler)?,
100 |         ));
101 | 
102 |         Ok(VhostUserDaemon {
103 |             name,
104 |             handler,
105 |             main_thread: None,
106 |         })
107 |     }
108 | 
109 |     /// Run a dedicated thread handling all requests coming through the socket.
110 |     /// This runs in an infinite loop that terminates once the other end of the
111 |     /// socket (the VMM) hangs up.
112 |     ///
113 |     /// This function is the common code for starting a new daemon, no matter if
114 |     /// it acts as a client or a server.
115 |     fn start_daemon(
116 |         &mut self,
117 |         mut handler: SlaveReqHandler<Mutex<VhostUserHandler<S, V, B>>>,
118 |     ) -> Result<()> {
119 |         let handle = thread::Builder::new()
120 |             .name(self.name.clone())
121 |             .spawn(move || loop {
122 |                 handler.handle_request().map_err(Error::HandleRequest)?;
123 |             })
124 |             .map_err(Error::StartDaemon)?;
125 | 
126 |         self.main_thread = Some(handle);
127 | 
128 |         Ok(())
129 |     }
130 | 
131 |     /// Connect to the vhost-user socket and run a dedicated thread handling
132 |     /// all requests coming through this socket. This runs in an infinite loop
133 |     /// that terminates once the other end of the socket (the VMM) hangs up.
134 |     ///
135 |     pub fn start_client(&mut self, socket_path: &str) -> Result<()> {
136 |         let slave_handler = SlaveReqHandler::connect(socket_path, self.handler.clone())
137 |             .map_err(Error::CreateSlaveReqHandler)?;
138 |         self.start_daemon(slave_handler)
139 |     }
140 | 
141 |     /// Listen to the vhost-user socket and run a dedicated thread handling all requests coming
142 |     /// through this socket.
143 |     ///
144 |     /// This runs in an infinite loop that terminates once the other end of the socket
145 |     /// (the VMM) disconnects.
146 |     // TODO: the current implementation only handles a single incoming connection from the
147 |     // listener. Should it be enhanced to support reconnection?
148 |     pub fn start(&mut self, listener: Listener) -> Result<()> {
149 |         let mut slave_listener = SlaveListener::new(listener, self.handler.clone())
150 |             .map_err(Error::CreateSlaveListener)?;
151 |         let slave_handler = self.accept(&mut slave_listener)?;
152 |         self.start_daemon(slave_handler)
153 |     }
154 | 
155 |     fn accept(
156 |         &self,
157 |         slave_listener: &mut SlaveListener<Mutex<VhostUserHandler<S, V, B>>>,
158 |     ) -> Result<SlaveReqHandler<Mutex<VhostUserHandler<S, V, B>>>> {
159 |         loop {
160 |             match slave_listener.accept() {
161 |                 Err(e) => return Err(Error::CreateSlaveListener(e)),
162 |                 Ok(Some(v)) => return Ok(v),
163 |                 Ok(None) => continue,
164 |             }
165 |         }
166 |     }
167 | 
168 |     /// Wait for the thread handling the vhost-user socket connection to terminate.
169 |     pub fn wait(&mut self) -> Result<()> {
170 |         if let Some(handle) = self.main_thread.take() {
171 |             match handle.join().map_err(Error::WaitDaemon)? {
172 |                 Ok(()) => Ok(()),
173 |                 Err(Error::HandleRequest(VhostUserError::SocketBroken(_))) => Ok(()),
174 |                 Err(e) => Err(e),
175 |             }
176 |         } else {
177 |             Ok(())
178 |         }
179 |     }
180 | 
181 |     /// Retrieve the vring epoll handlers.
182 |     ///
183 |     /// This is necessary to perform further actions like registering and unregistering some extra
184 |     /// event file descriptors.
185 |     pub fn get_epoll_handlers(&self) -> Vec<Arc<VringEpollHandler<S, V, B>>> {
186 |         // Do not expect poisoned lock.
187 |         self.handler.lock().unwrap().get_epoll_handlers()
188 |     }
189 | }
190 | 
191 | #[cfg(test)]
192 | mod tests {
193 |     use super::backend::tests::MockVhostBackend;
194 |     use super::*;
195 |     use std::os::unix::net::{UnixListener, UnixStream};
196 |     use std::sync::Barrier;
197 |     use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
198 | 
199 |     #[test]
200 |     fn test_new_daemon() {
201 |         let mem = GuestMemoryAtomic::new(
202 |             GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
203 |         );
204 |         let backend = Arc::new(Mutex::new(MockVhostBackend::new()));
205 |         let mut daemon = VhostUserDaemon::new("test".to_owned(), backend, mem).unwrap();
206 | 
207 |         let handlers = daemon.get_epoll_handlers();
208 |         assert_eq!(handlers.len(), 2);
209 | 
210 |         let barrier = Arc::new(Barrier::new(2));
211 |         let tmpdir = tempfile::tempdir().unwrap();
212 |         let mut path = tmpdir.path().to_path_buf();
213 |         path.push("socket");
214 | 
215 |         let barrier2 = barrier.clone();
216 |         let path1 = path.clone();
217 |         let thread = thread::spawn(move || {
218 |             barrier2.wait();
219 |             let socket = UnixStream::connect(&path1).unwrap();
220 |             barrier2.wait();
221 |             drop(socket)
222 |         });
223 | 
224 |         let listener = Listener::new(&path, false).unwrap();
225 |         barrier.wait();
226 |         daemon.start(listener).unwrap();
227 |         barrier.wait();
228 |         // Above process generates a `HandleRequest(PartialMessage)` error.
229 |         daemon.wait().unwrap_err();
230 |         daemon.wait().unwrap(); // The thread was already joined, so this returns Ok(()).
231 |         thread.join().unwrap();
232 |     }
233 | 
234 |     #[test]
235 |     fn test_new_daemon_client() {
236 |         let mem = GuestMemoryAtomic::new(
237 |             GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
238 |         );
239 |         let backend = Arc::new(Mutex::new(MockVhostBackend::new()));
240 |         let mut daemon = VhostUserDaemon::new("test".to_owned(), backend, mem).unwrap();
241 | 
242 |         let handlers = daemon.get_epoll_handlers();
243 |         assert_eq!(handlers.len(), 2);
244 | 
245 |         let barrier = Arc::new(Barrier::new(2));
246 |         let tmpdir = tempfile::tempdir().unwrap();
247 |         let mut path = tmpdir.path().to_path_buf();
248 |         path.push("socket");
249 | 
250 |         let barrier2 = barrier.clone();
251 |         let path1 = path.clone();
252 |         let thread = thread::spawn(move || {
253 |             let listener = UnixListener::bind(&path1).unwrap();
254 |             barrier2.wait();
255 |             let (stream, _) = listener.accept().unwrap();
256 |             barrier2.wait();
257 |             drop(stream)
258 |         });
259 | 
260 |         barrier.wait();
261 |         daemon
262 |             .start_client(path.as_path().to_str().unwrap())
263 |             .unwrap();
264 |         barrier.wait();
265 |         // Above process generates a `HandleRequest(PartialMessage)` error.
266 |         daemon.wait().unwrap_err();
267 |         daemon.wait().unwrap(); // The thread was already joined, so this returns Ok(()).
268 |         thread.join().unwrap();
269 |     }
270 | }
271 | 
--------------------------------------------------------------------------------
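Tying `lib.rs` together: a backend is wrapped in `Arc<Mutex<...>>` (picking up the blanket `VhostUserBackend` impl the tests above rely on), handed to `VhostUserDaemon::new()` together with a guest-memory atomic, and then served over a Unix socket. A minimal sketch under those assumptions; the bounds mirror the `impl` block above, and `run_server` is an illustrative name, not part of the crate:

```rust
use std::sync::{Arc, Mutex};

use vhost::vhost_user::Listener;
use vhost_user_backend::{VhostUserBackendMut, VhostUserDaemon, VringRwLock};
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};

/// Serve `backend` on `socket_path` until the VMM hangs up.
fn run_server<T>(backend: Arc<Mutex<T>>, socket_path: &str)
where
    T: VhostUserBackendMut<VringRwLock, ()> + 'static,
{
    let mem = GuestMemoryAtomic::new(GuestMemoryMmap::<()>::new());
    let mut daemon = VhostUserDaemon::new("example".to_owned(), backend, mem).unwrap();

    // Server mode: accept a single connection from the VMM ...
    let listener = Listener::new(socket_path, false).unwrap();
    daemon.start(listener).unwrap();
    // ... client mode would call daemon.start_client(socket_path) instead.

    // wait() maps a cleanly broken socket (SocketBroken) to Ok(()).
    daemon.wait().unwrap();
}
```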
/src/vring.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Intel Corporation. All Rights Reserved.
2 | // Copyright 2021 Alibaba Cloud Computing. All rights reserved.
3 | //
4 | // SPDX-License-Identifier: Apache-2.0
5 | 
6 | //! Struct to maintain state information and manipulate vhost-user queues.
7 | 
8 | use std::fs::File;
9 | use std::io;
10 | use std::ops::{Deref, DerefMut};
11 | use std::os::unix::io::{FromRawFd, IntoRawFd};
12 | use std::result::Result;
13 | use std::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
14 | 
15 | use virtio_queue::{Error as VirtQueError, Queue, QueueT};
16 | use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap};
17 | use vmm_sys_util::eventfd::EventFd;
18 | 
19 | /// Trait for objects returned by `VringT::get_ref()`.
20 | pub trait VringStateGuard<'a, M: GuestAddressSpace> {
21 |     /// Type for guard returned by `VringT::get_ref()`.
22 |     type G: Deref<Target = VringState<M>>;
23 | }
24 | 
25 | /// Trait for objects returned by `VringT::get_mut()`.
26 | pub trait VringStateMutGuard<'a, M: GuestAddressSpace> {
27 |     /// Type for guard returned by `VringT::get_mut()`.
28 |     type G: DerefMut<Target = VringState<M>>;
29 | }
30 | 
31 | pub trait VringT<M: GuestAddressSpace>:
32 |     for<'a> VringStateGuard<'a, M> + for<'a> VringStateMutGuard<'a, M>
33 | {
34 |     /// Create a new instance of Vring.
35 |     fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError>
36 |     where
37 |         Self: Sized;
38 | 
39 |     /// Get an immutable guard to the underlying `VringState` object.
40 |     fn get_ref(&self) -> <Self as VringStateGuard<M>>::G;
41 | 
42 |     /// Get a mutable guard to the underlying `VringState` object.
43 |     fn get_mut(&self) -> <Self as VringStateMutGuard<M>>::G;
44 | 
45 |     /// Add a used descriptor into the used queue.
46 |     fn add_used(&self, desc_index: u16, len: u32) -> Result<(), VirtQueError>;
47 | 
48 |     /// Notify the vhost-user master that used descriptors have been put into the used queue.
49 |     fn signal_used_queue(&self) -> io::Result<()>;
50 | 
51 |     /// Enable event notification for queue.
52 |     fn enable_notification(&self) -> Result<bool, VirtQueError>;
53 | 
54 |     /// Disable event notification for queue.
55 |     fn disable_notification(&self) -> Result<(), VirtQueError>;
56 | 
57 |     /// Check whether a notification to the guest is needed.
58 |     fn needs_notification(&self) -> Result<bool, VirtQueError>;
59 | 
60 |     /// Set vring enabled state.
61 |     fn set_enabled(&self, enabled: bool);
62 | 
63 |     /// Set queue addresses for descriptor table, available ring and used ring.
64 |     fn set_queue_info(
65 |         &self,
66 |         desc_table: u64,
67 |         avail_ring: u64,
68 |         used_ring: u64,
69 |     ) -> Result<(), VirtQueError>;
70 | 
71 |     /// Get queue next avail head.
72 |     fn queue_next_avail(&self) -> u16;
73 | 
74 |     /// Set queue next avail head.
75 |     fn set_queue_next_avail(&self, base: u16);
76 | 
77 |     /// Set configured queue size.
78 |     fn set_queue_size(&self, num: u16);
79 | 
80 |     /// Enable/disable queue event index feature.
81 |     fn set_queue_event_idx(&self, enabled: bool);
82 | 
83 |     /// Set queue enabled state.
84 |     fn set_queue_ready(&self, ready: bool);
85 | 
86 |     /// Set `EventFd` for kick.
87 |     fn set_kick(&self, file: Option<File>);
88 | 
89 |     /// Read event from the kick `EventFd`.
90 |     fn read_kick(&self) -> io::Result<bool>;
91 | 
92 |     /// Set `EventFd` for call.
93 |     fn set_call(&self, file: Option<File>);
94 | 
95 |     /// Set `EventFd` for err.
96 |     fn set_err(&self, file: Option<File>);
97 | }
98 | 
99 | /// Struct to maintain raw state information for a vhost-user queue.
100 | ///
101 | /// This struct maintains all information of a virtio queue, and could be used as a `VringT`
102 | /// object for single-threaded context.
103 | pub struct VringState<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
104 |     queue: Queue,
105 |     kick: Option<EventFd>,
106 |     call: Option<EventFd>,
107 |     err: Option<EventFd>,
108 |     enabled: bool,
109 |     mem: M,
110 | }
111 | 
112 | impl<M: GuestAddressSpace> VringState<M> {
113 |     /// Create a new instance of Vring.
114 |     fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError> {
115 |         Ok(VringState {
116 |             queue: Queue::new(max_queue_size)?,
117 |             kick: None,
118 |             call: None,
119 |             err: None,
120 |             enabled: false,
121 |             mem,
122 |         })
123 |     }
124 | 
125 |     /// Get an immutable reference to the underlying raw `Queue` object.
126 |     pub fn get_queue(&self) -> &Queue {
127 |         &self.queue
128 |     }
129 | 
130 |     /// Get a mutable reference to the underlying raw `Queue` object.
131 |     pub fn get_queue_mut(&mut self) -> &mut Queue {
132 |         &mut self.queue
133 |     }
134 | 
135 |     /// Add a used descriptor into the used queue.
136 |     pub fn add_used(&mut self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
137 |         self.queue
138 |             .add_used(self.mem.memory().deref(), desc_index, len)
139 |     }
140 | 
141 |     /// Notify the vhost-user master that used descriptors have been put into the used queue.
142 |     pub fn signal_used_queue(&self) -> io::Result<()> {
143 |         if let Some(call) = self.call.as_ref() {
144 |             call.write(1)
145 |         } else {
146 |             Ok(())
147 |         }
148 |     }
149 | 
150 |     /// Enable event notification for queue.
151 |     pub fn enable_notification(&mut self) -> Result<bool, VirtQueError> {
152 |         self.queue.enable_notification(self.mem.memory().deref())
153 |     }
154 | 
155 |     /// Disable event notification for queue.
156 |     pub fn disable_notification(&mut self) -> Result<(), VirtQueError> {
157 |         self.queue.disable_notification(self.mem.memory().deref())
158 |     }
159 | 
160 |     /// Check whether a notification to the guest is needed.
161 |     pub fn needs_notification(&mut self) -> Result<bool, VirtQueError> {
162 |         self.queue.needs_notification(self.mem.memory().deref())
163 |     }
164 | 
165 |     /// Set vring enabled state.
166 |     pub fn set_enabled(&mut self, enabled: bool) {
167 |         self.enabled = enabled;
168 |     }
169 | 
170 |     /// Set queue addresses for descriptor table, available ring and used ring.
171 |     pub fn set_queue_info(
172 |         &mut self,
173 |         desc_table: u64,
174 |         avail_ring: u64,
175 |         used_ring: u64,
176 |     ) -> Result<(), VirtQueError> {
177 |         self.queue
178 |             .try_set_desc_table_address(GuestAddress(desc_table))?;
179 |         self.queue
180 |             .try_set_avail_ring_address(GuestAddress(avail_ring))?;
181 |         self.queue
182 |             .try_set_used_ring_address(GuestAddress(used_ring))
183 |     }
184 | 
185 |     /// Get queue next avail head.
186 |     fn queue_next_avail(&self) -> u16 {
187 |         self.queue.next_avail()
188 |     }
189 | 
190 |     /// Set queue next avail head.
191 |     fn set_queue_next_avail(&mut self, base: u16) {
192 |         self.queue.set_next_avail(base);
193 |     }
194 | 
195 |     /// Set configured queue size.
196 |     fn set_queue_size(&mut self, num: u16) {
197 |         self.queue.set_size(num);
198 |     }
199 | 
200 |     /// Enable/disable queue event index feature.
201 |     fn set_queue_event_idx(&mut self, enabled: bool) {
202 |         self.queue.set_event_idx(enabled);
203 |     }
204 | 
205 |     /// Set queue enabled state.
206 |     fn set_queue_ready(&mut self, ready: bool) {
207 |         self.queue.set_ready(ready);
208 |     }
209 | 
210 |     /// Get the `EventFd` for kick.
211 |     pub fn get_kick(&self) -> &Option<EventFd> {
212 |         &self.kick
213 |     }
214 | 
215 |     /// Set `EventFd` for kick.
216 |     fn set_kick(&mut self, file: Option<File>) {
217 |         // SAFETY:
218 |         // EventFd requires that it has sole ownership of its fd. So does File, so this is safe.
219 |         // Ideally, we'd have a generic way to refer to a uniquely-owned fd, such as that proposed
220 |         // by Rust RFC #3128.
221 |         self.kick = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
222 |     }
223 | 
224 |     /// Read event from the kick `EventFd`.
225 |     fn read_kick(&self) -> io::Result<bool> {
226 |         if let Some(kick) = &self.kick {
227 |             kick.read()?;
228 |         }
229 | 
230 |         Ok(self.enabled)
231 |     }
232 | 
233 |     /// Set `EventFd` for call.
234 |     fn set_call(&mut self, file: Option<File>) {
235 |         // SAFETY: see comment in set_kick()
236 |         self.call = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
237 |     }
238 | 
239 |     /// Get the `EventFd` for call.
240 |     pub fn get_call(&self) -> &Option<EventFd> {
241 |         &self.call
242 |     }
243 | 
244 |     /// Set `EventFd` for err.
245 |     fn set_err(&mut self, file: Option<File>) {
246 |         // SAFETY: see comment in set_kick()
247 |         self.err = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
248 |     }
249 | }
250 | 
251 | /// A `VringState` object protected by Mutex for multi-threading context.
252 | #[derive(Clone)]
253 | pub struct VringMutex<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
254 |     state: Arc<Mutex<VringState<M>>>,
255 | }
256 | 
257 | impl<M: 'static + GuestAddressSpace> VringMutex<M> {
258 |     /// Get a mutable guard to the underlying raw `VringState` object.
259 |     fn lock(&self) -> MutexGuard<VringState<M>> {
260 |         self.state.lock().unwrap()
261 |     }
262 | }
263 | 
264 | impl<'a, M: 'a + GuestAddressSpace> VringStateGuard<'a, M> for VringMutex<M> {
265 |     type G = MutexGuard<'a, VringState<M>>;
266 | }
267 | 
268 | impl<'a, M: 'a + GuestAddressSpace> VringStateMutGuard<'a, M> for VringMutex<M> {
269 |     type G = MutexGuard<'a, VringState<M>>;
270 | }
271 | 
272 | impl<M: 'static + GuestAddressSpace> VringT<M> for VringMutex<M> {
273 |     fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError> {
274 |         Ok(VringMutex {
275 |             state: Arc::new(Mutex::new(VringState::new(mem, max_queue_size)?)),
276 |         })
277 |     }
278 | 
279 |     fn get_ref(&self) -> <Self as VringStateGuard<M>>::G {
280 |         self.state.lock().unwrap()
281 |     }
282 | 
283 |     fn get_mut(&self) -> <Self as VringStateMutGuard<M>>::G {
284 |         self.lock()
285 |     }
286 | 
287 |     fn add_used(&self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
288 |         self.lock().add_used(desc_index, len)
289 |     }
290 | 
291 |     fn signal_used_queue(&self) -> io::Result<()> {
292 |         self.get_ref().signal_used_queue()
293 |     }
294 | 
295 |     fn enable_notification(&self) -> Result<bool, VirtQueError> {
296 |         self.lock().enable_notification()
297 |     }
298 | 
299 |     fn disable_notification(&self) -> Result<(), VirtQueError> {
300 |         self.lock().disable_notification()
301 |     }
302 | 
303 |     fn needs_notification(&self) -> Result<bool, VirtQueError> {
304 |         self.lock().needs_notification()
305 |     }
306 | 
307 |     fn set_enabled(&self, enabled: bool) {
308 |         self.lock().set_enabled(enabled)
309 |     }
310 | 
311 |     fn set_queue_info(
312 |         &self,
313 |         desc_table: u64,
314 |         avail_ring: u64,
315 |         used_ring: u64,
316 |     ) -> Result<(), VirtQueError> {
317 |         self.lock()
318 |             .set_queue_info(desc_table, avail_ring, used_ring)
319 |     }
320 | 
321 |     fn queue_next_avail(&self) -> u16 {
322 |         self.get_ref().queue_next_avail()
323 |     }
324 | 
325 |     fn set_queue_next_avail(&self, base: u16) {
326 |         self.lock().set_queue_next_avail(base)
327 |     }
328 | 
329 |     fn set_queue_size(&self, num: u16) {
330 |         self.lock().set_queue_size(num);
331 |     }
332 | 
333 |     fn set_queue_event_idx(&self, enabled: bool) {
334 |         self.lock().set_queue_event_idx(enabled);
335 |     }
336 | 
337 |     fn set_queue_ready(&self, ready: bool) {
338 |         self.lock().set_queue_ready(ready);
339 |     }
340 | 
341 |     fn set_kick(&self, file: Option<File>) {
342 |         self.lock().set_kick(file);
343 |     }
344 | 
345 |     fn read_kick(&self) -> io::Result<bool> {
346 |         self.get_ref().read_kick()
347 |     }
348 | 
349 |     fn set_call(&self, file: Option<File>) {
350 |         self.lock().set_call(file)
351 |     }
352 | 
353 |     fn set_err(&self, file: Option<File>) {
354 |         self.lock().set_err(file)
355 |     }
356 | }
357 | 
358 | /// A `VringState` object protected by RwLock for multi-threading context.
359 | #[derive(Clone)]
360 | pub struct VringRwLock<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
361 |     state: Arc<RwLock<VringState<M>>>,
362 | }
363 | 
364 | impl<M: 'static + GuestAddressSpace> VringRwLock<M> {
365 |     /// Get a mutable guard to the underlying raw `VringState` object.
366 |     fn write_lock(&self) -> RwLockWriteGuard<VringState<M>> {
367 |         self.state.write().unwrap()
368 |     }
369 | }
370 | 
371 | impl<'a, M: 'a + GuestAddressSpace> VringStateGuard<'a, M> for VringRwLock<M> {
372 |     type G = RwLockReadGuard<'a, VringState<M>>;
373 | }
374 | 
375 | impl<'a, M: 'a + GuestAddressSpace> VringStateMutGuard<'a, M> for VringRwLock<M> {
376 |     type G = RwLockWriteGuard<'a, VringState<M>>;
377 | }
378 | 
379 | impl<M: 'static + GuestAddressSpace> VringT<M> for VringRwLock<M> {
380 |     fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError> {
381 |         Ok(VringRwLock {
382 |             state: Arc::new(RwLock::new(VringState::new(mem, max_queue_size)?)),
383 |         })
384 |     }
385 | 
386 |     fn get_ref(&self) -> <Self as VringStateGuard<M>>::G {
387 |         self.state.read().unwrap()
388 |     }
389 | 
390 |     fn get_mut(&self) -> <Self as VringStateMutGuard<M>>::G {
391 |         self.write_lock()
392 |     }
393 | 
394 |     fn add_used(&self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
395 |         self.write_lock().add_used(desc_index, len)
396 |     }
397 | 
398 |     fn signal_used_queue(&self) -> io::Result<()> {
399 |         self.get_ref().signal_used_queue()
400 |     }
401 | 
402 |     fn enable_notification(&self) -> Result<bool, VirtQueError> {
403 |         self.write_lock().enable_notification()
404 |     }
405 | 
406 |     fn disable_notification(&self) -> Result<(), VirtQueError> {
407 |         self.write_lock().disable_notification()
408 |     }
409 | 
410 |     fn needs_notification(&self) -> Result<bool, VirtQueError> {
411 |         self.write_lock().needs_notification()
412 |     }
413 | 
414 |     fn set_enabled(&self, enabled: bool) {
415 |         self.write_lock().set_enabled(enabled)
416 |     }
417 | 
418 |     fn set_queue_info(
419 |         &self,
420 |         desc_table: u64,
421 |         avail_ring: u64,
422 |         used_ring: u64,
423 |     ) -> Result<(), VirtQueError> {
424 |         self.write_lock()
425 |             .set_queue_info(desc_table, avail_ring, used_ring)
426 |     }
427 | 
428 |     fn queue_next_avail(&self) -> u16 {
429 |         self.get_ref().queue_next_avail()
430 |     }
431 | 
432 |     fn set_queue_next_avail(&self, base: u16) {
433 |         self.write_lock().set_queue_next_avail(base)
434 |     }
435 | 
436 |     fn set_queue_size(&self, num: u16) {
437 |         self.write_lock().set_queue_size(num);
438 |     }
439 | 
440 |     fn set_queue_event_idx(&self, enabled: bool) {
441 |         self.write_lock().set_queue_event_idx(enabled);
442 |     }
443 | 
444 |     fn set_queue_ready(&self, ready: bool) {
445 |         self.write_lock().set_queue_ready(ready);
446 |     }
447 | 
448 |     fn set_kick(&self, file: Option<File>) {
449 |         self.write_lock().set_kick(file);
450 |     }
451 | 
452 |     fn read_kick(&self) -> io::Result<bool> {
453 |         self.get_ref().read_kick()
454 |     }
455 | 
456 |     fn set_call(&self, file: Option<File>) {
457 |         self.write_lock().set_call(file)
458 |     }
459 | 
460 |     fn set_err(&self, file: Option<File>) {
461 |         self.write_lock().set_err(file)
462 |     }
463 | }
464 | 
465 | #[cfg(test)]
466 | mod tests {
467 |     use super::*;
468 |     use std::os::unix::io::AsRawFd;
469 |     use vm_memory::bitmap::AtomicBitmap;
470 |     use vmm_sys_util::eventfd::EventFd;
471 | 
472 |     #[test]
473 |     fn test_new_vring() {
474 |         let mem = GuestMemoryAtomic::new(
475 |             GuestMemoryMmap::<AtomicBitmap>::from_ranges(&[(GuestAddress(0x100000), 0x10000)])
476 |                 .unwrap(),
477 |         );
478 |         let vring = VringMutex::new(mem, 0x1000).unwrap();
479 | 
480 |         assert!(vring.get_ref().get_kick().is_none());
481 |         assert!(!vring.get_mut().enabled);
482 |         assert!(!vring.lock().queue.ready());
483 |         assert!(!vring.lock().queue.event_idx_enabled());
484 | 
485 |         vring.set_enabled(true);
486 |         assert!(vring.get_ref().enabled);
487 | 
488 |         vring.set_queue_info(0x100100, 0x100200, 0x100300).unwrap();
489 |         assert_eq!(vring.lock().get_queue().desc_table(), 0x100100);
490 |         assert_eq!(vring.lock().get_queue().avail_ring(), 0x100200);
491 |         assert_eq!(vring.lock().get_queue().used_ring(), 0x100300);
492 | 
493 |         assert_eq!(vring.queue_next_avail(), 0);
494 |         vring.set_queue_next_avail(0x20);
495 |         assert_eq!(vring.queue_next_avail(), 0x20);
496 | 
497 |         vring.set_queue_size(0x200);
498 |         assert_eq!(vring.lock().queue.size(), 0x200);
499 | 
500 |         vring.set_queue_event_idx(true);
501 |         assert!(vring.lock().queue.event_idx_enabled());
502 | 
503 |         vring.set_queue_ready(true);
504 |         assert!(vring.lock().queue.ready());
505 |     }
506 | 
507 |     #[test]
508 |     fn test_vring_set_fd() {
509 |         let mem = GuestMemoryAtomic::new(
510 |             GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
511 |         );
512 |         let vring = VringMutex::new(mem, 0x1000).unwrap();
513 | 
514 |         vring.set_enabled(true);
515 |         assert!(vring.get_ref().enabled);
516 | 
517 |         let eventfd = EventFd::new(0).unwrap();
518 |         let file = unsafe { File::from_raw_fd(eventfd.as_raw_fd()) };
519 |         assert!(vring.get_mut().kick.is_none());
520 |         assert!(vring.read_kick().unwrap());
521 |         vring.set_kick(Some(file));
522 |         eventfd.write(1).unwrap();
523 |         assert!(vring.read_kick().unwrap());
524 |         assert!(vring.get_ref().kick.is_some());
525 |         vring.set_kick(None);
526 |         assert!(vring.get_ref().kick.is_none());
527 |         std::mem::forget(eventfd); // the raw fd was owned (and closed) by the vring; avoid a double close
528 | 
529 |         let eventfd = EventFd::new(0).unwrap();
530 |         let file = unsafe { File::from_raw_fd(eventfd.as_raw_fd()) };
531 |         assert!(vring.get_ref().call.is_none());
532 |         vring.set_call(Some(file));
533 |         assert!(vring.get_ref().call.is_some());
534 |         vring.set_call(None);
535 |         assert!(vring.get_ref().call.is_none());
536 |         std::mem::forget(eventfd);
537 | 
538 |         let eventfd = EventFd::new(0).unwrap();
539 |         let file = unsafe { File::from_raw_fd(eventfd.as_raw_fd()) };
540 |         assert!(vring.get_ref().err.is_none());
541 |         vring.set_err(Some(file));
542 |         assert!(vring.get_ref().err.is_some());
543 |         vring.set_err(None);
544 |         assert!(vring.get_ref().err.is_none());
545 |         std::mem::forget(eventfd);
546 |     }
547 | }
548 | 
--------------------------------------------------------------------------------
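Both `VringMutex` and `VringRwLock` implement the same `VringT` interface, so backend code can stay generic over the locking strategy; the practical difference is that `VringRwLock::get_ref()` takes a read lock, letting several worker threads inspect a vring concurrently, while `VringMutex` serializes every access. A sketch of the typical completion path a backend's `handle_event` would run against either wrapper (error plumbing simplified to `io::Error`):

```rust
use std::io;

use vhost_user_backend::VringT;
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};

type Mem = GuestMemoryAtomic<GuestMemoryMmap>;

/// Publish one used descriptor, then interrupt the guest only when the
/// queue state says a notification is actually wanted (event-idx aware).
fn complete_request<V: VringT<Mem>>(vring: &V, head: u16, len: u32) -> io::Result<()> {
    vring
        .add_used(head, len)
        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
    if vring
        .needs_notification()
        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?
    {
        vring.signal_used_queue()?;
    }
    Ok(())
}
```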
/tests/vhost-user-server.rs:
--------------------------------------------------------------------------------
1 | use std::ffi::CString;
2 | use std::fs::File;
3 | use std::io::Result;
4 | use std::os::unix::io::{AsRawFd, FromRawFd};
5 | use std::os::unix::net::UnixStream;
6 | use std::path::Path;
7 | use std::sync::{Arc, Barrier, Mutex};
8 | use std::thread;
9 | 
10 | use vhost::vhost_user::message::{
11 |     VhostUserConfigFlags, VhostUserHeaderFlag, VhostUserInflight, VhostUserProtocolFeatures,
12 | };
13 | use vhost::vhost_user::{Listener, Master, SlaveFsCacheReq, VhostUserMaster};
14 | use vhost::{VhostBackend, VhostUserMemoryRegionInfo, VringConfigData};
15 | use vhost_user_backend::{VhostUserBackendMut, VhostUserDaemon, VringRwLock};
16 | use vm_memory::{
17 |     FileOffset, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
18 | };
19 | use vmm_sys_util::epoll::EventSet;
20 | use vmm_sys_util::eventfd::EventFd;
21 | 
22 | struct MockVhostBackend {
23 |     events: u64,
24 |     event_idx: bool,
25 |     acked_features: u64,
26 | }
27 | 
28 | impl MockVhostBackend {
29 |     fn new() -> Self {
30 |         MockVhostBackend {
31 |             events: 0,
32 |             event_idx: false,
33 |             acked_features: 0,
34 |         }
35 |     }
36 | }
37 | 
38 | impl VhostUserBackendMut<VringRwLock, ()> for MockVhostBackend {
39 |     fn num_queues(&self) -> usize {
40 |         2
41 |     }
42 | 
43 |     fn max_queue_size(&self) -> usize {
44 |         256
45 |     }
46 | 
47 |     fn features(&self) -> u64 {
48 |         0xffff_ffff_ffff_ffff
49 |     }
50 | 
51 |     fn acked_features(&mut self, features: u64) {
52 |         self.acked_features = features;
53 |     }
54 | 
55 |     fn protocol_features(&self) -> VhostUserProtocolFeatures {
56 |         VhostUserProtocolFeatures::all()
57 |     }
58 | 
59 |     fn set_event_idx(&mut self, enabled: bool) {
60 |         self.event_idx = enabled;
61 |     }
62 | 
63 |     fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
64 |         assert_eq!(offset, 0x200);
65 |         assert_eq!(size, 8);
66 | 
67 |         vec![0xa5u8; 8]
68 |     }
69 | 
70 |     fn set_config(&mut self, offset: u32, buf: &[u8]) -> Result<()> {
71 |         assert_eq!(offset, 0x200);
72 |         assert_eq!(buf, &[0xa5u8; 8]);
73 | 
74 |         Ok(())
75 |     }
76 | 
77 |     fn update_memory(&mut self, atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>) -> Result<()> {
78 |         let mem = atomic_mem.memory();
79 |         let region = mem.find_region(GuestAddress(0x100000)).unwrap();
80 |         assert_eq!(region.size(), 0x100000);
81 |         Ok(())
82 |     }
83 | 
84 |     fn set_slave_req_fd(&mut self, _vu_req: SlaveFsCacheReq) {}
85 | 
86 |     fn queues_per_thread(&self) -> Vec<u64> {
87 |         vec![1, 1]
88 |     }
89 | 
90 |     fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
91 |         let event_fd = EventFd::new(0).unwrap();
92 | 
93 |         Some(event_fd)
94 |     }
95 | 
96 |     fn handle_event(
97 |         &mut self,
98 |         _device_event: u16,
99 |         _evset: EventSet,
100 |         _vrings: &[VringRwLock],
101 |         _thread_id: usize,
102 |     ) -> Result<bool> {
103 |         self.events += 1;
104 | 
105 |         Ok(false)
106 |     }
107 | }
108 | 
109 | fn setup_master(path: &Path, barrier: Arc<Barrier>) -> Master {
110 |     barrier.wait();
111 |     let mut master = Master::connect(path, 1).unwrap();
112 |     master.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
113 |     // Wait before issuing service requests.
114 |     barrier.wait();
115 | 
116 |     let features = master.get_features().unwrap();
117 |     let proto = master.get_protocol_features().unwrap();
118 |     master.set_features(features).unwrap();
119 |     master.set_protocol_features(proto).unwrap();
120 |     assert!(proto.contains(VhostUserProtocolFeatures::REPLY_ACK));
121 | 
122 |     master
123 | }
124 | 
125 | fn vhost_user_client(path: &Path, barrier: Arc<Barrier>) {
126 |     barrier.wait();
127 |     let mut master = Master::connect(path, 1).unwrap();
128 |     master.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
129 |     // Wait before issuing service requests.
130 |     barrier.wait();
131 | 
132 |     let features = master.get_features().unwrap();
133 |     let proto = master.get_protocol_features().unwrap();
134 |     master.set_features(features).unwrap();
135 |     master.set_protocol_features(proto).unwrap();
136 |     assert!(proto.contains(VhostUserProtocolFeatures::REPLY_ACK));
137 | 
138 |     let queue_num = master.get_queue_num().unwrap();
139 |     assert_eq!(queue_num, 2);
140 | 
141 |     master.set_owner().unwrap();
142 |     //master.set_owner().unwrap_err();
143 |     master.reset_owner().unwrap();
144 |     master.reset_owner().unwrap();
145 |     master.set_owner().unwrap();
146 | 
147 |     master.set_features(features).unwrap();
148 |     master.set_protocol_features(proto).unwrap();
149 |     assert!(proto.contains(VhostUserProtocolFeatures::REPLY_ACK));
150 | 
151 |     let memfd = nix::sys::memfd::memfd_create(
152 |         &CString::new("test").unwrap(),
153 |         nix::sys::memfd::MemFdCreateFlag::empty(),
154 |     )
155 |     .unwrap();
156 |     let file = unsafe { File::from_raw_fd(memfd) };
157 |     file.set_len(0x100000).unwrap();
158 |     let file_offset = FileOffset::new(file, 0);
159 |     let mem = GuestMemoryMmap::<()>::from_ranges_with_files(&[(
160 |         GuestAddress(0x100000),
161 |         0x100000,
162 |         Some(file_offset),
163 |     )])
164 |     .unwrap();
165 |     let addr = mem.get_host_address(GuestAddress(0x100000)).unwrap() as u64;
166 |     let reg = mem.find_region(GuestAddress(0x100000)).unwrap();
167 |     let fd = reg.file_offset().unwrap();
168 |     let regions = [VhostUserMemoryRegionInfo {
169 |         guest_phys_addr: 0x100000,
170 |         memory_size: 0x100000,
171 |         userspace_addr: addr,
172 |         mmap_offset: 0,
173 |         mmap_handle: fd.file().as_raw_fd(),
174 |     }];
175 |     master.set_mem_table(&regions).unwrap();
176 | 
177 |     master.set_vring_num(0, 256).unwrap();
178 | 
179 |     let config = VringConfigData {
180 |         queue_max_size: 256,
181 |         queue_size: 256,
182 |         flags: 0,
183 |         desc_table_addr: addr,
184 |         used_ring_addr: addr + 0x10000,
185 |         avail_ring_addr: addr + 0x20000,
186 |         log_addr: None,
187 |     };
188 |     master.set_vring_addr(0, &config).unwrap();
189 | 
190 |     let eventfd = EventFd::new(0).unwrap();
191 |     master.set_vring_kick(0, &eventfd).unwrap();
192 |     master.set_vring_call(0, &eventfd).unwrap();
193 |     master.set_vring_err(0, &eventfd).unwrap();
194 |     master.set_vring_enable(0, true).unwrap();
195 | 
196 |     let buf = [0u8; 8];
197 |     let (_cfg, data) = master
198 |         .get_config(0x200, 8, VhostUserConfigFlags::empty(), &buf)
199 |         .unwrap();
200 |     assert_eq!(&data, &[0xa5u8; 8]);
201 |     master
202 |         .set_config(0x200, VhostUserConfigFlags::empty(), &data)
203 |         .unwrap();
204 | 
205 |     let (tx, _rx) = UnixStream::pair().unwrap();
206 |     master.set_slave_request_fd(&tx).unwrap();
207 | 
208 |     let state = master.get_vring_base(0).unwrap();
209 |     master.set_vring_base(0, state as u16).unwrap();
210 | 
211 |     assert_eq!(master.get_max_mem_slots().unwrap(), 32);
212 |     let region = VhostUserMemoryRegionInfo {
213 |         guest_phys_addr: 0x800000,
214 |         memory_size: 0x100000,
215 |         userspace_addr: addr,
216 |         mmap_offset: 0,
217 |         mmap_handle: fd.file().as_raw_fd(),
218 |     };
219 |     master.add_mem_region(&region).unwrap();
220 |     master.remove_mem_region(&region).unwrap();
221 | }
222 | 
223 | fn vhost_user_server(cb: fn(&Path, Arc<Barrier>)) {
224 |     let mem = GuestMemoryAtomic::new(GuestMemoryMmap::<()>::new());
225 |     let backend = Arc::new(Mutex::new(MockVhostBackend::new()));
226 |     let mut daemon = VhostUserDaemon::new("test".to_owned(), backend, mem).unwrap();
227 | 
228 |     let barrier = Arc::new(Barrier::new(2));
229 |     let tmpdir = tempfile::tempdir().unwrap();
230 |     let mut path = tmpdir.path().to_path_buf();
231 |     path.push("socket");
232 | 
233 |     let barrier2 = barrier.clone();
234 |     let path1 = path.clone();
235 |     let thread = thread::spawn(move || cb(&path1, barrier2));
236 | 
237 |     let listener = Listener::new(&path, false).unwrap();
238 |     barrier.wait();
239 |     daemon.start(listener).unwrap();
240 |     barrier.wait();
241 | 
242 |     // Handle service requests from the client.
243 |     thread.join().unwrap();
244 | }
245 | 
246 | #[test]
247 | fn test_vhost_user_server() {
248 |     vhost_user_server(vhost_user_client);
249 | }
250 | 
251 | fn vhost_user_enable(path: &Path, barrier: Arc<Barrier>) {
252 |     let master = setup_master(path, barrier);
253 |     master.set_owner().unwrap();
254 |     master.set_owner().unwrap_err();
255 | }
256 | 
257 | #[test]
258 | fn test_vhost_user_enable() {
259 |     vhost_user_server(vhost_user_enable);
260 | }
261 | 
262 | fn vhost_user_set_inflight(path: &Path, barrier: Arc<Barrier>) {
263 |     let mut master = setup_master(path, barrier);
264 |     let eventfd = EventFd::new(0).unwrap();
265 |     // No implementation for inflight_fd yet.
266 |     let inflight = VhostUserInflight {
267 |         mmap_size: 0x100000,
268 |         mmap_offset: 0,
269 |         num_queues: 1,
270 |         queue_size: 256,
271 |     };
272 |     master
273 |         .set_inflight_fd(&inflight, eventfd.as_raw_fd())
274 |         .unwrap_err();
275 | }
276 | 
277 | #[test]
278 | fn test_vhost_user_set_inflight() {
279 |     vhost_user_server(vhost_user_set_inflight);
280 | }
281 | 
282 | fn vhost_user_get_inflight(path: &Path, barrier: Arc<Barrier>) {
283 |     let mut master = setup_master(path, barrier);
284 |     // No implementation for inflight_fd yet.
285 |     let inflight = VhostUserInflight {
286 |         mmap_size: 0x100000,
287 |         mmap_offset: 0,
288 |         num_queues: 1,
289 |         queue_size: 256,
290 |     };
291 |     assert!(master.get_inflight_fd(&inflight).is_err());
292 | }
293 | 
294 | #[test]
295 | fn test_vhost_user_get_inflight() {
296 |     vhost_user_server(vhost_user_get_inflight);
297 | }
298 | 
--------------------------------------------------------------------------------
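One detail worth spelling out from `handler.rs`: `queues_per_thread()` (seen returning `vec![1, 1]` in the mock above) yields one bit mask per worker thread, with bit `q` set when that thread serves queue `q`, and the epoll token a queue's kick fd is registered under is its rank among that thread's own queues. A standalone worked version of the mask arithmetic used in `get_vring_base()`; the `find_worker` function is purely illustrative:

```rust
/// Find which worker thread serves `queue_index`, and the epoll event token
/// its kick fd is registered under on that thread; this mirrors the mask
/// arithmetic in VhostUserHandler::get_vring_base().
fn find_worker(queues_per_thread: &[u64], queue_index: u32) -> Option<(usize, u64)> {
    for (thread_index, mask) in queues_per_thread.iter().enumerate() {
        let shifted = mask >> queue_index;
        if shifted & 1 == 1 {
            // Set bits below `queue_index` = queues this thread handles first.
            let evt_idx = mask.count_ones() - shifted.count_ones();
            return Some((thread_index, u64::from(evt_idx)));
        }
    }
    None
}

fn main() {
    // Thread 0 owns queue 0, thread 1 owns queue 1.
    assert_eq!(find_worker(&[0b01, 0b10], 0), Some((0, 0)));
    assert_eq!(find_worker(&[0b01, 0b10], 1), Some((1, 0)));
    // A single thread owning both queues gives queue 1 the token 1.
    assert_eq!(find_worker(&[0b11], 1), Some((0, 1)));
}
```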