├── .gitignore ├── rustfmt.toml ├── images ├── after_free.png ├── first_node.png └── interval_tree_allocation.png ├── .gitmodules ├── coverage_config_x86_64.json ├── CODEOWNERS ├── src ├── allocation_engine │ ├── mod.rs │ ├── DESIGN.md │ └── interval_tree.rs ├── id_allocator.rs ├── address_allocator.rs └── lib.rs ├── .github └── dependabot.yml ├── Cargo.toml ├── CHANGELOG.md ├── LICENSE-BSD-3-Clause ├── README.md └── LICENSE-APACHE /.gitignore: -------------------------------------------------------------------------------- 1 | Cargo.lock 2 | target/ 3 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | format_code_in_doc_comments=true -------------------------------------------------------------------------------- /images/after_free.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rust-vmm/vm-allocator/HEAD/images/after_free.png -------------------------------------------------------------------------------- /images/first_node.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rust-vmm/vm-allocator/HEAD/images/first_node.png -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "rust-vmm-ci"] 2 | path = rust-vmm-ci 3 | url = https://github.com/rust-vmm/rust-vmm-ci.git 4 | -------------------------------------------------------------------------------- /coverage_config_x86_64.json: -------------------------------------------------------------------------------- 1 | { 2 | "coverage_score": 95.38, 3 | "exclude_path": "", 4 | "crate_features": "" 5 | } 6 | -------------------------------------------------------------------------------- /images/interval_tree_allocation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rust-vmm/vm-allocator/HEAD/images/interval_tree_allocation.png -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | @AlexandruCihodaru 2 | @alsrdn 3 | @jiangliu 4 | @andreeaflorescu 5 | @gsserge 6 | @studychao 7 | @roypat 8 | @ShadowCurse 9 | -------------------------------------------------------------------------------- /src/allocation_engine/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2022 Alibaba Cloud. All rights reserved. 2 | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause 4 | 5 | mod interval_tree; 6 | 7 | pub(crate) use interval_tree::{IntervalTree, NodeState}; 8 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: gitsubmodule 4 | directory: "/" 5 | schedule: 6 | interval: weekly 7 | open-pull-requests-limit: 10 8 | - package-ecosystem: cargo 9 | directory: "/" 10 | schedule: 11 | interval: weekly 12 | open-pull-requests-limit: 10 13 | rebase-strategy: "disabled" 14 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "vm-allocator" 3 | version = "0.1.3" 4 | description = "Helpers for allocating resources needed during the lifetime of a VM." 5 | repository = "https://github.com/rust-vmm/vm-allocator" 6 | authors = ["rust-vmm AWS maintainers "] 7 | readme = "README.md" 8 | keywords = ["resources", "allocation", "address", "virt"] 9 | license = "Apache-2.0 OR BSD-3-Clause" 10 | edition = "2018" 11 | 12 | [features] 13 | default = ["std"] 14 | std = [] 15 | 16 | [dependencies] 17 | libc = "0.2.39" 18 | thiserror = {version= "2.0", default-features = false} 19 | serde = { version = "1.0.137", optional = true, features = ["derive"] } 20 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## Upcoming version 4 | 5 | ### Added 6 | 7 | - Support for no_std environments. 8 | 9 | ### Changed 10 | ### Fixed 11 | ### Removed 12 | ### Deprecated 13 | 14 | ## [v0.1.3] 15 | 16 | ### Added 17 | 18 | - [[#102]](https://github.com/rust-vmm/vm-allocator/pull/102): Derived 19 | Clone for IdAllocator type. 20 | 21 | ## [v0.1.2] 22 | 23 | ### Added 24 | 25 | - [[#40]](https://github.com/rust-vmm/vm-allocator/pull/40): Added serde 26 | support for IdAllocator and AddressAllocator. 27 | - [[#99]](https://github.com/rust-vmm/vm-allocator/pull/99): Added APIs to 28 | get the base address and size of an AddressAllocator. 29 | 30 | ## [v0.1.1] 31 | 32 | ### Fixed 33 | 34 | - [[#44]](https://github.com/rust-vmm/vm-allocator/pull/44): Fixed issue that 35 | did not allow the creating of inclusive ranges of size 1. 36 | 37 | ## [v0.1.0] 38 | 39 | ### Added 40 | 41 | - Added types for (de)allocation of resources needed by the VMM during the VM 42 | lifetime. 43 | -------------------------------------------------------------------------------- /LICENSE-BSD-3-Clause: -------------------------------------------------------------------------------- 1 | Copyright 2022 The rust-vmm authors. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this 7 | list of conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | 3. 
Neither the name of the copyright holder nor the names of its contributors 14 | may be used to endorse or promote products derived from this software without 15 | specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 21 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 23 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 24 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 25 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # vm-allocator 2 | 3 | [![crates.io](https://img.shields.io/crates/v/vm-allocator)](https://crates.io/crates/vm-allocator) 4 | [![docs.rs](https://img.shields.io/docsrs/vm-allocator)](https://docs.rs/vm-allocator/) 5 | 6 | `vm-allocator` is a crate designed to provide allocation and release strategies 7 | that are needed by the VMM during the lifetime of a virtual machine. Possible 8 | resource types that a VMM could allocate using vm-allocator are MMIO addresses, 9 | PIO addresses, GSI numbers, device IDs. 10 | 11 | This crate exports 2 allocators: one for resources that can be represented as 12 | integers, and one for addresses. The reason behind having two separate 13 | allocators is the need to add semantic meaning to the address allocator, by 14 | specifying configuration parameters such as the alignment that do not make 15 | sense in the context of IDs. 16 | 17 | The main components of the crate are: 18 | - [`IdAllocator`](https://docs.rs/vm-allocator/latest/vm_allocator/struct.IdAllocator.html) 19 | which should be used for all resources that can be reduced to an integer type. 20 | - [`AddressAllocator`](https://docs.rs/vm-allocator/latest/vm_allocator/struct.AddressAllocator.html) 21 | which should be used to allocate address ranges in different address spaces. 22 | This component is a wrapper over 23 | [`IntervalTree`](src/allocation_engine/interval_tree.rs) that adds semantics 24 | to address ranges. More details about the inner presentation of the address 25 | allocator can be found in the 26 | [Design Document](src/allocation_engine/DESIGN.md). 27 | 28 | ## ID Allocator 29 | 30 | ### Design 31 | 32 | This allocator should be used to allocate resources that can be reduced to an 33 | integer type like legacy GSI numbers or KVM memory slot IDs. The 34 | characteristics of such a resource are represented by the 35 | [`IdAllocator`](https://docs.rs/vm-allocator/latest/vm_allocator/struct.IdAllocator.html) 36 | struct. 37 | 38 | The struct that defines the 39 | [`IdAllocator`](https://docs.rs/vm-allocator/latest/vm_allocator/struct.IdAllocator.html) 40 | contains the end of the interval that is managed, a field that points at the 41 | next available ID and a 42 | [`BTreeSet`](https://doc.rust-lang.org/std/collections/struct.BTreeSet.html) 43 | that is used to store the released IDs. 
The reason for using a 44 | [`BTreeSet`](https://doc.rust-lang.org/std/collections/struct.BTreeSet.html) is 45 | that the average complexity for deletion and insertion is `O(log N)`, offering 46 | better performance than a `Vec`, for example. The entries are sorted, 47 | so we will always use the first available ID. 48 | 49 | #### Allocation policy 50 | 51 | When allocating a new ID we always try to return the smallest one available. To 52 | do that we first search in the 53 | [`BTreeSet`](https://doc.rust-lang.org/std/collections/struct.BTreeSet.html) for 54 | any ID that was released, and if we cannot find anything there we return the next 55 | ID from the range that was never allocated. 56 | 57 | The 58 | [`IdAllocator`](https://docs.rs/vm-allocator/latest/vm_allocator/struct.IdAllocator.html) 59 | struct implements methods for allocating and releasing IDs. 60 | 61 | ### Usage 62 | 63 | Add vm-allocator as a dependency in Cargo.toml: 64 | 65 | ```toml 66 | [dependencies] 67 | vm-allocator = "*" 68 | ``` 69 | 70 | Then add `extern crate vm_allocator;` to the project's crate root (this is only 71 | needed on Rust editions older than 2018). The VMM using this crate should instantiate an 72 | [`IdAllocator`](https://docs.rs/vm-allocator/latest/vm_allocator/struct.IdAllocator.html) 73 | object for each resource type it wants to manage, as sketched below.
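A minimal usage sketch (the ID range below stands in for legacy IRQ/GSI numbers and is only illustrative):

```rust
use vm_allocator::{IdAllocator, Result};

fn main() -> Result<()> {
    // Manage IDs in the inclusive range [5, 23].
    let mut gsi_allocator = IdAllocator::new(5, 23)?;

    // IDs are handed out lowest-first.
    let gsi = gsi_allocator.allocate_id()?;
    assert_eq!(gsi, 5);

    // Released IDs return to the pool and are reused before never-allocated ones.
    gsi_allocator.free_id(gsi)?;
    assert_eq!(gsi_allocator.allocate_id()?, 5);
    Ok(())
}
```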
74 | 75 | ## License 76 | 77 | This project is licensed under either of 78 | 79 | - [Apache License](http://www.apache.org/licenses/LICENSE-2.0), Version 2.0 80 | - [BSD-3-Clause License](https://opensource.org/licenses/BSD-3-Clause) 81 | -------------------------------------------------------------------------------- /src/allocation_engine/DESIGN.md: -------------------------------------------------------------------------------- 1 | # Design 2 | 3 | ## Allocation engine 4 | 5 | This implementation uses an interval tree that is specialized for allocation of 6 | memory-mapped I/O and port I/O address space. The fields of the structures 7 | defined will have semantic meaning for this context (e.g. node state to indicate 8 | whether a node in the tree is assigned to a device or not). 9 | 10 | We offer three options for placing a memory slot in the managed address space: 11 | 12 | 1. `LastMatch` -> When using this allocation policy the allocator will try to 13 | insert the range described by the constraint at the first available position 14 | starting from the end of the managed address space. 15 | 2. `FirstMatch` -> When using this allocation policy the allocator will try to 16 | insert the range described by the constraint at the first available position 17 | starting from the beginning of the managed address space. 18 | 3. `ExactMatch(u64)` -> When using this allocation policy the allocator will 19 | try to insert the range at the exact position described by the constraint, 20 | otherwise it will return an error. 21 | 22 | Struct `Constraint` is used to describe the overall information of the resource 23 | that needs to be allocated. This structure is also used by `IntervalTree` to know 24 | where and how to allocate the resource. The fields that are mandatory for 25 | allocating a new memory slot are the size of the slot, the alignment of the slot, and 26 | the allocation policy. Optionally the user can specify a range where the allocator 27 | will place the allocated memory slot. 28 | 29 | ## Interval tree 30 | 31 | An interval tree is a tree data structure used for storing information about 32 | intervals. Specifically, it allows one to efficiently identify intervals that 33 | are overlapping with a given point, or another interval. We considered that 34 | this characteristic makes this data structure appropriate to be used as an 35 | allocation engine for memory slots inside an address space. The time complexity 36 | of an interval tree is `O(log n + m)` for queries, `O(log n)` for creation, 37 | and `O(log n)` for insertion and deletion of nodes. The key of each node of the 38 | tree is represented using an inclusive range that contains the bounds of the 39 | address space. Each node in the tree can have two states, either `Free` or 40 | `Allocated`. Besides the information presented above, the representation of a 41 | node also contains references to the two children of the current node. 42 | 43 | ## Usage 44 | 45 | To use the `IntervalTree` implementation as an address allocator one should 46 | first create an interval tree object and give an address space as a root node. 47 | Afterwards, the user should create a constraint with the size of the resource. 48 | Optionally the constraint could also contain the address alignment and the 49 | first address to be reserved. 50 | 51 | ## State transition 52 | 53 | At the beginning, the interval tree will contain just one node that will 54 | represent the whole address space; the state of this node will be `Free`. 55 | 56 | ![IntervalTree creation example](/images/first_node.png) 57 | 58 | When we allocate a memory slot, one of the nodes that are in the `Free` state 59 | will be split accordingly. A new node that has as the key a range representing 60 | the allocated memory slot will be inserted in the tree. 61 | 62 | ![Node Allocation example](/images/interval_tree_allocation.png) 63 | 64 | When one of the allocated nodes is freed, its state will be changed from 65 | `NodeState::Allocated` to `NodeState::Free`. If there are two adjacent nodes 66 | that are not allocated, they will be merged into a single node. 67 | 68 | ![Node Freeing example](/images/after_free.png) 69 | 70 | ## Address aligning 71 | 72 | Memory slots allocated using the address allocator can have their start aligned to a 73 | specified value. We offer the possibility to align the starting address either 74 | to the next boundary or the previous one. 75 | 76 | If the allocation policy used is `AllocPolicy::FirstMatch`, all memory slots 77 | will start at an address aligned to the first multiple of the specified alignment 78 | value that is greater than or equal to the candidate address. 79 | 80 | Example: 81 | 82 | ```text 83 | initial_address = 0b0..01000000000000000000000000000001 84 | alignment = 0b0..00000000000000000001000000000000 85 | initial_addr + alignment - 1 = 0b0..01000000000000000001000000000000 86 | !(alignment - 1) = 0b1..11111111111111111111000000000000 87 | ``` 88 | 89 | The aligned address will be the result of bitwise AND between 90 | `initial address + alignment - 1` and bitwise NOT of `alignment - 1`. 91 | 92 | ```text 93 | 0b0..01000000000000000001000000000000(0x0000000040001000)& 94 | 0b1..11111111111111111111000000000000(0xFFFFFFFFFFFFF000) 95 | ------------------------------------------------------------------- 96 | 0b0..01000000000000000001000000000000(0x0000000040001000) 97 | ``` 98 | 99 | If the allocation policy used is `AllocPolicy::LastMatch`, all memory slots will 100 | start at an address aligned to the first multiple of the specified alignment value 101 | that is lower than or equal to the candidate address.
102 | 103 | Example: 104 | 105 | ```text 106 | initial_address = 0b0..01000000000000000000000000000001 107 | alignment = 0b0..00000000000000000001000000000000 108 | !(alignment - 1) = 0b1..11111111111111111111000000000000 109 | ``` 110 | 111 | The aligned address will be the result of bitwise AND between `initial address` and 112 | bitwise NOT of `alignment - 1`. 113 | 114 | ```text 115 | 0b0..01000000000000000000000000000001(0x0000000040000001)& 116 | 0b1..11111111111111111111000000000000(0xFFFFFFFFFFFFF000) 117 | ------------------------------------------------------------------- 118 | 0b0..01000000000000000000000000000000(0x0000000040000000) 119 | ``` 120 | -------------------------------------------------------------------------------- /src/id_allocator.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // Copyright © 2019 Intel Corporation. All Rights Reserved. 3 | // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause 4 | 5 | //! Provides allocation and releasing strategy for IDs. 6 | //! 7 | //! This module implements an allocation strategies for all resource types 8 | //! that can be abstracted to an integer. 9 | 10 | use crate::{Error, Result}; 11 | use alloc::collections::BTreeSet; 12 | 13 | /// An unique ID allocator that allows management of IDs in a given interval. 14 | // Internal representation of IdAllocator. Contains the ends of the interval 15 | // that is managed, a field that points at the next available ID, and a 16 | // BTreeSet used to store the released IDs. The reason we chose a 17 | // BTreeSet is that the average complexity for deletion and insertion is 18 | // O(logN) compared to Vec for example, another benefit is that the entries 19 | // are sorted so we will always use the first available ID. 20 | #[derive(Debug, Clone)] 21 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 22 | pub struct IdAllocator { 23 | // Beginning of the range of IDs that we want to manage. 24 | range_base: u32, 25 | // First available id that was never allocated. 26 | next_id: Option, 27 | // End of the range of IDs that we want to manage. 28 | range_end: u32, 29 | // Set of all freed ids that can be reused at subsequent allocations. 30 | freed_ids: BTreeSet, 31 | } 32 | 33 | impl IdAllocator { 34 | /// Creates a new instance of IdAllocator that will be used to manage the 35 | /// allocation and release of ids from the interval specified by 36 | /// `range_base` and `range_end` 37 | pub fn new(range_base: u32, range_end: u32) -> Result { 38 | if range_end < range_base { 39 | return Err(Error::InvalidRange(range_base.into(), range_end.into())); 40 | } 41 | Ok(IdAllocator { 42 | range_base, 43 | next_id: Some(range_base), 44 | range_end, 45 | freed_ids: BTreeSet::::new(), 46 | }) 47 | } 48 | 49 | fn id_in_range(&self, id: u32) -> bool { 50 | // Check for out of range. 51 | self.range_base <= id && id <= self.range_end 52 | } 53 | 54 | /// Allocate an ID from the managed range. 55 | /// We first try to reuse one of the IDs released before. If there is no 56 | /// ID to reuse we return the next available one from the managed range. 57 | pub fn allocate_id(&mut self) -> Result { 58 | // If the set containing all freed ids is not empty we extract the 59 | // first entry from that set and return it. 
60 | if !self.freed_ids.is_empty() { 61 | let ret_value = *self.freed_ids.iter().next().unwrap(); 62 | self.freed_ids.remove(&ret_value); 63 | return Ok(ret_value); 64 | } 65 | // If no id was freed before we return the next available id. 66 | if let Some(next_id) = self.next_id { 67 | if next_id > self.range_end { 68 | return Err(Error::ResourceNotAvailable); 69 | } 70 | // Prepare the next available id. If the addition overflows we 71 | // set the next_id field to None and return Overflow at the next 72 | // allocation request. 73 | self.next_id = next_id.checked_add(1); 74 | return Ok(next_id); 75 | } 76 | Err(Error::Overflow) 77 | } 78 | 79 | /// Frees an id from the managed range. 80 | pub fn free_id(&mut self, id: u32) -> Result { 81 | // Check if the id belongs to the managed range and if it was not 82 | // released before. Return error if any of the conditions is not met. 83 | if !self.id_in_range(id) { 84 | return Err(Error::OutOfRange(id)); 85 | } 86 | if let Some(next_id) = self.next_id { 87 | if next_id < id { 88 | return Err(Error::NeverAllocated(id)); 89 | } 90 | } 91 | 92 | // Insert the released id in the set of released id to avoid releasing 93 | // it in next iterations. 94 | self.freed_ids 95 | .insert(id) 96 | .then_some(id) 97 | .ok_or(Error::AlreadyReleased(id)) 98 | } 99 | } 100 | 101 | #[cfg(test)] 102 | mod tests { 103 | use crate::id_allocator::IdAllocator; 104 | use crate::Error; 105 | 106 | #[test] 107 | fn test_slot_id_allocation() { 108 | let faulty_allocator = IdAllocator::new(23, 5); 109 | assert_eq!(faulty_allocator.unwrap_err(), Error::InvalidRange(23, 5)); 110 | let mut legacy_irq_allocator = IdAllocator::new(5, 23).unwrap(); 111 | assert_eq!(legacy_irq_allocator.range_base, 5); 112 | assert_eq!(legacy_irq_allocator.range_end, 23); 113 | 114 | let id = legacy_irq_allocator.allocate_id().unwrap(); 115 | assert_eq!(id, 5); 116 | assert_eq!(legacy_irq_allocator.next_id.unwrap(), 6); 117 | 118 | for _ in 1..19 { 119 | assert!(legacy_irq_allocator.allocate_id().is_ok()); 120 | } 121 | 122 | assert_eq!( 123 | legacy_irq_allocator.allocate_id().unwrap_err(), 124 | Error::ResourceNotAvailable 125 | ); 126 | } 127 | 128 | #[test] 129 | fn test_u32_overflow() { 130 | let mut allocator = IdAllocator::new(u32::MAX - 1, u32::MAX).unwrap(); 131 | assert_eq!(allocator.allocate_id().unwrap(), u32::MAX - 1); 132 | assert_eq!(allocator.allocate_id().unwrap(), u32::MAX); 133 | let res = allocator.allocate_id(); 134 | assert!(res.is_err()); 135 | assert_eq!(res.unwrap_err(), Error::Overflow); 136 | } 137 | 138 | #[test] 139 | fn test_slot_id_free() { 140 | let mut legacy_irq_allocator = IdAllocator::new(5, 23).unwrap(); 141 | assert_eq!( 142 | legacy_irq_allocator.free_id(3).unwrap_err(), 143 | Error::OutOfRange(3) 144 | ); 145 | assert_eq!(legacy_irq_allocator.freed_ids.len(), 0); 146 | 147 | for _ in 1..10 { 148 | let _id = legacy_irq_allocator.allocate_id().unwrap(); 149 | } 150 | 151 | let irq = 10; 152 | legacy_irq_allocator.free_id(irq).unwrap(); 153 | assert!(legacy_irq_allocator.freed_ids.contains(&irq)); 154 | assert_eq!( 155 | legacy_irq_allocator.free_id(10).unwrap_err(), 156 | Error::AlreadyReleased(10) 157 | ); 158 | let irq = 9; 159 | legacy_irq_allocator.free_id(irq).unwrap(); 160 | assert_eq!(legacy_irq_allocator.freed_ids.len(), 2); 161 | assert_eq!(*legacy_irq_allocator.freed_ids.iter().next().unwrap(), 9); 162 | 163 | let irq = legacy_irq_allocator.allocate_id().unwrap(); 164 | assert_eq!(irq, 9); 165 | 
assert!(!legacy_irq_allocator.freed_ids.contains(&irq)); 166 | assert_eq!(legacy_irq_allocator.freed_ids.len(), 1); 167 | assert_eq!( 168 | legacy_irq_allocator.free_id(21).unwrap_err(), 169 | Error::NeverAllocated(21) 170 | ); 171 | } 172 | 173 | #[test] 174 | fn test_id_sanity_checks() { 175 | let legacy_irq_allocator = IdAllocator::new(5, 23).unwrap(); 176 | 177 | assert!(!legacy_irq_allocator.id_in_range(4)); 178 | assert!(legacy_irq_allocator.id_in_range(6)); 179 | assert!(!legacy_irq_allocator.id_in_range(25)); 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /src/address_allocator.rs: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // Copyright © 2022 Alibaba Cloud. All rights reserved. 3 | // Copyright © 2019 Intel Corporation. All Rights Reserved. 4 | // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause 5 | 6 | //! Provides allocation and releasing strategy for memory slots. 7 | //! 8 | //! This module implements an allocation strategies for memory slots in an 9 | //! address space (for example MMIO and PIO). 10 | 11 | use crate::allocation_engine::IntervalTree; 12 | use crate::{AllocPolicy, Constraint, Error, RangeInclusive, Result}; 13 | 14 | // Internal representation of AddressAllocator. Contains the managed address 15 | // space represented through an instance of RangeInclusive. The address 16 | // allocator also contains a node that represents the root of the interval tree 17 | // used for memory slots management. The reason we chose to use an interval tree 18 | // is that the average complexity for deletion and insertion is O(log N) and for 19 | // searching a node is O(N). 20 | /// An address space allocator. 21 | /// 22 | /// The `AddressAllocator` manages an address space by exporting functionality to reserve and 23 | /// free address ranges based on a user defined [Allocation Policy](enum.AllocPolicy.html). 24 | #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] 25 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 26 | pub struct AddressAllocator { 27 | // Address space that we want to manage. 28 | address_space: RangeInclusive, 29 | // Internal representation of the managed address space. Each node in the 30 | // tree will represent a memory location and can have two states either 31 | // `NodeState::Free` or `NodeState::Allocated`. 32 | interval_tree: IntervalTree, 33 | } 34 | 35 | impl AddressAllocator { 36 | /// Creates a new instance of AddressAllocator that will be used to manage 37 | /// the allocation and release of memory slots from the managed address 38 | /// space. 39 | pub fn new(base: u64, size: u64) -> std::result::Result { 40 | let end = base 41 | .checked_add(size.checked_sub(1).ok_or(Error::Underflow)?) 42 | .ok_or(Error::Overflow)?; 43 | let aux_range = RangeInclusive::new(base, end)?; 44 | Ok(AddressAllocator { 45 | address_space: aux_range, 46 | interval_tree: IntervalTree::new(aux_range), 47 | }) 48 | } 49 | 50 | /// Allocates a new aligned memory slot. Returns the allocated range in case of success. 51 | /// 52 | /// When the `ExactMatch` policy is used, the start address MUST be aligned to the 53 | /// alignment passed as a parameter. 54 | /// 55 | /// # Arguments: 56 | /// - `size`: size to allocate. 57 | /// - `alignment`: alignment to be used for the allocated resources. 58 | /// Valid alignments are a power of 2. 
59 | /// - `policy`: allocation policy. 60 | pub fn allocate( 61 | &mut self, 62 | size: u64, 63 | alignment: u64, 64 | policy: AllocPolicy, 65 | ) -> Result { 66 | let constraint = Constraint::new(size, alignment, policy)?; 67 | self.interval_tree.allocate(constraint) 68 | } 69 | 70 | /// Deletes the specified memory slot or returns `ResourceNotAvailable` if 71 | /// the node was not allocated before. 72 | pub fn free(&mut self, key: &RangeInclusive) -> Result<()> { 73 | self.interval_tree.free(key) 74 | } 75 | 76 | /// First address of the allocator. 77 | pub fn base(&self) -> u64 { 78 | self.address_space.start() 79 | } 80 | 81 | /// Last address of the allocator. 82 | pub fn end(&self) -> u64 { 83 | self.address_space.end() 84 | } 85 | } 86 | 87 | #[cfg(test)] 88 | mod tests { 89 | use super::*; 90 | 91 | #[test] 92 | fn test_regression_exact_match_length_check() { 93 | let mut pool = AddressAllocator::new(0x0, 0x2000).unwrap(); 94 | let res = pool 95 | .allocate(0x1000, 0x1000, AllocPolicy::ExactMatch(0x1000)) 96 | .unwrap(); 97 | assert_eq!( 98 | pool.allocate(0x0, 0x1000, AllocPolicy::FirstMatch) 99 | .unwrap_err(), 100 | Error::InvalidSize(0x0) 101 | ); 102 | assert_eq!( 103 | pool.allocate(0x1000, 0x1000, AllocPolicy::ExactMatch(0x3)) 104 | .unwrap_err(), 105 | Error::UnalignedAddress 106 | ); 107 | assert_eq!(res, RangeInclusive::new(0x1000, 0x1FFF).unwrap()); 108 | let res = pool 109 | .allocate(0x1000, 0x1000, AllocPolicy::ExactMatch(0x0)) 110 | .unwrap(); 111 | assert_eq!(res, RangeInclusive::new(0x0, 0x0FFF).unwrap()); 112 | } 113 | 114 | #[test] 115 | fn test_new_fails_overflow() { 116 | assert_eq!( 117 | AddressAllocator::new(u64::MAX, 0x100).unwrap_err(), 118 | Error::Overflow 119 | ); 120 | } 121 | 122 | #[test] 123 | fn test_new_fails_size_zero() { 124 | assert_eq!( 125 | AddressAllocator::new(0x1000, 0x0).unwrap_err(), 126 | Error::Underflow 127 | ); 128 | } 129 | 130 | #[test] 131 | fn test_allocate_fails_alignment_zero() { 132 | let mut pool = AddressAllocator::new(0x1000, 0x10000).unwrap(); 133 | assert_eq!( 134 | pool.allocate(0x100, 0, AllocPolicy::FirstMatch) 135 | .unwrap_err(), 136 | Error::InvalidAlignment 137 | ); 138 | } 139 | 140 | #[test] 141 | fn test_allocate_fails_alignment_non_power_of_two() { 142 | let mut pool = AddressAllocator::new(0x1000, 0x10000).unwrap(); 143 | assert_eq!( 144 | pool.allocate(0x100, 200, AllocPolicy::FirstMatch) 145 | .unwrap_err(), 146 | Error::InvalidAlignment 147 | ); 148 | } 149 | 150 | #[test] 151 | fn test_allocate_fails_not_enough_space() { 152 | let mut pool = AddressAllocator::new(0x1000, 0x1000).unwrap(); 153 | assert_eq!( 154 | pool.allocate(0x800, 0x100, AllocPolicy::LastMatch).unwrap(), 155 | RangeInclusive::new(0x1800, 0x1FFF).unwrap() 156 | ); 157 | assert_eq!( 158 | pool.allocate(0x900, 0x100, AllocPolicy::FirstMatch) 159 | .unwrap_err(), 160 | Error::ResourceNotAvailable 161 | ); 162 | assert_eq!( 163 | pool.allocate(0x400, 0x100, AllocPolicy::FirstMatch) 164 | .unwrap(), 165 | RangeInclusive::new(0x1000, 0x13FF).unwrap() 166 | ); 167 | } 168 | 169 | #[test] 170 | fn test_allocate_with_alignment_first_ok() { 171 | let mut pool = AddressAllocator::new(0x1000, 0x1000).unwrap(); 172 | assert_eq!( 173 | pool.allocate(0x110, 0x100, AllocPolicy::FirstMatch) 174 | .unwrap(), 175 | RangeInclusive::new(0x1000, 0x110F).unwrap() 176 | ); 177 | assert_eq!( 178 | pool.allocate(0x100, 0x100, AllocPolicy::FirstMatch) 179 | .unwrap(), 180 | RangeInclusive::new(0x1200, 0x12FF).unwrap() 181 | ); 182 | assert_eq!( 183 | 
pool.allocate(0x10, 0x100, AllocPolicy::FirstMatch).unwrap(), 184 | RangeInclusive::new(0x1300, 0x130F).unwrap() 185 | ); 186 | } 187 | 188 | #[test] 189 | fn test_allocate_with_alignment_last_ok() { 190 | let mut pool_reverse = AddressAllocator::new(0x1000, 0x10000).unwrap(); 191 | assert_eq!( 192 | pool_reverse 193 | .allocate(0x110, 0x100, AllocPolicy::LastMatch) 194 | .unwrap(), 195 | RangeInclusive::new(0x10E00, 0x10F0F).unwrap() 196 | ); 197 | assert_eq!( 198 | pool_reverse 199 | .allocate(0x100, 0x100, AllocPolicy::LastMatch) 200 | .unwrap(), 201 | RangeInclusive::new(0x10D00, 0x10DFF).unwrap() 202 | ); 203 | assert_eq!( 204 | pool_reverse 205 | .allocate(0x10, 0x100, AllocPolicy::LastMatch) 206 | .unwrap(), 207 | RangeInclusive::new(0x10C00, 0x10C0F).unwrap() 208 | ); 209 | } 210 | 211 | #[test] 212 | fn test_allocate_address_not_enough_space() { 213 | let mut pool = AddressAllocator::new(0x1000, 0x1000).unwrap(); 214 | // First range is [0x1000:0x17FF] 215 | assert_eq!( 216 | pool.allocate(0x800, 0x100, AllocPolicy::FirstMatch) 217 | .unwrap(), 218 | RangeInclusive::new(0x1000, 0x17FF).unwrap() 219 | ); 220 | // Second range is [0x1A00:0x1BFF] 221 | assert_eq!( 222 | pool.allocate(0x200, 0x100, AllocPolicy::ExactMatch(0x1A00)) 223 | .unwrap(), 224 | RangeInclusive::new(0x1A00, 0x1BFF).unwrap() 225 | ); 226 | // There is 0x200 between the first 2 ranges. 227 | // We ask for an available address but the range is too big 228 | assert_eq!( 229 | pool.allocate(0x800, 0x100, AllocPolicy::ExactMatch(0x1800)) 230 | .unwrap_err(), 231 | Error::ResourceNotAvailable 232 | ); 233 | // We ask for an available address, with a small enough range 234 | assert_eq!( 235 | pool.allocate(0x100, 0x100, AllocPolicy::ExactMatch(0x1800)) 236 | .unwrap(), 237 | RangeInclusive::new(0x1800, 0x18FF).unwrap() 238 | ); 239 | } 240 | 241 | #[test] 242 | fn test_tree_allocate_address_free_and_realloc() { 243 | let mut pool = AddressAllocator::new(0x1000, 0x1000).unwrap(); 244 | assert_eq!( 245 | pool.allocate(0x800, 0x100, AllocPolicy::FirstMatch) 246 | .unwrap(), 247 | RangeInclusive::new(0x1000, 0x17FF).unwrap() 248 | ); 249 | 250 | let _ = pool.free(&RangeInclusive::new(0x1000, 0x17FF).unwrap()); 251 | assert_eq!( 252 | pool.allocate(0x800, 0x100, AllocPolicy::FirstMatch) 253 | .unwrap(), 254 | RangeInclusive::new(0x1000, 0x17FF).unwrap() 255 | ); 256 | } 257 | 258 | #[test] 259 | fn test_allow_range_size_one_left() { 260 | let mut pool = AddressAllocator::new(1, 1000).unwrap(); 261 | assert_eq!( 262 | pool.allocate(10, 2, AllocPolicy::FirstMatch).unwrap(), 263 | RangeInclusive::new(2, 11).unwrap() 264 | ); 265 | assert_eq!( 266 | pool.allocate(1, 1, AllocPolicy::FirstMatch).unwrap(), 267 | RangeInclusive::new(1, 1).unwrap() 268 | ); 269 | } 270 | 271 | #[test] 272 | fn test_allocate_address_fail_free_and_realloc() { 273 | let mut pool = AddressAllocator::new(0x0, 0x1000).unwrap(); 274 | //First allocation fails 275 | assert_eq!( 276 | pool.allocate(0x2000, 0x100, AllocPolicy::FirstMatch) 277 | .unwrap_err(), 278 | Error::ResourceNotAvailable 279 | ); 280 | // We try to free a range that was not allocated. 281 | assert_eq!( 282 | pool.free(&RangeInclusive::new(0x1200, 0x3200).unwrap()) 283 | .unwrap_err(), 284 | Error::ResourceNotAvailable 285 | ); 286 | // Now we try an allocation that should succeed. 
287 | assert_eq!( 288 | pool.allocate(0x4FE, 0x100, AllocPolicy::ExactMatch(0x500)) 289 | .unwrap(), 290 | RangeInclusive::new(0x500, 0x9FD).unwrap() 291 | ); 292 | assert!(pool 293 | .free(&RangeInclusive::new(0x500, 0x9FD).unwrap()) 294 | .is_ok()); 295 | } 296 | } 297 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause 3 | 4 | //! Manages system resources that can be allocated to VMs and their devices. 5 | //! 6 | //! # Example 7 | //! 8 | //! Depending on the use case of the VMM, both the `IDAllocator` and the `AddressAllocator` 9 | //! can be used. 
In the example below we assume that the `IDAllocator` is used for allocating 10 | //! unique identifiers for VM devices. We use the address allocator for allocating MMIO ranges 11 | //! for virtio devices. 12 | //! 13 | //! In the example below we use constants that are typical for the x86 platform, but this has no 14 | //! impact on the code actually working on aarch64. 15 | //! 16 | //! ```rust 17 | //! use std::collections::HashMap; 18 | //! use std::process::id; 19 | //! use vm_allocator::{AddressAllocator, AllocPolicy, Error, IdAllocator, RangeInclusive, Result}; 20 | //! 21 | //! const FIRST_ADDR_PAST_32BITS: u64 = 1 << 32; 22 | //! const MEM_32BIT_GAP_SIZE: u64 = 768 << 20; 23 | //! const MMIO_MEM_START: u64 = FIRST_ADDR_PAST_32BITS - MEM_32BIT_GAP_SIZE; 24 | //! const PAGE_SIZE: u64 = 0x1000; 25 | //! 26 | //! struct DeviceManager { 27 | //! id_allocator: IdAllocator, 28 | //! mmio_allocator: AddressAllocator, 29 | //! slots: HashMap, 30 | //! } 31 | //! 32 | //! #[derive(Clone, Copy)] 33 | //! struct DeviceSlot { 34 | //! id: u32, 35 | //! mmio_range: RangeInclusive, 36 | //! } 37 | //! 38 | //! impl DeviceManager { 39 | //! pub fn new() -> Result { 40 | //! Ok(DeviceManager { 41 | //! id_allocator: IdAllocator::new(0, 100)?, 42 | //! mmio_allocator: AddressAllocator::new(MMIO_MEM_START, MEM_32BIT_GAP_SIZE)?, 43 | //! slots: HashMap::new(), 44 | //! }) 45 | //! } 46 | //! 47 | //! pub fn reserve_device_slot(&mut self) -> Result { 48 | //! // We're reserving the first available address that is aligned to the page size. 49 | //! // For each device we reserve one page of addresses. 50 | //! let mmio_range = 51 | //! self.mmio_allocator 52 | //! .allocate(PAGE_SIZE, PAGE_SIZE, AllocPolicy::FirstMatch)?; 53 | //! let slot = DeviceSlot { 54 | //! id: self.id_allocator.allocate_id()?, 55 | //! mmio_range, 56 | //! }; 57 | //! self.slots.insert(slot.id, slot.mmio_range); 58 | //! Ok(slot) 59 | //! } 60 | //! 61 | //! // Free the device slot corresponding to the passed device ID. 62 | //! pub fn free_device_slot(&mut self, id: u32) -> Result<()> { 63 | //! let mmio_range = self.slots.get(&id).ok_or(Error::NeverAllocated(id))?; 64 | //! let _ = self.id_allocator.free_id(id)?; 65 | //! self.mmio_allocator.free(mmio_range) 66 | //! } 67 | //! } 68 | //! 69 | //! # fn main() { 70 | //! # let mut dm = DeviceManager::new().unwrap(); 71 | //! # let slot = dm.reserve_device_slot().unwrap(); 72 | //! # dm.free_device_slot(slot.id).unwrap(); 73 | //! # } 74 | //! ``` 75 | 76 | #![deny(missing_docs)] 77 | #![cfg_attr(not(feature = "std"), no_std)] 78 | 79 | extern crate alloc; 80 | 81 | #[cfg(not(feature = "std"))] 82 | extern crate core as std; 83 | 84 | mod address_allocator; 85 | /// Allocation engine used by address allocator. 86 | mod allocation_engine; 87 | mod id_allocator; 88 | 89 | use std::{cmp::max, cmp::min, result}; 90 | use thiserror::Error; 91 | 92 | use crate::allocation_engine::NodeState; 93 | pub use crate::{address_allocator::AddressAllocator, id_allocator::IdAllocator}; 94 | 95 | /// Default alignment that can be used for creating a `Constraint`. 96 | pub const DEFAULT_CONSTRAINT_ALIGN: u64 = 4; 97 | 98 | /// Error type for IdAllocator usage. 99 | #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Error)] 100 | pub enum Error { 101 | /// Management operations on desired resource resulted in overflow. 
102 | #[error("Management operations on desired resource resulted in overflow.")] 103 | Overflow, 104 | /// An id that is not part of the specified range was requested to be released. 105 | #[error("Specified id: {0} is not in the range.")] 106 | OutOfRange(u32), 107 | /// An id that was already released was requested to be released. 108 | #[error("Specified id: {0} is already released.")] 109 | AlreadyReleased(u32), 110 | /// An id that was never allocated was requested to be released. 111 | #[error("Specified id: {0} was never allocated, can't release it.")] 112 | NeverAllocated(u32), 113 | /// The resource we want to use or update is not available. 114 | #[error("The requested resource is not available.")] 115 | ResourceNotAvailable, 116 | /// The range to manage is invalid. 117 | #[error("The range specified: {0}-{1} is not valid.")] 118 | InvalidRange(u64, u64), 119 | /// Alignment value is invalid 120 | #[error("Alignment value is invalid.")] 121 | InvalidAlignment, 122 | /// The range that we try to insert into the interval tree is overlapping 123 | /// with another node from the tree. 124 | #[error("Addresses are overlapping.{0:?} intersects with existing {1:?}")] 125 | Overlap(RangeInclusive, RangeInclusive), 126 | /// A node state can be changed just from Free to Allocated, other transitions 127 | /// are not valid. 128 | #[error("Invalid state transition for node {0:?} from {1:?} to NodeState::Free")] 129 | InvalidStateTransition(RangeInclusive, NodeState), 130 | /// Address is unaligned 131 | #[error("The address is not aligned.")] 132 | UnalignedAddress, 133 | /// Management operations on desired resource resulted in underflow. 134 | #[error("Management operations on desired resource resulted in underflow.")] 135 | Underflow, 136 | /// The size of the desired resource is not invalid. 137 | #[error("The specified size: {0} is not valid.")] 138 | InvalidSize(u64), 139 | } 140 | 141 | /// Wrapper over std::result::Result 142 | pub type Result = result::Result; 143 | 144 | /// A closed interval range [start, end]. 145 | /// The range describes a memory slot which is assigned by the VMM to a device. 146 | /// 147 | /// # Example 148 | /// 149 | /// ```rust 150 | /// use vm_allocator::RangeInclusive; 151 | /// 152 | /// let r = RangeInclusive::new(0x0, 0x100).unwrap(); 153 | /// assert_eq!(r.len(), 0x101); 154 | /// assert_eq!(r.start(), 0x0); 155 | /// assert_eq!(r.end(), 0x100); 156 | /// 157 | /// // Check if a region contains another region. 158 | /// let other = RangeInclusive::new(0x50, 0x80).unwrap(); 159 | /// assert!(r.contains(&other)); 160 | /// 161 | /// // Check if a region overlaps with another one. 162 | /// let other = RangeInclusive::new(0x99, 0x150).unwrap(); 163 | /// assert!(r.overlaps(&other)); 164 | /// ``` 165 | // This structure represents the key of the Node object in the interval tree implementation. 166 | #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Hash, Ord, Debug)] 167 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 168 | pub struct RangeInclusive { 169 | /// Lower boundary of the interval. 170 | start: u64, 171 | /// Upper boundary of the interval. 172 | end: u64, 173 | } 174 | 175 | #[allow(clippy::len_without_is_empty)] 176 | impl RangeInclusive { 177 | /// Creates a new RangeInclusive. 
178 | pub fn new(start: u64, end: u64) -> Result { 179 | // The length of the interval [0, u64::MAX] is u64::MAX + 1 which does 180 | // not fit in a u64::MAX, hence we return `Error::InvalidRange` when 181 | // there is an attempt to use that range. 182 | if start > end || (start == 0 && end == u64::MAX) { 183 | return Err(Error::InvalidRange(start, end)); 184 | } 185 | Ok(RangeInclusive { start, end }) 186 | } 187 | 188 | /// Returns the length of the range. 189 | pub fn len(&self) -> u64 { 190 | self.end - self.start + 1 191 | } 192 | 193 | /// Returns true if the regions overlap. 194 | pub fn overlaps(&self, other: &RangeInclusive) -> bool { 195 | max(self.start, other.start) <= min(self.end, other.end) 196 | } 197 | 198 | /// Returns true if the current range contains the range passed as a parameter. 199 | pub fn contains(&self, other: &RangeInclusive) -> bool { 200 | self.start <= other.start && self.end >= other.end 201 | } 202 | 203 | /// Returns the lower boundary of the range. 204 | pub fn start(&self) -> u64 { 205 | self.start 206 | } 207 | 208 | /// Returns the upper boundary of the range. 209 | pub fn end(&self) -> u64 { 210 | self.end 211 | } 212 | } 213 | 214 | /// A resource allocation constraint. 215 | /// 216 | /// # Example 217 | /// 218 | /// ```rust 219 | /// use vm_allocator::{AllocPolicy, Constraint, Error, IdAllocator, DEFAULT_CONSTRAINT_ALIGN}; 220 | /// 221 | /// let constraint = 222 | /// Constraint::new(0x4, DEFAULT_CONSTRAINT_ALIGN, AllocPolicy::FirstMatch).unwrap(); 223 | /// assert_eq!(constraint.size(), 0x4); 224 | /// assert_eq!(constraint.align(), 0x4); 225 | /// 226 | /// // Alignments need to be a power of 2, otherwise an error is returned. 227 | /// assert_eq!( 228 | /// Constraint::new(0x4, 0x3, AllocPolicy::LastMatch).unwrap_err(), 229 | /// Error::InvalidAlignment 230 | /// ); 231 | /// 232 | /// // When using the ExactMatch policy, the start address must also be aligned, otherwise 233 | /// // an error is returned. 234 | /// assert_eq!( 235 | /// Constraint::new(0x4, 0x4, AllocPolicy::ExactMatch(0x3)).unwrap_err(), 236 | /// Error::UnalignedAddress 237 | /// ); 238 | /// ``` 239 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 240 | pub struct Constraint { 241 | /// Size to allocate. 242 | size: u64, 243 | /// Alignment for the allocated resource. 244 | align: u64, 245 | /// Resource allocation policy. 246 | policy: AllocPolicy, 247 | } 248 | 249 | impl Constraint { 250 | /// Creates a new constraint based on the passed configuration. 251 | /// 252 | /// When the `ExactMatch` policy is used, the start address MUST be aligned to the 253 | /// alignment passed as a parameter. 254 | /// 255 | /// # Arguments: 256 | /// - `size`: size to allocate. 257 | /// - `align`: alignment to be used for the allocated resources. 258 | /// Valid alignments are a power of 2. 259 | /// - `policy`: allocation policy. 260 | pub fn new(size: u64, align: u64, policy: AllocPolicy) -> Result { 261 | if size == 0 { 262 | return Err(Error::InvalidSize(size)); 263 | } 264 | 265 | if !align.is_power_of_two() || align == 0 { 266 | return Err(Error::InvalidAlignment); 267 | } 268 | 269 | if let AllocPolicy::ExactMatch(start_address) = policy { 270 | if start_address % align != 0 { 271 | return Err(Error::UnalignedAddress); 272 | } 273 | } 274 | 275 | Ok(Constraint { 276 | size, 277 | align, 278 | policy, 279 | }) 280 | } 281 | 282 | /// Returns the alignment constraint. 283 | pub fn align(self) -> u64 { 284 | self.align 285 | } 286 | 287 | /// Returns the size constraint. 
288 | pub fn size(self) -> u64 { 289 | self.size 290 | } 291 | } 292 | 293 | /// Policy for resource allocation. 294 | #[derive(Copy, Clone, Debug, Eq, PartialEq, Default)] 295 | pub enum AllocPolicy { 296 | /// Allocate the first matched entry. 297 | #[default] 298 | FirstMatch, 299 | /// Allocate first matched entry from the end of the range. 300 | LastMatch, 301 | /// Allocate a memory slot starting with the specified address 302 | /// if it is available. 303 | ExactMatch(u64), 304 | } 305 | 306 | #[cfg(test)] 307 | mod tests { 308 | use super::*; 309 | 310 | #[test] 311 | fn test_new_range() { 312 | assert_eq!( 313 | RangeInclusive::new(2, 1).unwrap_err(), 314 | Error::InvalidRange(2, 1) 315 | ); 316 | assert_eq!( 317 | RangeInclusive::new(0, u64::MAX).unwrap_err(), 318 | Error::InvalidRange(0, u64::MAX) 319 | ); 320 | } 321 | 322 | #[test] 323 | fn test_range_overlaps() { 324 | let range_a = RangeInclusive::new(1u64, 4u64).unwrap(); 325 | let range_b = RangeInclusive::new(4u64, 6u64).unwrap(); 326 | let range_c = RangeInclusive::new(2u64, 3u64).unwrap(); 327 | let range_e = RangeInclusive::new(5u64, 6u64).unwrap(); 328 | 329 | assert!(range_a.overlaps(&range_b)); 330 | assert!(range_b.overlaps(&range_a)); 331 | assert!(range_a.overlaps(&range_c)); 332 | assert!(range_c.overlaps(&range_a)); 333 | assert!(!range_a.overlaps(&range_e)); 334 | assert!(!range_e.overlaps(&range_a)); 335 | 336 | assert_eq!(range_a.len(), 4); 337 | } 338 | 339 | #[test] 340 | fn test_range_contain() { 341 | let range_a = RangeInclusive::new(2u64, 6u64).unwrap(); 342 | assert!(range_a.contains(&RangeInclusive::new(2u64, 3u64).unwrap())); 343 | assert!(range_a.contains(&RangeInclusive::new(3u64, 4u64).unwrap())); 344 | assert!(range_a.contains(&RangeInclusive::new(5u64, 6u64).unwrap())); 345 | assert!(!range_a.contains(&RangeInclusive::new(1u64, 2u64).unwrap())); 346 | assert!(!range_a.contains(&RangeInclusive::new(1u64, 3u64).unwrap())); 347 | assert!(!range_a.contains(&RangeInclusive::new(1u64, 7u64).unwrap())); 348 | assert!(!range_a.contains(&RangeInclusive::new(7u64, 8u64).unwrap())); 349 | assert!(!range_a.contains(&RangeInclusive::new(6u64, 7u64).unwrap())); 350 | assert!(!range_a.contains(&RangeInclusive::new(7u64, 8u64).unwrap())); 351 | } 352 | 353 | #[test] 354 | fn test_range_ord() { 355 | let range_a = RangeInclusive::new(1, 4).unwrap(); 356 | let range_b = RangeInclusive::new(1, 4).unwrap(); 357 | let range_c = RangeInclusive::new(1, 3).unwrap(); 358 | let range_d = RangeInclusive::new(1, 5).unwrap(); 359 | 360 | assert_eq!(range_a, range_b); 361 | assert_eq!(range_b, range_a); 362 | assert!(range_a > range_c); 363 | assert!(range_c < range_a); 364 | assert!(range_a < range_d); 365 | assert!(range_d > range_a); 366 | } 367 | 368 | #[test] 369 | fn test_getters() { 370 | let range = RangeInclusive::new(3, 5).unwrap(); 371 | assert_eq!(range.start(), 3); 372 | assert_eq!(range.end(), 5); 373 | } 374 | 375 | #[test] 376 | fn test_range_upper_bound() { 377 | let range = RangeInclusive::new(0, u64::MAX); 378 | assert_eq!(range.unwrap_err(), Error::InvalidRange(0, u64::MAX)); 379 | } 380 | 381 | #[test] 382 | fn constraint_getter() { 383 | let bad_constraint = Constraint::new(0x1000, 0x1000, AllocPolicy::ExactMatch(0xC)); 384 | assert_eq!(bad_constraint.unwrap_err(), Error::UnalignedAddress); 385 | let constraint = Constraint::new(0x1000, 0x1000, AllocPolicy::default()).unwrap(); 386 | assert_eq!(constraint.align(), 0x1000); 387 | assert_eq!(constraint.size(), 0x1000); 388 | } 389 | } 390 | 
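The `interval_tree` module that follows implements the `align_down`/`align_up` helpers whose bit manipulation is worked through in DESIGN.md. A minimal standalone sketch of the same trick (illustrative only; the crate's own helpers additionally validate that the alignment is a non-zero power of two and return errors instead of assuming it):

```rust
/// Round `addr` down to the previous multiple of `align`.
/// Assumes `align` is a non-zero power of two.
fn align_down(addr: u64, align: u64) -> u64 {
    addr & !(align - 1)
}

/// Round `addr` up to the next multiple of `align`, or `None` on overflow.
/// Assumes `align` is a non-zero power of two.
fn align_up(addr: u64, align: u64) -> Option<u64> {
    addr.checked_add(align - 1).map(|a| a & !(align - 1))
}

fn main() {
    // The worked examples from DESIGN.md: aligning to a 0x1000 boundary.
    assert_eq!(align_down(0x4000_0001, 0x1000), 0x4000_0000); // LastMatch rounds down
    assert_eq!(align_up(0x4000_0001, 0x1000), Some(0x4000_1000)); // FirstMatch rounds up
}
```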
-------------------------------------------------------------------------------- /src/allocation_engine/interval_tree.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2022 Alibaba Cloud. All rights reserved.
2 | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
4 | 
5 | use std::cmp::{max, Ordering};
6 | 
7 | use alloc::boxed::Box;
8 | 
9 | use crate::{AllocPolicy, Constraint, Error, RangeInclusive, Result};
10 | 
11 | /// Returns the first multiple of `alignment` that is lower or equal to the
12 | /// specified address. This method works only for alignment values that are a
13 | /// power of two.
14 | pub fn align_down(address: u64, alignment: u64) -> Result<u64> {
15 | if !alignment.is_power_of_two() {
16 | return Err(Error::InvalidAlignment);
17 | }
18 | // It is safe to subtract 1 as alignment is already checked to be greater
19 | // than 0.
20 | Ok(address & !(alignment - 1))
21 | }
22 | 
23 | /// Returns the first multiple of `alignment` that is greater or equal to the
24 | /// specified address. This method works only for alignment values that are a
25 | /// power of two.
26 | pub fn align_up(address: u64, alignment: u64) -> Result<u64> {
27 | if alignment == 0 {
28 | return Err(Error::InvalidAlignment);
29 | }
30 | // It is safe to subtract 1 as alignment is already checked to be greater
31 | // than 0.
32 | if let Some(intermediary_address) = address.checked_add(alignment - 1) {
33 | return align_down(intermediary_address, alignment);
34 | }
35 | Err(Error::Overflow)
36 | }
37 | 
38 | /// Node state for interval tree nodes.
39 | ///
40 | /// Valid state transition:
41 | /// - None -> Free: IntervalTree::insert()
42 | /// - Free -> Allocated: IntervalTree::allocate()
43 | /// - Allocated -> Free: IntervalTree::free()
44 | /// - * -> None: IntervalTree::delete()
45 | #[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord)]
46 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
47 | pub enum NodeState {
48 | /// Node is free.
49 | Free,
50 | /// Node is allocated.
51 | Allocated,
52 | }
53 | 
54 | impl NodeState {
55 | fn is_free(&self) -> bool {
56 | *self == NodeState::Free
57 | }
58 | }
59 | 
60 | /// Internal tree node to implement interval tree.
61 | #[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord)]
62 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
63 | pub(crate) struct InnerNode {
64 | /// Interval handled by this node.
65 | key: RangeInclusive,
66 | /// NodeState, can be Free or Allocated.
67 | node_state: NodeState,
68 | /// Optional left child of current node.
69 | left: Option<Box<InnerNode>>,
70 | /// Optional right child of current node.
71 | right: Option<Box<InnerNode>>,
72 | /// Cached height of the node.
73 | height: u64,
74 | }
75 | 
76 | impl InnerNode {
77 | /// Creates a new InnerNode object.
78 | fn new(key: RangeInclusive, node_state: NodeState) -> Self {
79 | InnerNode {
80 | key,
81 | node_state,
82 | left: None,
83 | right: None,
84 | height: 1,
85 | }
86 | }
87 | 
88 | /// Returns a readonly reference to the node associated with the `key` or
89 | /// None if the searched key does not exist in the tree.
90 | fn search(&self, key: &RangeInclusive) -> Option<&InnerNode> {
91 | match self.key.cmp(key) {
92 | Ordering::Equal => Some(self),
93 | Ordering::Less => self.right.as_ref().and_then(|node| node.search(key)),
94 | Ordering::Greater => self.left.as_ref().and_then(|node| node.search(key)),
95 | }
96 | }
97 | 
98 | /// Returns a readonly reference to the node associated with the `key` or
99 | /// None if there is no Node representing an interval that covers the
100 | /// searched key. For a key [a, b], this method will return a node with
101 | /// a key [c, d] such that c <= a and b <= d.
102 | fn search_superset(&self, key: &RangeInclusive) -> Option<&InnerNode> {
103 | if self.key.contains(key) {
104 | Some(self)
105 | } else if key.end < self.key.start {
106 | self.left
107 | .as_ref()
108 | .and_then(|node| node.search_superset(key))
109 | } else {
110 | self.right
111 | .as_ref()
112 | .and_then(|node| node.search_superset(key))
113 | }
114 | }
115 | 
116 | /// Rotates the tree such that height difference between subtrees
117 | /// is not greater than abs(1).
118 | fn rotate(self: Box<Self>) -> Box<Self> {
119 | let l = height(&self.left);
120 | let r = height(&self.right);
121 | 
122 | match (l as i64) - (r as i64) {
123 | -1..=1 => self,
124 | // Safe to unwrap as rotate_left_successor always returns Some when
125 | // the current node has a left child and we just checked that it
126 | // has at least one child otherwise this difference would not be two.
127 | 2 => self.rotate_left_successor().unwrap(),
128 | // Safe to unwrap as rotate_right_successor always returns Some when
129 | // the current node has a right child and we just checked that it
130 | // has at least one child otherwise this difference would not be
131 | // minus two.
132 | -2 => self.rotate_right_successor().unwrap(),
133 | _ => unreachable!(),
134 | }
135 | }
136 | 
137 | /// Performs a single left rotation on this node.
138 | fn rotate_left(mut self: Box<Self>) -> Option<Box<Self>> {
139 | if let Some(mut new_root) = self.right.take() {
140 | self.right = new_root.left.take();
141 | self.update_cached_height();
142 | new_root.left = Some(self);
143 | new_root.update_cached_height();
144 | return Some(new_root);
145 | }
146 | None
147 | }
148 | 
149 | /// Performs a single right rotation on this node.
150 | fn rotate_right(mut self: Box<Self>) -> Option<Box<Self>> {
151 | if let Some(mut new_root) = self.left.take() {
152 | self.left = new_root.right.take();
153 | self.update_cached_height();
154 | new_root.right = Some(self);
155 | new_root.update_cached_height();
156 | return Some(new_root);
157 | }
158 | None
159 | }
160 | 
161 | /// Performs a rotation when the left successor is too high.
162 | fn rotate_left_successor(mut self: Box<Self>) -> Option<Box<Self>> {
163 | if let Some(left) = self.left.take() {
164 | if height(&left.left) < height(&left.right) {
165 | self.left = left.rotate_left();
166 | self.update_cached_height();
167 | } else {
168 | self.left = Some(left);
169 | }
170 | return self.rotate_right();
171 | }
172 | None
173 | }
174 | 
175 | /// Performs a rotation when the right successor is too high.
176 | fn rotate_right_successor(mut self: Box<Self>) -> Option<Box<Self>> {
177 | if let Some(right) = self.right.take() {
178 | if height(&right.left) > height(&right.right) {
179 | self.right = right.rotate_right();
180 | self.update_cached_height();
181 | } else {
182 | self.right = Some(right);
183 | }
184 | return self.rotate_left();
185 | }
186 | None
187 | }
188 | 
189 | /// Deletes the entry point of this tree structure.
190 | fn delete_root(mut self) -> Option<Box<Self>> {
191 | match (self.left.take(), self.right.take()) {
192 | (None, None) => None,
193 | (Some(l), None) => Some(l),
194 | (None, Some(r)) => Some(r),
195 | (Some(l), Some(r)) => Some(Self::combine_subtrees(l, r)),
196 | }
197 | }
198 | 
199 | /// Finds the node with the minimal key in the subtree and returns it
200 | /// together with the remaining (optional) subtree from which it has been
201 | /// removed, as the tuple (min_node, remaining).
202 | fn get_new_root(mut self: Box<Self>) -> (Box<Self>, Option<Box<Self>>) {
203 | match self.left.take() {
204 | None => {
205 | let remaining = self.right.take();
206 | (self, remaining)
207 | }
208 | Some(left) => {
209 | let (min_node, left) = left.get_new_root();
210 | self.left = left;
211 | self.update_cached_height();
212 | (min_node, Some(self.rotate()))
213 | }
214 | }
215 | }
216 | 
217 | /// Creates a single tree from the subtrees resulting from deleting the root
218 | /// node.
219 | fn combine_subtrees(l: Box<Self>, r: Box<Self>) -> Box<Self> {
220 | let (mut new_root, remaining) = r.get_new_root();
221 | new_root.left = Some(l);
222 | new_root.right = remaining;
223 | new_root.update_cached_height();
224 | new_root.rotate()
225 | }
226 | 
227 | /// Updates cached information of the node.
228 | fn update_cached_height(&mut self) {
229 | // It is safe adding 1 to the height as it can not be greater than 50
230 | // hence no chance of overflowing.
231 | self.height = max(height(&self.left), height(&self.right)) + 1;
232 | }
233 | 
234 | /// Insert a new node in the subtree. After the node is inserted the
235 | /// tree will be balanced. The node_state parameter is needed because in
236 | /// the AddressAllocator allocation logic we will need to insert both free
237 | /// and allocated nodes.
238 | fn insert(
239 | mut self: Box<Self>,
240 | key: RangeInclusive,
241 | node_state: NodeState,
242 | ) -> Result<Box<Self>> {
243 | // The InnerNode structure has a length of 48 bytes. With other nested
244 | // calls that are made during the insertion process the size occupied
245 | // on the stack by just one insert call is around 122 bytes. Considering
246 | // that the default stack size on Linux is 8K we could make around 73
247 | // calls to the insert method before running into a stack overflow. To
248 | // be cautious we will use 50 as the maximum height of the tree. A
249 | // maximum height of 50 will result in the possibility to allocate
250 | // (2^50 - 1) memory slots. Considering the imposed maximum height the
251 | // recursion is safe to use.
252 | // It is safe adding 1 to the height as it can not be greater than 50
253 | // hence no chance of overflowing.
254 | if (self.height + 1) > 50 {
255 | return Err(Error::Overflow);
256 | }
257 | if self.key.overlaps(&key) {
258 | return Err(Error::Overlap(key, self.key));
259 | }
260 | match self.key.cmp(&key) {
261 | // It is not possible for a RangeInclusive to be equal to an existing node
262 | // as the overlaps method will also catch this case and return the
263 | // corresponding error code.
264 | Ordering::Equal => unreachable!(),
265 | Ordering::Less => match self.right {
266 | None => self.right = Some(Box::new(InnerNode::new(key, node_state))),
267 | Some(right) => {
268 | self.right = Some(right.insert(key, node_state)?);
269 | }
270 | },
271 | Ordering::Greater => match self.left {
272 | None => self.left = Some(Box::new(InnerNode::new(key, node_state))),
273 | Some(left) => {
274 | self.left = Some(left.insert(key, node_state)?);
275 | }
276 | },
277 | }
278 | self.update_cached_height();
279 | Ok(self.rotate())
280 | }
281 | 
282 | /// Update the state of an old node. This method should be used when we
283 | /// find an existing node with the state `NodeState::Free` that satisfies
284 | /// all constraints of an allocation request. The recursion is safe as we
285 | /// have in place a maximum height for the tree.
286 | fn mark_as_allocated(&mut self, key: &RangeInclusive) -> Result<()> {
287 | match self.key.cmp(key) {
288 | Ordering::Equal => {
289 | if self.node_state != NodeState::Free {
290 | return Err(Error::InvalidStateTransition(self.key, self.node_state));
291 | }
292 | self.node_state = NodeState::Allocated;
293 | Ok(())
294 | }
295 | Ordering::Less => match self.right.as_mut() {
296 | None => Err(Error::ResourceNotAvailable),
297 | Some(node) => node.mark_as_allocated(key),
298 | },
299 | Ordering::Greater => match self.left.as_mut() {
300 | None => Err(Error::ResourceNotAvailable),
301 | Some(node) => node.mark_as_allocated(key),
302 | },
303 | }
304 | }
305 | 
306 | /// Delete `key` from the subtree.
307 | ///
308 | /// Note: it doesn't return whether the key exists in the subtree, so the
309 | /// caller needs to ensure that it does.
310 | fn delete(mut self: Box<Self>, key: &RangeInclusive) -> Option<Box<Self>> {
311 | match self.key.cmp(key) {
312 | Ordering::Equal => {
313 | return self.delete_root();
314 | }
315 | Ordering::Less => {
316 | if let Some(node) = self.right.take() {
317 | let right = node.delete(key);
318 | self.right = right;
319 | self.update_cached_height();
320 | return Some(self.rotate());
321 | }
322 | }
323 | Ordering::Greater => {
324 | if let Some(node) = self.left.take() {
325 | let left = node.delete(key);
326 | self.left = left;
327 | self.update_cached_height();
328 | return Some(self.rotate());
329 | }
330 | }
331 | }
332 | Some(self)
333 | }
334 | 
335 | /// Returns the best node from the tree to place the desired memory slot
336 | /// and a RangeInclusive object with the start address aligned to the value specified
337 | /// in the constraint. The RangeInclusive returned by this method may be larger than
338 | /// what was requested. It's up to the caller to split the node if it wants
339 | /// to allocate the exact size from this node.
340 | fn find_candidate(&self, constraint: &Constraint) -> Result<(&Self, RangeInclusive)> {
341 | match constraint.policy {
342 | // Returns the first node from the managed address space that is
343 | // satisfying the specified constraints or `ResourceNotAvailable`
344 | // if the request can not be satisfied.
345 | AllocPolicy::FirstMatch => self.first_match(constraint),
346 | // Returns the last node from the managed address space that is
347 | // satisfying the specified constraints or `ResourceNotAvailable`
348 | // if the request can not be satisfied.
349 | AllocPolicy::LastMatch => self.last_match(constraint),
350 | // Returns the node containing the address specified or the
351 | // `ResourceNotAvailable` error if any of the sanity checks is not
352 | // passing.
353 | AllocPolicy::ExactMatch(start_address) => { 354 | // Search the node in the interval tree that contains the 355 | // desired starting address. 356 | let node = self 357 | .search_superset(&RangeInclusive::new( 358 | start_address, 359 | start_address.checked_add(1).ok_or(Error::Overflow)?, 360 | )?) 361 | .ok_or(Error::ResourceNotAvailable)?; 362 | let end_address = start_address 363 | .checked_add(constraint.size().checked_sub(1).ok_or(Error::Underflow)?) 364 | .ok_or(Error::Overflow)?; 365 | // We should check that starting from the desired address the 366 | // whole memory slot will fit in the selected node. 367 | let range = RangeInclusive::new(start_address, end_address)?; 368 | if !node.key.contains(&range) { 369 | return Err(Error::ResourceNotAvailable); 370 | } 371 | Ok((node, range)) 372 | } 373 | } 374 | } 375 | 376 | /// Returns the first node from the managed address space that is satisfying 377 | /// the specified constraints and the aligned address of the desired memory 378 | /// slot. Or if the request can not be satisfied `ResourceNotAvailable`. 379 | fn first_match(&self, constraint: &Constraint) -> Result<(&Self, RangeInclusive)> { 380 | // Searches the first free node from the tree. 381 | let mut res = self 382 | .left 383 | .as_ref() 384 | .map_or(Err(Error::ResourceNotAvailable), |node| { 385 | node.first_match(constraint) 386 | }); 387 | 388 | // If the result is Error::ResourceNotAvailable this means that we got 389 | // to the first free node from the tree. We check if this node is 390 | // satisfying all the constraints, if yes save the values and return 391 | // them at the end of the method. 392 | if res == Err(Error::ResourceNotAvailable) { 393 | res = self 394 | .check_constraint(constraint) 395 | .map_or(res, |node| Ok((self, node))) 396 | } 397 | 398 | // If res is still Error::ResourceNotAvailable we continue our search 399 | // on the right part of the tree, as the method is recursive the same 400 | // logic from above will apply. 401 | if res == Err(Error::ResourceNotAvailable) { 402 | res = self 403 | .right 404 | .as_ref() 405 | .map_or(Err(Error::ResourceNotAvailable), |node| { 406 | node.first_match(constraint) 407 | }); 408 | } 409 | res 410 | } 411 | 412 | /// Returns the last node from the managed address space that is satisfying 413 | /// the specified constraints and the aligned address of the desired memory 414 | /// slot. Or if the request can not be satisfied `ResourceNotAvailable`. 415 | fn last_match(&self, constraint: &Constraint) -> Result<(&Self, RangeInclusive)> { 416 | // Searches the last free node from the tree. 417 | let mut res = self 418 | .right 419 | .as_ref() 420 | .map_or(Err(Error::ResourceNotAvailable), |node| { 421 | node.last_match(constraint) 422 | }); 423 | 424 | // If the result is Error::ResourceNotAvailable this means that we got 425 | // to the last free node from the tree. We check if this node is 426 | // satisfying all the constraints, if yes save the values and return 427 | // them at the end of the method 428 | if res == Err(Error::ResourceNotAvailable) { 429 | res = self 430 | .check_constraint(constraint) 431 | .map_or(res, |node| Ok((self, node))) 432 | } 433 | 434 | // If res is still Error::ResourceNotAvailable we continue our search 435 | // on the left part of the tree, as the method is recursive the same 436 | // logic from above will apply. 
437 | if res == Err(Error::ResourceNotAvailable) {
438 | res = self
439 | .left
440 | .as_ref()
441 | .map_or(Err(Error::ResourceNotAvailable), |node| {
442 | node.last_match(constraint)
443 | });
444 | }
445 | res
446 | }
447 | 
448 | /// Check that the candidate node satisfies all the constraints for
449 | /// the requested memory slot.
450 | fn check_constraint(&self, constraint: &Constraint) -> Result<RangeInclusive> {
451 | // Exit if node is already allocated.
452 | if !self.node_state.is_free() || self.key.len() < constraint.size {
453 | return Err(Error::ResourceNotAvailable);
454 | }
455 | let node_key = self.key;
456 | // Get the starting address for the memory slot.
457 | let range_start = match constraint.policy {
458 | AllocPolicy::FirstMatch => align_up(node_key.start(), constraint.align)?,
459 | AllocPolicy::LastMatch => {
460 | // This operation can not underflow as we check at the beginning
461 | // of this method that the requested node fits in the selected
462 | // node. The subsequent addition can not overflow as well since
463 | // we already subtract the desired length (e.g. Given a range
464 | // [x, u64::MAX] and we want to allocate a node with size Y and
465 | // AllocPolicy::LastMatch, computing the candidate address will
466 | // not overflow as we subtract Y from u64::MAX in the step above).
467 | let candidate_address = node_key
468 | .end()
469 | .checked_sub(constraint.size())
470 | .ok_or(Error::Underflow)
471 | .and_then(|addr| addr.checked_add(1).ok_or(Error::Overflow))?;
472 | let aligned_address = align_down(candidate_address, constraint.align)?;
473 | if aligned_address < self.key.start() {
474 | return Err(Error::UnalignedAddress);
475 | }
476 | aligned_address
477 | }
478 | AllocPolicy::ExactMatch(_) => unreachable!(),
479 | };
480 | // Create the result range.
481 | let key = RangeInclusive::new(range_start, self.key.end())?;
482 | // Check if the desired memory slot does fit in the candidate node.
483 | if key.len() >= constraint.size() {
484 | return Ok(key);
485 | }
486 | Err(Error::ResourceNotAvailable)
487 | }
488 | }
489 | 
490 | /// Compute height of the optional sub-tree.
491 | fn height(node: &Option<Box<InnerNode>>) -> u64 {
492 | node.as_ref().map_or(0, |n| n.height)
493 | }
494 | 
495 | /// An interval tree implementation specialized for VMM memory slots management.
496 | #[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord)]
497 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
498 | pub struct IntervalTree {
499 | root: Option<Box<InnerNode>>,
500 | }
501 | 
502 | impl IntervalTree {
503 | /// Creates a new IntervalTree object that is going to be used by the
504 | /// AddressAllocator.
505 | pub fn new(key: RangeInclusive) -> Self {
506 | IntervalTree {
507 | root: Some(Box::new(InnerNode::new(key, NodeState::Free))),
508 | }
509 | }
510 | 
511 | fn search_superset(&self, key: &RangeInclusive) -> Option<&InnerNode> {
512 | match self.root {
513 | None => None,
514 | Some(ref node) => node.search_superset(key),
515 | }
516 | }
517 | 
518 | fn insert(&mut self, key: RangeInclusive, node_state: NodeState) -> Result<()> {
519 | match self.root.take() {
520 | None => self.root = Some(Box::new(InnerNode::new(key, node_state))),
521 | Some(node) => self.root = Some(node.insert(key, node_state)?),
522 | };
523 | Ok(())
524 | }
525 | 
526 | fn mark_as_allocated(&mut self, key: &RangeInclusive) -> Result<()> {
527 | match self.root.as_mut() {
528 | None => (),
529 | Some(node) => node.mark_as_allocated(key)?,
530 | };
531 | Ok(())
532 | }
533 | 
534 | fn delete(&mut self, key: &RangeInclusive) -> Result<()> {
535 | if let Some(node) = self.root.take() {
536 | if node.search(key).is_none() {
537 | self.root = Some(node);
538 | return Err(Error::ResourceNotAvailable);
539 | }
540 | self.root = node.delete(key);
541 | }
542 | Ok(())
543 | }
544 | 
545 | /// This method implements the allocation logic for the address allocator.
546 | /// Given a set of constraints it will find the most suitable free node to
547 | /// fit the desired memory slot. This will modify the backing interval tree
548 | /// such that the RangeInclusive representing the desired memory slot will appear as
549 | /// a node with the state `NodeState::Allocated` while the leftovers of
550 | /// the previous node will be present in the tree as free nodes.
551 | pub fn allocate(&mut self, constraint: Constraint) -> Result<RangeInclusive> {
552 | // Return ResourceNotAvailable if we can not get a reference to the
553 | // root node.
554 | let root = self.root.as_ref().ok_or(Error::ResourceNotAvailable)?;
555 | let (node, range) = root.find_candidate(&constraint)?;
556 | let node_key = node.key;
557 | // Create a new RangeInclusive starting at an address that is aligned to the
558 | // value specified by constraint.
559 | let result = RangeInclusive::new(
560 | range.start(),
561 | range
562 | .start()
563 | .checked_add(constraint.size())
564 | .ok_or(Error::Overflow)
565 | .and_then(|addr| addr.checked_sub(1).ok_or(Error::Underflow))?,
566 | )?;
567 | 
568 | // Allocate a resource from the node, no need to split the candidate node.
569 | if node_key.start() == result.start() && node_key.len() == constraint.size {
570 | self.mark_as_allocated(&node_key)?;
571 | return Ok(node_key);
572 | }
573 | 
574 | // If we do not find a node that is a perfect match we delete the old
575 | // node and insert three new nodes. The first node will represent the
576 | // RangeInclusive [old_node.start, aligned_addr - 1] and will be marked as free.
577 | // The second node will have the state NodeState::Allocated and is
578 | // actually the requested memory slot. The last node will have the
579 | // state NodeState::Free and is what is left from the old node.
580 | self.delete(&node_key)?;
581 | if result.start > node_key.start() {
582 | self.insert(
583 | RangeInclusive::new(
584 | node_key.start(),
585 | result.start().checked_sub(1).ok_or(Error::Overflow)?,
586 | )?,
587 | NodeState::Free,
588 | )?;
589 | }
590 | 
591 | self.insert(result, NodeState::Allocated)?;
592 | if result.end() < node_key.end() {
593 | self.insert(
594 | RangeInclusive::new(
595 | result.end().checked_add(1).ok_or(Error::Overflow)?,
596 | node_key.end(),
597 | )?,
598 | NodeState::Free,
599 | )?;
600 | }
601 | Ok(result)
602 | }
603 | 
604 | /// Free an allocated range.
605 | pub fn free(&mut self, key: &RangeInclusive) -> Result<()> {
606 | self.delete(key)?;
607 | let mut range = *key;
608 | 
609 | // If the deleted RangeInclusive did not start at 0 we try to find ranges
610 | // that are placed to its left so we can merge them together.
611 | if range.start() > 0 {
612 | if let Some(node) = self.search_superset(&RangeInclusive::new(
613 | range.start().checked_sub(2).ok_or(Error::Underflow)?,
614 | range.start().checked_sub(1).ok_or(Error::Underflow)?,
615 | )?) {
616 | if node.node_state == NodeState::Free {
617 | range = RangeInclusive::new(node.key.start(), range.end())?;
618 | }
619 | }
620 | }
621 | // If the deleted range did not end at u64::MAX we try to find ranges
622 | // that are placed to its right so we can merge them together.
623 | if range.end() < u64::MAX {
624 | if let Some(node) = self.search_superset(&RangeInclusive::new(
625 | range.end().checked_add(1).ok_or(Error::Overflow)?,
626 | range.end().checked_add(2).ok_or(Error::Overflow)?,
627 | )?) {
628 | if node.node_state == NodeState::Free {
629 | range = RangeInclusive::new(range.start(), node.key.end())?;
630 | }
631 | }
632 | }
633 | 
634 | // If we merged the freed node to the one on its left we should delete
635 | // the left node as it now belongs to a bigger RangeInclusive that will be
636 | // inserted in the tree.
637 | if range.start() < key.start() {
638 | self.delete(&RangeInclusive::new(
639 | range.start(),
640 | key.start().checked_sub(1).ok_or(Error::Underflow)?,
641 | )?)?;
642 | }
643 | 
644 | // If we merged the freed node to the one on its right we should delete
645 | // the right node as it now belongs to a bigger RangeInclusive that will be
646 | // inserted in the tree.
647 | if range.end() > key.end() {
648 | self.delete(&RangeInclusive::new(
649 | key.end().checked_add(1).ok_or(Error::Overflow)?,
650 | range.end(),
651 | )?)?;
652 | }
653 | // Insert the newly created range into the tree.
654 | self.insert(range, NodeState::Free)?; 655 | Ok(()) 656 | } 657 | } 658 | 659 | #[cfg(test)] 660 | mod tests { 661 | use super::*; 662 | 663 | #[test] 664 | fn test_range_align_up() { 665 | assert_eq!(align_up(2, 0).unwrap_err(), Error::InvalidAlignment); 666 | assert_eq!(align_up(2, 1).unwrap(), 2); 667 | assert_eq!(align_up(2, 2).unwrap(), 2); 668 | assert_eq!(align_up(2, 4).unwrap(), 4); 669 | assert_eq!(align_up(2, 3).unwrap_err(), Error::InvalidAlignment); 670 | 671 | assert_eq!( 672 | align_up(0xFFFF_FFFF_FFFF_FFFDu64, 2).unwrap(), 673 | 0xFFFF_FFFF_FFFF_FFFEu64 674 | ); 675 | assert_eq!( 676 | align_up(0xFFFF_FFFF_FFFF_FFFDu64, 4).unwrap_err(), 677 | Error::Overflow 678 | ); 679 | } 680 | 681 | #[test] 682 | fn test_is_free() { 683 | let mut ns = NodeState::Allocated; 684 | assert!(!ns.is_free()); 685 | ns = NodeState::Free; 686 | assert!(ns.is_free()); 687 | } 688 | 689 | #[test] 690 | fn test_search() { 691 | let mut tree = Box::new(InnerNode::new( 692 | RangeInclusive::new(0x100, 0x110).unwrap(), 693 | NodeState::Allocated, 694 | )); 695 | let left_child = InnerNode::new(RangeInclusive::new(0x90, 0x99).unwrap(), NodeState::Free); 696 | 697 | tree = tree.insert(left_child.key, left_child.node_state).unwrap(); 698 | tree = tree 699 | .insert(RangeInclusive::new(0x200, 0x2FF).unwrap(), NodeState::Free) 700 | .unwrap(); 701 | 702 | assert_eq!( 703 | tree.search(&RangeInclusive::new(0x90, 0x99).unwrap()), 704 | Some(&left_child) 705 | ); 706 | assert_eq!( 707 | tree.search(&RangeInclusive::new(0x200, 0x250).unwrap()), 708 | None 709 | ); 710 | assert_eq!( 711 | tree.search(&RangeInclusive::new(0x111, 0x1fe).unwrap()), 712 | None 713 | ); 714 | } 715 | 716 | #[test] 717 | fn test_search_superset() { 718 | let mut tree = Box::new(InnerNode::new( 719 | RangeInclusive::new(0x100, 0x110).unwrap(), 720 | NodeState::Allocated, 721 | )); 722 | let right_child = 723 | InnerNode::new(RangeInclusive::new(0x200, 0x2FF).unwrap(), NodeState::Free); 724 | let left_child = InnerNode::new(RangeInclusive::new(0x90, 0x9F).unwrap(), NodeState::Free); 725 | 726 | tree = tree.insert(left_child.key, left_child.node_state).unwrap(); 727 | tree = tree 728 | .insert(right_child.key, right_child.node_state) 729 | .unwrap(); 730 | 731 | assert_eq!( 732 | tree.search_superset(&RangeInclusive::new(0x100, 0x101).unwrap()), 733 | Some(&(*tree)) 734 | ); 735 | assert_eq!( 736 | tree.search_superset(&RangeInclusive::new(0x90, 0x95).unwrap()), 737 | Some(&left_child) 738 | ); 739 | assert_eq!( 740 | tree.search_superset(&RangeInclusive::new(0x200, 0x201).unwrap()), 741 | Some(&right_child) 742 | ); 743 | assert_eq!( 744 | tree.search_superset(&RangeInclusive::new(0x200, 0x2FF).unwrap()), 745 | Some(&right_child) 746 | ); 747 | assert_eq!( 748 | tree.search_superset(&RangeInclusive::new(0x209, 0x210).unwrap()), 749 | Some(&right_child) 750 | ); 751 | assert_eq!( 752 | tree.search_superset(&RangeInclusive::new(0x2EF, 0x2FF).unwrap()), 753 | Some(&right_child) 754 | ); 755 | assert_eq!( 756 | tree.search_superset(&RangeInclusive::new(0x2FF, 0x300).unwrap()), 757 | None 758 | ); 759 | assert_eq!( 760 | tree.search_superset(&RangeInclusive::new(0x300, 0x301).unwrap()), 761 | None 762 | ); 763 | assert_eq!( 764 | tree.search_superset(&RangeInclusive::new(0x1FF, 0x300).unwrap()), 765 | None 766 | ); 767 | } 768 | 769 | fn is_balanced(tree: Option>) -> bool { 770 | if tree.is_none() { 771 | return true; 772 | } 773 | let left_height = height(&tree.as_ref().unwrap().left.clone()); 774 | let right_height = 
height(&tree.as_ref().unwrap().right.clone()); 775 | if (left_height as i64 - right_height as i64).abs() <= 1 776 | && is_balanced(tree.as_ref().unwrap().left.clone()) 777 | && is_balanced(tree.as_ref().unwrap().right.clone()) 778 | { 779 | return true; 780 | } 781 | false 782 | } 783 | 784 | #[test] 785 | fn test_tree_insert_balanced() { 786 | let mut tree = Box::new(InnerNode::new( 787 | RangeInclusive::new(0x300, 0x310).unwrap(), 788 | NodeState::Allocated, 789 | )); 790 | tree = tree 791 | .insert(RangeInclusive::new(0x100, 0x110).unwrap(), NodeState::Free) 792 | .unwrap(); 793 | tree = tree 794 | .insert(RangeInclusive::new(0x350, 0x360).unwrap(), NodeState::Free) 795 | .unwrap(); 796 | tree = tree 797 | .insert(RangeInclusive::new(0x340, 0x34F).unwrap(), NodeState::Free) 798 | .unwrap(); 799 | tree = tree 800 | .insert(RangeInclusive::new(0x311, 0x33F).unwrap(), NodeState::Free) 801 | .unwrap(); 802 | tree = tree.delete_root().unwrap(); 803 | assert!(is_balanced(Some(tree))); 804 | tree = Box::new(InnerNode::new( 805 | RangeInclusive::new(0x300, 0x310).unwrap(), 806 | NodeState::Allocated, 807 | )); 808 | tree = tree 809 | .insert(RangeInclusive::new(0x100, 0x110).unwrap(), NodeState::Free) 810 | .unwrap(); 811 | tree = tree 812 | .insert(RangeInclusive::new(0x90, 0x9F).unwrap(), NodeState::Free) 813 | .unwrap(); 814 | assert!(is_balanced(Some(tree.clone()))); 815 | tree = tree 816 | .insert(RangeInclusive::new(0x311, 0x313).unwrap(), NodeState::Free) 817 | .unwrap(); 818 | assert!(is_balanced(Some(tree.clone()))); 819 | tree = tree 820 | .insert(RangeInclusive::new(0x314, 0x316).unwrap(), NodeState::Free) 821 | .unwrap(); 822 | assert!(is_balanced(Some(tree.clone()))); 823 | tree = tree 824 | .insert(RangeInclusive::new(0x317, 0x319).unwrap(), NodeState::Free) 825 | .unwrap(); 826 | assert!(is_balanced(Some(tree.clone()))); 827 | tree = tree 828 | .insert(RangeInclusive::new(0x321, 0x323).unwrap(), NodeState::Free) 829 | .unwrap(); 830 | assert!(is_balanced(Some(tree.clone()))); 831 | 832 | tree = tree 833 | .delete(&RangeInclusive::new(0x321, 0x323).unwrap()) 834 | .unwrap(); 835 | tree = tree 836 | .delete(&RangeInclusive::new(0x314, 0x316).unwrap()) 837 | .unwrap(); 838 | tree = tree 839 | .delete(&RangeInclusive::new(0x317, 0x319).unwrap()) 840 | .unwrap(); 841 | assert!(is_balanced(Some(tree.clone()))); 842 | tree = tree 843 | .insert(RangeInclusive::new(0x80, 0x8F).unwrap(), NodeState::Free) 844 | .unwrap(); 845 | tree = tree 846 | .insert(RangeInclusive::new(0x70, 0x7F).unwrap(), NodeState::Free) 847 | .unwrap(); 848 | let _ = tree 849 | .insert(RangeInclusive::new(0x60, 0x6F).unwrap(), NodeState::Free) 850 | .unwrap(); 851 | } 852 | 853 | #[test] 854 | fn test_tree_insert_intersect_negative() { 855 | let mut tree = Box::new(InnerNode::new( 856 | RangeInclusive::new(0x100, 0x200).unwrap(), 857 | NodeState::Allocated, 858 | )); 859 | tree = tree 860 | .insert(RangeInclusive::new(0x201, 0x2FF).unwrap(), NodeState::Free) 861 | .unwrap(); 862 | assert!(is_balanced(Some(tree.clone()))); 863 | let res = tree 864 | .clone() 865 | .insert(RangeInclusive::new(0x201, 0x2FE).unwrap(), NodeState::Free); 866 | assert_eq!( 867 | res.unwrap_err(), 868 | Error::Overlap( 869 | RangeInclusive::new(0x201, 0x2FE).unwrap(), 870 | RangeInclusive::new(0x201, 0x2FF).unwrap() 871 | ) 872 | ); 873 | tree = tree 874 | .insert(RangeInclusive::new(0x90, 0x9F).unwrap(), NodeState::Free) 875 | .unwrap(); 876 | assert!(is_balanced(Some(tree.clone()))); 877 | let res = tree.insert(RangeInclusive::new(0x90, 
0x9E).unwrap(), NodeState::Free); 878 | assert_eq!( 879 | res.unwrap_err(), 880 | Error::Overlap( 881 | RangeInclusive::new(0x90, 0x9E).unwrap(), 882 | RangeInclusive::new(0x90, 0x9F).unwrap() 883 | ) 884 | ); 885 | } 886 | 887 | #[test] 888 | fn test_tree_insert_duplicate_negative() { 889 | let range = RangeInclusive::new(0x100, 0x200).unwrap(); 890 | let tree = Box::new(InnerNode::new(range, NodeState::Allocated)); 891 | let res = tree.insert(range, NodeState::Free); 892 | assert_eq!(res.unwrap_err(), Error::Overlap(range, range)); 893 | } 894 | 895 | #[test] 896 | fn test_tree_stack_overflow_negative() { 897 | let mut inner_node = InnerNode::new( 898 | RangeInclusive::new(0x100, 0x200).unwrap(), 899 | NodeState::Allocated, 900 | ); 901 | inner_node.height = 50; 902 | let tree = Box::new(inner_node); 903 | let res = tree.insert(RangeInclusive::new(0x100, 0x200).unwrap(), NodeState::Free); 904 | assert_eq!(res.unwrap_err(), Error::Overflow); 905 | } 906 | 907 | #[test] 908 | fn test_tree_mark_as_allocated_invalid_transition() { 909 | let range = RangeInclusive::new(0x100, 0x110).unwrap(); 910 | let mut tree = Box::new(InnerNode::new(range, NodeState::Allocated)); 911 | assert_eq!( 912 | tree.mark_as_allocated(&range).unwrap_err(), 913 | Error::InvalidStateTransition(range, NodeState::Allocated) 914 | ); 915 | } 916 | 917 | #[test] 918 | fn test_tree_mark_as_allocated_resource_not_available() { 919 | let range = RangeInclusive::new(0x100, 0x110).unwrap(); 920 | let mut tree = Box::new(InnerNode::new(range, NodeState::Allocated)); 921 | assert_eq!( 922 | tree.mark_as_allocated(&RangeInclusive::new(0x111, 0x112).unwrap()) 923 | .unwrap_err(), 924 | Error::ResourceNotAvailable 925 | ); 926 | assert_eq!( 927 | tree.mark_as_allocated(&RangeInclusive::new(0x90, 0x92).unwrap()) 928 | .unwrap_err(), 929 | Error::ResourceNotAvailable 930 | ); 931 | } 932 | 933 | #[test] 934 | fn test_tree_mark_as_allocated() { 935 | let range = RangeInclusive::new(0x100, 0x110).unwrap(); 936 | let range2 = RangeInclusive::new(0x200, 0x2FF).unwrap(); 937 | let mut tree = Box::new(InnerNode::new(range, NodeState::Allocated)); 938 | tree = tree.insert(range2, NodeState::Free).unwrap(); 939 | assert!(tree.mark_as_allocated(&range2).is_ok()); 940 | assert_eq!( 941 | *tree.search(&range2).unwrap(), 942 | InnerNode::new(range2, NodeState::Allocated) 943 | ); 944 | } 945 | 946 | #[test] 947 | fn test_tree_delete() { 948 | let left_child = 949 | InnerNode::new(RangeInclusive::new(0x100, 0x110).unwrap(), NodeState::Free); 950 | let right_child = 951 | InnerNode::new(RangeInclusive::new(0x300, 0x3FF).unwrap(), NodeState::Free); 952 | let mut tree = Box::new(InnerNode::new( 953 | RangeInclusive::new(0x200, 0x290).unwrap(), 954 | NodeState::Free, 955 | )); 956 | tree = tree 957 | .insert(right_child.key, right_child.node_state) 958 | .unwrap(); 959 | tree = tree 960 | .delete(&RangeInclusive::new(0x200, 0x290).unwrap()) 961 | .unwrap(); 962 | assert!(is_balanced(Some(tree.clone()))); 963 | tree = tree 964 | .insert(RangeInclusive::new(0x200, 0x290).unwrap(), NodeState::Free) 965 | .unwrap(); 966 | tree = tree.insert(left_child.key, left_child.node_state).unwrap(); 967 | assert!(is_balanced(Some(tree.clone()))); 968 | 969 | assert_eq!( 970 | *tree 971 | .search(&RangeInclusive::new(0x100, 0x110).unwrap()) 972 | .unwrap(), 973 | left_child 974 | ); 975 | assert_eq!(*tree.search(&right_child.key).unwrap(), right_child); 976 | 977 | tree = tree 978 | .delete(&RangeInclusive::new(0x200, 0x290).unwrap()) 979 | .unwrap(); 980 | tree = 
tree 981 | .delete(&RangeInclusive::new(0x300, 0x3FF).unwrap()) 982 | .unwrap(); 983 | assert!(is_balanced(Some(tree.clone()))); 984 | assert_eq!( 985 | *tree 986 | .search(&RangeInclusive::new(0x100, 0x110).unwrap()) 987 | .unwrap(), 988 | left_child 989 | ); 990 | } 991 | 992 | #[test] 993 | fn test_integer_wrapping() { 994 | let mut tree = IntervalTree::new(RangeInclusive::new(0x1, 0xFFFFFFFFFFFFFFFF).unwrap()); 995 | 996 | // We have to create a valid constraint (that has an alignment that is a power of 2). 997 | // In case the size + the start address would overflow, we want to make sure the appropriate error is returned. 998 | let constraint = Constraint::new( 999 | 0x8000000000000000, 1000 | 0x8000000000000000, 1001 | AllocPolicy::ExactMatch(0x8000000000000000), 1002 | ) 1003 | .unwrap(); 1004 | let res = tree.allocate(constraint); 1005 | assert_eq!(res.unwrap_err(), Error::Overflow); 1006 | } 1007 | } 1008 | --------------------------------------------------------------------------------
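The `align_down`/`align_up` helpers defined at the top of this file are what turn a node's raw boundaries into candidate addresses for the `FirstMatch` and `LastMatch` policies. A small sketch of their behavior, written as an extra case in the style of the module's unit tests (the test name and constants are illustrative, not part of the crate):

```rust
#[test]
fn test_align_helpers_sketch() {
    // align_down clears the low bits: first multiple of 0x1000 at or below.
    assert_eq!(align_down(0x1234, 0x1000).unwrap(), 0x1000);
    // align_up rounds to the next multiple at or above.
    assert_eq!(align_up(0x1234, 0x1000).unwrap(), 0x2000);
    // Already aligned addresses are returned unchanged.
    assert_eq!(align_up(0x2000, 0x1000).unwrap(), 0x2000);
    // Non power-of-two alignments are rejected.
    assert_eq!(align_down(0x1234, 0x300).unwrap_err(), Error::InvalidAlignment);
}
```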
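The allocate/free cycle is easiest to see end to end: `allocate` carves the requested slot out of a free node and reinserts the leftovers as free nodes, while `free` merges the released slot with any free neighbours before reinserting it. Below is a sketch of that flow in the same unit-test style; since `IntervalTree` is only exported `pub(crate)` through the allocation engine module, such a test would have to live inside this `tests` module, and the addresses and sizes are arbitrary.

```rust
#[test]
fn test_allocate_split_and_free_merge_sketch() {
    // Manage a single free range of 0x1000 addresses.
    let mut tree = IntervalTree::new(RangeInclusive::new(0x1000, 0x1FFF).unwrap());

    // FirstMatch carves the slot out of the lowest suitable free node; the
    // leftover [0x1100, 0x1FFF] stays in the tree as a free node.
    let first = tree
        .allocate(Constraint::new(0x100, 0x100, AllocPolicy::FirstMatch).unwrap())
        .unwrap();
    assert_eq!(first, RangeInclusive::new(0x1000, 0x10FF).unwrap());

    // LastMatch places the slot at the aligned top of the remaining free node.
    let last = tree
        .allocate(Constraint::new(0x100, 0x100, AllocPolicy::LastMatch).unwrap())
        .unwrap();
    assert_eq!(last, RangeInclusive::new(0x1F00, 0x1FFF).unwrap());

    // Freeing the first slot merges it with the free node sitting to its
    // right, so a follow-up allocation can span the whole merged range.
    tree.free(&first).unwrap();
    let merged = tree
        .allocate(Constraint::new(0xF00, 0x100, AllocPolicy::FirstMatch).unwrap())
        .unwrap();
    assert_eq!(merged, RangeInclusive::new(0x1000, 0x1EFF).unwrap());
}
```

The final assertion only holds because `free` merged `[0x1000, 0x10FF]` with the free leftover `[0x1100, 0x1EFF]` into a single free node before the last request was served.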