├── .gitignore
├── .gitmodules
├── LICENSE
├── README.md
├── payloadfuzz
│   ├── Cargo.toml
│   ├── docs
│   │   ├── valid_graph.dot
│   │   └── valid_graph.png
│   └── src
│       ├── generator.rs
│       ├── main.rs
│       └── vmm.rs
└── tools
    └── nopsled_analyzer
        ├── Cargo.toml
        └── src
            └── main.rs

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Generated by Cargo
# will have compiled files and executables
target/

# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock

# These are backup files generated by rustfmt
**/*.rs.bk

--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
[submodule "deps/libwhp"]
	path = deps/libwhp
	url = https://github.com/epakskape/libwhp
[submodule "deps/rust-x86asm"]
	path = deps/rust-x86asm
	url = https://github.com/epakskape/rust-x86asm

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2020 Matt Miller

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# whpexp
A collection of (not necessarily useful) [Windows Hypervisor Platform](https://docs.microsoft.com/en-us/virtualization/api/) experiments in Rust.

# Experiments

## payloadfuzz

The [payloadfuzz](https://github.com/epakskape/whpexp/tree/master/payloadfuzz) project experiments with generating payloads in different ways and then executing them within a bare-bones long mode virtual machine (the VMM sets up 64-bit long mode, as seen in `vmm.rs`).

### Supported generators

The reverse nop generator attempts to generate an x64 nop sled in a manner similar to [Opty2](https://github.com/rapid7/metasploit-framework/blob/master/modules/nops/x86/opty2.rb) in the Metasploit framework. To ensure that the nop sled can be executed from each offset, the nop sled is generated in reverse, starting with the last byte and ending with the first byte. Rather than attempting to do this in a smart way, the reverse nop generator simply attempts to brute force the set of valid bytes that can precede other bytes. This is woefully inefficient, but it's a useful example.
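As a rough illustration of the brute-force idea (this sketch is not code from this repository; `execute_in_vm` is a stand-in for the real VM-backed validity check):

```rust
// Stand-in for executing a candidate sled in the VM and verifying that every
// instruction ran; the real oracle lives in payloadfuzz itself.
fn execute_in_vm(candidate: &[u8]) -> bool {
    candidate.first() != Some(&0xCC)
}

// Brute force every byte that could be prepended to an already-valid sled,
// keeping only the candidates that still execute from the first byte through
// the last.
fn extend_sled(sled: &[u8]) -> Vec<Vec<u8>> {
    (0u8..=255)
        .map(|b| {
            let mut candidate = Vec::with_capacity(sled.len() + 1);
            candidate.push(b);
            candidate.extend_from_slice(sled);
            candidate
        })
        .filter(|candidate| execute_in_vm(candidate))
        .collect()
}

fn main() {
    // Start from a known-valid single byte (0x90, the canonical NOP) and grow
    // the sled one byte to the left.
    let sleds = extend_sled(&[0x90]);
    println!("{} candidate two-byte sleds survived", sleds.len());
}
```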
This graph provides an example of the valid nop sled byte sequences that were discovered after running this tool for about 12 hours. The graph can be interpreted as "byte X can precede byte Y" (X -> Y). The graph can be generated by using the [nopsled_analyzer](https://github.com/epakskape/whpexp/tree/master/tools/nopsled_analyzer) tool.

![Valid byte sequences](https://raw.githubusercontent.com/epakskape/whpexp/master/payloadfuzz/docs/valid_graph.png)

### Setup

To run payloadfuzz, you need Rust nightly installed and a local redis server installed and started. The redis server is used to store the collection of valid and invalid payloads for a given payloadfuzz session.

```
rustup default nightly
wsl sudo apt-get install redis-server
wsl redis-server
```

### Building

Use the following steps to clone and build payloadfuzz.

```
git clone https://github.com/epakskape/whpexp
cd whpexp
git submodule init
git submodule update
cd payloadfuzz
cargo build
```

### Running

To run payloadfuzz, simply specify the generator you wish to use (via `-g <generator>`) and the number of VMs to run in parallel (via `-v <vm_count>`).
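For example, to run four workers in parallel with the reverse nop generator (the values shown are illustrative):

```
payloadfuzz -g reverse_nop -v 4
```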
# Credits

- The authors of the [libwhp](https://crates.io/crates/libwhp) crate, which provided the basis for building this and from which I borrowed code (e.g. from their demo example).

--------------------------------------------------------------------------------
/payloadfuzz/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "payloadfuzz"
version = "0.1.0"
authors = ["Matt Miller"]
edition = "2018"

[dependencies]
libwhp = { path = "..\\deps\\libwhp" }
x86asm = { path = "..\\deps\\rust-x86asm" }
rand = "0.7.3"
clap = "2.33.1"
futures = "0.3.5"
hex = "0.4.2"
tokio = { version = "0.2.21", features = ["full"] }
url = "2.1.1"
redis = "0.16.0"
async-std = "1.6.1"

--------------------------------------------------------------------------------
/payloadfuzz/docs/valid_graph.dot:
--------------------------------------------------------------------------------
digraph {
b_0x36 -> b_0x4F;
b_0x40 -> b_0x92;
b_0x3 -> b_0xFA;
b_0xDC -> b_0xFA;
b_0x38 -> b_0xFC;
b_0xB7 -> b_0x1D;
b_0xA8 -> b_0x95;
b_0x90 -> b_0x8D;
b_0x26 -> b_0x7C;
b_0x83 -> b_0xF3;
b_0xF6 -> b_0xD8;
b_0xD2 -> b_0xF8;
b_0x12 -> b_0xFB;
b_0x1B -> b_0xF2;
b_0x34 -> b_0x48;
b_0x93 -> b_0x74;
b_0x49 -> b_0x4A;
b_0x88 -> b_0xF8;
b_0xD3 -> b_0xD9;
b_0x46 -> b_0xBB;
b_0x15 -> b_0x42;
b_0x14 -> b_0xFA;
b_0xF3 -> b_0x1D;
b_0x2E -> b_0xF2;
b_0x4D -> b_0x7C;
b_0xB3 -> b_0xB4;
b_0xB1 -> b_0x4D;
b_0x11 -> b_0xF8;
b_0x74 -> b_0x44;
b_0xD0 -> b_0xF3;
b_0x65 -> b_0xF8;
b_0x9E -> b_0x40;
b_0x44 -> b_0x2E;
b_0x19 -> b_0xF3;
b_0xC0 -> b_0xFD;
b_0x43 -> b_0x64;
b_0x8B -> b_0xF3;
b_0xF2 -> b_0x95;
b_0x92 -> b_0x48;
b_0x30 -> b_0xFD;
b_0x4B -> b_0x4C;
b_0xF8 -> b_0x1C;
b_0x4C -> b_0x43;
b_0x98 -> b_0x66;
b_0x96 -> b_0x91;
b_0x9F -> b_0x24;
b_0x22 -> b_0xD0;
b_0x66 -> b_0x2B;
b_0x45 -> b_0x67;
b_0x86 -> b_0xF9;
b_0x33 -> b_0xF5;
b_0x67 -> b_0x48;
b_0xF9 -> b_0xF5;
b_0xBB -> b_0x95;
b_0x42 -> b_0xB1;
b_0x8D -> b_0x1D;
b_0x39 -> b_0xFD;
b_0x95 -> b_0xF2;
b_0x97 -> b_0xB0;
b_0xB9 -> b_0x4B;
b_0xFB -> b_0x15;
b_0x25 -> b_0x45;
b_0x18 -> b_0xF5;
b_0x3B -> b_0xF9;
b_0xDE -> b_0xF5;
b_0x69 -> b_0xF2;
b_0xB2 -> b_0x64;
b_0x9 -> b_0xF3;
b_0x8A -> b_0xF3;
b_0x80 -> b_0xFD;
b_0x81 -> b_0xFC;
b_0x2D -> b_0xF8;
b_0x4 -> b_0x3C;
b_0x5 -> b_0x24;
b_0x3A -> b_0xF5;
b_0xB4 -> b_0x93;
b_0xFC -> b_0xB6;
b_0xD8 -> b_0xD2;
b_0x85 -> b_0xF2;
b_0xFD -> b_0x3D;
b_0x13 -> b_0xF5;
b_0xC1 -> b_0xFD;
b_0x0 -> b_0xF8;
b_0xBE -> b_0x4E;
b_0x1 -> b_0xFA;
b_0xBD -> b_0x33;
b_0x31 -> b_0xF2;
b_0x89 -> b_0xF8;
b_0xBA -> b_0xA;
b_0xB0 -> b_0x4B;
b_0x3E -> b_0x1C;
b_0xF5 -> b_0x4E;
b_0xA9 -> b_0xB9;
b_0x84 -> b_0xFD;
b_0xA -> b_0xF5;
b_0x9B -> b_0xFC;
b_0x35 -> b_0x64;
b_0x6B -> b_0xF5;
b_0x1A -> b_0xF3;
b_0xDF -> b_0xF2;
b_0x1C -> b_0x45;
b_0x2 -> b_0xFA;
b_0xFA -> b_0x48;
b_0x3C -> b_0x91;
b_0x63 -> b_0xF3;
b_0x91 -> b_0x36;
b_0xBF -> b_0xB5;
b_0x4A -> b_0x14;
b_0x4F -> b_0x9E;
b_0x8 -> b_0xF2;
b_0x2A -> b_0xF3;
b_0xF -> b_0x1A;
b_0x10 -> b_0xF8;
b_0x48 -> b_0x24;
b_0x87 -> b_0xFD;
b_0xB5 -> b_0x2B;
b_0x23 -> b_0xFA;
b_0x1D -> b_0x26;
b_0x21 -> b_0xF3;
b_0xC6 -> b_0xF8;
b_0x32 -> b_0xF2;
b_0x47 -> b_0x3E;
b_0xD -> b_0x4A;
b_0x29 -> b_0xFD;
b_0x3D -> b_0xB1;
b_0xD9 -> b_0xDC;
b_0xB8 -> b_0x1D;
b_0xD1 -> b_0xF9;
b_0x24 -> b_0xB5;
b_0xE3 -> b_0x7C;
b_0xC -> b_0x46;
b_0xB6 -> b_0x44;
b_0x2C -> b_0x96;
b_0x64 -> b_0x2B;
b_0x20 -> b_0xF2;
b_0xB -> b_0xF9;
b_0x99 -> b_0x41;
b_0x4E -> b_0x96;
b_0x41 -> b_0x40;
b_0x28 -> b_0xFA;
b_0x2B -> b_0xF3;
}

--------------------------------------------------------------------------------
/payloadfuzz/docs/valid_graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/epakskape/whpexp/575be8b21a661d56ae2464ba40b078e58fa4ec9a/payloadfuzz/docs/valid_graph.png

--------------------------------------------------------------------------------
/payloadfuzz/src/generator.rs:
--------------------------------------------------------------------------------
use rand::prelude::*;

pub trait PayloadGenerator {
    fn generate(&mut self, previous_payload: &[u8]) -> Vec<u8>;
    fn is_valid(
        &self,
        previous_payload: &[u8],
        new_payload: &[u8],
        initial_rip: usize,
        final_rip: usize,
        initial_rsp: usize,
        final_rsp: usize,
    ) -> bool;
}

pub struct ReverseNopGenerator {
    rng: ThreadRng,
}

impl ReverseNopGenerator {
    pub fn new() -> ReverseNopGenerator {
        ReverseNopGenerator {
            rng: rand::thread_rng(),
        }
    }
}
//
// The ReverseNopGenerator generates nop sleds backwards, similar to nop sled
// generators like Opty2 in Metasploit.
//
// The generate method prepends a random byte to the provided payload (if one
// was provided) and returns this as the new payload.
//
impl PayloadGenerator for ReverseNopGenerator {
    fn generate(&mut self, previous_payload: &[u8]) -> Vec<u8> {
        let new_payload_len = previous_payload.len() + 1;
        let mut new_payload = vec![0; new_payload_len];

        if previous_payload.len() > 0 {
            let new_slice = &mut new_payload[1..new_payload_len];

            new_slice.copy_from_slice(&previous_payload);
        }

        new_payload[0] = self.rng.gen();

        new_payload
    }

    fn is_valid(
        &self,
        _previous_payload: &[u8],
        new_payload: &[u8],
        initial_rip: usize,
        final_rip: usize,
        initial_rsp: usize,
        final_rsp: usize,
    ) -> bool {
        // Do not allow stack pointer manipulation.
        if initial_rsp != final_rsp {
            return false;
        }

        // Require the payload to have executed all of the instructions in it.
        // For example, a three-byte payload placed at 0x20000FFD is only valid
        // if the final RIP is 0x20001000 (initial RIP + payload length).
        if final_rip > initial_rip
            && (final_rip - initial_rip) as usize == new_payload.len() {
            return true;
        } else {
            return false;
        }
    }
}

--------------------------------------------------------------------------------
/payloadfuzz/src/main.rs:
--------------------------------------------------------------------------------
/*
 * PayloadFuzz leverages the Windows Hypervisor Platform to test the execution
 * of arbitrary payloads using a supported payload generator.
 */
extern crate clap;
extern crate futures;
extern crate libwhp;
extern crate redis;
extern crate tokio;
extern crate url;
extern crate x86asm;

pub mod generator;
pub mod vmm;

use crate::generator::*;
use crate::vmm::*;

use std::io::Write;
use std::panic;
use std::process::Command;
use std::sync::{Arc, Condvar, Mutex};
use std::time::Duration;

use async_std::future;

use clap::{App, Arg};

use libwhp::*;

use rand::prelude::*;

use redis::AsyncCommands;
use redis::RedisFuture;
use redis::RedisResult;

use std::io::Cursor;
use x86asm::{InstructionReader, Mnemonic, Mode};

const CODE_VIRTUAL_BASE_ADDRESS: u64 = 0x20000000;
const CODE_REGION_SIZE: u64 = 4096;

#[tokio::main]
async fn main() {
    let matches = App::new("cpufuzz")
        .arg(
            Arg::with_name("vm_count")
                .short("v")
                .long("vm_count")
                .takes_value(true)
                .help("The number of VMs to run concurrently"),
        )
        .arg(
            Arg::with_name("instance")
                .short("i")
                .long("instance")
                .takes_value(true)
                .help("The instance identifier"),
        )
        .arg(
            Arg::with_name("generator")
                .short("g")
                .long("generator")
                .takes_value(true)
                .help("The generator to use for the payload (options: default, reverse_nop)"),
        )
        .get_matches();

    let vm_count: u32 = matches.value_of("vm_count").unwrap_or("1").parse().unwrap();
    let instance_id: u32 = matches.value_of("instance").unwrap_or("0").parse().unwrap();
    let generator_name: String = matches
        .value_of("generator")
        .unwrap_or("default")
        .to_string();

    if instance_id == 0 {
        let mut children = Vec::new();

        for child_instance_id in 0..vm_count {
            let instance_str = (child_instance_id + 1).to_string();

            let child = Command::new(std::env::current_exe().unwrap())
                .args(&["-i", &instance_str])
                .args(&["-g", &generator_name])
                .spawn()
                .expect("failed to execute child");

            children.push(child);
        }

        for mut child in children {
            child.wait().unwrap();
        }
    } else {
        let mut generator = match generator_name.as_str() {
            "default" => ReverseNopGenerator::new(),
            "reverse_nop" => ReverseNopGenerator::new(),
            _ => panic!("unknown generator"),
        };

        worker(instance_id, &mut generator).await;
    }
}

async fn worker<T: PayloadGenerator>(vm_num: u32, generator: &mut T) {
    println!("Worker {} spawned", vm_num);

    let redis_client =
        redis::Client::open("redis://127.0.0.1").expect("failed to initialize redis client");
    let mut redis_con = redis_client
        .get_async_connection()
        .await
        .expect("failed to connect to redis server at 127.0.0.1");

    let mut rng = rand::thread_rng();

    let mut num: usize = 0;
    let mut num_valid: usize = 0;

    loop {
        // Ideally we would only set up the VM once to avoid initialization and
        // teardown costs, but to ensure consistent execution we need to ensure
        // that the entire VM state has been reinitialized.
        let mut vm = VirtualMachine::new(
            vm_num as usize,
            VirtualMachineConfig {
                processor_count: 1,
                memory_layout: MemoryLayout {
                    physical_layout: vec![
                        PhysicalMemoryRange {
                            base_address: 0x10000000,
                            region_size: 0x400000,
                            region_type: MemoryRegionType::PageTables,
                            ept_protection: MemoryProtection::ReadWrite
                        },
                        PhysicalMemoryRange {
                            base_address: 0x20000000,
                            region_size: CODE_REGION_SIZE as usize,
                            region_type: MemoryRegionType::Code,
                            ept_protection: MemoryProtection::ReadWriteExecute
                        },
                        PhysicalMemoryRange {
                            base_address: 0x30000000,
                            region_size: 0x1000,
                            region_type: MemoryRegionType::Stack,
                            ept_protection: MemoryProtection::ReadWrite
                        }
                    ],
                    virtual_layout: vec![
                        // Code virtual mapping
                        VirtualMemoryDescriptor {
                            base_address: CODE_VIRTUAL_BASE_ADDRESS,
                            region_type: MemoryRegionType::Code,
                            memory_descriptors: vec![
                                MemoryDescriptor {
                                    physical_address: 0x20000000,
                                    virtual_protection: MemoryProtection::ReadWriteExecute
                                }
                            ]
                        },

                        // Stack virtual mapping
                        VirtualMemoryDescriptor {
                            base_address: 0x30000000,
                            region_type: MemoryRegionType::Stack,
                            memory_descriptors: vec![
                                MemoryDescriptor {
                                    physical_address: 0x30000000,
                                    virtual_protection: MemoryProtection::ReadWrite
                                }
                            ]
                        },
                    ]
                },
            },
        );
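        // Summary of the guest memory map produced by the config above (this
        // comment is derived from the descriptors; it adds no configuration):
        //   GPA 0x10000000..0x10400000  page tables (4 MB, RW)
        //   GPA 0x20000000..0x20001000  code (4 KB, RWX), identity mapped at GVA 0x20000000
        //   GPA 0x30000000..0x30001000  stack (4 KB, RW), identity mapped at GVA 0x30000000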
        vm.setup();

        num += 1;

        if (num % 100) == 0 {
            println!(
                "Worker {} [valid={} / total={} = {} valid ratio]",
                vm.vm_id,
                num_valid,
                num,
                (num_valid as f32) / (num as f32)
            );
            std::io::stdout().flush().ok();
        }

        let mut buf: Vec<u8> = vec![0xcc; CODE_REGION_SIZE as usize];

        let mut previous_payload: Vec<u8>;

        // Randomly select an existing key to determine if we can execute from within a random
        // offset into it.
        let mut new_payload: Vec<u8>;

        loop {
            previous_payload = Vec::new();

            if rng.gen_bool(0.50) {
                let res: RedisResult<String> = redis_con.srandmember("valid").await;

                match res {
                    Ok(rkey) => {
                        previous_payload = hex::decode(rkey).unwrap();
                    }
                    _ => {}
                };
            }

            new_payload = generator.generate(&previous_payload);

            // Has the new payload we're about to test already been tested? If so, generate another
            // payload.
            let dur = Duration::from_secs(2);
            let get_fut: RedisFuture<i32> = redis_con.sismember("valid", hex::encode(&new_payload));
            let get_res = future::timeout(dur, get_fut).await.unwrap();

            if get_res.is_err() || get_res.unwrap() == 0 {
                break;
            }
        }

        // Copy the new payload into the staging buffer.
        let buf_slice = &mut buf[0..new_payload.len()];

        buf_slice.copy_from_slice(&new_payload);

        let initial_rip = (CODE_VIRTUAL_BASE_ADDRESS + CODE_REGION_SIZE) as usize - new_payload.len();
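        // Worked example: with the 4 KB code region based at 0x20000000 and a
        // 3-byte payload, initial_rip = 0x20000000 + 0x1000 - 3 = 0x20000FFD,
        // placing the payload's last byte at 0x20000FFF so that any execution
        // past the payload faults at 0x20001000.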
        // Copy the staging buffer into physical memory for the VM at the end of
        // the mapping to ensure that any attempt to execute beyond the mapping
        // will fault.
        {
            let mem = vm.get_physical_memory_slice_mut(initial_rip, new_payload.len());
            mem.copy_from_slice(buf_slice);
        }

        let initial_rsp: usize;
        {
            let mut vpe0 = vm.virtual_processors.get(0).unwrap().write().unwrap();

            // Set the general purpose registers to a high value to increase the likelihood of
            // generating a memory access fault if they are used to access memory.
            vm.set_initial_registers(&mut vpe0.vp, 0xf1f1f1f1_f1f1f1f1, initial_rip as u64);

            // Get the initial stack pointer for later comparison.
            let mut reg_names: [WHV_REGISTER_NAME; 1 as usize] = Default::default();
            let mut reg_values: [WHV_REGISTER_VALUE; 1 as usize] = Default::default();

            reg_names[0] = WHV_REGISTER_NAME::WHvX64RegisterRsp;
            reg_values[0].Reg64 = 0;

            // Read back the stack pointer that set_initial_registers established.
            vpe0.vp.get_registers(&reg_names, &mut reg_values).unwrap();

            initial_rsp = unsafe { reg_values[0].Reg64 as usize };
        }

        // Execute the VM.
        let pair = Arc::new((Mutex::new(false), Condvar::new()));
        let pair2 = pair.clone();

        vm.execute(pair2);

        let mut cancelled = false;
        {
            let (lock, cvar) = &*pair;
            let mut done = lock.lock().unwrap();

            let result = cvar.wait_timeout(done, Duration::from_micros(1)).unwrap();

            done = result.0;

            // If execution timed out, then cancel execution of the virtual processors.
            if !*done {
                let vpe0 = vm.virtual_processors.get(0).unwrap().read().unwrap();
                vpe0.vp.cancel_run().unwrap();
                cancelled = true;
            }
        }

        // If we cancelled the VM, then wait for the thread managing the VM to cleanly exit.
        if cancelled {
            let (lock, cvar) = &*pair;
            let mut done = lock.lock().unwrap();

            loop {
                let result = cvar.wait_timeout(done, Duration::from_millis(10)).unwrap();

                done = result.0;

                if *done {
                    break;
                }
            }
        }

        // Check to see if the payload that was executed is valid or not.
        {
            let vpe0 = vm.virtual_processors.get(0).unwrap().read().unwrap();

            let rip = vpe0.last_exit_context.VpContext.Rip as usize;

            // Query the current stack pointer for use when checking if the payload is valid.
            let mut reg_names: [WHV_REGISTER_NAME; 1 as usize] = Default::default();
            let mut reg_values: [WHV_REGISTER_VALUE; 1 as usize] = Default::default();

            reg_names[0] = WHV_REGISTER_NAME::WHvX64RegisterRsp;
            reg_values[0].Reg64 = 0;

            vpe0.vp.get_registers(&reg_names, &mut reg_values).unwrap();

            let final_rsp = unsafe { reg_values[0].Reg64 };

            // If the instruction pointer falls outside of the code mapping
            // (meaning the payload didn't execute from within it), then continue
            // to the next payload as this one is invalid.
            if rip <= CODE_VIRTUAL_BASE_ADDRESS as usize
                || rip > CODE_VIRTUAL_BASE_ADDRESS as usize + 0x1000 {
                continue;
            }

            let current_payload = vm.get_physical_memory_slice(initial_rip as usize, new_payload.len());
            let starting_payload_slice = &buf[0..new_payload.len()];

            // Test to see if the payload is generally valid and that the specific generator we are using
            // considers it to be valid. If it is valid, then we'll add the payload to the valid set
            // in redis, otherwise we'll add it to the invalid set.
            if is_generally_valid(current_payload, starting_payload_slice)
                && generator.is_valid(
                    &previous_payload,
                    &new_payload,
                    initial_rip,
                    rip,
                    initial_rsp,
                    final_rsp as usize,
                )
            {
                num_valid += 1;

                let payload_hex = hex::encode(&current_payload);

                let dur = Duration::from_secs(2);
                let set_fut: RedisFuture<()> = redis_con.sadd("valid", &payload_hex);

                let _ = future::timeout(dur, set_fut).await;
            } else {
                let new_payload_hex = hex::encode(new_payload);

                let dur = Duration::from_secs(2);
                let set_fut: RedisFuture<()> = redis_con.sadd("invalid", &new_payload_hex);

                let _ = future::timeout(dur, set_fut).await;
            }
        }
    }

    // Checks to see if the current payload is generally valid using the following logic:
    // - the payload does not exceed the maximum payload size.
    // - the current payload in memory matches the initial version that was stored in memory (e.g. not corrupted).
    // - the current payload does not contain a branch instruction
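    // For example, a sled of single-byte NOPs (90 90 90) passes these checks,
    // while any payload that decodes to a branch, such as EB FE (jmp $), is
    // rejected by the mnemonic filter below.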
    fn is_generally_valid(current_payload: &[u8], starting_payload: &[u8]) -> bool {
        // Check to see if the payload itself was modified in memory.
        if current_payload != starting_payload {
            return false;
        }

        // Analyze the payload to determine if it should be treated as valid and stored
        // in the database. Treat payloads with branch instructions as invalid.
        let valid = panic::catch_unwind(|| {
            let instr_cursor = Cursor::new(current_payload);
            let mut instr_reader = InstructionReader::new(instr_cursor, Mode::Protected);

            let mut valid = true;
            loop {
                let instr_res = instr_reader.read();

                if instr_res.is_err() {
                    break;
                }

                let instr = instr_res.unwrap();

                match instr.mnemonic {
                    // Reject conditional branch instructions
                    Mnemonic::JA
                    | Mnemonic::JAE
                    | Mnemonic::JB
                    | Mnemonic::JBE
                    | Mnemonic::JC
                    | Mnemonic::JCXZ
                    | Mnemonic::JE
                    | Mnemonic::JECXZ
                    | Mnemonic::JG
                    | Mnemonic::JGE
                    | Mnemonic::JL
                    | Mnemonic::JLE
                    | Mnemonic::JNA
                    | Mnemonic::JNAE
                    | Mnemonic::JNB
                    | Mnemonic::JNBE
                    | Mnemonic::JNC
                    | Mnemonic::JNE
                    | Mnemonic::JNG
                    | Mnemonic::JNGE
                    | Mnemonic::JNL
                    | Mnemonic::JNLE
                    | Mnemonic::JNO
                    | Mnemonic::JNP
                    | Mnemonic::JNS
                    | Mnemonic::JNZ
                    | Mnemonic::JO
                    | Mnemonic::JP
                    | Mnemonic::JPE
                    | Mnemonic::JPO
                    | Mnemonic::JRCXZ
                    | Mnemonic::JS
                    | Mnemonic::JZ
                    | Mnemonic::LOOP
                    | Mnemonic::LOOPE
                    | Mnemonic::LOOPNE => {
                        valid = false;
                        break;
                    }

                    // Reject unconditional branch instructions
                    Mnemonic::CALL | Mnemonic::JMP | Mnemonic::RET => {
                        valid = false;
                        break;
                    }

                    _ => {}
                };
            }

            valid
        });

        if valid.is_err() || !valid.unwrap() {
            return false;
        }

        true
    }
}

#[cfg(test)]
#[allow(dead_code)]
mod tests {

    use crate::vmm::*;

    use std::sync::Arc;
    use std::sync::Condvar;
    use std::sync::Mutex;

    const CODE_VIRTUAL_BASE_ADDRESS: u64 = 0x20000000;
    const CODE_REGION_SIZE: u64 = 4096;

    #[test]
    pub fn test_vm_create() {
        let mut vm = VirtualMachine::new(
            1,
            VirtualMachineConfig {
                processor_count: 1,
                memory_layout: MemoryLayout {
                    physical_layout: vec![
                        PhysicalMemoryRange {
                            base_address: 0x10000000,
                            region_size: 0x400000,
                            region_type: MemoryRegionType::PageTables,
                            ept_protection: MemoryProtection::ReadWrite
                        },
                        PhysicalMemoryRange {
                            base_address: 0x20000000,
                            region_size: CODE_REGION_SIZE as usize,
                            region_type: MemoryRegionType::Code,
                            ept_protection: MemoryProtection::ReadWriteExecute
                        },
                        PhysicalMemoryRange {
                            base_address: 0x30000000,
                            region_size: 0x1000,
                            region_type: MemoryRegionType::Stack,
                            ept_protection: MemoryProtection::ReadWrite
                        }
                    ],
                    virtual_layout: vec![
                        // Code virtual mapping
                        VirtualMemoryDescriptor {
                            base_address: CODE_VIRTUAL_BASE_ADDRESS,
                            region_type: MemoryRegionType::Code,
                            memory_descriptors: vec![
                                MemoryDescriptor {
                                    physical_address: 0x20000000,
                                    virtual_protection: MemoryProtection::ReadWriteExecute
                                }
                            ]
                        },

                        // Stack virtual mapping
                        VirtualMemoryDescriptor {
                            base_address: 0x30000000,
                            region_type: MemoryRegionType::Stack,
                            memory_descriptors: vec![
                                MemoryDescriptor {
                                    physical_address: 0x30000000,
                                    virtual_protection: MemoryProtection::ReadWrite
                                }
                            ]
                        },
                    ]
                },
            },
        );

        vm.setup();

        let pair = Arc::new((Mutex::new(false), Condvar::new()));
        let pair2 = pair.clone();

        vm.execute(pair2);

        let (lock, cvar) = &*pair;
        let done = lock.lock().unwrap();

        let _ = cvar.wait(done);
    }
}

--------------------------------------------------------------------------------
/payloadfuzz/src/vmm.rs:
--------------------------------------------------------------------------------
/*
 * Simple VM encapsulation layer on top of libwhp.
 *
 * Much of the code is borrowed from the demo example from libwhp as currently found here:
 * https://github.com/insula-rs/libwhp/blob/master/examples/demo.rs
 */

extern crate libwhp;

use std;
use std::sync::Arc;
use std::sync::Condvar;
use std::sync::RwLock;
use std::thread;

use libwhp::instruction_emulator::*;
use libwhp::memory::*;
use libwhp::*;

const PDE64_PRESENT: u64 = 1;
const PDE64_RW: u64 = 1 << 1;
const PDE64_USER: u64 = 1 << 2;
const CR4_PAE: u64 = 1 << 5;
const CR4_OSFXSR: u64 = 1 << 9;
const CR4_OSXMMEXCPT: u64 = 1 << 10;

const PAGE_SIZE: u64 = 0x1000;
const PTE_FRAME_BIT_OFFSET: u64 = 12;
const PTE_FRAME_MASK: u64 = 0xF_FFFFFFFF;

const PTE_PML4E_OFFSET: u64 = 39;
const PTE_PML4E_MASK: u64 = 0x1ff;

const PTE_PDPTE_OFFSET: u64 = 30;
const PTE_PDPTE_MASK: u64 = 0x1ff;

const PTE_PDE_OFFSET: u64 = 21;
const PTE_PDE_MASK: u64 = 0x1ff;

const PTE_PTE_OFFSET: u64 = 12;
const PTE_PTE_MASK: u64 = 0x1ff;

fn pte_get_physical_address(pte: u64) -> u64 {
    ((pte >> PTE_FRAME_BIT_OFFSET) & PTE_FRAME_MASK) * PAGE_SIZE
}
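//
// Worked example: a PTE value of 0x20007 (present | RW | user) encodes page
// frame 0x20, so pte_get_physical_address returns
// ((0x20007 >> 12) & 0xF_FFFFFFFF) * 0x1000 = 0x20 * 0x1000 = 0x20000.
//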
const CR0_PE: u64 = 1;
const CR0_MP: u64 = 1 << 1;
const CR0_ET: u64 = 1 << 4;
const CR0_NE: u64 = 1 << 5;
const CR0_WP: u64 = 1 << 16;
const CR0_AM: u64 = 1 << 18;
const CR0_PG: u64 = 1 << 31;
const EFER_LME: u64 = 1 << 8;
const EFER_LMA: u64 = 1 << 10;

const INT_VECTOR: u32 = 0x35;

#[derive(Clone)]
pub struct VirtualMachineConfig {
    pub processor_count: u32,
    pub memory_layout: MemoryLayout
}

pub struct VirtualMachine {
    pub vm_id: usize,
    pub vm_config: VirtualMachineConfig,
    partition: Partition,
    apic_enabled: bool,
    apic_present: bool,
    pub virtual_processors: Vec<Arc<RwLock<VirtualProcessorExtension>>>,
    physical_memory_map: Vec<PhysicalMemoryMapping>,
    pml4_addr: u64,
}

struct VirtualMachineCallbacks<'a> {
    vpe_rwlock: &'a RwLock<VirtualProcessorExtension>,
}

pub struct VirtualProcessorExtension {
    pub vp: VirtualProcessor,
    pub last_exit_context: WHV_RUN_VP_EXIT_CONTEXT,
}

#[derive(Clone)]
pub enum MemoryProtection {
    Read,
    ReadWrite,
    ReadWriteExecute
}

#[derive(Copy, Clone, PartialEq)]
pub enum MemoryRegionType {
    PageTables,
    Code,
    Data,
    Stack
}

#[derive(Clone)]
pub struct PhysicalMemoryRange {
    pub base_address: u64,
    pub region_size: usize,
    pub ept_protection: MemoryProtection,
    pub region_type: MemoryRegionType
}

pub struct PhysicalMemoryMapping {
    pub physical_map: GPARangeMapping,
    pub host_virtual_map: VirtualMemory,
    pub region_type: MemoryRegionType,
    pub allocated_page_map: Vec<bool>
}

#[derive(Clone)]
pub struct MemoryDescriptor {
    pub physical_address: u64,
    pub virtual_protection: MemoryProtection
}

#[derive(Clone)]
pub struct VirtualMemoryDescriptor {
    pub base_address: u64,
    pub region_type: MemoryRegionType,
    pub memory_descriptors: Vec<MemoryDescriptor>
}

impl VirtualMemoryDescriptor {
    pub fn get_region_size(&self) -> usize {
        (self.memory_descriptors.len() * PAGE_SIZE as usize) as usize
    }
}

#[derive(Clone)]
pub struct MemoryLayout {
    pub physical_layout: Vec<PhysicalMemoryRange>,
    pub virtual_layout: Vec<VirtualMemoryDescriptor>,
}

impl<'a> VirtualMachine {
    pub fn new(vm_id: usize, vm_config: VirtualMachineConfig) -> VirtualMachine {
        // Verify that the hypervisor is present
        let capability =
            get_capability(WHV_CAPABILITY_CODE::WHvCapabilityCodeHypervisorPresent).unwrap();

        if unsafe { capability.HypervisorPresent } == FALSE {
            panic!("Hypervisor not present");
        }

        // Check if APIC is present
        let capability = get_capability(WHV_CAPABILITY_CODE::WHvCapabilityCodeFeatures).unwrap();
        let features: WHV_CAPABILITY_FEATURES = unsafe { capability.Features };

        let processor_count = vm_config.processor_count;

        VirtualMachine {
            vm_id: vm_id,
            vm_config: vm_config,
            partition: Partition::new().unwrap(),
            apic_enabled: false,
            apic_present: features.LocalApicEmulation() != 0,
            virtual_processors: Vec::with_capacity(processor_count as usize),
            physical_memory_map: Vec::new(),
            pml4_addr: 0
        }
    }

    pub fn setup(&mut self) {
        // Set the processor count for the VM
        let mut property: WHV_PARTITION_PROPERTY = Default::default();
        property.ProcessorCount = self.vm_config.processor_count;
        self.partition
            .set_property(
                WHV_PARTITION_PROPERTY_CODE::WHvPartitionPropertyCodeProcessorCount,
                &property,
            )
            .unwrap();

        // Set the extended VM exits for the VM
        property = Default::default();
        unsafe {
            property.ExtendedVmExits.set_X64CpuidExit(1);
            property.ExtendedVmExits.set_X64MsrExit(1);
            property.ExtendedVmExits.set_ExceptionExit(1);
        }

        self.partition
            .set_property(
                WHV_PARTITION_PROPERTY_CODE::WHvPartitionPropertyCodeExtendedVmExits,
                &property,
            )
            .unwrap();

        let cpuids: [UINT32; 1] = [1];
        self.partition.set_property_cpuid_exits(&cpuids).unwrap();

        let mut cpuid_results: [WHV_X64_CPUID_RESULT; 1] = Default::default();

        cpuid_results[0].Function = 0x40000000;
        let mut id_reg_values: [UINT32; 3] = [0; 3];
        let id = "cpufuzz\0";
        unsafe {
            std::ptr::copy_nonoverlapping(
                id.as_ptr(),
                id_reg_values.as_mut_ptr() as *mut u8,
                id.len(),
            );
        }
        cpuid_results[0].Ebx = id_reg_values[0];
        cpuid_results[0].Ecx = id_reg_values[1];
        cpuid_results[0].Edx = id_reg_values[2];

        self.partition
            .set_property_cpuid_results(&cpuid_results)
            .unwrap();

        // Enable APIC if present for the VM
        if self.apic_present {
            self.enable_apic();
        }

        // Set up the partition itself
        self.partition.setup().unwrap();

        // Setup the backing physical memory for the VM
        self.setup_physical_memory();
        // Setup the virtual processors for the VM
        self.setup_virtual_processors();
    }

    pub fn execute(&self, completion: Arc<(std::sync::Mutex<bool>, Condvar)>) {
        let mut threads: Vec<thread::JoinHandle<()>> = Vec::new();

        for vpe_rwlock in self.virtual_processors.iter() {
            if self.apic_enabled {
                self.set_apic_base(&mut vpe_rwlock.write().unwrap().vp);
                self.send_ipi(&mut vpe_rwlock.write().unwrap().vp, INT_VECTOR);
                self.set_delivery_notifications(&mut vpe_rwlock.write().unwrap().vp);
            }

            let thread_vpe = vpe_rwlock.clone();
            let thread_completion = completion.clone();
            let thread = thread::spawn(move || {
                VirtualMachine::execute_vp(&thread_vpe);

                let (lock, cvar) = &*thread_completion;
                let mut done = lock.lock().unwrap();
                *done = true;
                cvar.notify_all();
            });

            threads.push(thread);
        }
    }

    fn execute_vp(vpe_rwlock: &Arc<RwLock<VirtualProcessorExtension>>) {
        let _callbacks = VirtualMachineCallbacks {
            vpe_rwlock: vpe_rwlock,
        };

        let _emulator = Emulator::<VirtualMachineCallbacks>::new().unwrap();

        loop {
            let exit_context: WHV_RUN_VP_EXIT_CONTEXT;

            {
                let vpe = vpe_rwlock.read().unwrap();

                exit_context = vpe.vp.run().unwrap();
            }

            {
                let mut vpe = vpe_rwlock.write().unwrap();

                vpe.last_exit_context = exit_context;
            }

            match exit_context.ExitReason {
                WHV_RUN_VP_EXIT_REASON::WHvRunVpExitReasonX64Halt => {
                    break;
                }
                WHV_RUN_VP_EXIT_REASON::WHvRunVpExitReasonException => {
                    break;
                }
                WHV_RUN_VP_EXIT_REASON::WHvRunVpExitReasonMemoryAccess => {
                    break;
                }
                WHV_RUN_VP_EXIT_REASON::WHvRunVpExitReasonX64IoPortAccess => {
                    break;
                }
                WHV_RUN_VP_EXIT_REASON::WHvRunVpExitReasonX64Cpuid => {
                    break;
                }
                WHV_RUN_VP_EXIT_REASON::WHvRunVpExitReasonX64MsrAccess => {
                    break;
                }
                WHV_RUN_VP_EXIT_REASON::WHvRunVpExitReasonX64ApicEoi => {
                    break;
                }
                WHV_RUN_VP_EXIT_REASON::WHvRunVpExitReasonX64InterruptWindow => {
                    break;
                }
                WHV_RUN_VP_EXIT_REASON::WHvRunVpExitReasonUnrecoverableException => {
                    break;
                }
                WHV_RUN_VP_EXIT_REASON::WHvRunVpExitReasonCanceled => {
                    break;
                }
                WHV_RUN_VP_EXIT_REASON::WHvRunVpExitReasonInvalidVpRegisterValue => {
                    break;
                }
                _ => panic!("Unexpected exit type: {:?}", exit_context.ExitReason),
            };
        }
    }

    fn setup_physical_memory(&mut self) {
        for physical_region in &self.vm_config.memory_layout.physical_layout {
            let virtual_map = VirtualMemory::new(physical_region.region_size)
                .expect("virtual memory mapping failed");

            let physical_map_res = self.partition.map_gpa_range(
                &virtual_map,
                physical_region.base_address,
                physical_region.region_size as UINT64,
                match physical_region.ept_protection {
                    MemoryProtection::Read => {
                        WHV_MAP_GPA_RANGE_FLAGS::WHvMapGpaRangeFlagRead
                    },
                    MemoryProtection::ReadWrite => {
                        WHV_MAP_GPA_RANGE_FLAGS::WHvMapGpaRangeFlagRead
                            | WHV_MAP_GPA_RANGE_FLAGS::WHvMapGpaRangeFlagWrite
                    },
                    MemoryProtection::ReadWriteExecute => {
                        WHV_MAP_GPA_RANGE_FLAGS::WHvMapGpaRangeFlagRead
                            | WHV_MAP_GPA_RANGE_FLAGS::WHvMapGpaRangeFlagWrite
                            | WHV_MAP_GPA_RANGE_FLAGS::WHvMapGpaRangeFlagExecute
                    }
                }
            );

            let physical_map = match physical_map_res {
                Ok(physical_map) => {
                    physical_map
                },
                Err(err) => {
                    panic!("map_gpa_range failed with {}", err);
                }
            };

            self.physical_memory_map.push(PhysicalMemoryMapping {
                host_virtual_map: virtual_map,
                physical_map: physical_map,
                region_type: physical_region.region_type,
                allocated_page_map: Vec::new()
            })
        }
    }

    pub fn get_physical_memory_slice_mut(
        &'a mut self,
        physical_address: usize,
        length: usize,
    ) -> &'a mut [u8] {
        for physical_mapping in &mut self.physical_memory_map {
            let mapping_base_address = physical_mapping.physical_map.get_guest_address() as usize;
            let mapping_size = physical_mapping.physical_map.get_size() as usize;

            if physical_address >= mapping_base_address
                && physical_address < mapping_base_address + mapping_size {
                let s = physical_mapping.host_virtual_map.as_slice_mut();

                let offset = physical_address - mapping_base_address;

                return &mut s[offset..(offset + length)];
            }
        }

        panic!("failed to find gpa {:X}", physical_address);
    }

    pub fn get_physical_memory_slice(&'a self, physical_address: usize, length: usize) -> &'a [u8] {
        for physical_mapping in &self.physical_memory_map {
            let mapping_base_address = physical_mapping.physical_map.get_guest_address() as usize;
            let mapping_size = physical_mapping.physical_map.get_size() as usize;

            if physical_address >= mapping_base_address
                && physical_address < mapping_base_address + mapping_size {
                let s = physical_mapping.host_virtual_map.as_slice();

                let offset = physical_address - mapping_base_address;

                return &s[offset..(offset + length)];
            }
        }

        panic!("failed to find gpa {:X}", physical_address);
    }

    fn get_virtual_region_by_type(&'a self, region_type: MemoryRegionType) -> &'a VirtualMemoryDescriptor {
        for virtual_memory_desc in &self.vm_config.memory_layout.virtual_layout {
            if virtual_memory_desc.region_type == region_type {
                return &virtual_memory_desc;
            }
        }

        panic!("failed to find region type");
    }

    fn initialize_address_space(&mut self) -> u64 {
        let mut host_page_table_map_opt = None;

        for map in &mut self.physical_memory_map {
            if map.region_type == MemoryRegionType::PageTables {
                host_page_table_map_opt = Some(map);
                break;
            }
        }

        if host_page_table_map_opt.is_none() {
            panic!("failed to find page table mapping");
        }

        let host_page_table_map = host_page_table_map_opt.unwrap();
        let max_page_table_pages = host_page_table_map.host_virtual_map.get_size() / (PAGE_SIZE as usize);

        host_page_table_map.allocated_page_map = vec![false; max_page_table_pages];

        let page_table_base_gpa = host_page_table_map.physical_map.get_guest_address();
        let page_table_base_hptr = host_page_table_map.host_virtual_map.as_ptr() as u64;

        // Allocate the PML4 page
        let pml4_base_gpa = allocate_gpa(host_page_table_map);
        let pml4_base_hptr = page_table_base_hptr + (pml4_base_gpa - page_table_base_gpa);
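        // Worked example for the walk below: mapping GVA 0x20000000 uses the
        // four-level indices
        //   PML4E = (0x20000000 >> 39) & 0x1ff = 0
        //   PDPTE = (0x20000000 >> 30) & 0x1ff = 0
        //   PDE   = (0x20000000 >> 21) & 0x1ff = 0x100
        //   PTE   = (0x20000000 >> 12) & 0x1ff = 0
        // with each intermediate table allocated from the page table region on
        // first use.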
        for virtual_memory_desc in &self.vm_config.memory_layout.virtual_layout {

            let mut virtual_address = virtual_memory_desc.base_address;

            for memory_desc in &virtual_memory_desc.memory_descriptors {

                // PML4E lookup
                let pml4e_hptr = pml4_base_hptr + (((virtual_address >> PTE_PML4E_OFFSET) & PTE_PML4E_MASK) * 8);
                let pml4e = unsafe { *(pml4e_hptr as *mut u64) };

                let pdpt_base_gpa;
                if pml4e == 0 {
                    pdpt_base_gpa = allocate_gpa(host_page_table_map);

                    let pdpt_base_pfn = pdpt_base_gpa / PAGE_SIZE;

                    unsafe { *(pml4e_hptr as *mut u64) = PDE64_PRESENT | PDE64_RW | PDE64_USER | (pdpt_base_pfn << PTE_FRAME_BIT_OFFSET) };
                } else {
                    pdpt_base_gpa = pte_get_physical_address(pml4e);
                }

                // PDPTE lookup
                let pdpt_base_hptr = page_table_base_hptr + (pdpt_base_gpa - page_table_base_gpa);
                let pdpte_hptr = pdpt_base_hptr + (((virtual_address >> PTE_PDPTE_OFFSET) & PTE_PDPTE_MASK) * 8);
                let pdpte = unsafe { *(pdpte_hptr as *mut u64) };

                let pde_base_gpa: u64;
                if pdpte == 0 {
                    pde_base_gpa = allocate_gpa(host_page_table_map);

                    let pde_base_pfn = pde_base_gpa / PAGE_SIZE;

                    unsafe { *(pdpte_hptr as *mut u64) = PDE64_PRESENT | PDE64_RW | PDE64_USER | (pde_base_pfn << PTE_FRAME_BIT_OFFSET) };
                } else {
                    pde_base_gpa = pte_get_physical_address(pdpte);
                }

                // PDE lookup
                let pde_base_hptr = page_table_base_hptr + (pde_base_gpa - page_table_base_gpa);
                let pde_hptr = pde_base_hptr + (((virtual_address >> PTE_PDE_OFFSET) & PTE_PDE_MASK) * 8);
                let pde = unsafe { *(pde_hptr as *mut u64) };

                let pte_base_gpa: u64;
                if pde == 0 {
                    pte_base_gpa = allocate_gpa(host_page_table_map);

                    let pte_base_pfn = pte_base_gpa / PAGE_SIZE;

                    unsafe { *(pde_hptr as *mut u64) = PDE64_PRESENT | PDE64_RW | PDE64_USER | (pte_base_pfn << PTE_FRAME_BIT_OFFSET) };
                } else {
                    pte_base_gpa = pte_get_physical_address(pde);
                }

                // PTE initialization
                let pte_base_hptr = page_table_base_hptr + (pte_base_gpa - page_table_base_gpa);
                let pte_hptr = pte_base_hptr + (((virtual_address >> PTE_PTE_OFFSET) & PTE_PTE_MASK) * 8);

                let mut pte_value = PDE64_PRESENT | PDE64_RW | PDE64_USER;
                pte_value |= (memory_desc.physical_address / PAGE_SIZE) << PTE_FRAME_BIT_OFFSET;

                match memory_desc.virtual_protection {
                    MemoryProtection::Read
                    | MemoryProtection::ReadWrite => {
                        pte_value |= 1 << 63; // NX bit
                    },
                    _ => {}
                }

                unsafe { *(pte_hptr as *mut u64) = pte_value };

                // Proceed to the next virtual address.
                virtual_address += PAGE_SIZE;
            }
        }

        pml4_base_gpa
    }

    fn setup_virtual_processors(&mut self) {
        for vp_index in 0..self.vm_config.processor_count {
            self.setup_virtual_processor(vp_index);
        }
    }

    fn setup_virtual_processor(&mut self, vp_index: u32) {
        let mut vp = self.partition.create_virtual_processor(vp_index).unwrap();

        self.pml4_addr = self.initialize_address_space();

        // Setup long mode for this VP
        self.set_initial_registers(&mut vp, 0, 0);

        // Configure the APIC
        self.setup_apic(&mut vp);

        let vpe_rwlock = RwLock::new(VirtualProcessorExtension {
            vp: vp,
            last_exit_context: Default::default(),
        });

        self.virtual_processors.push(Arc::new(vpe_rwlock));
    }

    pub fn set_initial_registers(&self, vp: &mut VirtualProcessor, gpr_default_value: u64, initial_rip: u64) {
        const NUM_REGS: UINT32 = 28;
        let mut reg_names: [WHV_REGISTER_NAME; NUM_REGS as usize] = Default::default();
        let mut reg_values: [WHV_REGISTER_VALUE; NUM_REGS as usize] = Default::default();

        // Initialize the control registers to enable protected mode, paging,
        // and 64-bit long mode (via EFER below).
        reg_names[0] = WHV_REGISTER_NAME::WHvX64RegisterCr3;
        reg_values[0].Reg64 = self.pml4_addr;
        reg_names[1] = WHV_REGISTER_NAME::WHvX64RegisterCr4;
        reg_values[1].Reg64 = CR4_PAE | CR4_OSFXSR | CR4_OSXMMEXCPT;

        reg_names[2] = WHV_REGISTER_NAME::WHvX64RegisterCr0;
        reg_values[2].Reg64 = CR0_PE | CR0_MP | CR0_ET | CR0_NE | CR0_WP | CR0_AM | CR0_PG;
        reg_names[3] = WHV_REGISTER_NAME::WHvX64RegisterEfer;
        reg_values[3].Reg64 = EFER_LME | EFER_LMA;

        reg_names[4] = WHV_REGISTER_NAME::WHvX64RegisterCs;
        unsafe {
            let segment = &mut reg_values[4].Segment;
            segment.Base = 0;
            segment.Limit = 0xffffffff;
            segment.Selector = 1 << 3;
            segment.set_SegmentType(11);
            segment.set_NonSystemSegment(1);
            segment.set_Present(1);
            segment.set_Long(1);
            segment.set_Granularity(1);
        }

        reg_names[5] = WHV_REGISTER_NAME::WHvX64RegisterDs;
        unsafe {
            let segment = &mut reg_values[5].Segment;
            segment.Base = 0;
            segment.Limit = 0xffffffff;
            segment.Selector = 2 << 3;
            segment.set_SegmentType(3);
            segment.set_NonSystemSegment(1);
            segment.set_Present(1);
            segment.set_Long(1);
            segment.set_Granularity(1);
        }
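        // Note on the selector values: an x86 segment selector is
        // (descriptor table index << 3) | RPL, so CS uses 1 << 3 = 0x08
        // (GDT entry 1, ring 0) and the data segments use 2 << 3 = 0x10
        // (GDT entry 2, ring 0).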
        reg_names[6] = WHV_REGISTER_NAME::WHvX64RegisterEs;
        reg_values[6] = reg_values[5];

        reg_names[7] = WHV_REGISTER_NAME::WHvX64RegisterFs;
        reg_values[7] = reg_values[5];

        reg_names[8] = WHV_REGISTER_NAME::WHvX64RegisterGs;
        reg_values[8] = reg_values[5];

        reg_names[9] = WHV_REGISTER_NAME::WHvX64RegisterSs;
        reg_values[9] = reg_values[5];

        // Start with the Interrupt Flag off; guest will enable it when ready
        reg_names[10] = WHV_REGISTER_NAME::WHvX64RegisterRflags;
        reg_values[10].Reg64 = 0x0002;

        reg_names[11] = WHV_REGISTER_NAME::WHvX64RegisterRip;
        reg_values[11].Reg64 = initial_rip;

        // Create stack with stack base at high end of mapped payload
        reg_names[12] = WHV_REGISTER_NAME::WHvX64RegisterRsp;
        let stack_region = self.get_virtual_region_by_type(MemoryRegionType::Stack);
        reg_values[12].Reg64 = stack_region.base_address + stack_region.get_region_size() as UINT64;

        reg_names[13] = WHV_REGISTER_NAME::WHvX64RegisterRax;
        reg_values[13].Reg64 = gpr_default_value;
        reg_names[14] = WHV_REGISTER_NAME::WHvX64RegisterRbx;
        reg_values[14].Reg64 = gpr_default_value;
        reg_names[15] = WHV_REGISTER_NAME::WHvX64RegisterRcx;
        reg_values[15].Reg64 = gpr_default_value;
        reg_names[16] = WHV_REGISTER_NAME::WHvX64RegisterRdx;
        reg_values[16].Reg64 = gpr_default_value;
        reg_names[17] = WHV_REGISTER_NAME::WHvX64RegisterRdi;
        reg_values[17].Reg64 = gpr_default_value;
        reg_names[18] = WHV_REGISTER_NAME::WHvX64RegisterRsi;
        reg_values[18].Reg64 = gpr_default_value;
        reg_names[19] = WHV_REGISTER_NAME::WHvX64RegisterRbp;
        reg_values[19].Reg64 = gpr_default_value;
        reg_names[20] = WHV_REGISTER_NAME::WHvX64RegisterR8;
        reg_values[20].Reg64 = gpr_default_value;
        reg_names[21] = WHV_REGISTER_NAME::WHvX64RegisterR9;
        reg_values[21].Reg64 = gpr_default_value;
        reg_names[22] = WHV_REGISTER_NAME::WHvX64RegisterR10;
        reg_values[22].Reg64 = gpr_default_value;
        reg_names[23] = WHV_REGISTER_NAME::WHvX64RegisterR11;
        reg_values[23].Reg64 = gpr_default_value;
        reg_names[24] = WHV_REGISTER_NAME::WHvX64RegisterR12;
        reg_values[24].Reg64 = gpr_default_value;
        reg_names[25] = WHV_REGISTER_NAME::WHvX64RegisterR13;
        reg_values[25].Reg64 = gpr_default_value;
        reg_names[26] = WHV_REGISTER_NAME::WHvX64RegisterR14;
        reg_values[26].Reg64 = gpr_default_value;
        reg_names[27] = WHV_REGISTER_NAME::WHvX64RegisterR15;
        reg_values[27].Reg64 = gpr_default_value;

        // Apply the initial register state to the virtual processor.
        vp.set_registers(&reg_names, &reg_values).unwrap();
    }

    fn enable_apic(&mut self) {
        let mut property: WHV_PARTITION_PROPERTY = Default::default();
        property.LocalApicEmulationMode =
            WHV_X64_LOCAL_APIC_EMULATION_MODE::WHvX64LocalApicEmulationModeXApic;

        self.partition
            .set_property(
                WHV_PARTITION_PROPERTY_CODE::WHvPartitionPropertyCodeLocalApicEmulationMode,
                &property,
            )
            .unwrap();

        self.apic_enabled = true;
    }

    fn set_apic_base(&self, vp: &mut VirtualProcessor) {
        // Page table translations for this guest only cover the first 1GB of memory,
        // and the default APIC base falls above that. Set the APIC base to
        // something lower, within our range of virtual memory.
        // Get the default APIC base register value to start
        const NUM_REGS: usize = 1;
        let mut reg_names: [WHV_REGISTER_NAME; NUM_REGS] = Default::default();
        let mut reg_values: [WHV_REGISTER_VALUE; NUM_REGS] = Default::default();

        reg_names[0] = WHV_REGISTER_NAME::WHvX64RegisterApicBase;

        // Get the registers as a baseline
        vp.get_registers(&reg_names, &mut reg_values).unwrap();
        let mut flags = unsafe { reg_values[0].Reg64 };

        // Keep only the bottom 12 bits, which are used to store flags
        flags = flags & 0xfff;

        // Set the APIC base to something lower within our translatable address
        // space
        let new_apic_base = 0x0fee_0000;
        reg_names[0] = WHV_REGISTER_NAME::WHvX64RegisterApicBase;
        reg_values[0].Reg64 = new_apic_base | flags;
        vp.set_registers(&reg_names, &reg_values).unwrap();
    }

    fn send_msi(&self, vp: &mut VirtualProcessor, message: &WHV_MSI_ENTRY) {
        let addr: UINT32 = unsafe { message.anon_struct.Address };
        let data: UINT32 = unsafe { message.anon_struct.Data };

        let dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
        let vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
        let dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
        let trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
        let delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;

        let mut interrupt: WHV_INTERRUPT_CONTROL = Default::default();

        interrupt.set_InterruptType(delivery as UINT64);

        if dest_mode == 0 {
            interrupt.set_DestinationMode(
                WHV_INTERRUPT_DESTINATION_MODE::WHvX64InterruptDestinationModePhysical as UINT64,
            );
        } else {
            interrupt.set_DestinationMode(
                WHV_INTERRUPT_DESTINATION_MODE::WHvX64InterruptDestinationModeLogical as UINT64,
            );
        }

        interrupt.set_TriggerMode(trigger_mode as UINT64);

        interrupt.Destination = dest;
        interrupt.Vector = vector;

        vp.request_interrupt(&mut interrupt).unwrap();
    }

    fn send_ipi(&self, vp: &mut VirtualProcessor, vector: u32) {
        let mut message: WHV_MSI_ENTRY = Default::default();

        // - Trigger mode is 'Edge'
        // - Interrupt type is 'Fixed'
        // - Destination mode is 'Physical'
        // - Destination is 0. Since Destination Mode is Physical, bits 56-59
        //   contain the APIC ID of the target processor (APIC ID = 0)
        // - Level = 1 and Destination Shorthand = 1, but the underlying API will
        //   actually ignore this.
        message.anon_struct.Data = (0x00044000 | vector) as UINT32;
        message.anon_struct.Address = 0;

        VirtualMachine::send_msi(self, vp, &message);
    }

    fn set_delivery_notifications(&self, vp: &mut VirtualProcessor) {
        const NUM_REGS: usize = 1;
        let mut reg_values: [WHV_REGISTER_VALUE; NUM_REGS] = Default::default();
        let mut reg_names: [WHV_REGISTER_NAME; NUM_REGS] = Default::default();

        let mut notifications: WHV_X64_DELIVERABILITY_NOTIFICATIONS_REGISTER = Default::default();
        notifications.set_InterruptNotification(1);
        reg_values[0].DeliverabilityNotifications = notifications;
        reg_names[0] = WHV_REGISTER_NAME::WHvX64RegisterDeliverabilityNotifications;
        vp.set_registers(&reg_names, &reg_values).unwrap();
    }

    fn setup_apic(&self, vp: &mut VirtualProcessor) {
        self.send_ipi(vp, INT_VECTOR);

        const NUM_REGS: usize = 1;
        let mut reg_values: [WHV_REGISTER_VALUE; NUM_REGS] = Default::default();
        let mut reg_names: [WHV_REGISTER_NAME; NUM_REGS] = Default::default();

        let mut notifications: WHV_X64_DELIVERABILITY_NOTIFICATIONS_REGISTER = Default::default();
        notifications.set_InterruptNotification(1);
        reg_values[0].DeliverabilityNotifications = notifications;
        reg_names[0] = WHV_REGISTER_NAME::WHvX64RegisterDeliverabilityNotifications;
        vp.set_registers(&reg_names, &reg_values).unwrap();
    }
}

impl<'a> EmulatorCallbacks for VirtualMachineCallbacks<'a> {
    fn io_port(&mut self, _io_access: &mut WHV_EMULATOR_IO_ACCESS_INFO) -> HRESULT {
        S_OK
    }

    fn memory(&mut self, _memory_access: &mut WHV_EMULATOR_MEMORY_ACCESS_INFO) -> HRESULT {
        S_OK
    }

    fn get_virtual_processor_registers(
        &mut self,
        register_names: &[WHV_REGISTER_NAME],
        register_values: &mut [WHV_REGISTER_VALUE],
    ) -> HRESULT {
        self.vpe_rwlock
            .read()
            .unwrap()
            .vp
            .get_registers(register_names, register_values)
            .unwrap();
        S_OK
    }

    fn set_virtual_processor_registers(
        &mut self,
        register_names: &[WHV_REGISTER_NAME],
        register_values: &[WHV_REGISTER_VALUE],
    ) -> HRESULT {
        self.vpe_rwlock
            .write()
            .unwrap()
            .vp
            .set_registers(register_names, register_values)
            .unwrap();
        S_OK
    }

    fn translate_gva_page(
        &mut self,
        gva: WHV_GUEST_VIRTUAL_ADDRESS,
        translate_flags: WHV_TRANSLATE_GVA_FLAGS,
        translation_result: &mut WHV_TRANSLATE_GVA_RESULT_CODE,
        gpa: &mut WHV_GUEST_PHYSICAL_ADDRESS,
    ) -> HRESULT {
        let (translation_result1, gpa1) = self
            .vpe_rwlock
            .read()
            .unwrap()
            .vp
            .translate_gva(gva, translate_flags)
            .unwrap();
        *translation_result = translation_result1.ResultCode;
        *gpa = gpa1;
        S_OK
    }
}
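//
// allocate_gpa hands out guest physical pages from the page table region using
// a first-fit scan of the allocation bitmap. For example, with the region
// based at 0x10000000 and pages 0 and 1 already taken, the next call returns
// 0x10002000.
//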
fn allocate_gpa(mapping: &mut PhysicalMemoryMapping) -> u64 {
    let mut pfn = 0;
    let mut found_pfn = false;

    for entry in &mapping.allocated_page_map {
        if !*entry {
            found_pfn = true;
            break;
        }

        pfn += 1;
    }

    if !found_pfn {
        panic!("ran out of physical memory to allocate");
    }

    mapping.allocated_page_map[pfn] = true;

    mapping.physical_map.get_guest_address() + (pfn as u64 * PAGE_SIZE)
}

--------------------------------------------------------------------------------
/tools/nopsled_analyzer/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "nopsled_analyzer"
version = "0.1.0"
authors = ["Matt Miller"]
edition = "2018"

[dependencies]
clap = "2.33.1"
hex = "0.4.2"
futures = "0.3.5"
tokio = { version = "0.2.21", features = ["full"] }
redis = "0.16.0"
async-std = "1.6.1"

--------------------------------------------------------------------------------
/tools/nopsled_analyzer/src/main.rs:
--------------------------------------------------------------------------------
extern crate clap;
extern crate futures;
extern crate hex;
extern crate redis;

use std::time::Duration;

use std::collections::HashMap;
use std::collections::HashSet;
use std::fs::File;
use std::io::{prelude::*, BufReader};

use async_std::future;

use std::cell::RefCell;
use std::rc::Rc;

use clap::{App, Arg};

use redis::AsyncCommands;
use redis::RedisFuture;

struct ByteChain {
    pub byte_value: u8,
    pub occurrences: u64,
    pub valid_precede: Box<HashMap<u8, Rc<RefCell<ByteChain>>>>,
}

impl ByteChain {
    pub fn new(byte_value_arg: u8) -> ByteChain {
        ByteChain {
            byte_value: byte_value_arg,
            occurrences: 1,
            valid_precede: Box::new(HashMap::<u8, Rc<RefCell<ByteChain>>>::new()),
        }
    }
}

#[tokio::main]
async fn main() {
    let matches = App::new("nopanalyzer")
        .arg(
            Arg::with_name("file_path")
                .short("f")
                .long("file_path")
                .takes_value(true)
                .help("The file containing the nops to analyze"),
        )
        .arg(
            Arg::with_name("depth")
                .short("d")
                .long("depth")
                .takes_value(true)
                .help("The maximum depth to display the tree"),
        )
        .arg(
            Arg::with_name("command")
                .short("c")
                .long("command")
                .takes_value(true)
                .help("The command to execute (graph, tree)"),
        )
        .arg(
            Arg::with_name("redis")
                .short("r")
                .long("redis")
                .takes_value(false)
                .help("Use the local redis server to acquire valid byte chains"),
        )
        .get_matches();

    let nop_file_path: String = matches.value_of("file_path").unwrap_or("").to_string();
    let depth: u32 = matches.value_of("depth").unwrap_or("3").parse().unwrap();
    let command = matches.value_of("command").unwrap_or("tree").to_string();
    let use_redis = matches.is_present("redis");

    let byte_chain_root: Rc<RefCell<ByteChain>> = Rc::new(RefCell::new(ByteChain::new(0)));
    let mut predecessor_map: HashMap<u8, u8> = HashMap::new();

    if nop_file_path.len() > 0 {
        // If a file was specified, then populate the byte chain from a file containing
        // hex strings representing each valid byte chain.
        let nop_file = File::open(nop_file_path).unwrap();
        let reader = BufReader::new(nop_file);

        for line in reader.lines() {
            let line_str = line.unwrap();
            let payload: Vec<u8> = hex::decode(&line_str).unwrap();

            update_byte_chain(&mut predecessor_map, byte_chain_root.clone(), &payload);
        }
    } else if use_redis {
        // If redis should be used, then extract the valid byte chains from the redis server.
        redis_populate_byte_chain(&mut predecessor_map, byte_chain_root.clone()).await;
    } else {
        panic!("invalid mode");
    }

    match command.as_ref() {
        "graph" => {
            // Generate a graph describing "X is a valid predecessor
            // for Y" as X -> Y.
            println!("digraph {{");
            for pair in predecessor_map.into_iter() {
                println!("b_{:#02X} -> b_{:#02X};", pair.0, pair.1);
            }
            println!("}}");
        }
        "tree" => {
            println!("Displaying successor tree to a depth of {}", depth);
            if depth > 0 {
                dump_byte_chain(byte_chain_root.clone(), depth, depth);
            }
        }
        _ => {
            panic!("unexpected command");
        }
    }
}

async fn redis_populate_byte_chain(
    predecessor_map: &mut HashMap<u8, u8>,
    byte_chain_root: Rc<RefCell<ByteChain>>,
) {
    let redis_client =
        redis::Client::open("redis://127.0.0.1").expect("failed to initialize redis client");
    let mut redis_con = redis_client
        .get_async_connection()
        .await
        .expect("failed to connect to redis server at 127.0.0.1");

    let dur = Duration::from_secs(2);
    let members_fut: RedisFuture<HashSet<String>> = redis_con.smembers("valid");
    let members_res = future::timeout(dur, members_fut).await.unwrap();

    if members_res.is_err() {
        panic!("unable to query valid payloads");
    }

    let members: HashSet<String> = members_res.unwrap();

    for member in members {
        let member_raw = hex::decode(member).unwrap();
        update_byte_chain(predecessor_map, byte_chain_root.clone(), &member_raw);
    }
}

fn dump_byte_chain(byte_chain_root: Rc<RefCell<ByteChain>>, depth: u32, max_depth: u32) {
    for kv in byte_chain_root.borrow().valid_precede.values() {
        let bc = kv.borrow();

        for _i in depth..max_depth {
            print!(" ");
        }

        println!(
            "{:X} - occurrences {} valid preds {}",
            bc.byte_value,
            bc.occurrences,
            bc.valid_precede.len()
        );

        if depth > 0 {
            dump_byte_chain(kv.clone(), depth - 1, max_depth);
        }
    }
}
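//
// update_byte_chain walks a payload from its last byte to its first, growing
// the chain tree and recording "X can precede Y" pairs. For example, the
// payload AA BB CC is visited as CC, BB, AA, and once the walk is two bytes
// deep it records the edge AA -> BB in predecessor_map.
//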
fn update_byte_chain(
    predecessor_map: &mut HashMap<u8, u8>,
    byte_chain_root: Rc<RefCell<ByteChain>>,
    payload: &[u8],
) {
    let mut current_byte_chain_ref = byte_chain_root;
    let mut iteration = 0;
    let mut last_byte_value = 0;

    for byte_value in payload.iter().rev() {
        if iteration > 1 {
            predecessor_map.insert(*byte_value, last_byte_value);
        }

        let next_byte_chain;
        {
            let mut current_byte_chain_mut = current_byte_chain_ref.borrow_mut();

            if let Some(nbc) = current_byte_chain_mut.valid_precede.get(byte_value) {
                nbc.borrow_mut().occurrences += 1;
                next_byte_chain = nbc.clone();
            } else {
                next_byte_chain = Rc::new(RefCell::new(ByteChain::new(*byte_value)));
                next_byte_chain.borrow_mut().byte_value = *byte_value;
                current_byte_chain_mut
                    .valid_precede
                    .insert(*byte_value, next_byte_chain.clone());
            }
        }

        current_byte_chain_ref = next_byte_chain.clone();
        last_byte_value = *byte_value;
        iteration += 1;
    }
}
--------------------------------------------------------------------------------