├── .gitmodules
├── CMakeLists.txt
├── LICENSE
└── Source
    ├── CMakeLists.txt
    ├── Core
    │   ├── Bootloader
    │   │   ├── Bootloader.cpp
    │   │   ├── Bootloader.h
    │   │   ├── ELFLoader.cpp
    │   │   └── ELFLoader.h
    │   ├── CMakeLists.txt
    │   ├── CPU
    │   │   ├── AArch64Backend
    │   │   │   ├── AArch64.cpp
    │   │   │   └── AArch64.h
    │   │   ├── BlockCache.cpp
    │   │   ├── BlockCache.h
    │   │   ├── CPUBackend.h
    │   │   ├── CPUCore.cpp
    │   │   ├── CPUCore.h
    │   │   ├── CPUState.h
    │   │   ├── IR.cpp
    │   │   ├── IR.h
    │   │   ├── InterpreterBackend
    │   │   │   ├── Interpreter.cpp
    │   │   │   └── Interpreter.h
    │   │   ├── IntrusiveIRList.h
    │   │   ├── LLVMBackend
    │   │   │   ├── LLVM.cpp
    │   │   │   └── LLVM.h
    │   │   ├── OpcodeDispatch.cpp
    │   │   ├── OpcodeDispatch.h
    │   │   ├── PassManager.cpp
    │   │   ├── PassManager.h
    │   │   ├── X86Tables.cpp
    │   │   └── X86Tables.h
    │   ├── Core.cpp
    │   ├── Core.h
    │   ├── HLE
    │   │   └── Syscalls
    │   │       ├── FileManagement.cpp
    │   │       ├── FileManagement.h
    │   │       ├── Syscalls.cpp
    │   │       ├── Syscalls.h
    │   │       └── ThreadManagement.h
    │   ├── Memmap.cpp
    │   └── Memmap.h
    └── UI
        ├── CMakeLists.txt
        ├── HostInterface.cpp
        └── TestHarness.cpp
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "External/SonicUtils"]
2 | 	path = External/SonicUtils
3 | 	url = https://github.com/Sonicadvance1/SonicUtils.git
4 | 
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.10)
2 | project(Emu)
3 | 
4 | set(CMAKE_CXX_STANDARD 17)
5 | set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
6 | set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/Bin)
7 | 
8 | add_definitions(-Wno-trigraphs)
9 | 
10 | add_subdirectory(External/SonicUtils/)
11 | 
12 | include_directories(External/SonicUtils/)
13 | include_directories(Source/)
14 | 
15 | find_package(LLVM CONFIG QUIET)
16 | if(LLVM_FOUND AND TARGET LLVM)
17 |   message(STATUS "LLVM found!")
18 |   include_directories(${LLVM_INCLUDE_DIRS})
19 | endif()
20 | 
21 | 
22 | add_subdirectory(Source/)
23 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Creative Commons Legal Code
2 | 
3 | CC0 1.0 Universal
4 | 
5 | CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
6 | LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
7 | ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
8 | INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
9 | REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
10 | PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
11 | THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
12 | HEREUNDER.
13 | 
14 | Statement of Purpose
15 | 
16 | The laws of most jurisdictions throughout the world automatically confer
17 | exclusive Copyright and Related Rights (defined below) upon the creator
18 | and subsequent owner(s) (each and all, an "owner") of an original work of
19 | authorship and/or a database (each, a "Work").
20 | 
21 | Certain owners wish to permanently relinquish those rights to a Work for
22 | the purpose of contributing to a commons of creative, cultural and
23 | scientific works ("Commons") that the public can reliably and without fear
24 | of later claims of infringement build upon, modify, incorporate in other
25 | works, reuse and redistribute as freely as possible in any form whatsoever
26 | and for any purposes, including without limitation commercial purposes.
27 | These owners may contribute to the Commons to promote the ideal of a free 28 | culture and the further production of creative, cultural and scientific 29 | works, or to gain reputation or greater distribution for their Work in 30 | part through the use and efforts of others. 31 | 32 | For these and/or other purposes and motivations, and without any 33 | expectation of additional consideration or compensation, the person 34 | associating CC0 with a Work (the "Affirmer"), to the extent that he or she 35 | is an owner of Copyright and Related Rights in the Work, voluntarily 36 | elects to apply CC0 to the Work and publicly distribute the Work under its 37 | terms, with knowledge of his or her Copyright and Related Rights in the 38 | Work and the meaning and intended legal effect of CC0 on those rights. 39 | 40 | 1. Copyright and Related Rights. A Work made available under CC0 may be 41 | protected by copyright and related or neighboring rights ("Copyright and 42 | Related Rights"). Copyright and Related Rights include, but are not 43 | limited to, the following: 44 | 45 | i. the right to reproduce, adapt, distribute, perform, display, 46 | communicate, and translate a Work; 47 | ii. moral rights retained by the original author(s) and/or performer(s); 48 | iii. publicity and privacy rights pertaining to a person's image or 49 | likeness depicted in a Work; 50 | iv. rights protecting against unfair competition in regards to a Work, 51 | subject to the limitations in paragraph 4(a), below; 52 | v. rights protecting the extraction, dissemination, use and reuse of data 53 | in a Work; 54 | vi. database rights (such as those arising under Directive 96/9/EC of the 55 | European Parliament and of the Council of 11 March 1996 on the legal 56 | protection of databases, and under any national implementation 57 | thereof, including any amended or successor version of such 58 | directive); and 59 | vii. other similar, equivalent or corresponding rights throughout the 60 | world based on applicable law or treaty, and any national 61 | implementations thereof. 62 | 63 | 2. Waiver. To the greatest extent permitted by, but not in contravention 64 | of, applicable law, Affirmer hereby overtly, fully, permanently, 65 | irrevocably and unconditionally waives, abandons, and surrenders all of 66 | Affirmer's Copyright and Related Rights and associated claims and causes 67 | of action, whether now known or unknown (including existing as well as 68 | future claims and causes of action), in the Work (i) in all territories 69 | worldwide, (ii) for the maximum duration provided by applicable law or 70 | treaty (including future time extensions), (iii) in any current or future 71 | medium and for any number of copies, and (iv) for any purpose whatsoever, 72 | including without limitation commercial, advertising or promotional 73 | purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each 74 | member of the public at large and to the detriment of Affirmer's heirs and 75 | successors, fully intending that such Waiver shall not be subject to 76 | revocation, rescission, cancellation, termination, or any other legal or 77 | equitable action to disrupt the quiet enjoyment of the Work by the public 78 | as contemplated by Affirmer's express Statement of Purpose. 79 | 80 | 3. Public License Fallback. 
Should any part of the Waiver for any reason 81 | be judged legally invalid or ineffective under applicable law, then the 82 | Waiver shall be preserved to the maximum extent permitted taking into 83 | account Affirmer's express Statement of Purpose. In addition, to the 84 | extent the Waiver is so judged Affirmer hereby grants to each affected 85 | person a royalty-free, non transferable, non sublicensable, non exclusive, 86 | irrevocable and unconditional license to exercise Affirmer's Copyright and 87 | Related Rights in the Work (i) in all territories worldwide, (ii) for the 88 | maximum duration provided by applicable law or treaty (including future 89 | time extensions), (iii) in any current or future medium and for any number 90 | of copies, and (iv) for any purpose whatsoever, including without 91 | limitation commercial, advertising or promotional purposes (the 92 | "License"). The License shall be deemed effective as of the date CC0 was 93 | applied by Affirmer to the Work. Should any part of the License for any 94 | reason be judged legally invalid or ineffective under applicable law, such 95 | partial invalidity or ineffectiveness shall not invalidate the remainder 96 | of the License, and in such case Affirmer hereby affirms that he or she 97 | will not (i) exercise any of his or her remaining Copyright and Related 98 | Rights in the Work or (ii) assert any associated claims and causes of 99 | action with respect to the Work, in either case contrary to Affirmer's 100 | express Statement of Purpose. 101 | 102 | 4. Limitations and Disclaimers. 103 | 104 | a. No trademark or patent rights held by Affirmer are waived, abandoned, 105 | surrendered, licensed or otherwise affected by this document. 106 | b. Affirmer offers the Work as-is and makes no representations or 107 | warranties of any kind concerning the Work, express, implied, 108 | statutory or otherwise, including without limitation warranties of 109 | title, merchantability, fitness for a particular purpose, non 110 | infringement, or the absence of latent or other defects, accuracy, or 111 | the present or absence of errors, whether or not discoverable, all to 112 | the greatest extent permissible under applicable law. 113 | c. Affirmer disclaims responsibility for clearing rights of other persons 114 | that may apply to the Work or any use thereof, including without 115 | limitation any person's Copyright and Related Rights in the Work. 116 | Further, Affirmer disclaims responsibility for obtaining any necessary 117 | consents, permissions or other rights required for any use of the 118 | Work. 119 | d. Affirmer understands and acknowledges that Creative Commons is not a 120 | party to this document and has no duty or obligation with respect to 121 | this CC0 or use of the Work. 
122 | 
--------------------------------------------------------------------------------
/Source/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | add_subdirectory(Core/)
2 | add_subdirectory(UI/)
3 | 
--------------------------------------------------------------------------------
/Source/Core/Bootloader/Bootloader.cpp:
--------------------------------------------------------------------------------
1 | #include "Bootloader.h"
2 | #include "ELFLoader.h"
3 | #include "LogManager.h"
4 | 
5 | namespace Emu {
6 | 
7 | Bootloader::Bootloader() = default;
8 | Bootloader::~Bootloader() = default;
9 | 
10 | bool Bootloader::Load(std::string const &File, std::vector<std::string> const &Args) {
11 |   if (Loader) {
12 |     LogMan::Msg::E("Loader was already loaded");
13 |     return false;
14 |   }
15 | 
16 |   Loader = std::make_unique<ELFLoader>();
17 | 
18 |   bool Result = true;
19 |   Result &= Loader->Load(File);
20 |   Result &= Loader->SetArguments(Args);
21 |   return Result;
22 | }
23 | 
24 | }
25 | 
--------------------------------------------------------------------------------
/Source/Core/Bootloader/Bootloader.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | 
3 | #include <memory>
4 | #include <string>
5 | #include <vector>
6 | 
7 | namespace Emu {
8 | class ELFLoader;
9 | 
10 | class Bootloader {
11 | public:
12 |   Bootloader();
13 |   ~Bootloader();
14 |   bool Load(std::string const &File, std::vector<std::string> const &Args);
15 | 
16 | private:
17 |   std::unique_ptr<ELFLoader> Loader;
18 | };
19 | }
20 | 
--------------------------------------------------------------------------------
/Source/Core/Bootloader/ELFLoader.cpp:
--------------------------------------------------------------------------------
1 | #include "ELFLoader.h"
2 | 
3 | namespace Emu {
4 | bool ELFLoader::Load(std::string const &File) {
5 |   return true;
6 | }
7 | 
8 | bool ELFLoader::SetArguments(std::vector<std::string> const &Args) {
9 |   return true;
10 | }
11 | 
12 | }
13 | 
--------------------------------------------------------------------------------
/Source/Core/Bootloader/ELFLoader.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | 
3 | #include <string>
4 | #include <vector>
5 | 
6 | namespace Emu {
7 | class ELFLoader {
8 | public:
9 |   bool Load(std::string const &File);
10 |   bool SetArguments(std::vector<std::string> const &Args);
11 | };
12 | }
13 | 
--------------------------------------------------------------------------------
/Source/Core/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | set(NAME Core)
2 | set(SRCS
3 |   Bootloader/Bootloader.cpp
4 |   Bootloader/ELFLoader.cpp
5 |   CPU/BlockCache.cpp
6 |   CPU/CPUCore.cpp
7 |   CPU/IR.cpp
8 |   CPU/OpcodeDispatch.cpp
9 |   CPU/PassManager.cpp
10 |   CPU/X86Tables.cpp
11 |   CPU/AArch64Backend/AArch64.cpp
12 |   CPU/InterpreterBackend/Interpreter.cpp
13 |   CPU/LLVMBackend/LLVM.cpp
14 |   HLE/Syscalls/Syscalls.cpp
15 |   HLE/Syscalls/FileManagement.cpp
16 |   Core.cpp
17 |   Memmap.cpp)
18 | 
19 | add_library(${NAME} STATIC ${SRCS} )
20 | target_link_libraries(${NAME} rt)
21 | 
--------------------------------------------------------------------------------
/Source/Core/CPU/AArch64Backend/AArch64.cpp:
--------------------------------------------------------------------------------
1 | #include "Core/CPU/CPUCore.h"
2 | #include "Core/CPU/IR.h"
3 | #include "Core/CPU/IntrusiveIRList.h"
4 | #include "AArch64.h"
5 | #include "LogManager.h"
6 | #include 
7 | 
8 | namespace Emu {
9 | 
10 | void* AArch64::CompileCode(Emu::IR::IntrusiveIRList const *ir) {
11 |   return nullptr;
12 | };
13 | }
14 | 
--------------------------------------------------------------------------------
/Source/Core/CPU/AArch64Backend/AArch64.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "Core/CPU/CPUBackend.h"
3 | 
4 | namespace Emu {
5 | class AArch64 final : public CPUBackend {
6 | public:
7 |   std::string GetName() override { return "AArch64"; }
8 |   void* CompileCode(Emu::IR::IntrusiveIRList const *ir) override;
9 | private:
10 | };
11 | }
12 | 
--------------------------------------------------------------------------------
/Source/Core/CPU/BlockCache.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sonicadvance1/X86Emu/329e9eb1a60ef8353d1200fda454f835002e1ff0/Source/Core/CPU/BlockCache.cpp
--------------------------------------------------------------------------------
/Source/Core/CPU/BlockCache.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "LogManager.h"
3 | #include <map>
4 | 
5 | namespace Emu {
6 | class BlockCache {
7 | public:
8 |   using BlockCacheType = std::map<uint64_t, void *>;
9 |   using BlockCacheIter = BlockCacheType::iterator const;
10 | 
11 |   BlockCacheIter FindBlock(uint64_t Address) {
12 |     return Blocks.find(Address);
13 |   }
14 | 
15 |   BlockCacheIter End() { return Blocks.end(); }
16 | 
17 |   BlockCacheIter AddBlockMapping(uint64_t Address, void *Ptr) {
18 |     auto ret = Blocks.insert(std::make_pair(Address, Ptr));
19 |     LogMan::Throw::A(ret.second, "Couldn't insert block");
20 |     return ret.first;
21 |   }
22 | 
23 |   size_t Size() const { return Blocks.size(); }
24 | 
25 | private:
26 |   BlockCacheType Blocks;
27 | };
28 | }
29 | 
--------------------------------------------------------------------------------
/Source/Core/CPU/CPUBackend.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "IntrusiveIRList.h"
3 | #include <string>
4 | 
5 | namespace Emu {
6 | class CPUBackend {
7 | public:
8 |   virtual ~CPUBackend() = default;
9 |   virtual std::string GetName() = 0;
10 |   virtual void* CompileCode(Emu::IR::IntrusiveIRList const *ir) = 0;
11 | };
12 | }
13 | 
--------------------------------------------------------------------------------
/Source/Core/CPU/CPUCore.cpp:
--------------------------------------------------------------------------------
1 | #include "CPUCore.h"
2 | #include "ELFLoader.h"
3 | #include "IR.h"
4 | #include "LogManager.h"
5 | #include "OpcodeDispatch.h"
6 | #include "X86Tables.h"
7 | #include "AArch64Backend/AArch64.h"
8 | #include "InterpreterBackend/Interpreter.h"
9 | #include "LLVMBackend/LLVM.h"
10 | #include 
11 | #include 
12 | #include 
13 | #include 
14 | 
15 | static uint64_t AlignUp(uint64_t value, uint64_t size) {
16 |   return value + (size - value % size) % size;
17 | };
18 | static uint64_t AlignDown(uint64_t value, uint64_t size) {
19 |   return value - value % size;
20 | };
21 | 
22 | constexpr uint64_t PAGE_SIZE = 4096;
23 | constexpr uint64_t FS_OFFSET = 0xb000'0000;
24 | 
25 | constexpr uint64_t STACK_SIZE = 8 * 1024 * 1024;
26 | constexpr uint64_t STACK_OFFSET = 0xc000'0000;
27 | 
28 | static void hook_unmapped(uc_engine *uc, uc_mem_type type, uint64_t address,
29 |                           int size, int64_t value, void *user_data) {
30 |   uint64_t rip;
31 | 
32 |   Emu::CPUCore::ThreadState *state = (Emu::CPUCore::ThreadState*)user_data;
33 | 
34 |   uc_reg_read(uc, UC_X86_REG_RIP, &rip);
35 |   printf(">>> %ld: RIP is 0x%"
PRIx64 "\n", state->threadmanager.GetTID(), rip); 36 | 37 | printf("Attempted to access 0x%zx with type %d, size 0x%08x\n", address, type, 38 | size); 39 | 40 | uc_mem_region *regions; 41 | uint32_t regioncount; 42 | uc_mem_regions(uc, ®ions, ®ioncount); 43 | 44 | for (uint32_t i = 0; i < regioncount; ++i) { 45 | printf("%d: %zx - %zx\n", i, regions[i].begin, regions[i].end); 46 | } 47 | } 48 | 49 | uint64_t LastInstSize = 0; 50 | std::atomic StopRunning {false}; 51 | bool SizesDidNotMatch = false; 52 | static void hook_code64(uc_engine *uc, uint64_t address, uint32_t size, 53 | void *user_data) { 54 | 55 | uint64_t rip; 56 | 57 | uc_reg_read(uc, UC_X86_REG_RIP, &rip); 58 | printf(">>> Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", 59 | address, size); 60 | 61 | std::string Name = "???"; 62 | printf(">>> RIP is 0x%" PRIx64 " %s\n", rip, Name.c_str()); 63 | 64 | if (LastInstSize != size) { 65 | StopRunning = true; 66 | SizesDidNotMatch = true; 67 | } 68 | } 69 | 70 | namespace Emu { 71 | void CPUCore::SetGS(ThreadState *Thread) { 72 | uc_x86_msr Val; 73 | Val.rid = 0xC0000101; 74 | 75 | Val.value = Thread->CPUState.gs; 76 | uc_reg_write(Thread->uc, UC_X86_REG_MSR, &Val); 77 | } 78 | 79 | void CPUCore::SetFS(ThreadState *Thread) { 80 | uc_x86_msr Val; 81 | Val.rid = 0xC0000100; 82 | Val.value = Thread->CPUState.fs; 83 | uc_reg_write(Thread->uc, UC_X86_REG_MSR, &Val); 84 | } 85 | 86 | void CPUCore::SetGS(ThreadState *Thread, uint64_t Value) { 87 | Thread->CPUState.gs = Value; 88 | SetGS(Thread); 89 | } 90 | 91 | void CPUCore::SetFS(ThreadState *Thread, uint64_t Value) { 92 | Thread->CPUState.fs = Value; 93 | SetFS(Thread); 94 | } 95 | 96 | void *CPUCore::MapRegion(ThreadState *Thread, uint64_t Offset, uint64_t Size) { 97 | void *Ptr = MemoryMapper->MapRegion(Offset, Size); 98 | 99 | uc_err err = uc_mem_map_ptr(Thread->uc, Offset, Size, UC_PROT_ALL, Ptr); 100 | if (err) { 101 | printf("Failed on uc_mem_map() with error returned %u: %s\n", err, 102 | uc_strerror(err)); 103 | } 104 | return Ptr; 105 | } 106 | 107 | void CPUCore::MapRegionOnAll(uint64_t Offset, uint64_t Size) { 108 | PauseThreads = true; 109 | while (NumThreadsPaused.load() != (Threads.size() - 1)); 110 | 111 | for (auto Thread : Threads) { 112 | MapRegion(Thread, Offset, Size); 113 | } 114 | PauseThreads = false; 115 | } 116 | 117 | CPUCore::CPUCore(Memmap *Mapper) 118 | : MemoryMapper{Mapper} 119 | , syscallhandler {this} { 120 | X86Tables::InitializeInfoTables(); 121 | IR::InstallOpcodeHandlers(); 122 | } 123 | 124 | void CPUCore::Init(std::string const &File) { 125 | // XXX: Allow runtime selection between cores 126 | #if 1 127 | Backend.reset(CreateLLVMBackend(this)); 128 | #elif 0 129 | Backend.reset(new AArch64()); 130 | #else 131 | Backend.reset(new Interpreter()); 132 | #endif 133 | InitThread(File); 134 | 135 | } 136 | 137 | void CPUCore::RunLoop() { 138 | for (auto &Thread : Threads) 139 | Thread->ExecutionThread.join(); 140 | } 141 | 142 | thread_local CPUCore::ThreadState* TLSThread; 143 | CPUCore::ThreadState *CPUCore::GetTLSThread() { 144 | return TLSThread; 145 | } 146 | 147 | void CPUCore::InitThread(std::string const &File) { 148 | ThreadState *threadstate{nullptr}; 149 | 150 | { 151 | std::lock_guard lk(CPUThreadLock); 152 | threadstate = Threads.emplace_back(new ThreadState{this}); 153 | threadstate->threadmanager.TID = ++lastThreadID; 154 | } 155 | 156 | threadstate->StopRunning = false; 157 | threadstate->ShouldStart = false; 158 | 159 | // Initialize default CPU state 160 | 
threadstate->CPUState.rip = ~0ULL; 161 | for (int i = 0; i < 16; ++i) { 162 | threadstate->CPUState.gregs[i] = 0; 163 | } 164 | for (int i = 0; i < 16; ++i) { 165 | threadstate->CPUState.xmm[i][0] = 0xDEADBEEFULL; 166 | threadstate->CPUState.xmm[i][1] = 0xBAD0DAD1ULL; 167 | } 168 | 169 | threadstate->CPUState.gs = 0; 170 | threadstate->CPUState.fs = 0; 171 | threadstate->CPUState.rflags = 2ULL; // Initializes to this value 172 | 173 | ::ELFLoader::ELFContainer file(File); 174 | 175 | uc_engine *uc; 176 | uc_err err; 177 | 178 | auto MemLayout = file.GetLayout(); 179 | printf("Emulate x86_64 code\n"); 180 | 181 | // Initialize emulator in X86-64bit mode 182 | err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc); 183 | LogMan::Throw::A(!err, "Failed on uc_open()"); 184 | LogMan::Throw::A(uc != 0, "Failed on uc_open()"); 185 | 186 | threadstate->uc = uc; 187 | { 188 | uint64_t BasePtr = AlignDown(std::get<0>(MemLayout), PAGE_SIZE); 189 | uint64_t BaseSize = AlignUp(std::get<2>(MemLayout), PAGE_SIZE); 190 | 191 | MapRegion(threadstate, BasePtr, BaseSize); 192 | MapRegion(threadstate, STACK_OFFSET, STACK_SIZE); 193 | MapRegion(threadstate, FS_OFFSET, 0x1000); 194 | } 195 | 196 | uint64_t rsp = STACK_OFFSET + STACK_SIZE; 197 | 198 | const std::vector Values = { 199 | 2, 0, 0, 0, 0, 0, 0, 0, // Argument count 200 | 0, 0, 0, 0, 0, 0, 0, 0, // Argument0 pointer 201 | 0, 0, 0, 0, 0, 0, 0, 0, // Argument1 pointer 202 | 'B', 'u', 't', 't', 's', // Argument0 203 | '\0', 204 | }; 205 | 206 | rsp -= Values.size() + 0x1000; 207 | uint64_t arg0offset = rsp + 8; 208 | uint64_t arg0value = rsp + 24; 209 | uc_mem_write(uc, rsp, &Values.at(0), Values.size()); 210 | uc_mem_write(uc, arg0offset, &arg0value, 8); 211 | uc_mem_write(uc, arg0offset + 8, &arg0value, 8); 212 | 213 | auto Writer = [&](void *Data, uint64_t Addr, uint64_t Size) { 214 | // write machine code to be emulated to memory 215 | if (uc_mem_write(uc, Addr, Data, Size)) { 216 | LogMan::Msg::A("Failed to write emulation code to memory, quit!\n", ""); 217 | } 218 | }; 219 | 220 | file.WriteLoadableSections(Writer); 221 | 222 | threadstate->CPUState.gregs[REG_RSP] = rsp; 223 | threadstate->CPUState.rip = file.GetEntryPoint(); 224 | 225 | uc_reg_write(threadstate->uc, UC_X86_REG_RSP, &threadstate->CPUState.gregs[REG_RSP]); 226 | uc_reg_write(threadstate->uc, UC_X86_REG_RIP, &threadstate->CPUState.rip); 227 | 228 | // tracing all instructions in the range [EntryPoint, EntryPoint+20] 229 | // uc_hook_add(uc, &hooks.emplace_back(), UC_HOOK_CODE, (void*)hook_code64, nullptr, 1, 0); 230 | 231 | uc_hook_add(threadstate->uc, &threadstate->hooks.emplace_back(), UC_HOOK_MEM_UNMAPPED, (void *)hook_unmapped, threadstate, 1, 232 | 0); 233 | 234 | ParentThread = threadstate; 235 | // Kick off the execution thread 236 | threadstate->ExecutionThread = std::thread(&CPUCore::ExecutionThread, this, threadstate); 237 | std::lock_guard lk(threadstate->StartRunningMutex); 238 | threadstate->ShouldStart = true; 239 | threadstate->StartRunning.notify_all(); 240 | 241 | } 242 | 243 | CPUCore::ThreadState *CPUCore::NewThread(X86State *NewState, uint64_t parent_tid, uint64_t child_tid) { 244 | ThreadState *threadstate{nullptr}; 245 | ThreadState *parenthread = GetTLSThread(); 246 | 247 | { 248 | std::lock_guard lk(CPUThreadLock); 249 | threadstate = Threads.emplace_back(new ThreadState{this}); 250 | threadstate->threadmanager.TID = ++lastThreadID; 251 | } 252 | 253 | threadstate->StopRunning = false; 254 | threadstate->ShouldStart = false; 255 | // Initialize default CPU state 256 | // 
Since we are a new thread copy the data from the parent 257 | memcpy(&threadstate->CPUState, NewState, sizeof(X86State)); 258 | 259 | threadstate->threadmanager.parent_tid = parent_tid; 260 | threadstate->threadmanager.child_tid = child_tid; 261 | 262 | uc_engine *uc; 263 | uc_err err; 264 | 265 | printf("Emulate x86_64 code\n"); 266 | 267 | // Initialize emulator in X86-64bit mode 268 | err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc); 269 | threadstate->uc = uc; 270 | LogMan::Throw::A(!err, "Failed on uc_open()"); 271 | 272 | { 273 | // Copy the parent's memory mapping 274 | auto Regions = MemoryMapper->MappedRegions; 275 | for (auto const& region : Regions) { 276 | uc_err err = uc_mem_map_ptr(uc, 277 | region.Offset, 278 | region.Size, 279 | UC_PROT_ALL, 280 | region.Ptr); 281 | if (err) { 282 | printf("Failed on uc_mem_map() with error 0x%zx - 0x%zx from %p returned %u: %s\n", 283 | region.Offset, 284 | region.Offset + region.Size, 285 | region.Ptr, 286 | err, 287 | uc_strerror(err)); 288 | } 289 | } 290 | } 291 | 292 | { 293 | std::array GPRs = { 294 | UC_X86_REG_RIP, 295 | UC_X86_REG_RAX, 296 | UC_X86_REG_RBX, 297 | UC_X86_REG_RCX, 298 | UC_X86_REG_RDX, 299 | UC_X86_REG_RSI, 300 | UC_X86_REG_RDI, 301 | UC_X86_REG_RBP, 302 | UC_X86_REG_RSP, 303 | UC_X86_REG_R8, 304 | UC_X86_REG_R9, 305 | UC_X86_REG_R10, 306 | UC_X86_REG_R11, 307 | UC_X86_REG_R12, 308 | UC_X86_REG_R13, 309 | UC_X86_REG_R14, 310 | UC_X86_REG_R15, 311 | UC_X86_REG_XMM0, 312 | UC_X86_REG_XMM1, 313 | UC_X86_REG_XMM2, 314 | UC_X86_REG_XMM3, 315 | UC_X86_REG_XMM4, 316 | UC_X86_REG_XMM5, 317 | UC_X86_REG_XMM6, 318 | UC_X86_REG_XMM7, 319 | UC_X86_REG_XMM8, 320 | UC_X86_REG_XMM9, 321 | UC_X86_REG_XMM10, 322 | UC_X86_REG_XMM11, 323 | UC_X86_REG_XMM12, 324 | UC_X86_REG_XMM13, 325 | UC_X86_REG_XMM14, 326 | UC_X86_REG_XMM15, 327 | UC_X86_REG_EFLAGS, 328 | }; 329 | std::array GPRPointers = { 330 | &threadstate->CPUState.rip, 331 | &threadstate->CPUState.gregs[0], 332 | &threadstate->CPUState.gregs[1], 333 | &threadstate->CPUState.gregs[2], 334 | &threadstate->CPUState.gregs[3], 335 | &threadstate->CPUState.gregs[4], 336 | &threadstate->CPUState.gregs[5], 337 | &threadstate->CPUState.gregs[6], 338 | &threadstate->CPUState.gregs[7], 339 | &threadstate->CPUState.gregs[8], 340 | &threadstate->CPUState.gregs[9], 341 | &threadstate->CPUState.gregs[10], 342 | &threadstate->CPUState.gregs[11], 343 | &threadstate->CPUState.gregs[12], 344 | &threadstate->CPUState.gregs[13], 345 | &threadstate->CPUState.gregs[14], 346 | &threadstate->CPUState.gregs[15], 347 | &threadstate->CPUState.xmm[0], 348 | &threadstate->CPUState.xmm[1], 349 | &threadstate->CPUState.xmm[2], 350 | &threadstate->CPUState.xmm[3], 351 | &threadstate->CPUState.xmm[4], 352 | &threadstate->CPUState.xmm[5], 353 | &threadstate->CPUState.xmm[6], 354 | &threadstate->CPUState.xmm[7], 355 | &threadstate->CPUState.xmm[8], 356 | &threadstate->CPUState.xmm[9], 357 | &threadstate->CPUState.xmm[10], 358 | &threadstate->CPUState.xmm[11], 359 | &threadstate->CPUState.xmm[12], 360 | &threadstate->CPUState.xmm[13], 361 | &threadstate->CPUState.xmm[14], 362 | &threadstate->CPUState.xmm[15], 363 | &threadstate->CPUState.rflags, 364 | }; 365 | static_assert(GPRs.size() == GPRPointers.size()); 366 | 367 | uc_reg_write_batch(uc, &GPRs[0], &GPRPointers[0], GPRs.size()); 368 | SetGS(threadstate, parenthread->CPUState.gs); 369 | SetFS(threadstate, parenthread->CPUState.fs); 370 | } 371 | 372 | // tracing all instructions in the range [EntryPoint, EntryPoint+20] 373 | // uc_hook_add(uc, 
&hooks.emplace_back(), UC_HOOK_CODE, (void*)hook_code64, nullptr, 1, 0); 374 | 375 | uc_hook_add(threadstate->uc, &threadstate->hooks.emplace_back(), UC_HOOK_MEM_UNMAPPED, (void *)hook_unmapped, threadstate, 1, 376 | 0); 377 | 378 | // Kick off the execution thread 379 | threadstate->ExecutionThread = std::thread(&CPUCore::ExecutionThread, this, threadstate); 380 | return threadstate; 381 | } 382 | 383 | void CPUCore::ExecutionThread(ThreadState *Thread) { 384 | printf("Spinning up the thread\n"); 385 | TLSThread = Thread; 386 | 387 | uint64_t TID = Thread->threadmanager.GetTID(); 388 | 389 | std::unique_lock lk(Thread->StartRunningMutex); 390 | Thread->StartRunning.wait(lk, [&Thread]{ return Thread->ShouldStart.load(); }); 391 | while (!StopRunning.load() && !Thread->StopRunning.load()) { 392 | // if (TID != 1) 393 | // printf(">>> %ld: RIP: 0x%zx\n", TID, Thread->CPUState.rip); 394 | 395 | if (0) { 396 | printf("\tRIP = 0x%zx\n", Thread->CPUState.rip); 397 | // printf("\tEFLAGS = 0x%zx\n", Thread->CPUState.rflags); 398 | // for (int i = 0; i < 16; ++i) { 399 | // printf("\tgreg[%d] = 0x%zx\n", i, Thread->CPUState.gregs[i]); 400 | // } 401 | } 402 | 403 | 404 | auto it = Thread->blockcache.FindBlock(Thread->CPUState.rip); 405 | if (it == Thread->blockcache.End()) { 406 | it = CompileBlock(Thread).first; 407 | } 408 | 409 | if (it != Thread->blockcache.End()) { 410 | // Holy crap, the block actually compiled? Run it! 411 | using BlockFn = void (*)(CPUCore *cpu); 412 | BlockFn Ptr; 413 | Ptr = (BlockFn)it->second; 414 | Ptr(this); 415 | } 416 | else { 417 | // printf("%ld fallback to unicorn\n", Thread->threadmanager.GetTID()); 418 | FallbackToUnicorn(Thread); 419 | } 420 | if (Thread->CPUState.rip == 0) { 421 | printf("%ld Hit zero\n", Thread->threadmanager.GetTID()); 422 | if (Thread->threadmanager.GetTID() == 1) { 423 | StopRunning = true; 424 | } 425 | break; 426 | } 427 | 428 | if (SizesDidNotMatch) 429 | printf("Instruction sizes didn't match!\n"); 430 | 431 | if (::StopRunning) 432 | break; 433 | if (PauseThreads.load()) { 434 | NumThreadsPaused++; 435 | while(PauseThreads.load()); 436 | NumThreadsPaused--; 437 | } 438 | } 439 | } 440 | 441 | std::pair CPUCore::CompileBlock(ThreadState *Thread) { 442 | void *CodePtr {nullptr}; 443 | uint64_t GuestRIP = Thread->CPUState.rip; 444 | 445 | uint8_t const *Code = MemoryMapper->GetPointer(GuestRIP); 446 | uint64_t TotalInstructions = 0; 447 | uint64_t TotalInstructionsLength = 0; 448 | static uint64_t MaxTotalInstructions = 0; 449 | bool Done = false; 450 | uint32_t MAXSIZE = ~0; 451 | bool HitRIPSetter = false; 452 | 453 | // Do we already have this in the IR cache? 454 | auto IR = Thread->irlists.find(GuestRIP); 455 | Emu::IR::IntrusiveIRList *IRList{nullptr}; 456 | if (IR == Thread->irlists.end()) { 457 | Thread->OpDispatcher.BeginBlock(); 458 | while (!Done) { 459 | bool HadDispatchError = false; 460 | auto Info = X86Tables::GetInstInfo(&Code[TotalInstructionsLength]); 461 | if (!Info.first) { 462 | printf("Unknown instruction encoding! 
0x%zx\n", GuestRIP + TotalInstructionsLength); 463 | StopRunning = true; 464 | Thread->OpDispatcher.ResetWorkingList(); 465 | return std::make_pair(Thread->blockcache.End(), false); 466 | } 467 | 468 | LastInstSize = Info.second.Size; 469 | Thread->JITRIP = GuestRIP + TotalInstructionsLength; 470 | if (Info.second.Flags & X86Tables::DECODE_FLAG_LOCK) { 471 | HadDispatchError = true; 472 | } 473 | else if (Info.first->OpcodeDispatcher) { 474 | Thread->OpDispatcher.AddRIPMarker(GuestRIP + TotalInstructionsLength); 475 | auto Fn = Info.first->OpcodeDispatcher; 476 | std::invoke(Fn, Thread->OpDispatcher, Info, &Code[TotalInstructionsLength]); 477 | if (Thread->OpDispatcher.HadDecodeFailure()) { 478 | // printf("Decode failure at 0x%zx\n", GuestRIP + TotalInstructionsLength); 479 | HadDispatchError = true; 480 | } 481 | else { 482 | TotalInstructionsLength += Info.second.Size; 483 | TotalInstructions++; 484 | } 485 | } 486 | else { 487 | HadDispatchError = true; 488 | } 489 | 490 | if (HadDispatchError) { 491 | if (TotalInstructions == 0) { 492 | // Couldn't handle any instruction in op dispatcher 493 | Thread->OpDispatcher.ResetWorkingList(); 494 | return std::make_pair(Thread->blockcache.End(), false); 495 | } 496 | else { 497 | // We had some instructions. Early exit 498 | Done = true; 499 | } 500 | } 501 | if (Info.first->Flags & X86Tables::FLAGS_BLOCK_END) { 502 | Done = true; 503 | } 504 | if (!HadDispatchError && (Info.first->Flags & X86Tables::FLAGS_SETS_RIP)) { 505 | Done = true; 506 | HitRIPSetter = true; 507 | } 508 | 509 | if (TotalInstructions >= MAXSIZE) { 510 | Done = true; 511 | } 512 | } 513 | 514 | Thread->OpDispatcher.EndBlock(HitRIPSetter ? 0 : TotalInstructionsLength); 515 | 516 | auto IR = Thread->irlists.emplace(std::make_pair(GuestRIP, Thread->OpDispatcher.GetWorkingIR())); 517 | Thread->OpDispatcher.ResetWorkingList(); 518 | 519 | // XXX: Analysis 520 | AnalysisPasses.FunctionManager.Run(); 521 | AnalysisPasses.BlockManager.Run(); 522 | 523 | // XXX: Optimization 524 | OptimizationPasses.FunctionManager.Run(); 525 | OptimizationPasses.BlockManager.Run(); 526 | // XXX: Code Emission 527 | IRList = &IR.first->second; 528 | } 529 | else { 530 | IRList = &IR->second; 531 | } 532 | 533 | if (GuestRIP >= 0x402350 && GuestRIP < 0x4023bc) { 534 | printf("Created block of %ld instructions from 0x%lx\n", TotalInstructions, GuestRIP); 535 | MaxTotalInstructions = TotalInstructions; 536 | } 537 | 538 | if (GuestRIP == 0x402350) { 539 | IRList->Dump(); 540 | } 541 | 542 | CodePtr = Backend->CompileCode(IRList); 543 | if (CodePtr) 544 | return std::make_pair(Thread->blockcache.AddBlockMapping(GuestRIP, CodePtr), true); 545 | else 546 | return std::make_pair(Thread->blockcache.End(), false); 547 | } 548 | 549 | void CPUCore::FallbackToUnicorn(ThreadState *Thread) { 550 | std::array GPRs = { 551 | UC_X86_REG_RIP, 552 | UC_X86_REG_RAX, 553 | UC_X86_REG_RBX, 554 | UC_X86_REG_RCX, 555 | UC_X86_REG_RDX, 556 | UC_X86_REG_RSI, 557 | UC_X86_REG_RDI, 558 | UC_X86_REG_RBP, 559 | UC_X86_REG_RSP, 560 | UC_X86_REG_R8, 561 | UC_X86_REG_R9, 562 | UC_X86_REG_R10, 563 | UC_X86_REG_R11, 564 | UC_X86_REG_R12, 565 | UC_X86_REG_R13, 566 | UC_X86_REG_R14, 567 | UC_X86_REG_R15, 568 | UC_X86_REG_XMM0, 569 | UC_X86_REG_XMM1, 570 | UC_X86_REG_XMM2, 571 | UC_X86_REG_XMM3, 572 | UC_X86_REG_XMM4, 573 | UC_X86_REG_XMM5, 574 | UC_X86_REG_XMM6, 575 | UC_X86_REG_XMM7, 576 | UC_X86_REG_XMM8, 577 | UC_X86_REG_XMM9, 578 | UC_X86_REG_XMM10, 579 | UC_X86_REG_XMM11, 580 | UC_X86_REG_XMM12, 581 | UC_X86_REG_XMM13, 582 | 
UC_X86_REG_XMM14, 583 | UC_X86_REG_XMM15, 584 | UC_X86_REG_EFLAGS, 585 | }; 586 | std::array GPRPointers = { 587 | &Thread->CPUState.rip, 588 | &Thread->CPUState.gregs[0], 589 | &Thread->CPUState.gregs[1], 590 | &Thread->CPUState.gregs[2], 591 | &Thread->CPUState.gregs[3], 592 | &Thread->CPUState.gregs[4], 593 | &Thread->CPUState.gregs[5], 594 | &Thread->CPUState.gregs[6], 595 | &Thread->CPUState.gregs[7], 596 | &Thread->CPUState.gregs[8], 597 | &Thread->CPUState.gregs[9], 598 | &Thread->CPUState.gregs[10], 599 | &Thread->CPUState.gregs[11], 600 | &Thread->CPUState.gregs[12], 601 | &Thread->CPUState.gregs[13], 602 | &Thread->CPUState.gregs[14], 603 | &Thread->CPUState.gregs[15], 604 | &Thread->CPUState.xmm[0], 605 | &Thread->CPUState.xmm[1], 606 | &Thread->CPUState.xmm[2], 607 | &Thread->CPUState.xmm[3], 608 | &Thread->CPUState.xmm[4], 609 | &Thread->CPUState.xmm[5], 610 | &Thread->CPUState.xmm[6], 611 | &Thread->CPUState.xmm[7], 612 | &Thread->CPUState.xmm[8], 613 | &Thread->CPUState.xmm[9], 614 | &Thread->CPUState.xmm[10], 615 | &Thread->CPUState.xmm[11], 616 | &Thread->CPUState.xmm[12], 617 | &Thread->CPUState.xmm[13], 618 | &Thread->CPUState.xmm[14], 619 | &Thread->CPUState.xmm[15], 620 | &Thread->CPUState.rflags, 621 | }; 622 | static_assert(GPRs.size() == GPRPointers.size()); 623 | 624 | auto SetUnicornRegisters = [&]() { 625 | uc_reg_write_batch(Thread->uc, &GPRs[0], &GPRPointers[0], GPRs.size()); 626 | SetGS(Thread); 627 | SetFS(Thread); 628 | }; 629 | 630 | auto LoadUnicornRegisters = [&]() { 631 | uc_reg_read_batch(Thread->uc, &GPRs[0], &GPRPointers[0], GPRs.size()); 632 | { 633 | uc_x86_msr Val; 634 | Val.rid = 0xC0000101; 635 | Val.value = 0; 636 | uc_reg_read(Thread->uc, UC_X86_REG_MSR, &Val); 637 | Thread->CPUState.gs = Val.value; 638 | } 639 | { 640 | uc_x86_msr Val; 641 | Val.rid = 0xC0000100; 642 | Val.value = 0; 643 | uc_reg_read(Thread->uc, UC_X86_REG_MSR, &Val); 644 | Thread->CPUState.fs = Val.value; 645 | } 646 | }; 647 | 648 | SetUnicornRegisters(); 649 | uc_err err = uc_emu_start(Thread->uc, Thread->CPUState.rip, 0, 0, 1); 650 | if (err) { 651 | printf("Failed on uc_emu_start() with error returned %u: %s\n", err, 652 | uc_strerror(err)); 653 | StopRunning = true; 654 | } 655 | 656 | LoadUnicornRegisters(); 657 | } 658 | } 659 | -------------------------------------------------------------------------------- /Source/Core/CPU/CPUCore.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "Core/CPU/BlockCache.h" 3 | #include "Core/CPU/CPUBackend.h" 4 | #include "Core/CPU/CPUState.h" 5 | #include "Core/CPU/PassManager.h" 6 | #include "Core/CPU/OpcodeDispatch.h" 7 | #include "Core/HLE/Syscalls/Syscalls.h" 8 | #include "Core/Memmap.h" 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | namespace Emu { 16 | class CPUCore final { 17 | public: 18 | friend SyscallHandler; 19 | CPUCore(Memmap *Mapper); 20 | 21 | struct ThreadState { 22 | ThreadState(CPUCore *cpu) 23 | : OpDispatcher{cpu} {} 24 | uc_engine *uc; 25 | std::vector hooks; 26 | std::map irlists; 27 | BlockCache blockcache; 28 | std::thread ExecutionThread; 29 | X86State CPUState{}; 30 | ThreadManagement threadmanager; 31 | IR::OpDispatchBuilder OpDispatcher; 32 | uint64_t JITRIP; 33 | std::condition_variable StartRunning; 34 | std::mutex StartRunningMutex; 35 | std::atomic ShouldStart; 36 | std::atomic StopRunning; 37 | }; 38 | 39 | std::atomic PauseThreads{false}; 40 | std::atomic NumThreadsPaused; 41 | void Init(std::string const 
&File); 42 | void RunLoop(); 43 | uint64_t lastThreadID = 0; 44 | std::vector Threads; 45 | ThreadState *ParentThread; 46 | std::mutex CPUThreadLock; 47 | 48 | static ThreadState *GetTLSThread(); 49 | 50 | Emu::IR::IntrusiveIRList const* GetIRList(ThreadState *Thread, uint64_t Address) { 51 | return &Thread->irlists.at(Address); 52 | } 53 | void SetGS(ThreadState *Thread, uint64_t Value); 54 | void SetFS(ThreadState *Thread, uint64_t Value); 55 | void *MapRegion(ThreadState *Thread, uint64_t Offset, uint64_t Size); 56 | void MapRegionOnAll(uint64_t Offset, uint64_t Size); 57 | 58 | void FallbackToUnicorn(ThreadState *Thread); 59 | 60 | ThreadState *NewThread(X86State *NewState, uint64_t parent_tid, uint64_t child_tid); 61 | 62 | Memmap *MemoryMapper; 63 | SyscallHandler syscallhandler; 64 | private: 65 | void InitThread(std::string const &File); 66 | void ExecutionThread(ThreadState *Thread); 67 | void SetGS(ThreadState *Thread); 68 | void SetFS(ThreadState *Thread); 69 | 70 | std::pair CompileBlock(ThreadState *Thread); 71 | std::atomic StopRunning {false}; 72 | uint32_t MaxBlockInstructions = 1; 73 | 74 | struct PassManagers { 75 | IR::BlockPassManager BlockManager; 76 | IR::FunctionPassManager FunctionManager; 77 | }; 78 | PassManagers AnalysisPasses; 79 | PassManagers OptimizationPasses; 80 | std::unique_ptr Backend; 81 | }; 82 | } 83 | -------------------------------------------------------------------------------- /Source/Core/CPU/CPUState.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | namespace Emu { 5 | 6 | constexpr unsigned REG_RAX = 0; 7 | constexpr unsigned REG_RBX = 1; 8 | constexpr unsigned REG_RCX = 2; 9 | constexpr unsigned REG_RDX = 3; 10 | constexpr unsigned REG_RSI = 4; 11 | constexpr unsigned REG_RDI = 5; 12 | constexpr unsigned REG_RBP = 6; 13 | constexpr unsigned REG_RSP = 7; 14 | constexpr unsigned REG_R8 = 8; 15 | constexpr unsigned REG_R9 = 9; 16 | constexpr unsigned REG_R10 = 10; 17 | constexpr unsigned REG_R11 = 11; 18 | constexpr unsigned REG_R12 = 12; 19 | constexpr unsigned REG_R13 = 13; 20 | constexpr unsigned REG_R14 = 14; 21 | constexpr unsigned REG_R15 = 15; 22 | 23 | struct X86State { 24 | uint64_t rip; 25 | uint64_t gregs[16]; 26 | uint64_t xmm[16][2]; 27 | uint64_t gs; 28 | uint64_t fs; 29 | uint64_t rflags; 30 | }; 31 | } 32 | -------------------------------------------------------------------------------- /Source/Core/CPU/IR.cpp: -------------------------------------------------------------------------------- 1 | #include "Core/CPU/IR.h" 2 | #include "Core/CPU/IntrusiveIRList.h" 3 | 4 | namespace Emu::IR { 5 | 6 | constexpr std::array IRNames = { 7 | "Constant", // sizeof(IROp_Constant), 8 | "LoadContext", // sizeof(IROp_LoadContext), 9 | "StoreContext", // sizeof(IROp_StoreContext), 10 | 11 | // Function management 12 | "BeginFunction", // sizeof(IROp_BeginFunction), 13 | "EndFunction", // sizeof(IROp_EndFunction), 14 | "GetArgument", // sizeof(IROp_GetArgument), 15 | "AllocateContext", // sizeof(IROp_AllocateContext), 16 | 17 | // Block management 18 | "BeginBlock", // sizeof(IROp_BeginBlock), // BeginBlock 19 | "EndBlock", // sizeof(IROp_EndBlock), // EndBlock 20 | 21 | // Branching 22 | "Jump", // sizeof(IROp_Jump), 23 | "CondJump", // sizeof(IROp_CondJump), 24 | "Call", // sizeof(IROp_Call), 25 | "ExternCall", // sizeof(IROp_ExternCall), 26 | "Syscall", // sizeof(IROp_Syscall), 27 | "Return", // sizeof(IROp_Return), 28 | 29 | // Instructions 30 | "Add", // 
sizeof(IROp_Add), 31 | "Sub", // sizeof(IROp_Sub), 32 | "Or", // sizeof(IROp_Or), 33 | "Xor", // sizeof(IROp_Xor), 34 | "Shl", // sizeof(IROp_Shl), 35 | "Shr", // sizeof(IROp_Shr), 36 | "And", // sizeof(IROp_And), 37 | "Nand", // sizeof(IROp_Nand), 38 | "BitExtract", // sizeof(IROp_BitExtract), 39 | "Select", // sizeof(IROp_Select), 40 | "Trunc_32", // sizeof(IROp_Trunc_32), 41 | "Trunc_16", // sizeof(IROp_Trunc_16), 42 | 43 | // Memory 44 | "LoadMem", // sizeof(IROp_LoadMem), 45 | 46 | // Misc 47 | "JmpTarget", // sizeof(IROp_JmpTarget), 48 | "RIPMarker", // sizeof(IROp_RIPMarker), 49 | "END", 50 | }; 51 | 52 | static_assert(IRNames[OP_LASTOP] == "END"); 53 | 54 | std::string_view const& GetName(IROps Op) { 55 | return IRNames[Op]; 56 | } 57 | 58 | void DumpConstantOp(size_t Offset, IROp_Header const *op) { 59 | auto ConstantOp = op->C(); 60 | printf("%%%zd = %s 0x%zx\n", Offset, GetName(op->Op).data(), ConstantOp->Constant); 61 | } 62 | 63 | void DumpLoadContextOp(size_t Offset, IROp_Header const *op) { 64 | auto LoadContextOp = op->C(); 65 | printf("%%%zd = %s 0x%x\n", Offset, GetName(op->Op).data(), LoadContextOp->Offset); 66 | } 67 | 68 | void DumpStoreContextOp(size_t Offset, IROp_Header const *op) { 69 | auto StoreContextOp = op->C(); 70 | printf("%s 0x%x\n", GetName(op->Op).data(), StoreContextOp->Offset); 71 | } 72 | 73 | void DumpBeginBlockOp(size_t Offset, IROp_Header const *op) { 74 | auto BeginBlockOp = op->C(); 75 | printf("%s\n", GetName(op->Op).data()); 76 | } 77 | 78 | void DumpEndBlockOp(size_t Offset, IROp_Header const *op) { 79 | auto EndBlockOp = op->C(); 80 | printf("%s %zd\n", GetName(op->Op).data(), EndBlockOp->RIPIncrement); 81 | } 82 | 83 | void DumpBinOp(size_t Offset, IROp_Header const *op) { 84 | auto BinOp = op->C(); 85 | printf("%%%zd = %s %%%d %%%d\n", Offset, GetName(op->Op).data(), BinOp->Args[0], BinOp->Args[1]); 86 | } 87 | 88 | void DumpLoadMemOp(size_t Offset, IROp_Header const *op) { 89 | auto LoadMemOp = op->C(); 90 | printf("%%%zd = %s [%%%d", Offset, GetName(op->Op).data(), LoadMemOp->Arg[0]); 91 | if (LoadMemOp->Arg[1] != ~0) { 92 | printf(" + %%%d]\n", LoadMemOp->Arg[1]); 93 | } 94 | else { 95 | printf("]\n"); 96 | } 97 | } 98 | 99 | void DumpCondJump(size_t Offset, IROp_Header const *op) { 100 | auto CondJump = op->C(); 101 | printf("%s %%%d %%%d\n", GetName(op->Op).data(), CondJump->Cond, CondJump->Target); 102 | } 103 | 104 | void DumpJmpTarget(size_t Offset, IROp_Header const *op) { 105 | auto JmpTarget = op->C(); 106 | printf("%%%zd: %s\n", Offset, GetName(op->Op).data()); 107 | } 108 | 109 | void DumpSyscall(size_t Offset, IROp_Header const *op) { 110 | auto Syscall = op->C(); 111 | printf("%s", GetName(op->Op).data()); 112 | for (int i = 0; i < 7; ++i) 113 | printf(" %%%d", Syscall->Arguments[i]); 114 | printf("\n"); 115 | } 116 | 117 | void DumpRIPMarker(size_t Offset, IROp_Header const *op) { 118 | auto RIPMarker = op->C(); 119 | printf("%s 0x%zx\n", GetName(op->Op).data(), RIPMarker->RIP); 120 | } 121 | 122 | void DumpInvalid(size_t Offset, IROp_Header const *op) { 123 | printf("%zd Invalid %s\n", Offset, GetName(op->Op).data()); 124 | } 125 | 126 | using OpDumpVisitor = void (*)(size_t, IROp_Header const*); 127 | constexpr std::array IRDump = { 128 | DumpConstantOp, // sizeof(IROp_Constant), 129 | DumpLoadContextOp, // sizeof(IROp_LoadContext), 130 | DumpStoreContextOp, // sizeof(IROp_StoreContext), 131 | 132 | // Function Management 133 | DumpInvalid, // sizeof(IROp_BeginFunction), 134 | DumpInvalid, // sizeof(IROp_EndFunction), 135 | 
DumpInvalid, // sizeof(IROp_GetArgument), 136 | DumpInvalid, // sizeof(IROp_AllocateContext), 137 | 138 | // Block Management 139 | DumpBeginBlockOp, // sizeof(IROp_BeginBlock), // BeginBlock 140 | DumpEndBlockOp, // sizeof(IROp_EndBlock), // EndBlock 141 | 142 | // Branching 143 | DumpInvalid, // sizeof(IROp_Jump), 144 | DumpCondJump, // sizeof(IROp_CondJump), 145 | DumpInvalid, // sizeof(IROp_Call), 146 | DumpInvalid, // sizeof(IROp_ExternCall), 147 | DumpSyscall, // sizeof(IROp_Syscall), 148 | DumpInvalid, // sizeof(IROp_Return), 149 | 150 | // Instructions 151 | DumpBinOp, // sizeof(IROp_Add), 152 | DumpBinOp, // sizeof(IROp_Sub), 153 | DumpBinOp, // sizeof(IROp_Or), 154 | DumpBinOp, // sizeof(IROp_Xor), 155 | DumpBinOp, // sizeof(IROp_Shl), 156 | DumpBinOp, // sizeof(IROp_Shr), 157 | DumpBinOp, // sizeof(IROp_And), 158 | DumpBinOp, // sizeof(IROp_Nand), 159 | DumpBinOp, // sizeof(IROp_BitExtract), 160 | DumpBinOp, // sizeof(IROp_Select), 161 | DumpBinOp, // sizeof(IROp_Trunc_32), 162 | DumpBinOp, // sizeof(IROp_Trunc_16), 163 | 164 | // Memory 165 | DumpLoadMemOp, // sizeof(IROp_LoadMem), 166 | 167 | // Misc 168 | DumpJmpTarget, // sizeof(IROp_JmpTarget), 169 | DumpRIPMarker, // sizeof(IROp_RIPMarker), 170 | DumpInvalid, 171 | }; 172 | 173 | static_assert(IRDump[OP_LASTOP] == DumpInvalid); 174 | 175 | void VisitOp(size_t Offset, IROp_Header const* op) { 176 | IRDump[op->Op](Offset, op); 177 | } 178 | 179 | void Dump(IntrusiveIRList const* IR) { 180 | size_t Size = IR->GetOffset(); 181 | 182 | size_t i = 0; 183 | while (i != Size) { 184 | auto op = IR->GetOp(i); 185 | VisitOp(i, op); 186 | i += GetSize(op->Op); 187 | } 188 | } 189 | 190 | } 191 | -------------------------------------------------------------------------------- /Source/Core/CPU/IR.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | 5 | namespace Emu::IR { 6 | 7 | using AlignmentType = uint32_t; 8 | class IntrusiveIRList; 9 | enum IROps : unsigned { 10 | OP_CONSTANT = 0, 11 | OP_LOADCONTEXT, 12 | OP_STORECONTEXT, 13 | 14 | // Function managment 15 | OP_BEGINFUNCTION, 16 | OP_ENDFUNCTION, 17 | OP_GETARGUMENT, 18 | OP_ALLOCATE_CONTEXT, 19 | 20 | // Block management 21 | OP_BEGINBLOCK, 22 | OP_ENDBLOCK, 23 | 24 | // Branching 25 | OP_JUMP, 26 | OP_COND_JUMP, 27 | OP_CALL, 28 | OP_EXTERN_CALL, 29 | OP_SYSCALL, 30 | OP_RETURN, 31 | 32 | // Instructions 33 | OP_ADD, 34 | OP_SUB, 35 | OP_OR, 36 | OP_XOR, 37 | OP_SHL, 38 | OP_SHR, 39 | OP_AND, 40 | OP_NAND, 41 | OP_BITEXTRACT, 42 | OP_SELECT, 43 | OP_TRUNC_32, 44 | OP_TRUNC_16, 45 | 46 | // Memory 47 | OP_LOAD_MEM, 48 | 49 | // Misc 50 | OP_JUMP_TGT, 51 | OP_RIP_MARKER, 52 | 53 | OP_LASTOP, 54 | }; 55 | 56 | struct IROp_Header { 57 | void* Data[0]; 58 | IROps Op : 8; 59 | 60 | template 61 | T const* C() const { return reinterpret_cast(Data); } 62 | } __attribute__((packed)); 63 | 64 | enum TYPE_FLAGS { 65 | TYPE_I1 = (1 << 0), 66 | TYPE_I8 = (1 << 1), 67 | TYPE_I16 = (1 << 2), 68 | TYPE_I32 = (1 << 3), 69 | TYPE_I64 = (1 << 4), 70 | TYPE_SIGNED = (1 << 5), 71 | }; 72 | 73 | struct IROp_Constant { 74 | IROp_Header Header; 75 | uint8_t Flags; 76 | uint64_t Constant; 77 | }; 78 | 79 | struct IROp_LoadContext { 80 | IROp_Header Header; 81 | uint8_t Size; 82 | uint32_t Offset; 83 | }; 84 | 85 | struct IROp_StoreContext { 86 | IROp_Header Header; 87 | uint8_t Size; 88 | uint32_t Offset; 89 | AlignmentType Arg; 90 | }; 91 | 92 | struct IROp_BeginFunction { 93 | IROp_Header Header; 94 | uint8_t Arguments; 95 | 
bool HasReturn; 96 | }; 97 | 98 | struct IROp_GetArgument{ 99 | IROp_Header Header; 100 | uint8_t Argument; 101 | AlignmentType Arg; 102 | }; 103 | 104 | struct IROp_AllocateContext{ 105 | IROp_Header Header; 106 | uint64_t Size; 107 | AlignmentType Arg; 108 | }; 109 | 110 | struct IROp_Empty { 111 | IROp_Header Header; 112 | }; 113 | 114 | using IROp_EndFunction = IROp_Empty; 115 | using IROp_BeginBlock = IROp_Empty; 116 | using IROp_JmpTarget = IROp_Empty; 117 | 118 | struct IROp_EndBlock { 119 | IROp_Header Header; 120 | uint64_t RIPIncrement; 121 | }; 122 | 123 | // Instruction structs 124 | struct IROp_MonoOp { 125 | IROp_Header Header; 126 | AlignmentType Arg; 127 | }; 128 | 129 | struct IROp_BiOp { 130 | IROp_Header Header; 131 | AlignmentType Args[2]; 132 | }; 133 | 134 | struct IROp_TriOp { 135 | IROp_Header Header; 136 | AlignmentType Args[3]; 137 | }; 138 | 139 | // MonoOps 140 | using IROp_Return = IROp_MonoOp; 141 | using IROp_Trunc_32 = IROp_MonoOp; 142 | using IROp_Trunc_16 = IROp_MonoOp; 143 | 144 | // BiOps 145 | using IROp_Add = IROp_BiOp; 146 | using IROp_Sub = IROp_BiOp; 147 | using IROp_Or = IROp_BiOp; 148 | using IROp_Xor = IROp_BiOp; 149 | using IROp_Shl = IROp_BiOp; 150 | using IROp_Shr = IROp_BiOp; 151 | using IROp_And = IROp_BiOp; 152 | using IROp_Nand = IROp_BiOp; 153 | using IROp_BitExtract = IROp_BiOp; 154 | 155 | struct IROp_Select { 156 | enum ComparisonOp { 157 | COMP_EQ, 158 | COMP_NEQ, 159 | }; 160 | IROp_Header Header; 161 | ComparisonOp Op; 162 | AlignmentType Args[4]; 163 | }; 164 | 165 | struct IROp_LoadMem { 166 | IROp_Header Header; 167 | uint8_t Size; 168 | AlignmentType Arg[2]; 169 | }; 170 | 171 | struct IROp_Jump { 172 | IROp_Header Header; 173 | AlignmentType Target; 174 | }; 175 | 176 | struct IROp_CondJump { 177 | IROp_Header Header; 178 | AlignmentType Cond; 179 | AlignmentType Target; 180 | uint64_t RIPTarget; 181 | }; 182 | 183 | struct IROp_Call { 184 | IROp_Header Header; 185 | AlignmentType Target; 186 | }; 187 | 188 | struct IROp_ExternCall { 189 | IROp_Header Header; 190 | AlignmentType Target; 191 | }; 192 | 193 | struct IROp_Syscall { 194 | IROp_Header Header; 195 | // Maximum number of arguments including Syscall ID 196 | static constexpr std::size_t MAX_ARGS = 7; 197 | AlignmentType Arguments[MAX_ARGS]; 198 | }; 199 | 200 | struct IROp_RIPMarker { 201 | IROp_Header Header; 202 | uint64_t RIP; 203 | }; 204 | 205 | constexpr std::array IRSizes = { 206 | sizeof(IROp_Constant), 207 | sizeof(IROp_LoadContext), 208 | sizeof(IROp_StoreContext), 209 | 210 | // Function management 211 | sizeof(IROp_BeginFunction), 212 | sizeof(IROp_EndFunction), 213 | sizeof(IROp_GetArgument), 214 | sizeof(IROp_AllocateContext), 215 | 216 | // Block Management 217 | sizeof(IROp_BeginBlock), // BeginBlock 218 | sizeof(IROp_EndBlock), // EndBlock 219 | 220 | // Branching 221 | sizeof(IROp_Jump), 222 | sizeof(IROp_CondJump), 223 | sizeof(IROp_Call), 224 | sizeof(IROp_ExternCall), 225 | sizeof(IROp_Syscall), 226 | sizeof(IROp_Return), 227 | 228 | // Instructions 229 | sizeof(IROp_Add), 230 | sizeof(IROp_Sub), 231 | sizeof(IROp_Or), 232 | sizeof(IROp_Xor), 233 | sizeof(IROp_Shl), 234 | sizeof(IROp_Shr), 235 | sizeof(IROp_And), 236 | sizeof(IROp_Nand), 237 | sizeof(IROp_BitExtract), 238 | sizeof(IROp_Select), 239 | sizeof(IROp_Trunc_32), 240 | sizeof(IROp_Trunc_16), 241 | 242 | // Memory 243 | sizeof(IROp_LoadMem), 244 | 245 | // Misc 246 | sizeof(IROp_JmpTarget), 247 | sizeof(IROp_RIPMarker), 248 | -1ULL, 249 | }; 250 | 251 | // Try to make sure our array mape 
directly to the IROps enum 252 | static_assert(IRSizes[OP_LASTOP] == -1ULL); 253 | 254 | std::string_view const& GetName(IROps Op); 255 | static size_t GetSize(IROps Op) { return IRSizes[Op]; } 256 | 257 | void Dump(IntrusiveIRList const* IR); 258 | } 259 | -------------------------------------------------------------------------------- /Source/Core/CPU/InterpreterBackend/Interpreter.cpp: -------------------------------------------------------------------------------- 1 | #include "Core/CPU/CPUCore.h" 2 | #include "Core/CPU/IR.h" 3 | #include "Core/CPU/IntrusiveIRList.h" 4 | #include "Interpreter.h" 5 | #include "LogManager.h" 6 | #include 7 | 8 | namespace Emu { 9 | 10 | static void TestCompilation(CPUCore *cpu) { 11 | auto threadstate = cpu->GetTLSThread(); 12 | auto IR = cpu->GetIRList(threadstate, threadstate->CPUState.rip); 13 | auto Size = IR->GetOffset(); 14 | 15 | size_t i = 0; 16 | // XXX: Change this map over to a register bank that supports real RA 17 | // Works now for testing 18 | std::map Values; 19 | 20 | bool End = false; 21 | while (i != Size && !End) { 22 | auto op = IR->GetOp(i); 23 | size_t opSize = Emu::IR::GetSize(op->Op); 24 | 25 | switch (op->Op) { 26 | case IR::OP_BEGINBLOCK: 27 | break; 28 | case IR::OP_JUMP_TGT: 29 | printf("Landed on jump target\n"); 30 | break; 31 | case IR::OP_ENDBLOCK: { 32 | auto EndOp = op->C(); 33 | threadstate->CPUState.rip += EndOp->RIPIncrement; 34 | // If we hit an end block that isn't at the end of the stream that means we need to early exit 35 | // Just set ourselves to the end regardless 36 | End = true; 37 | break; 38 | } 39 | case IR::OP_COND_JUMP: { 40 | auto JumpOp = op->C(); 41 | if (!!Values[JumpOp->Cond]) 42 | i = JumpOp->Target - opSize; 43 | } 44 | break; 45 | case IR::OP_CONSTANT: { 46 | auto ConstantOp = op->C(); 47 | Values[i] = ConstantOp->Constant; 48 | } 49 | break; 50 | case IR::OP_SYSCALL: { 51 | auto SyscallOp = op->C(); 52 | 53 | Emu::SyscallHandler::SyscallArguments Args; 54 | for (int i = 0; i < IR::IROp_Syscall::MAX_ARGS; ++i) 55 | Args.Argument[i] = Values[SyscallOp->Arguments[i]]; 56 | 57 | uint64_t Res = cpu->syscallhandler.HandleSyscall(&Args); 58 | Values[i] = Res; 59 | break; 60 | } 61 | case IR::OP_LOADCONTEXT: { 62 | auto LoadOp = op->C(); 63 | LogMan::Throw::A(LoadOp->Size == 8, "Can only handle 8 byte"); 64 | 65 | uintptr_t ContextPtr = reinterpret_cast(&threadstate->CPUState); 66 | ContextPtr += LoadOp->Offset; 67 | 68 | uint64_t *ContextData = reinterpret_cast(ContextPtr); 69 | Values[i] = *ContextData; 70 | break; 71 | } 72 | case IR::OP_STORECONTEXT: { 73 | auto StoreOp = op->C(); 74 | LogMan::Throw::A(StoreOp->Size == 8, "Can only handle 8 byte"); 75 | 76 | uintptr_t ContextPtr = reinterpret_cast(&threadstate->CPUState); 77 | ContextPtr += StoreOp->Offset; 78 | 79 | uint64_t *ContextData = reinterpret_cast(ContextPtr); 80 | *ContextData = Values[StoreOp->Arg]; 81 | } 82 | break; 83 | case IR::OP_ADD: { 84 | auto AddOp = op->C(); 85 | Values[i] = Values[AddOp->Args[0]] + Values[AddOp->Args[1]]; 86 | } 87 | break; 88 | case IR::OP_SUB: { 89 | auto SubOp = op->C(); 90 | Values[i] = Values[SubOp->Args[0]] - Values[SubOp->Args[1]]; 91 | } 92 | break; 93 | case IR::OP_OR: { 94 | auto OrOp = op->C(); 95 | Values[i] = Values[OrOp->Args[0]] | Values[OrOp->Args[1]]; 96 | } 97 | break; 98 | case IR::OP_XOR: { 99 | auto XorOp = op->C(); 100 | Values[i] = Values[XorOp->Args[0]] ^ Values[XorOp->Args[1]]; 101 | } 102 | break; 103 | case IR::OP_SHL: { 104 | auto ShlOp = op->C(); 105 | Values[i] = 
Values[ShlOp->Args[0]] << Values[ShlOp->Args[1]]; 106 | } 107 | break; 108 | case IR::OP_SHR: { 109 | auto ShrOp = op->C(); 110 | Values[i] = Values[ShrOp->Args[0]] >> Values[ShrOp->Args[1]]; 111 | } 112 | break; 113 | case IR::OP_AND: { 114 | auto AndOp = op->C(); 115 | Values[i] = Values[AndOp->Args[0]] & Values[AndOp->Args[1]]; 116 | } 117 | break; 118 | case IR::OP_NAND: { 119 | auto NandOp = op->C(); 120 | Values[i] = Values[NandOp->Args[0]] & ~(Values[NandOp->Args[1]]); 121 | } 122 | break; 123 | case IR::OP_BITEXTRACT: { 124 | auto BitExtractOp = op->C(); 125 | Values[i] = (Values[BitExtractOp->Args[0]] >> Values[BitExtractOp->Args[1]]) & 1; 126 | } 127 | break; 128 | case IR::OP_SELECT: { 129 | auto SelectOp = op->C(); 130 | switch (SelectOp->Op) { 131 | case IR::IROp_Select::COMP_EQ: 132 | Values[i] = Values[SelectOp->Args[0]] == Values[SelectOp->Args[1]] ? 133 | Values[SelectOp->Args[2]] : Values[SelectOp->Args[3]]; 134 | break; 135 | case IR::IROp_Select::COMP_NEQ: 136 | Values[i] = Values[SelectOp->Args[0]] != Values[SelectOp->Args[1]] ? 137 | Values[SelectOp->Args[2]] : Values[SelectOp->Args[3]]; 138 | break; 139 | 140 | }; 141 | } 142 | break; 143 | 144 | case IR::OP_TRUNC_32: { 145 | auto Trunc_32Op = op->C(); 146 | Values[i] = Values[Trunc_32Op->Arg] & 0xFFFFFFFFULL; 147 | } 148 | break; 149 | case IR::OP_TRUNC_16: { 150 | auto Trunc_16Op = op->C(); 151 | Values[i] = Values[Trunc_16Op->Arg] & 0x0000FFFFULL; 152 | } 153 | break; 154 | case IR::OP_LOAD_MEM: { 155 | auto LoadMemOp = op->C(); 156 | uint64_t Src = Values[LoadMemOp->Arg[0]]; 157 | if (LoadMemOp->Arg[1] != ~0) 158 | Src += Values[LoadMemOp->Arg[1]]; 159 | 160 | void *ptr = cpu->MemoryMapper->GetPointer(Src); 161 | switch (LoadMemOp->Size) { 162 | case 4: 163 | printf("32bit load from %zx(%d)\n", Src, *(uint32_t*)ptr); 164 | Values[i] = *(uint32_t*)ptr; 165 | break; 166 | case 8: 167 | Values[i] = *(uint64_t*)ptr; 168 | break; 169 | default: 170 | printf("Unknown LoadSize: %d\n", LoadMemOp->Size); 171 | std::abort(); 172 | break; 173 | } 174 | } 175 | break; 176 | default: 177 | printf("Unknown IR Op: %d(%s)\n", op->Op, Emu::IR::GetName(op->Op).data()); 178 | std::abort(); 179 | break; 180 | } 181 | 182 | i += opSize; 183 | } 184 | } 185 | 186 | void* Interpreter::CompileCode(Emu::IR::IntrusiveIRList const *ir) { 187 | return (void*)TestCompilation; 188 | }; 189 | } 190 | -------------------------------------------------------------------------------- /Source/Core/CPU/InterpreterBackend/Interpreter.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "Core/CPU/CPUBackend.h" 3 | 4 | namespace Emu { 5 | class Interpreter final : public CPUBackend { 6 | public: 7 | std::string GetName() override { return "Interpreter"; } 8 | void* CompileCode(Emu::IR::IntrusiveIRList const *ir) override; 9 | private: 10 | 11 | 12 | }; 13 | } 14 | -------------------------------------------------------------------------------- /Source/Core/CPU/IntrusiveIRList.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "IR.h" 7 | 8 | namespace Emu::IR { 9 | 10 | class IntrusiveIRList final { 11 | public: 12 | IntrusiveIRList(size_t InitialSize) { 13 | IRList.resize(InitialSize); 14 | } 15 | 16 | IntrusiveIRList(const IntrusiveIRList &Other) { 17 | if (Other.CurrentOffset == 0) 18 | return; 19 | CurrentOffset = Other.CurrentOffset; 20 | IRList.resize(Other.CurrentOffset); 
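// Only the first CurrentOffset bytes of the source list hold IR ops that were actually allocated, so the copy below duplicates just that used prefix of the backing store.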
21 | memcpy(&IRList.at(0), &Other.IRList.at(0), Other.CurrentOffset); 22 | } 23 | 24 | template 25 | void CheckSize() { if ((CurrentOffset + sizeof(T)) > IRList.size()) IRList.resize(IRList.size() * 2); } 26 | 27 | // XXX: Clean this up 28 | template 29 | using IRPair = std::pair; 30 | 31 | template 32 | IRPair AllocateOp() { 33 | size_t OpEnum = IR::IRSizes[T2]; 34 | CheckSize(); 35 | auto Op = reinterpret_cast(&IRList.at(CurrentOffset)); 36 | Op->Header.Op = T2; 37 | AlignmentType Offset = CurrentOffset; 38 | CurrentOffset += Emu::IR::GetSize(T2); 39 | return std::make_pair(Op, Offset); 40 | } 41 | 42 | IntrusiveIRList& 43 | operator=(IntrusiveIRList Other) { 44 | IRList.resize(Other.CurrentOffset); 45 | memcpy(&IRList.at(0), &Other.IRList.at(0), Other.CurrentOffset); 46 | return *this; 47 | } 48 | 49 | size_t GetOffset() const { return CurrentOffset; } 50 | 51 | void Reset() { CurrentOffset = 0; } 52 | 53 | IROp_Header const* GetOp(size_t Offset) const { 54 | return reinterpret_cast(&IRList.at(Offset)); 55 | } 56 | 57 | template 58 | T const* GetOpAs(size_t Offset) const { 59 | return reinterpret_cast(&IRList.at(Offset)); 60 | } 61 | 62 | void Dump() const { Emu::IR::Dump(this); } 63 | 64 | private: 65 | AlignmentType CurrentOffset{0}; 66 | std::vector IRList; 67 | }; 68 | } 69 | -------------------------------------------------------------------------------- /Source/Core/CPU/LLVMBackend/LLVM.cpp: -------------------------------------------------------------------------------- 1 | #include "Core/CPU/CPUCore.h" 2 | #include "Core/CPU/IR.h" 3 | #include "Core/CPU/IntrusiveIRList.h" 4 | #include "LLVM.h" 5 | #include "LogManager.h" 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | using namespace llvm; 26 | 27 | namespace Emu { 28 | class LLVM final : public CPUBackend { 29 | public: 30 | LLVM(Emu::CPUCore* CPU); 31 | ~LLVM(); 32 | std::string GetName() override { return "LLVM"; } 33 | void* CompileCode(Emu::IR::IntrusiveIRList const *ir) override; 34 | 35 | private: 36 | void CreateGlobalVariables(llvm::ExecutionEngine *engine, llvm::Module *module); 37 | llvm::Value *CreateContextGEP(uint64_t Offset); 38 | void HandleIR(uint64_t Offset, IR::IROp_Header const* op); 39 | std::map Values; 40 | llvm::LLVMContext *con; 41 | LLVMContextRef conref; 42 | llvm::Module *mainmodule; 43 | llvm::IRBuilder<> *builder; 44 | std::vector functions; 45 | Emu::CPUCore *cpu; 46 | 47 | struct GlobalState { 48 | // CPUState 49 | llvm::Type *cpustatetype; 50 | GlobalVariable *cpustatevar; 51 | llvm::LoadInst *cpustate; 52 | llvm::Function *syscallfunction; 53 | llvm::Function *loadmem4function; 54 | llvm::Function *loadmem8function; 55 | }; 56 | GlobalState state; 57 | llvm::Function *func; 58 | std::vector BlockStack; 59 | std::unordered_map JumpTargets; 60 | std::unordered_map BlockJumpTargets; 61 | 62 | uint64_t CurrentRIP{0}; 63 | void FindJumpTargets(Emu::IR::IntrusiveIRList const *ir); 64 | }; 65 | 66 | LLVM::LLVM(Emu::CPUCore *CPU) 67 | : cpu {CPU} { 68 | using namespace llvm; 69 | InitializeNativeTarget(); 70 | InitializeNativeTargetAsmPrinter(); 71 | conref = LLVMContextCreate(); 72 | con = *llvm::unwrap(&conref); 73 | mainmodule = new llvm::Module("Main Module", *con); 74 | builder = new IRBuilder<>(*con); 75 | } 76 | 77 | LLVM::~LLVM() { 78 | delete builder; 79 | for (auto module : functions) 80 
| delete module; 81 | LLVMContextDispose(conref); 82 | } 83 | 84 | static uint64_t LoadMem4(CPUCore *cpu, uint64_t Offset) { 85 | return *cpu->MemoryMapper->GetBaseOffset(Offset); 86 | } 87 | 88 | static uint64_t LoadMem8(CPUCore *cpu, uint64_t Offset) { 89 | return *cpu->MemoryMapper->GetBaseOffset(Offset); 90 | } 91 | 92 | void LLVM::CreateGlobalVariables(llvm::ExecutionEngine *engine, llvm::Module *module) { 93 | // CPUState types 94 | Type *i64 = Type::getInt64Ty(*con); 95 | 96 | 97 | //uint64_t rip; 98 | //uint64_t gregs[16]; 99 | //uint64_t xmm[16][2]; 100 | //uint64_t gs; 101 | //uint64_t fs; 102 | state.cpustatetype = StructType::create(*con, 103 | { 104 | i64, // RIP 105 | ArrayType::get(i64, 16), 106 | ArrayType::get(ArrayType::get(i64, 2), 16), 107 | i64, 108 | i64, 109 | i64, 110 | }); 111 | module->getOrInsertGlobal("X86StateGlobal", state.cpustatetype->getPointerTo()); 112 | state.cpustatevar = module->getNamedGlobal("X86StateGlobal"); 113 | state.cpustatevar->setInitializer(ConstantInt::getIntegerValue(state.cpustatetype->getPointerTo(), APInt(64, (uint64_t)&cpu->ParentThread->CPUState))); 114 | state.cpustate = builder->CreateLoad(state.cpustatevar, builder->getInt32(0), "X86State"); 115 | 116 | { 117 | // struct SyscallArguments { 118 | // static constexpr std::size_t MAX_ARGS = 7; 119 | // uint64_t Argument[MAX_ARGS]; 120 | // }; 121 | 122 | auto functype = FunctionType::get(i64, 123 | { 124 | i64, 125 | ArrayType::get(i64, SyscallHandler::SyscallArguments::MAX_ARGS)->getPointerTo(), 126 | }, 127 | false); 128 | state.syscallfunction = Function::Create(functype, 129 | Function::ExternalLinkage, 130 | "Syscall", 131 | module); 132 | 133 | using ClassPtr = uint64_t (Emu::SyscallHandler::*)(SyscallHandler::SyscallArguments *Args); 134 | union Test{ 135 | ClassPtr ClassData; 136 | void* Data; 137 | }; 138 | Test A; 139 | A.ClassData = &Emu::SyscallHandler::HandleSyscall; 140 | engine->addGlobalMapping(state.syscallfunction, A.Data); 141 | 142 | auto loadmemfunctype = FunctionType::get(i64, 143 | { 144 | i64, 145 | i64, 146 | }, 147 | false); 148 | 149 | state.loadmem4function = Function::Create(loadmemfunctype, 150 | Function::ExternalLinkage, 151 | "LoadMem4", 152 | module); 153 | engine->addGlobalMapping(state.loadmem4function, (void*)LoadMem4); 154 | 155 | state.loadmem8function = Function::Create(loadmemfunctype, 156 | Function::ExternalLinkage, 157 | "LoadMem8", 158 | module); 159 | engine->addGlobalMapping(state.loadmem8function, (void*)LoadMem8); 160 | } 161 | } 162 | 163 | llvm::Value *LLVM::CreateContextGEP(uint64_t Offset) { 164 | std::vector gepvalues = { 165 | builder->getInt32(0), // "first" instance of cpustate 166 | }; 167 | if (Offset == 0) { // RIP 168 | gepvalues.emplace_back(builder->getInt32(0)); 169 | } 170 | else if (Offset >= offsetof(X86State, gregs) && Offset < offsetof(X86State, xmm)) { 171 | gepvalues.emplace_back(builder->getInt32(1)); 172 | gepvalues.emplace_back(builder->getInt32((Offset - offsetof(X86State, gregs)) / 8)); 173 | } 174 | else if (Offset == offsetof(X86State, gs)) { 175 | gepvalues.emplace_back(builder->getInt32(3)); 176 | } 177 | else if (Offset == offsetof(X86State, fs)) { 178 | gepvalues.emplace_back(builder->getInt32(4)); 179 | } 180 | else if (Offset == offsetof(X86State, rflags)) { 181 | gepvalues.emplace_back(builder->getInt32(5)); 182 | } 183 | else 184 | std::abort(); 185 | 186 | auto gep = builder->CreateGEP(state.cpustate, gepvalues, "Context"); 187 | return gep; 188 | } 189 | 190 | void LLVM::HandleIR(uint64_t Offset, 
IR::IROp_Header const* op) { 191 | // printf("IR Op %zd: %d(%s)\n", Offset, op->Op, Emu::IR::GetName(op->Op).c_str()); 192 | 193 | switch (op->Op) { 194 | case IR::OP_BEGINBLOCK: 195 | break; 196 | case IR::OP_RIP_MARKER: { 197 | auto Op = op->C(); 198 | CurrentRIP = Op->RIP; 199 | if (JumpTargets.find(CurrentRIP) != JumpTargets.end()) { 200 | // We are a jump target! 201 | // Create a new block 202 | auto NewBlock = BasicBlock::Create(*con, "new", func); 203 | // Make our current block branch in to the new one 204 | builder->CreateBr(NewBlock); 205 | 206 | builder->SetInsertPoint(NewBlock); 207 | BlockJumpTargets[CurrentRIP] = NewBlock; 208 | printf("\tAdding Block Target: %zx\n", CurrentRIP); 209 | } 210 | break; 211 | } 212 | case IR::OP_ENDBLOCK: { 213 | auto EndOp = op->C(); 214 | auto downcountValue = builder->CreateGEP(state.cpustate, 215 | { 216 | builder->getInt32(0), 217 | builder->getInt32(0), 218 | }, "RIP"); 219 | auto load = builder->CreateLoad(downcountValue); 220 | auto newvalue = builder->CreateAdd(load, builder->getInt64(EndOp->RIPIncrement)); 221 | builder->CreateStore(newvalue, downcountValue); 222 | builder->CreateRetVoid(); 223 | 224 | // If we are at the end of a block and we have blocks in our stack then change over to that as an active block 225 | if (BlockStack.size()) { 226 | builder->SetInsertPoint(BlockStack.back()); 227 | BlockStack.pop_back(); 228 | } 229 | break; 230 | } 231 | case IR::OP_JUMP_TGT: 232 | printf("Landed on jump target\n"); 233 | break; 234 | case IR::OP_COND_JUMP: { 235 | // Conditional jump 236 | // if the value is true then it'll jump to the target 237 | // if the value is false then it'll fall through 238 | auto JumpOp = op->C(); 239 | 240 | auto TruePath = BasicBlock::Create(*con, "true", func); 241 | auto FalsePath = BasicBlock::Create(*con, "false", func); 242 | 243 | auto Comp = builder->CreateICmpNE(Values[JumpOp->Cond], builder->getInt64(0)); 244 | 245 | printf("\tJUMP wanting to go to RIP: 0x%zx\n", JumpOp->RIPTarget); 246 | auto target = BlockJumpTargets.find(JumpOp->RIPTarget); 247 | if (target != BlockJumpTargets.end()) { 248 | printf("\tCOND JUMP Has a found rip target!\n"); 249 | builder->CreateCondBr(Comp, target->second, FalsePath); 250 | builder->SetInsertPoint(TruePath); 251 | } else { 252 | 253 | builder->CreateCondBr(Comp, TruePath, FalsePath); 254 | 255 | builder->SetInsertPoint(TruePath); 256 | } 257 | 258 | // Will create a dead block for us in the case we have a real target 259 | BlockStack.emplace_back(FalsePath); 260 | 261 | } 262 | break; 263 | case IR::OP_CONSTANT: { 264 | auto ConstantOp = op->C(); 265 | Values[Offset] = builder->getInt64(ConstantOp->Constant); 266 | } 267 | break; 268 | case IR::OP_SYSCALL: { 269 | auto SyscallOp = op->C(); 270 | 271 | std::vector Args; 272 | Args.emplace_back(builder->getInt64((uint64_t)&cpu->syscallhandler)); 273 | 274 | auto args = builder->CreateAlloca(ArrayType::get(Type::getInt64Ty(*con), SyscallHandler::SyscallArguments::MAX_ARGS)); 275 | for (int i = 0; i < IR::IROp_Syscall::MAX_ARGS; ++i) { 276 | auto location = builder->CreateGEP(args, 277 | { 278 | builder->getInt32(0), 279 | builder->getInt32(i), 280 | }, 281 | "Arg"); 282 | builder->CreateStore(Values[SyscallOp->Arguments[i]], location); 283 | } 284 | Args.emplace_back(args); 285 | 286 | Values[Offset] = builder->CreateCall(state.syscallfunction, Args); 287 | break; 288 | } 289 | 290 | case IR::OP_LOADCONTEXT: { 291 | auto LoadOp = op->C(); 292 | LogMan::Throw::A(LoadOp->Size == 8, "Can only handle 8 byte"); 293 | 
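// Resolve the context offset to a GEP into the CPUState struct, then load the 8-byte field as this op's result value.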
294 | auto Value = CreateContextGEP(LoadOp->Offset); 295 | auto load = builder->CreateLoad(Value); 296 | Values[Offset] = load; 297 | } 298 | break; 299 | case IR::OP_STORECONTEXT: { 300 | auto StoreOp = op->C(); 301 | LogMan::Throw::A(StoreOp->Size == 8, "Can only handle 8 byte"); 302 | 303 | auto Value = CreateContextGEP(StoreOp->Offset); 304 | builder->CreateStore(Values[StoreOp->Arg], Value); 305 | } 306 | break; 307 | case IR::OP_ADD: { 308 | auto AddOp = op->C(); 309 | Values[Offset] = builder->CreateAdd(Values[AddOp->Args[0]], Values[AddOp->Args[1]]); 310 | } 311 | break; 312 | case IR::OP_SUB: { 313 | auto SubOp = op->C(); 314 | Values[Offset] = builder->CreateSub(Values[SubOp->Args[0]], Values[SubOp->Args[1]]); 315 | } 316 | break; 317 | case IR::OP_OR: { 318 | auto OrOp = op->C(); 319 | Values[Offset] = builder->CreateOr(Values[OrOp->Args[0]], Values[OrOp->Args[1]]); 320 | } 321 | break; 322 | case IR::OP_XOR: { 323 | auto XorOp = op->C(); 324 | Values[Offset] = builder->CreateXor(Values[XorOp->Args[0]], Values[XorOp->Args[1]]); 325 | } 326 | break; 327 | case IR::OP_SHL: { 328 | auto ShlOp = op->C(); 329 | Values[Offset] = builder->CreateShl(Values[ShlOp->Args[0]], Values[ShlOp->Args[1]]); 330 | } 331 | break; 332 | case IR::OP_SHR: { 333 | auto ShrOp = op->C(); 334 | Values[Offset] = builder->CreateLShr(Values[ShrOp->Args[0]], Values[ShrOp->Args[1]]); 335 | } 336 | break; 337 | case IR::OP_AND: { 338 | auto AndOp = op->C(); 339 | Values[Offset] = builder->CreateAnd(Values[AndOp->Args[0]], Values[AndOp->Args[1]]); 340 | } 341 | break; 342 | case IR::OP_NAND: { 343 | auto NandOp = op->C(); 344 | Values[Offset] = builder->CreateAnd(Values[NandOp->Args[0]], builder->CreateNot(Values[NandOp->Args[1]])); 345 | } 346 | break; 347 | case IR::OP_SELECT: { 348 | auto SelectOp = op->C(); 349 | Value *Comp; 350 | switch (SelectOp->Op) { 351 | case IR::IROp_Select::COMP_EQ: 352 | Comp = builder->CreateICmpEQ(Values[SelectOp->Args[0]], Values[SelectOp->Args[1]]); 353 | break; 354 | case IR::IROp_Select::COMP_NEQ: 355 | Comp = builder->CreateICmpNE(Values[SelectOp->Args[0]], Values[SelectOp->Args[1]]); 356 | break; 357 | default: 358 | printf("Unknown comparison case\n"); 359 | std::abort(); 360 | break; 361 | }; 362 | Values[Offset] = builder->CreateSelect(Comp, Values[SelectOp->Args[2]], Values[SelectOp->Args[3]]); 363 | } 364 | break; 365 | case IR::OP_BITEXTRACT: { 366 | auto BitExtractOp = op->C(); 367 | auto SHR = builder->CreateLShr(Values[BitExtractOp->Args[0]], Values[BitExtractOp->Args[1]]); 368 | auto Mask = builder->CreateAnd(SHR, builder->getInt64(1)); 369 | Values[Offset] = Mask; 370 | } 371 | break; 372 | case IR::OP_TRUNC_32: { 373 | auto Trunc_32Op = op->C(); 374 | auto Arg = builder->CreateTrunc(Values[Trunc_32Op->Arg], Type::getInt32Ty(*con)); 375 | Arg = builder->CreateZExt(Arg, Type::getInt64Ty(*con)); 376 | Values[Offset] = Arg; 377 | } 378 | break; 379 | case IR::OP_LOAD_MEM: { 380 | auto LoadMemOp = op->C(); 381 | Value *Src = Values[LoadMemOp->Arg[0]]; 382 | if (LoadMemOp->Arg[1] != ~0) 383 | Src = builder->CreateAdd(Src, Values[LoadMemOp->Arg[1]]); 384 | #if 0 385 | std::vector Args; 386 | Args.emplace_back(builder->getInt64((uint64_t)cpu)); 387 | Args.emplace_back(Src); 388 | 389 | switch (LoadMemOp->Size) { 390 | case 4: 391 | Values[Offset] = builder->CreateCall(state.loadmem4function, Args); 392 | break; 393 | case 8: 394 | Values[Offset] = builder->CreateCall(state.loadmem8function, Args); 395 | break; 396 | default: 397 | printf("Unknown LoadSize: %d\n", 
LoadMemOp->Size); 398 | std::abort(); 399 | break; 400 | } 401 | #else 402 | Src = builder->CreateAdd(Src, builder->getInt64(cpu->MemoryMapper->GetBaseOffset(0))); 403 | 404 | switch (LoadMemOp->Size) { 405 | case 4: 406 | Src = builder->CreateIntToPtr(Src, Type::getInt32PtrTy(*con)); 407 | break; 408 | case 8: 409 | Src = builder->CreateIntToPtr(Src, Type::getInt64PtrTy(*con)); 410 | break; 411 | default: 412 | printf("Unknown LoadSize: %d\n", LoadMemOp->Size); 413 | std::abort(); 414 | break; 415 | } 416 | Values[Offset] = builder->CreateLoad(Src); 417 | #endif 418 | } 419 | break; 420 | 421 | default: 422 | printf("Unknown IR Op: %d(%s)\n", op->Op, Emu::IR::GetName(op->Op).data()); 423 | std::abort(); 424 | break; 425 | } 426 | } 427 | 428 | void LLVM::FindJumpTargets(Emu::IR::IntrusiveIRList const *ir) { 429 | auto Size = ir->GetOffset(); 430 | uint64_t i = 0; 431 | 432 | //printf("New Jump Target! Block: 0x%zx\n", cpu->ParentThread->CPUState.rip); 433 | uint64_t LocalRIP = cpu->ParentThread->CPUState.rip; 434 | 435 | // Walk through ops and remember IR Jump Targets 436 | std::unordered_map IRTargets; 437 | while (i != Size) { 438 | auto op = ir->GetOp(i); 439 | if (op->Op == IR::OP_COND_JUMP) { 440 | auto JumpOp = op->C(); 441 | IRTargets[JumpOp->Target] = true; 442 | JumpTargets[JumpOp->RIPTarget] = true; 443 | printf("\tAdding JumpTarget(COND_TGT): %zx\n", JumpOp->RIPTarget); 444 | 445 | } 446 | i += Emu::IR::GetSize(op->Op); 447 | } 448 | 449 | i = 0; 450 | while (i != Size) { 451 | auto op = ir->GetOp(i); 452 | if (op->Op == IR::OP_RIP_MARKER) { 453 | auto Op = op->C(); 454 | LocalRIP = Op->RIP; 455 | } 456 | else if (op->Op == IR::OP_JUMP_TGT) { 457 | JumpTargets[LocalRIP] = true; 458 | printf("\tAdding JumpTarget(TGT): %zx\n", LocalRIP); 459 | } 460 | 461 | // If this IR Op is a jump target 462 | if (IRTargets.find(i) != IRTargets.end()) { 463 | JumpTargets[LocalRIP] = true; 464 | printf("\tAdding JumpTarget(IRTGT): %zx\n", LocalRIP); 465 | } 466 | i += Emu::IR::GetSize(op->Op); 467 | } 468 | 469 | } 470 | 471 | void* LLVM::CompileCode(Emu::IR::IntrusiveIRList const *ir) { 472 | using namespace llvm; 473 | std::string FunctionName = "Function" + std::to_string(cpu->ParentThread->CPUState.rip); 474 | auto testmodule = new llvm::Module("Main Module", *con); 475 | auto engine = EngineBuilder(std::unique_ptr(testmodule)) 476 | .setEngineKind(EngineKind::JIT) 477 | .create(); 478 | 479 | auto functype = FunctionType::get(Type::getVoidTy(*con), {}, false); 480 | func = Function::Create(functype, 481 | Function::ExternalLinkage, 482 | FunctionName, 483 | testmodule); 484 | 485 | func->setCallingConv(CallingConv::C); 486 | 487 | llvm::Function *fallback; 488 | { 489 | auto functype = FunctionType::get(Type::getVoidTy(*con), {Type::getInt64Ty(*con)}, false); 490 | fallback = Function::Create(functype, 491 | Function::ExternalLinkage, 492 | "Fallback", 493 | testmodule); 494 | 495 | using ClassPtr = void(Emu::CPUCore::*)(CPUCore::ThreadState*); 496 | union Test{ 497 | ClassPtr ClassData; 498 | void* Data; 499 | }; 500 | Test A; 501 | A.ClassData = &CPUCore::FallbackToUnicorn; 502 | engine->addGlobalMapping(fallback, A.Data); 503 | } 504 | 505 | auto entry = BasicBlock::Create(*con, "entry", func); 506 | builder->SetInsertPoint(entry); 507 | 508 | CreateGlobalVariables(engine, testmodule); 509 | 510 | // XXX: Finding our jump targets shouldn't be this dumb 511 | JumpTargets.clear(); 512 | FindJumpTargets(ir); 513 | 514 | auto Size = ir->GetOffset(); 515 | uint64_t i = 0; 516 | 517 | 
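// Walk the IR stream front to back and lower each op; an op's byte offset in the stream doubles as the key for its result in the Values map.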
CurrentRIP = cpu->ParentThread->CPUState.rip; 518 | // printf("New Block: 0x%zx\n", cpu->ParentThread->CPUState.rip); 519 | while (i != Size) { 520 | auto op = ir->GetOp(i); 521 | HandleIR(i, op); 522 | i += Emu::IR::GetSize(op->Op); 523 | } 524 | 525 | legacy::PassManager PM; 526 | PassManagerBuilder PMBuilder; 527 | PMBuilder.OptLevel = 2; 528 | raw_ostream& out = outs(); 529 | if (cpu->ParentThread->CPUState.rip == 0x402350) 530 | PM.add(createPrintModulePass(out)); 531 | 532 | verifyModule(*testmodule, &out); 533 | PMBuilder.populateModulePassManager(PM); 534 | PM.run(*testmodule); 535 | engine->finalizeObject(); 536 | 537 | functions.emplace_back(engine); 538 | void *ptr = (void*)engine->getFunctionAddress(FunctionName); 539 | auto GetTime = []() { 540 | return std::chrono::high_resolution_clock::now(); 541 | }; 542 | if (cpu->ParentThread->CPUState.rip == 0x402350) 543 | { 544 | X86State state; 545 | memcpy(&state, &cpu->ParentThread->CPUState, sizeof(state)); 546 | 547 | using JITPtr = void (*)(CPUCore *); 548 | JITPtr call = (JITPtr)ptr; 549 | for (int i = 0; i < 5; ++i) { 550 | memcpy(&cpu->ParentThread->CPUState, &state, sizeof(state)); 551 | auto start = GetTime(); 552 | call(cpu); 553 | auto time = GetTime(); 554 | printf("Test from inside app: %zd %zd\n", cpu->ParentThread->CPUState.gregs[REG_RAX], (time - start).count()); 555 | } 556 | memcpy(&cpu->ParentThread->CPUState, &state, sizeof(state)); 557 | 558 | printf("Ptr: %p\n", ptr); 559 | // std::abort(); 560 | } 561 | 562 | return ptr; 563 | 564 | }; 565 | 566 | CPUBackend *CreateLLVMBackend(Emu::CPUCore *CPU) { 567 | return new LLVM(CPU); 568 | } 569 | 570 | } 571 | -------------------------------------------------------------------------------- /Source/Core/CPU/LLVMBackend/LLVM.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "Core/CPU/CPUBackend.h" 3 | 4 | namespace Emu { 5 | class CPUCore; 6 | CPUBackend *CreateLLVMBackend(Emu::CPUCore *CPU); 7 | } 8 | -------------------------------------------------------------------------------- /Source/Core/CPU/OpcodeDispatch.cpp: -------------------------------------------------------------------------------- 1 | #include "Core/CPU/CPUCore.h" 2 | #include "Core/CPU/X86Tables.h" 3 | #include "LogManager.h" 4 | #include "OpcodeDispatch.h" 5 | #include 6 | #include 7 | #include 8 | 9 | #define DISABLE_DECODE() do { DecodeFailure = true; return; } while(0) 10 | namespace Emu::IR { 11 | static uint8_t GetModRM_Mod(uint8_t modrm) { 12 | return (modrm & 0b11000000) >> 6; 13 | } 14 | static uint8_t GetModRM_Reg(uint8_t modrm) { 15 | return (modrm & 0b00111000) >> 3; 16 | } 17 | static uint8_t GetModRM_RM(uint8_t modrm) { 18 | return modrm & 0b111; 19 | } 20 | static uint8_t GetREX_W(uint8_t rex) { 21 | return (rex & 0b1000) >> 3; 22 | } 23 | static uint8_t GetREX_R(uint8_t rex) { 24 | return (rex & 0b0100) >> 2; 25 | } 26 | static uint8_t GetREX_X(uint8_t rex) { 27 | return (rex & 0b0010) >> 1; 28 | } 29 | static uint8_t GetREX_B(uint8_t rex) { 30 | return rex & 0b0001; 31 | } 32 | 33 | static uint32_t MapModRMToReg(uint8_t bits) { 34 | std::array GPRIndexes = { 35 | REG_RAX, 36 | REG_RCX, 37 | REG_RDX, 38 | REG_RBX, 39 | REG_RSP, 40 | REG_RBP, 41 | REG_RSI, 42 | REG_RDI, 43 | }; 44 | return GPRIndexes[bits]; 45 | } 46 | 47 | static uint32_t MapModRMToReg(uint8_t REX, uint8_t bits) { 48 | std::array GPRIndexes = { 49 | // Classical ordering? 
50 | REG_RAX, 51 | REG_RCX, 52 | REG_RDX, 53 | REG_RBX, 54 | REG_RSP, 55 | REG_RBP, 56 | REG_RSI, 57 | REG_RDI, 58 | REG_R8, 59 | REG_R9, 60 | REG_R10, 61 | REG_R11, 62 | REG_R12, 63 | REG_R13, 64 | REG_R14, 65 | REG_R15, 66 | }; 67 | return GPRIndexes[(REX << 3) | bits]; 68 | } 69 | 70 | static std::string RegToString(uint32_t Reg) { 71 | switch(Reg) { 72 | case REG_RAX: return "RAX"; 73 | case REG_RCX: return "RCX"; 74 | case REG_RDX: return "RDX"; 75 | case REG_RBX: return "RBX"; 76 | case REG_RSP: return "RSP"; 77 | case REG_RBP: return "RBP"; 78 | case REG_RSI: return "RSI"; 79 | case REG_RDI: return "RDI"; 80 | case REG_R8: return "R8"; 81 | case REG_R9: return "R9"; 82 | case REG_R10: return "R10"; 83 | case REG_R11: return "R11"; 84 | case REG_R12: return "R12"; 85 | case REG_R13: return "R13"; 86 | case REG_R14: return "R14"; 87 | case REG_R15: return "R15"; 88 | default: return "UNK"; 89 | }; 90 | } 91 | 92 | void OpDispatchBuilder::BeginBlock() { 93 | IRList.AllocateOp(); 94 | } 95 | 96 | void OpDispatchBuilder::EndBlock(uint64_t RIPIncrement) { 97 | auto EndOp = IRList.AllocateOp(); 98 | EndOp.first->RIPIncrement = RIPIncrement; 99 | } 100 | 101 | void OpDispatchBuilder::AddOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 102 | uint32_t DestReg = 0; 103 | 104 | if (!(Op.second.Flags & X86Tables::DECODE_FLAG_MODRM)) 105 | DecodeFailure = true; 106 | if (Op.second.Flags & X86Tables::DECODE_FLAG_SIB) 107 | DecodeFailure = true; 108 | 109 | uint8_t REX = 0; 110 | uint8_t ModRM = 0; 111 | 112 | uint32_t OpSize = 4; 113 | AlignmentType Src; 114 | 115 | if (Op.second.Flags & X86Tables::DECODE_FLAG_OPSIZE) { 116 | printf("Add OpSize\n"); 117 | OpSize = 2; 118 | } 119 | 120 | if (Op.second.Flags & X86Tables::DECODE_FLAG_REX) { 121 | REX = Code[0]; 122 | ModRM = Code[2]; 123 | 124 | if (GetREX_W(REX)) { 125 | OpSize = 8; 126 | } 127 | } else { 128 | ModRM = Code[1]; 129 | } 130 | 131 | uint8_t Mod = GetModRM_Mod(ModRM); 132 | uint8_t RM = GetModRM_RM(ModRM); 133 | uint8_t Reg = GetModRM_Reg(ModRM); 134 | if (Mod != 0b11 && Reg != 0b100) { 135 | DestReg = MapModRMToReg(GetREX_R(REX), GetModRM_Reg(ModRM)); 136 | // printf("XXX: ADD SIB ModRM without Byte!!Mod: %d Src: %s Base? 
:%s\n", 137 | // Mod, 138 | // RegToString(MapModRMToReg(GetREX_R(REX), GetModRM_Reg(ModRM))).c_str(), 139 | // RegToString(MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM))).c_str() 140 | // ); 141 | if (Mod == 0b10) { 142 | // [Register + Displacement32] 143 | // printf("\tXXX: R+disp32\n"); 144 | DecodeFailure = true; 145 | 146 | uint32_t SrcReg = MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM)); 147 | Src = LoadContext(offsetof(X86State, gregs) + SrcReg * 8, 8); 148 | 149 | uint64_t Const = *(int32_t*)&Code[Op.second.Size - 4]; 150 | auto ConstantOp = IRList.AllocateOp(); 151 | 152 | ConstantOp.first->Flags = IR::TYPE_I64; 153 | ConstantOp.first->Constant = Const; 154 | 155 | auto AddOp = IRList.AllocateOp(); 156 | AddOp.first->Args[0] = Src; 157 | AddOp.first->Args[1] = ConstantOp.second; 158 | 159 | Src = AddOp.second; 160 | } 161 | else if (Mod == 0b00) { 162 | // [Register] 163 | printf("\tXXX: R\n"); 164 | // DecodeFailure = true; 165 | uint32_t SrcReg = MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM)); 166 | Src = LoadContext(offsetof(X86State, gregs) + SrcReg * 8, 8); 167 | auto LoadMemOp = IRList.AllocateOp(); 168 | 169 | LoadMemOp.first->Size = OpSize; 170 | LoadMemOp.first->Arg[0] = Src; 171 | LoadMemOp.first->Arg[1] = ~0; 172 | Src = LoadMemOp.second; 173 | } 174 | else { 175 | printf("\tEarlier Mod: %d Reg: %d RM: %d\n", Mod, Reg, RM); 176 | DecodeFailure = true; 177 | } 178 | } 179 | else if (Mod == 0b11) { 180 | DestReg = MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM)); 181 | uint32_t SrcReg = MapModRMToReg(GetREX_R(REX), GetModRM_Reg(ModRM)); 182 | auto LoadOp = IRList.AllocateOp(); 183 | LoadOp.first->Size = 8; 184 | LoadOp.first->Offset = offsetof(X86State, gregs) + SrcReg * 8; 185 | Src = LoadOp.second; 186 | } 187 | else { 188 | printf("\tLast Dec\n"); 189 | DecodeFailure = true; 190 | } 191 | 192 | if (DecodeFailure) 193 | return; 194 | 195 | 196 | auto LoadDestOp = IRList.AllocateOp(); 197 | LoadDestOp.first->Size = 8; 198 | LoadDestOp.first->Offset = offsetof(X86State, gregs) + DestReg * 8; 199 | 200 | auto Src1 = LoadDestOp.second; 201 | auto Src2 = Src; 202 | switch (OpSize) { 203 | case 1: LogMan::Msg::A("Unhandled Add size 1"); break; 204 | case 2: { 205 | auto Trunc1 = IRList.AllocateOp(); 206 | Trunc1.first->Arg = Src1; 207 | 208 | auto Trunc2 = IRList.AllocateOp(); 209 | Trunc2.first->Arg = Src2; 210 | 211 | Src1 = Trunc1.second; 212 | Src2 = Trunc2.second; 213 | } 214 | break; 215 | case 4: { 216 | auto Trunc1 = IRList.AllocateOp(); 217 | Trunc1.first->Arg = Src1; 218 | 219 | auto Trunc2 = IRList.AllocateOp(); 220 | Trunc2.first->Arg = Src2; 221 | 222 | Src1 = Trunc1.second; 223 | Src2 = Trunc2.second; 224 | } 225 | break; 226 | case 8: break; 227 | } 228 | 229 | auto AddOp = IRList.AllocateOp(); 230 | AddOp.first->Args[0] = Src1; 231 | AddOp.first->Args[1] = Src2; 232 | 233 | auto Res = AddOp.second; 234 | 235 | switch (OpSize) { 236 | case 1: LogMan::Msg::A("Unhandled Add size 1"); break; 237 | case 2: { 238 | auto Trunc = IRList.AllocateOp(); 239 | Trunc.first->Arg = Res; 240 | 241 | Res = Trunc.second; 242 | } 243 | break; 244 | case 4: { 245 | auto Trunc = IRList.AllocateOp(); 246 | Trunc.first->Arg = Res; 247 | 248 | Res = Trunc.second; 249 | } 250 | break; 251 | case 8: break; 252 | } 253 | 254 | auto StoreOp = IRList.AllocateOp(); 255 | StoreOp.first->Size = 8; 256 | StoreOp.first->Offset = offsetof(X86State, gregs) + DestReg * 8; 257 | StoreOp.first->Arg = Res; 258 | 259 | // printf("AddSize: %d %02x %02x %02x %02x\n", OpSize, 260 | // Code[0], 
Code[1], Code[2], Code[3]); 261 | } 262 | 263 | void OpDispatchBuilder::CMPOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 264 | uint32_t SrcReg = 0; 265 | uint32_t DestReg = 0; 266 | 267 | if (!(Op.second.Flags & X86Tables::DECODE_FLAG_MODRM)) 268 | DecodeFailure = true; 269 | if (Op.second.Flags & X86Tables::DECODE_FLAG_SIB) 270 | DecodeFailure = true; 271 | 272 | uint8_t REX = 0; 273 | uint8_t ModRM = 0; 274 | 275 | uint32_t OpSize = 4; 276 | 277 | if (Op.second.Flags & X86Tables::DECODE_FLAG_OPSIZE) { 278 | printf("Add OpSize\n"); 279 | OpSize = 2; 280 | } 281 | 282 | if (Op.second.Flags & X86Tables::DECODE_FLAG_REX) { 283 | REX = Code[0]; 284 | ModRM = Code[2]; 285 | 286 | if (GetREX_W(REX)) { 287 | OpSize = 8; 288 | } 289 | } else { 290 | ModRM = Code[1]; 291 | } 292 | 293 | uint8_t Mod = GetModRM_Mod(ModRM); 294 | uint8_t RM = GetModRM_RM(ModRM); 295 | uint8_t Reg = GetModRM_Reg(ModRM); 296 | if (Mod == 0b11) { 297 | DestReg = MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM)); 298 | SrcReg = MapModRMToReg(GetREX_R(REX), GetModRM_Reg(ModRM)); 299 | } 300 | else { 301 | DecodeFailure = true; 302 | } 303 | 304 | if (cpu->GetTLSThread()->JITRIP != 0x402366) 305 | DecodeFailure = true; 306 | 307 | if (DecodeFailure) 308 | return; 309 | 310 | auto Src = LoadContext(offsetof(X86State, gregs) + SrcReg * 8, 8); 311 | auto Dest = LoadContext(offsetof(X86State, gregs) + DestReg * 8, 8); 312 | 313 | auto SubOp = IRList.AllocateOp(); 314 | SubOp.first->Args[0] = Dest; 315 | SubOp.first->Args[1] = Src; 316 | 317 | printf("\t In CMP! %s, %s\n", RegToString(DestReg).c_str(), RegToString(SrcReg).c_str()); 318 | 319 | // Sets OF, SF, ZF, AF, PF, CF 320 | // JNE depends on ZF 321 | if (1) { 322 | // Set ZF 323 | auto ZeroConstant = IRList.AllocateOp(); 324 | ZeroConstant.first->Flags = IR::TYPE_I64; 325 | ZeroConstant.first->Constant = 1; 326 | 327 | auto OneConstant = IRList.AllocateOp(); 328 | OneConstant.first->Flags = IR::TYPE_I64; 329 | OneConstant.first->Constant = 0; 330 | 331 | auto SelectOp = IRList.AllocateOp(); 332 | SelectOp.first->Op = IROp_Select::COMP_EQ; 333 | SelectOp.first->Args[0] = Src; 334 | SelectOp.first->Args[1] = Dest; 335 | SelectOp.first->Args[2] = OneConstant.second; 336 | SelectOp.first->Args[3] = ZeroConstant.second; 337 | SetZF(SelectOp.second); 338 | } 339 | } 340 | 341 | template 342 | void OpDispatchBuilder::JccOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 343 | if (Op.second.Flags & X86Tables::DECODE_FLAG_SIB) 344 | DecodeFailure = true; 345 | 346 | uint32_t FlagBit = 0; 347 | bool Negate = false; 348 | 349 | switch (Type) { 350 | case CC_NOF: 351 | Negate = true; 352 | case CC_OF: 353 | FlagBit = 11; 354 | break; 355 | case CC_NC: 356 | Negate = true; 357 | case CC_C: 358 | FlagBit = 0; 359 | break; 360 | 361 | case CC_NZ: 362 | Negate = true; 363 | case CC_Z: 364 | FlagBit = 6; 365 | break; 366 | 367 | case CC_NS: 368 | Negate = true; 369 | case CC_S: 370 | FlagBit = 7; 371 | break; 372 | 373 | case CC_NP: 374 | Negate = true; 375 | case CC_P: 376 | FlagBit = 2; 377 | break; 378 | 379 | case CC_NBE: 380 | case CC_BE: 381 | case CC_NL: 382 | case CC_L: 383 | case CC_NLE: 384 | case CC_LE: 385 | // printf("\tCouldn't handle this jmp type: %d\n", Type); 386 | DecodeFailure = true; 387 | break; 388 | } 389 | 390 | if (cpu->GetTLSThread()->CPUState.rip != 0x402350) 391 | DecodeFailure = true; 392 | 393 | if (DecodeFailure) 394 | return; 395 | printf("We hit a real jump!\n"); 396 | 397 | auto FlagBitOp = GetFlagBit(FlagBit, !Negate); 398 | 399 | auto 
JumpOp = IRList.AllocateOp(); 400 | JumpOp.first->Cond = FlagBitOp; 401 | 402 | // If condition holds true 403 | // Set RIP and end block. Else jump over 404 | uint64_t RIPTarget = 0; 405 | 406 | // False block 407 | { 408 | uint64_t ConstLocation = *(int8_t*)&Code[Op.second.Size - 1]; 409 | RIPTarget = cpu->GetTLSThread()->JITRIP + ConstLocation + Op.second.Size; 410 | JumpOp.first->RIPTarget = RIPTarget; 411 | 412 | auto ConstantOp = IRList.AllocateOp(); 413 | ConstantOp.first->Flags = IR::TYPE_I64; 414 | ConstantOp.first->Constant = RIPTarget; 415 | printf("\tWanting to jump to 0x%zx\n", ConstantOp.first->Constant); 416 | 417 | auto StoreOp = IRList.AllocateOp(); 418 | StoreOp.first->Size = 8; 419 | StoreOp.first->Offset = offsetof(X86State, rip); 420 | StoreOp.first->Arg = ConstantOp.second; 421 | StoreContext(ConstantOp.second, offsetof(X86State, rip), 8); 422 | 423 | EndBlock(0); 424 | } 425 | 426 | auto TargetOp = IRList.AllocateOp(); 427 | JumpOp.first->Target = TargetOp.second; 428 | 429 | } 430 | 431 | void OpDispatchBuilder::RETOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 432 | 433 | auto LoadOp = IRList.AllocateOp(); 434 | LoadOp.first->Size = 8; 435 | LoadOp.first->Offset = offsetof(X86State, gregs[REG_RSP]); 436 | 437 | auto EightConstant = IRList.AllocateOp(); 438 | EightConstant.first->Flags = IR::TYPE_I64; 439 | EightConstant.first->Constant = 8; 440 | 441 | auto LoadMemOp = IRList.AllocateOp(); 442 | LoadMemOp.first->Size = 8; 443 | LoadMemOp.first->Arg[0] = LoadOp.second; 444 | LoadMemOp.first->Arg[1] = ~0; 445 | 446 | auto AddOp = IRList.AllocateOp(); 447 | AddOp.first->Args[0] = LoadOp.second; 448 | AddOp.first->Args[1] = EightConstant.second; 449 | 450 | // Store new stack pointer 451 | { 452 | auto StoreOp = IRList.AllocateOp(); 453 | StoreOp.first->Size = 8; 454 | StoreOp.first->Offset = offsetof(X86State, gregs[REG_RSP]); 455 | StoreOp.first->Arg = AddOp.second; 456 | } 457 | 458 | // Store new RIP 459 | { 460 | auto StoreOp = IRList.AllocateOp(); 461 | StoreOp.first->Size = 8; 462 | StoreOp.first->Offset = offsetof(X86State, rip); 463 | StoreOp.first->Arg = LoadMemOp.second; 464 | } 465 | 466 | } 467 | 468 | void OpDispatchBuilder::AddImmOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 469 | auto LoadOp = IRList.AllocateOp(); 470 | LoadOp.first->Size = 8; 471 | LoadOp.first->Offset = offsetof(X86State, gregs[REG_RAX]); 472 | 473 | auto ConstantOp = IRList.AllocateOp(); 474 | 475 | if (Op.second.Flags & X86Tables::DECODE_FLAG_REX) { 476 | ConstantOp.first->Flags = IR::TYPE_I64; 477 | ConstantOp.first->Constant = *(int32_t*)&Code[Op.second.Size - 4]; 478 | } 479 | else if (Op.second.Flags & X86Tables::DECODE_FLAG_OPSIZE) { 480 | ConstantOp.first->Flags = IR::TYPE_I16; 481 | ConstantOp.first->Constant = *(int16_t*)&Code[Op.second.Size - 2]; 482 | } 483 | else { 484 | ConstantOp.first->Flags = IR::TYPE_I32; 485 | ConstantOp.first->Constant = *(int32_t*)&Code[Op.second.Size - 4]; 486 | } 487 | 488 | auto AddImmOp = IRList.AllocateOp(); 489 | AddImmOp.first->Args[0] = LoadOp.second; 490 | AddImmOp.first->Args[1] = ConstantOp.second; 491 | 492 | auto StoreOp = IRList.AllocateOp(); 493 | StoreOp.first->Size = 8; 494 | StoreOp.first->Offset = offsetof(X86State, gregs[REG_RAX]); 495 | StoreOp.first->Arg = AddImmOp.second; 496 | } 497 | 498 | void OpDispatchBuilder::AddImmModRMOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 499 | uint32_t DestReg = 0; 500 | 501 | if (!(Op.second.Flags & X86Tables::DECODE_FLAG_MODRM)) 502 | DecodeFailure = true; 503 
| if (Op.second.Flags & X86Tables::DECODE_FLAG_SIB) 504 | DecodeFailure = true; 505 | 506 | uint8_t REX = 0; 507 | uint8_t ModRM = 0; 508 | uint32_t OpSize = 4; 509 | 510 | if (Op.second.Flags & X86Tables::DECODE_FLAG_OPSIZE) { 511 | OpSize = 2; 512 | } 513 | 514 | if (Op.second.Flags & X86Tables::DECODE_FLAG_REX) { 515 | REX = Code[0]; 516 | ModRM = Code[2]; 517 | 518 | if (GetModRM_Mod(ModRM) != 0b11) 519 | DecodeFailure = true; 520 | OpSize = 8; 521 | } else { 522 | ModRM = Code[1]; 523 | 524 | if (GetModRM_Mod(ModRM) != 0b11) 525 | DecodeFailure = true; 526 | } 527 | 528 | DestReg = MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM)); 529 | 530 | uint64_t Const = *(int8_t*)&Code[Op.second.Size - 1]; 531 | if (DecodeFailure) 532 | return; 533 | 534 | auto Src = LoadContext(offsetof(X86State, gregs) + DestReg * 8, 8); 535 | 536 | auto ConstantOp = IRList.AllocateOp(); 537 | ConstantOp.first->Flags = IR::TYPE_I64; 538 | ConstantOp.first->Constant = Const; 539 | 540 | auto AddImmOp = IRList.AllocateOp(); 541 | AddImmOp.first->Args[0] = Src; 542 | AddImmOp.first->Args[1] = ConstantOp.second; 543 | 544 | StoreContext(AddImmOp.second, offsetof(X86State, gregs) + DestReg * 8, 8); 545 | 546 | if (1) { 547 | // Set SF 548 | auto ConstantOp = IRList.AllocateOp(); 549 | ConstantOp.first->Flags = IR::TYPE_I64; 550 | ConstantOp.first->Constant = (OpSize * 4) - 1; 551 | 552 | auto ShrOp = IRList.AllocateOp(); 553 | ShrOp.first->Args[0] = Src; 554 | ShrOp.first->Args[1] = ConstantOp.second; 555 | SetSF(ShrOp.second); 556 | } 557 | 558 | if (1) { 559 | // Set ZF 560 | auto ZeroConstant = IRList.AllocateOp(); 561 | ZeroConstant.first->Flags = IR::TYPE_I64; 562 | ZeroConstant.first->Constant = 1; 563 | 564 | auto OneConstant = IRList.AllocateOp(); 565 | OneConstant.first->Flags = IR::TYPE_I64; 566 | OneConstant.first->Constant = 0; 567 | 568 | auto SelectOp = IRList.AllocateOp(); 569 | SelectOp.first->Op = IROp_Select::COMP_EQ; 570 | SelectOp.first->Args[0] = AddImmOp.second; 571 | SelectOp.first->Args[1] = ZeroConstant.second; 572 | SelectOp.first->Args[2] = OneConstant.second; 573 | SelectOp.first->Args[3] = ZeroConstant.second; 574 | SetZF(SelectOp.second); 575 | } 576 | } 577 | 578 | void OpDispatchBuilder::ShlImmOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 579 | uint32_t DestReg = 0; 580 | 581 | if (!(Op.second.Flags & X86Tables::DECODE_FLAG_MODRM)) 582 | DecodeFailure = true; 583 | if (Op.second.Flags & X86Tables::DECODE_FLAG_SIB) 584 | DecodeFailure = true; 585 | 586 | uint8_t REX = 0; 587 | uint8_t ModRM = 0; 588 | 589 | uint32_t OpSize = 4; 590 | if (Op.second.Flags & X86Tables::DECODE_FLAG_OPSIZE) { 591 | OpSize = 2; 592 | } 593 | 594 | if (Op.second.Flags & X86Tables::DECODE_FLAG_REX) { 595 | REX = Code[0]; 596 | ModRM = Code[2]; 597 | 598 | if (GetModRM_Mod(ModRM) != 0b11) 599 | DecodeFailure = true; 600 | 601 | if (GetREX_W(REX)) { 602 | OpSize = 8; 603 | } 604 | } else { 605 | ModRM = Code[1]; 606 | 607 | if (GetModRM_Mod(ModRM) != 0b11) 608 | DecodeFailure = true; 609 | } 610 | 611 | if (DecodeFailure) 612 | return; 613 | 614 | { 615 | DestReg = MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM)); 616 | 617 | auto LoadOp = IRList.AllocateOp(); 618 | LoadOp.first->Size = 8; 619 | LoadOp.first->Offset = offsetof(X86State, gregs) + DestReg * 8; 620 | 621 | auto ConstantOp = IRList.AllocateOp(); 622 | ConstantOp.first->Flags = IR::TYPE_I8; 623 | ConstantOp.first->Constant = *(int8_t*)&Code[Op.second.Size - 1]; 624 | 625 | auto Src = LoadOp.second; 626 | switch (OpSize) { 627 | case 1: 
LogMan::Msg::A("Unhandled Add size 1"); break; 628 | case 2: { 629 | auto Trunc1 = IRList.AllocateOp(); 630 | Trunc1.first->Arg = Src; 631 | Src = Trunc1.second; 632 | } 633 | break; 634 | case 4: { 635 | auto Trunc1 = IRList.AllocateOp(); 636 | Trunc1.first->Arg = Src; 637 | Src = Trunc1.second; 638 | } 639 | break; 640 | case 8: break; 641 | } 642 | 643 | auto ShlOp = IRList.AllocateOp(); 644 | ShlOp.first->Args[0] = Src; 645 | ShlOp.first->Args[1] = ConstantOp.second; 646 | auto Res = ShlOp.second; 647 | 648 | switch (OpSize) { 649 | case 1: LogMan::Msg::A("Unhandled Add size 1"); break; 650 | case 2: { 651 | auto Trunc = IRList.AllocateOp(); 652 | Trunc.first->Arg = Res; 653 | 654 | Res = Trunc.second; 655 | } 656 | break; 657 | case 4: { 658 | auto Trunc = IRList.AllocateOp(); 659 | Trunc.first->Arg = Res; 660 | 661 | Res = Trunc.second; 662 | } 663 | break; 664 | case 8: break; 665 | } 666 | 667 | auto StoreOp = IRList.AllocateOp(); 668 | StoreOp.first->Size = 8; 669 | StoreOp.first->Offset = offsetof(X86State, gregs) + DestReg * 8; 670 | StoreOp.first->Arg = Res; 671 | 672 | // Calculate CF value 673 | if (0) { 674 | uint32_t ShrAmount = (OpSize * 8) - ConstantOp.first->Constant; 675 | auto ShrConstantOp = IRList.AllocateOp(); 676 | ShrConstantOp.first->Flags = IR::TYPE_I8; 677 | ShrConstantOp.first->Constant = ShrAmount; 678 | 679 | auto ShrOp = IRList.AllocateOp(); 680 | ShrOp.first->Args[0] = Src; 681 | ShrOp.first->Args[1] = ShrConstantOp.second; 682 | SetCF(ShrOp.second); 683 | } 684 | } 685 | } 686 | 687 | void OpDispatchBuilder::XorOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 688 | uint32_t DestReg = 0; 689 | uint32_t SrcReg = 0; 690 | 691 | if (!(Op.second.Flags & X86Tables::DECODE_FLAG_MODRM)) 692 | DecodeFailure = true; 693 | if (Op.second.Flags & X86Tables::DECODE_FLAG_SIB) 694 | DecodeFailure = true; 695 | 696 | uint8_t REX = 0; 697 | uint8_t ModRM = 0; 698 | 699 | uint32_t OpSize = 4; 700 | if (Op.second.Flags & X86Tables::DECODE_FLAG_OPSIZE) { 701 | OpSize = 2; 702 | } 703 | 704 | if (Op.second.Flags & X86Tables::DECODE_FLAG_REX) { 705 | REX = Code[0]; 706 | ModRM = Code[2]; 707 | 708 | if (GetREX_W(REX)) { 709 | OpSize = 8; 710 | } 711 | 712 | if (GetModRM_Mod(ModRM) != 0b11) 713 | DecodeFailure = true; 714 | 715 | } else { 716 | ModRM = Code[1]; 717 | 718 | if (GetModRM_Mod(ModRM) != 0b11) 719 | DecodeFailure = true; 720 | } 721 | 722 | if (DecodeFailure) 723 | return; 724 | 725 | DestReg = MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM)); 726 | SrcReg = MapModRMToReg(GetREX_R(REX), GetModRM_Reg(ModRM)); 727 | 728 | auto Src1 = LoadContext(offsetof(X86State, gregs) + DestReg * 8, 8); 729 | auto Src2 = LoadContext(offsetof(X86State, gregs) + SrcReg * 8, 8); 730 | 731 | Src1 = Truncate(Src1, OpSize); 732 | Src2 = Truncate(Src2, OpSize); 733 | 734 | auto XorOp = IRList.AllocateOp(); 735 | XorOp.first->Args[0] = Src1; 736 | XorOp.first->Args[1] = Src2; 737 | 738 | StoreContext(Truncate(XorOp.second, OpSize), offsetof(X86State, gregs) + DestReg * 8, 8); 739 | } 740 | 741 | void OpDispatchBuilder::MovOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 742 | uint32_t DestReg = 0; 743 | uint32_t SrcReg = 0; 744 | 745 | if (!(Op.second.Flags & X86Tables::DECODE_FLAG_MODRM)) 746 | DecodeFailure = true; 747 | if (Op.second.Flags & X86Tables::DECODE_FLAG_OPSIZE) 748 | DecodeFailure = true; 749 | if (Op.second.Flags & X86Tables::DECODE_FLAG_SIB) 750 | DecodeFailure = true; 751 | 752 | uint8_t ModRM = 0; 753 | 754 | if (Op.second.Flags & X86Tables::DECODE_FLAG_REX) 
{ 755 | uint8_t REX = Code[0]; 756 | ModRM = Code[2]; 757 | DestReg = MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM)); 758 | SrcReg = MapModRMToReg(GetREX_R(REX), GetModRM_Reg(ModRM)); 759 | } 760 | else { 761 | ModRM = Code[1]; 762 | DestReg = MapModRMToReg(GetModRM_RM(ModRM)); 763 | SrcReg = MapModRMToReg(GetModRM_Reg(ModRM)); 764 | } 765 | 766 | if (GetModRM_Mod(ModRM) != 0b11) 767 | DecodeFailure = true; 768 | 769 | if (DecodeFailure) 770 | return; 771 | 772 | auto LoadOp = IRList.AllocateOp(); 773 | LoadOp.first->Size = 8; 774 | LoadOp.first->Offset = offsetof(X86State, gregs) + SrcReg * 8; 775 | 776 | AlignmentType ResultOffset = LoadOp.second; 777 | 778 | if (!(Op.second.Flags & X86Tables::DECODE_FLAG_REX)) { 779 | auto Trunc_32Op = IRList.AllocateOp(); 780 | Trunc_32Op.first->Arg = LoadOp.second; 781 | ResultOffset = Trunc_32Op.second; 782 | } 783 | 784 | auto StoreOp = IRList.AllocateOp(); 785 | StoreOp.first->Size = 8; 786 | StoreOp.first->Offset = offsetof(X86State, gregs) + DestReg * 8; 787 | StoreOp.first->Arg = ResultOffset; 788 | } 789 | 790 | void OpDispatchBuilder::BTOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 791 | uint32_t DestReg = 0; 792 | uint32_t SrcReg = 0; 793 | uint8_t RexDest = 0; 794 | uint8_t RexSrc = 0; 795 | uint8_t ModRMOffset = 1; 796 | 797 | if (!(Op.second.Flags & X86Tables::DECODE_FLAG_MODRM)) 798 | DecodeFailure = true; 799 | if (Op.second.Flags & X86Tables::DECODE_FLAG_OPSIZE) 800 | DecodeFailure = true; 801 | if (Op.second.Flags & X86Tables::DECODE_FLAG_SIB) 802 | DecodeFailure = true; 803 | 804 | uint8_t REX = 0; 805 | uint8_t ModRM = 0; 806 | if (Op.second.Flags & X86Tables::DECODE_FLAG_REX) { 807 | REX = Code[0]; 808 | ModRM = Code[3]; 809 | 810 | } else { 811 | ModRM = Code[2]; 812 | } 813 | 814 | if (GetModRM_Mod(ModRM) != 0b11) { 815 | DecodeFailure = true; 816 | } 817 | 818 | if (DecodeFailure) 819 | return; 820 | 821 | DestReg = MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM)); 822 | SrcReg = MapModRMToReg(GetREX_R(REX), GetModRM_Reg(ModRM)); 823 | 824 | auto Src1 = LoadContext(offsetof(X86State, gregs) + DestReg * 8, 8); 825 | auto Src2 = LoadContext(offsetof(X86State, gregs) + SrcReg * 8, 8); 826 | 827 | auto ConstantOp = IRList.AllocateOp(); 828 | ConstantOp.first->Flags = IR::TYPE_I8; 829 | ConstantOp.first->Constant = 31; 830 | 831 | auto AndOp = IRList.AllocateOp(); 832 | AndOp.first->Args[0] = Src2; 833 | AndOp.first->Args[1] = ConstantOp.second; 834 | 835 | auto BitExtractOp = IRList.AllocateOp(); 836 | BitExtractOp.first->Args[0] = Src1; 837 | BitExtractOp.first->Args[1] = AndOp.second; 838 | 839 | // Result sis stored in CF 840 | SetCF(BitExtractOp.second); 841 | } 842 | 843 | void OpDispatchBuilder::JMPOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 844 | uint32_t SrcReg = 0; 845 | 846 | if (!(Op.second.Flags & X86Tables::DECODE_FLAG_MODRM)) 847 | DecodeFailure = true; 848 | if (Op.second.Flags & X86Tables::DECODE_FLAG_SIB) 849 | DecodeFailure = true; 850 | 851 | uint8_t REX = 0; 852 | uint8_t ModRM = 0; 853 | 854 | uint32_t OpSize = 8; 855 | 856 | if (Op.second.Flags & X86Tables::DECODE_FLAG_REX) { 857 | REX = Code[0]; 858 | ModRM = Code[2]; 859 | printf("Hit REX %02x %02x\n", REX, ModRM); 860 | } else { 861 | ModRM = Code[1]; 862 | } 863 | 864 | if (GetModRM_Mod(ModRM) != 0b11) 865 | DecodeFailure = true; 866 | 867 | 868 | if (DecodeFailure) 869 | return; 870 | 871 | SrcReg = MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM)); 872 | 873 | auto LoadOp = IRList.AllocateOp(); 874 | LoadOp.first->Size = 8; 875 | 
LoadOp.first->Offset = offsetof(X86State, gregs) + SrcReg * 8; 876 | 877 | auto StoreOp = IRList.AllocateOp(); 878 | StoreOp.first->Size = 8; 879 | StoreOp.first->Offset = offsetof(X86State, rip); 880 | StoreOp.first->Arg = LoadOp.second; 881 | 882 | printf("JMP Op Reg %d(%d %d)! %d\n", SrcReg, GetREX_B(REX), GetModRM_RM(ModRM), OpSize); 883 | } 884 | 885 | void OpDispatchBuilder::LEAOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 886 | // DISABLE_DECODE(); 887 | 888 | bool SIB = !!(Op.second.Flags & X86Tables::DECODE_FLAG_SIB); 889 | uint32_t DestReg = 0; 890 | uint32_t SrcReg = 0; 891 | 892 | if (!(Op.second.Flags & X86Tables::DECODE_FLAG_MODRM)) 893 | DecodeFailure = true; 894 | 895 | uint8_t REX = 0; 896 | uint8_t ModRM = 0; 897 | AlignmentType Src; 898 | 899 | uint32_t OpSize = 4; 900 | if (Op.second.Flags & X86Tables::DECODE_FLAG_OPSIZE) { 901 | OpSize = 2; 902 | } 903 | 904 | if (Op.second.Flags & X86Tables::DECODE_FLAG_REX) { 905 | REX = Code[0]; 906 | ModRM = Code[2]; 907 | 908 | if (GetREX_W(REX)) { 909 | OpSize = 8; 910 | } 911 | uint8_t Mod = GetModRM_Mod(ModRM); 912 | uint8_t RM = GetModRM_RM(ModRM); 913 | uint8_t Reg = GetModRM_Reg(ModRM); 914 | if (Mod != 0b11 && Reg != 0b100) { 915 | // printf("XXX: SIB ModRM without Byte!!Mod: %d Src: %s Base? :%s\n", 916 | // Mod, 917 | // RegToString(MapModRMToReg(GetREX_R(REX), GetModRM_Reg(ModRM))).c_str(), 918 | // RegToString(MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM))).c_str() 919 | // ); 920 | if (Mod == 0b10) { 921 | // Register + Displacement32 922 | DestReg = MapModRMToReg(GetREX_R(REX), GetModRM_Reg(ModRM)); 923 | SrcReg = MapModRMToReg(GetREX_B(REX), GetModRM_RM(ModRM)); 924 | Src = LoadContext(offsetof(X86State, gregs) + SrcReg * 8, 8); 925 | 926 | uint64_t Const = *(int32_t*)&Code[Op.second.Size - 4]; 927 | auto ConstantOp = IRList.AllocateOp(); 928 | 929 | ConstantOp.first->Flags = IR::TYPE_I64; 930 | ConstantOp.first->Constant = Const; 931 | 932 | auto AddOp = IRList.AllocateOp(); 933 | AddOp.first->Args[0] = Src; 934 | AddOp.first->Args[1] = ConstantOp.second; 935 | 936 | Src = AddOp.second; 937 | } 938 | else { 939 | DecodeFailure = true; 940 | } 941 | } 942 | else 943 | DecodeFailure = true; 944 | 945 | } else { 946 | ModRM = Code[1]; 947 | 948 | DecodeFailure = true; 949 | if (GetModRM_Mod(ModRM) != 0b11) 950 | DecodeFailure = true; 951 | } 952 | 953 | // DecodeFailure = true; 954 | 955 | if (DecodeFailure) 956 | return; 957 | 958 | // 48 8d 97 90 01 00 00 lea rdx,[rdi+0x190] 959 | StoreContext(Src, offsetof(X86State, gregs) + DestReg * 8, 8); 960 | } 961 | 962 | void OpDispatchBuilder::SyscallOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 963 | std::array ArgOffsets; 964 | std::array GPRIndexes = { 965 | REG_RAX, 966 | REG_RDI, 967 | REG_RSI, 968 | REG_RDX, 969 | REG_R10, 970 | REG_R8, 971 | REG_R9, 972 | }; 973 | for (int i = 0; i < IROp_Syscall::MAX_ARGS; ++i) { 974 | auto Arg = IRList.AllocateOp(); 975 | Arg.first->Size = 8; 976 | Arg.first->Offset = offsetof(X86State, gregs) + GPRIndexes[i] * 8; 977 | ArgOffsets[i] = Arg.second; 978 | } 979 | 980 | auto SyscallOp = IRList.AllocateOp(); 981 | for (int i = 0; i < IROp_Syscall::MAX_ARGS; ++i) { 982 | SyscallOp.first->Arguments[i] = ArgOffsets[i]; 983 | } 984 | 985 | auto StoreOp = IRList.AllocateOp(); 986 | StoreOp.first->Size = 8; 987 | StoreOp.first->Offset = offsetof(X86State, gregs[REG_RAX]); 988 | StoreOp.first->Arg = SyscallOp.second; 989 | } 990 | 991 | void OpDispatchBuilder::UnknownOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 
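// Build a message with the mnemonic and the first four opcode bytes, then report it through LogMan::Msg::A so the missing handler is obvious.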
992 | std::ostringstream str; 993 | str << "Unknown Op: " << Op.first->Name << " 0x"; 994 | for (int i = 0; i < 4; ++i) { 995 | str << " " << std::setw(2) << std::setfill('0') << std::hex << (uint32_t)Code[i]; 996 | } 997 | LogMan::Msg::A(str.str().c_str()); 998 | } 999 | 1000 | void OpDispatchBuilder::NoOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code) { 1001 | } 1002 | 1003 | void OpDispatchBuilder::SetCF(AlignmentType Value) { 1004 | auto LoadOp = IRList.AllocateOp(); 1005 | LoadOp.first->Size = 8; 1006 | LoadOp.first->Offset = offsetof(X86State, rflags); 1007 | 1008 | // Value put in in to the CF is in bit 0 of Value 1009 | auto ConstantOp = IRList.AllocateOp(); 1010 | 1011 | ConstantOp.first->Flags = IR::TYPE_I64; 1012 | ConstantOp.first->Constant = 1; 1013 | 1014 | auto AndOp = IRList.AllocateOp(); 1015 | AndOp.first->Args[0] = Value; 1016 | AndOp.first->Args[1] = ConstantOp.second; 1017 | 1018 | auto NandOp = IRList.AllocateOp(); 1019 | NandOp.first->Args[0] = LoadOp.second; 1020 | NandOp.first->Args[1] = ConstantOp.second; 1021 | 1022 | auto OrOp = IRList.AllocateOp(); 1023 | OrOp.first->Args[0] = AndOp.second; 1024 | OrOp.first->Args[1] = NandOp.second; 1025 | 1026 | auto StoreOp = IRList.AllocateOp(); 1027 | StoreOp.first->Size = 8; 1028 | StoreOp.first->Offset = offsetof(X86State, rflags); 1029 | StoreOp.first->Arg = OrOp.second; 1030 | } 1031 | 1032 | void OpDispatchBuilder::SetZF(AlignmentType Value) { 1033 | constexpr uint32_t BIT_LOCATION = 6; 1034 | auto LoadOp = IRList.AllocateOp(); 1035 | LoadOp.first->Size = 8; 1036 | LoadOp.first->Offset = offsetof(X86State, rflags); 1037 | 1038 | // Value put in in to the ZF is in bit 0 of Value 1039 | auto ConstantOp = IRList.AllocateOp(); 1040 | ConstantOp.first->Flags = IR::TYPE_I64; 1041 | ConstantOp.first->Constant = 1; 1042 | 1043 | auto ZFBitConstant = IRList.AllocateOp(); 1044 | ZFBitConstant.first->Flags = IR::TYPE_I64; 1045 | ZFBitConstant.first->Constant = (1 << BIT_LOCATION); 1046 | 1047 | auto ShiftConstant = IRList.AllocateOp(); 1048 | ShiftConstant.first->Flags = IR::TYPE_I64; 1049 | ShiftConstant.first->Constant = BIT_LOCATION; 1050 | 1051 | // Clear the rest of the bits 1052 | auto AndOp = IRList.AllocateOp(); 1053 | AndOp.first->Args[0] = Value; 1054 | AndOp.first->Args[1] = ConstantOp.second; 1055 | 1056 | // Shift in to the correct position 1057 | auto ShiftOp = IRList.AllocateOp(); 1058 | ShiftOp.first->Args[0] = AndOp.second; 1059 | ShiftOp.first->Args[1] = ShiftConstant.second; 1060 | 1061 | // Clear the location in the FLAGS 1062 | auto NandOp = IRList.AllocateOp(); 1063 | NandOp.first->Args[0] = LoadOp.second; 1064 | NandOp.first->Args[1] = ZFBitConstant.second; 1065 | 1066 | // Or the values together 1067 | auto OrOp = IRList.AllocateOp(); 1068 | OrOp.first->Args[0] = ShiftOp.second; 1069 | OrOp.first->Args[1] = NandOp.second; 1070 | 1071 | auto StoreOp = IRList.AllocateOp(); 1072 | StoreOp.first->Size = 8; 1073 | StoreOp.first->Offset = offsetof(X86State, rflags); 1074 | StoreOp.first->Arg = OrOp.second; 1075 | } 1076 | 1077 | void OpDispatchBuilder::SetSF(AlignmentType Value) { 1078 | constexpr uint32_t BIT_LOCATION = 7; 1079 | auto LoadOp = IRList.AllocateOp(); 1080 | LoadOp.first->Size = 8; 1081 | LoadOp.first->Offset = offsetof(X86State, rflags); 1082 | 1083 | // Value put in in to the SF is in bit 0 of Value 1084 | auto ConstantOp = IRList.AllocateOp(); 1085 | ConstantOp.first->Flags = IR::TYPE_I64; 1086 | ConstantOp.first->Constant = 1; 1087 | 1088 | auto SFBitConstant = IRList.AllocateOp(); 1089 | 
SFBitConstant.first->Flags = IR::TYPE_I64; 1090 | SFBitConstant.first->Constant = (1 << BIT_LOCATION); 1091 | 1092 | auto ShiftConstant = IRList.AllocateOp(); 1093 | ShiftConstant.first->Flags = IR::TYPE_I64; 1094 | ShiftConstant.first->Constant = BIT_LOCATION; 1095 | 1096 | // Clear the rest of the bits 1097 | auto AndOp = IRList.AllocateOp(); 1098 | AndOp.first->Args[0] = Value; 1099 | AndOp.first->Args[1] = ConstantOp.second; 1100 | 1101 | // Shift in to the correct position 1102 | auto ShiftOp = IRList.AllocateOp(); 1103 | ShiftOp.first->Args[0] = AndOp.second; 1104 | ShiftOp.first->Args[1] = ShiftConstant.second; 1105 | 1106 | // Clear the location in the FLAGS 1107 | auto NandOp = IRList.AllocateOp(); 1108 | NandOp.first->Args[0] = LoadOp.second; 1109 | NandOp.first->Args[1] = SFBitConstant.second; 1110 | 1111 | // Or the values together 1112 | auto OrOp = IRList.AllocateOp(); 1113 | OrOp.first->Args[0] = ShiftOp.second; 1114 | OrOp.first->Args[1] = NandOp.second; 1115 | 1116 | auto StoreOp = IRList.AllocateOp(); 1117 | StoreOp.first->Size = 8; 1118 | StoreOp.first->Offset = offsetof(X86State, rflags); 1119 | StoreOp.first->Arg = OrOp.second; 1120 | } 1121 | 1122 | void OpDispatchBuilder::SetOF(AlignmentType Value) { 1123 | constexpr uint32_t BIT_LOCATION = 11; 1124 | auto LoadOp = IRList.AllocateOp(); 1125 | LoadOp.first->Size = 8; 1126 | LoadOp.first->Offset = offsetof(X86State, rflags); 1127 | 1128 | // Value put in in to the OF is in bit 0 of Value 1129 | auto ConstantOp = IRList.AllocateOp(); 1130 | ConstantOp.first->Flags = IR::TYPE_I64; 1131 | ConstantOp.first->Constant = 1; 1132 | 1133 | auto OFBitConstant = IRList.AllocateOp(); 1134 | OFBitConstant.first->Flags = IR::TYPE_I64; 1135 | OFBitConstant.first->Constant = (1 << BIT_LOCATION); 1136 | 1137 | auto ShiftConstant = IRList.AllocateOp(); 1138 | ShiftConstant.first->Flags = IR::TYPE_I64; 1139 | ShiftConstant.first->Constant = BIT_LOCATION; 1140 | 1141 | // Clear the rest of the bits 1142 | auto AndOp = IRList.AllocateOp(); 1143 | AndOp.first->Args[0] = Value; 1144 | AndOp.first->Args[1] = ConstantOp.second; 1145 | 1146 | // Shift in to the correct position 1147 | auto ShiftOp = IRList.AllocateOp(); 1148 | ShiftOp.first->Args[0] = AndOp.second; 1149 | ShiftOp.first->Args[1] = ShiftConstant.second; 1150 | 1151 | // Clear the location in the FLAGS 1152 | auto NandOp = IRList.AllocateOp(); 1153 | NandOp.first->Args[0] = LoadOp.second; 1154 | NandOp.first->Args[1] = OFBitConstant.second; 1155 | 1156 | // Or the values together 1157 | auto OrOp = IRList.AllocateOp(); 1158 | OrOp.first->Args[0] = ShiftOp.second; 1159 | OrOp.first->Args[1] = NandOp.second; 1160 | 1161 | auto StoreOp = IRList.AllocateOp(); 1162 | StoreOp.first->Size = 8; 1163 | StoreOp.first->Offset = offsetof(X86State, rflags); 1164 | StoreOp.first->Arg = OrOp.second; 1165 | } 1166 | 1167 | AlignmentType OpDispatchBuilder::GetFlagBit(uint32_t bit, bool negate) { 1168 | auto LoadOp = IRList.AllocateOp(); 1169 | LoadOp.first->Size = 8; 1170 | LoadOp.first->Offset = offsetof(X86State, rflags); 1171 | 1172 | auto ConstantOp = IRList.AllocateOp(); 1173 | ConstantOp.first->Flags = IR::TYPE_I64; 1174 | ConstantOp.first->Constant = 1; 1175 | 1176 | auto BitConstant = IRList.AllocateOp(); 1177 | BitConstant.first->Flags = IR::TYPE_I64; 1178 | BitConstant.first->Constant = (1 << bit); 1179 | 1180 | auto ShiftConstant = IRList.AllocateOp(); 1181 | ShiftConstant.first->Flags = IR::TYPE_I64; 1182 | ShiftConstant.first->Constant = bit; 1183 | 1184 | auto AndOp = 
IRList.AllocateOp(); 1185 | AndOp.first->Args[0] = LoadOp.second; 1186 | AndOp.first->Args[1] = BitConstant.second; 1187 | 1188 | // Shift in to the correct position 1189 | auto ShiftOp = IRList.AllocateOp(); 1190 | ShiftOp.first->Args[0] = AndOp.second; 1191 | ShiftOp.first->Args[1] = ShiftConstant.second; 1192 | 1193 | if (negate) { 1194 | auto XorOp = IRList.AllocateOp(); 1195 | XorOp.first->Args[0] = ShiftOp.second; 1196 | XorOp.first->Args[1] = ConstantOp.second; 1197 | return XorOp.second; 1198 | } 1199 | else { 1200 | return ShiftOp.second; 1201 | } 1202 | } 1203 | 1204 | AlignmentType OpDispatchBuilder::LoadContext(uint64_t Offset, uint64_t Size) { 1205 | auto LoadOp = IRList.AllocateOp(); 1206 | LoadOp.first->Size = Size; 1207 | LoadOp.first->Offset = Offset; 1208 | return LoadOp.second; 1209 | } 1210 | 1211 | void OpDispatchBuilder::StoreContext(AlignmentType Value, uint64_t Offset, uint64_t Size) { 1212 | auto StoreOp = IRList.AllocateOp(); 1213 | StoreOp.first->Size = Size; 1214 | StoreOp.first->Offset = Offset; 1215 | StoreOp.first->Arg = Value; 1216 | } 1217 | 1218 | AlignmentType OpDispatchBuilder::Truncate(AlignmentType Value, uint64_t Size) { 1219 | switch (Size) { 1220 | case 8: return Value; 1221 | case 4: { 1222 | auto Trunc = IRList.AllocateOp(); 1223 | Trunc.first->Arg = Value; 1224 | return Trunc.second; 1225 | } 1226 | break; 1227 | case 2: { 1228 | auto Trunc = IRList.AllocateOp(); 1229 | Trunc.first->Arg = Value; 1230 | return Trunc.second; 1231 | } 1232 | break; 1233 | default: 1234 | LogMan::Msg::A("Unhandled truncate size\n"); 1235 | break; 1236 | } 1237 | return ~0; 1238 | } 1239 | 1240 | 1241 | OpDispatchBuilder::OpDispatchBuilder(CPUCore *CPU) 1242 | : cpu {CPU} { 1243 | } 1244 | 1245 | void InstallOpcodeHandlers() { 1246 | const std::vector> BaseOpTable = { 1247 | // Instructions 1248 | {0x00, 1, &OpDispatchBuilder::UnknownOp}, 1249 | {0x01, 1, &OpDispatchBuilder::AddOp}, 1250 | {0x02, 1, &OpDispatchBuilder::UnknownOp}, 1251 | {0x03, 1, &OpDispatchBuilder::AddOp}, 1252 | {0x04, 1, &OpDispatchBuilder::UnknownOp}, 1253 | {0x05, 1, &OpDispatchBuilder::AddImmOp}, 1254 | {0x31, 1, &OpDispatchBuilder::XorOp}, 1255 | {0x39, 1, &OpDispatchBuilder::CMPOp}, 1256 | {0x70, 1, &OpDispatchBuilder::JccOp}, 1257 | {0x71, 1, &OpDispatchBuilder::JccOp}, 1258 | {0x72, 1, &OpDispatchBuilder::JccOp}, 1259 | {0x73, 1, &OpDispatchBuilder::JccOp}, 1260 | {0x74, 1, &OpDispatchBuilder::JccOp}, 1261 | {0x75, 1, &OpDispatchBuilder::JccOp}, 1262 | {0x76, 1, &OpDispatchBuilder::JccOp}, 1263 | {0x77, 1, &OpDispatchBuilder::JccOp}, 1264 | {0x78, 1, &OpDispatchBuilder::JccOp}, 1265 | {0x79, 1, &OpDispatchBuilder::JccOp}, 1266 | {0x7A, 1, &OpDispatchBuilder::JccOp}, 1267 | {0x7B, 1, &OpDispatchBuilder::JccOp}, 1268 | {0x7C, 1, &OpDispatchBuilder::JccOp}, 1269 | {0x7D, 1, &OpDispatchBuilder::JccOp}, 1270 | {0x7E, 1, &OpDispatchBuilder::JccOp}, 1271 | {0x7F, 1, &OpDispatchBuilder::JccOp}, 1272 | {0x89, 1, &OpDispatchBuilder::MovOp}, 1273 | {0x8D, 1, &OpDispatchBuilder::LEAOp}, 1274 | {0x90, 1, &OpDispatchBuilder::NoOp}, 1275 | {0xC3, 1, &OpDispatchBuilder::RETOp}, 1276 | }; 1277 | 1278 | const std::vector> TwoByteOpTable = { 1279 | // Instructions 1280 | {0x05, 1, &OpDispatchBuilder::SyscallOp}, 1281 | {0x1F, 1, &OpDispatchBuilder::NoOp}, 1282 | {0xA3, 1, &OpDispatchBuilder::BTOp}, 1283 | }; 1284 | 1285 | const std::vector> ModRMOpTable = { 1286 | {0x8300, 1, &OpDispatchBuilder::AddImmModRMOp}, 1287 | {0xC104, 1, &OpDispatchBuilder::ShlImmOp}, 1288 | {0xFF04, 1, &OpDispatchBuilder::JMPOp}, 1289
| }; 1290 | 1291 | auto InstallToTable = [](auto& FinalTable, auto& LocalTable) { 1292 | for (auto Op : LocalTable) { 1293 | auto OpNum = std::get<0>(Op); 1294 | auto Dispatcher = std::get<2>(Op); 1295 | for (uint8_t i = 0; i < std::get<1>(Op); ++i) { 1296 | LogMan::Throw::A(FinalTable[OpNum + i].OpcodeDispatcher == 0, "Duplicate Entry"); 1297 | FinalTable[OpNum + i].OpcodeDispatcher = Dispatcher; 1298 | } 1299 | } 1300 | }; 1301 | 1302 | InstallToTable(Emu::X86Tables::BaseOps, BaseOpTable); 1303 | InstallToTable(Emu::X86Tables::SecondBaseOps, TwoByteOpTable); 1304 | InstallToTable(Emu::X86Tables::ModRMOps, ModRMOpTable); 1305 | 1306 | } 1307 | } 1308 | -------------------------------------------------------------------------------- /Source/Core/CPU/OpcodeDispatch.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "IntrusiveIRList.h" 3 | #include "X86Tables.h" 4 | #include 5 | 6 | namespace Emu { 7 | class CPUCore; 8 | } 9 | namespace Emu::IR { 10 | 11 | class OpDispatchBuilder final { 12 | public: 13 | OpDispatchBuilder(CPUCore *CPU); 14 | 15 | void BeginBlock(); 16 | void EndBlock(uint64_t RIPIncrement); 17 | 18 | // Op handlers 19 | void AddOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 20 | void AddImmOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 21 | void AddImmModRMOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 22 | void ShlImmOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 23 | void XorOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 24 | void MovOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 25 | void BTOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 26 | void JMPOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 27 | void LEAOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 28 | void CMPOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 29 | 30 | void SyscallOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 31 | void UnknownOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 32 | void NoOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 33 | 34 | // Jump types 35 | enum JumpType { 36 | CC_OF, 37 | CC_NOF, 38 | CC_C, 39 | CC_NC, 40 | CC_Z, 41 | CC_NZ, 42 | CC_BE, 43 | CC_NBE, 44 | CC_S, 45 | CC_NS, 46 | CC_P, 47 | CC_NP, 48 | CC_L, 49 | CC_NL, 50 | CC_LE, 51 | CC_NLE, 52 | }; 53 | template 54 | void JccOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 55 | void RETOp(Emu::X86Tables::DecodedOp Op, uint8_t const *Code); 56 | 57 | Emu::IR::IntrusiveIRList const &GetWorkingIR() { return IRList; } 58 | void ResetWorkingList() { RIPLocations.clear(); IRList.Reset(); DecodeFailure = false; } 59 | bool HadDecodeFailure() { return DecodeFailure; } 60 | void AddRIPMarker(uint64_t RIP) { 61 | auto Marker = IRList.AllocateOp(); 62 | Marker.first->RIP = RIP; 63 | RIPLocations[RIP] = Marker.second; 64 | } 65 | 66 | private: 67 | AlignmentType LoadContext(uint64_t Offset, uint64_t Size); 68 | void StoreContext(AlignmentType Value, uint64_t Offset, uint64_t Size); 69 | 70 | AlignmentType Truncate(AlignmentType Value, uint64_t Size); 71 | AlignmentType GetFlagBit(uint32_t bit, bool negate); 72 | void SetCF(AlignmentType Value); 73 | void SetZF(AlignmentType Value); 74 | void SetSF(AlignmentType Value); 75 | void SetOF(AlignmentType Value); 76 | Emu::IR::IntrusiveIRList IRList{8 * 1024 * 1024}; 77 | std::unordered_map RIPLocations; 78 | 79 | CPUCore *cpu; 80 | bool DecodeFailure{false}; 81 | }; 82 | 83 | void InstallOpcodeHandlers(); 84 | } 85 | 
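// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of the original header]
// A minimal, hedged example of how the pieces declared above appear intended
// to fit together: decode one guest instruction through X86Tables, then let
// the handler installed by InstallOpcodeHandlers() emit IR via the builder.
// `CPU` (a CPUCore*) and `CodePtr` (a pointer to guest code bytes) are
// hypothetical placeholders; the real driver loop lives in CPUCore.
//
//   Emu::X86Tables::InitializeInfoTables();
//   Emu::IR::InstallOpcodeHandlers();
//
//   Emu::IR::OpDispatchBuilder Builder{CPU};
//   Builder.BeginBlock();
//   auto Op = Emu::X86Tables::GetInstInfo(CodePtr);
//   if (Op.first && Op.first->OpcodeDispatcher) {
//     // Invoke the member-function pointer stored in the dispatch table
//     (Builder.*(Op.first->OpcodeDispatcher))(Op, CodePtr);
//   }
//   Builder.EndBlock(Op.second.Size);
//   auto const &IR = Builder.GetWorkingIR(); // hand the IR to a CPU backend
// ---------------------------------------------------------------------------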
-------------------------------------------------------------------------------- /Source/Core/CPU/PassManager.cpp: -------------------------------------------------------------------------------- 1 | #include "PassManager.h" 2 | 3 | namespace Emu::IR { 4 | void BlockPassManager::Run() { 5 | } 6 | 7 | void FunctionPassManager::Run() { 8 | } 9 | 10 | } 11 | 12 | -------------------------------------------------------------------------------- /Source/Core/CPU/PassManager.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | 5 | namespace Emu::IR { 6 | class PassManager; 7 | 8 | class Pass { 9 | public: 10 | virtual std::string GetName() = 0; 11 | 12 | protected: 13 | friend PassManager; 14 | Pass() {} 15 | virtual void Run() = 0; 16 | }; 17 | 18 | class BlockPass : public Pass { 19 | public: 20 | 21 | private: 22 | virtual void Run() override final { RunOnBlock(); } 23 | virtual void RunOnBlock() = 0; 24 | }; 25 | 26 | class FunctionPass : public Pass { 27 | public: 28 | 29 | private: 30 | virtual void Run() override final { RunOnFunction(); } 31 | virtual void RunOnFunction() = 0; 32 | }; 33 | 34 | class PassManager { 35 | public: 36 | virtual void Run() = 0; 37 | void AddPass(Pass* pass) { passes.emplace_back(); } 38 | 39 | private: 40 | std::vector passes; 41 | }; 42 | 43 | class BlockPassManager final : public PassManager { 44 | public: 45 | void Run(); 46 | }; 47 | 48 | class FunctionPassManager final : public PassManager { 49 | public: 50 | void Run(); 51 | }; 52 | } 53 | -------------------------------------------------------------------------------- /Source/Core/CPU/X86Tables.cpp: -------------------------------------------------------------------------------- 1 | #include "LogManager.h" 2 | #include "X86Tables.h" 3 | #include 4 | #include 5 | #include 6 | 7 | namespace Emu { 8 | namespace X86Tables { 9 | 10 | std::array BaseOps; 11 | std::array SecondBaseOps; 12 | std::array ModRMOps; 13 | 14 | void InitializeInfoTables() { 15 | auto UnknownOp = X86InstInfo{"UND", TYPE_UNKNOWN, FLAGS_NONE, 0, 0}; 16 | for (uint32_t i = 0; i < BaseOps.size(); ++i) { 17 | BaseOps[i] = UnknownOp; 18 | } 19 | for (uint32_t i = 0; i < SecondBaseOps.size(); ++i) { 20 | SecondBaseOps[i] = UnknownOp; 21 | } 22 | for (uint32_t i = 0; i < ModRMOps.size(); ++i) { 23 | ModRMOps[i] = UnknownOp; 24 | } 25 | 26 | const std::vector> BaseOpTable = { 27 | // Prefixes 28 | {0x66, 1, X86InstInfo{"", TYPE_LEGACY_PREFIX, FLAGS_NONE, 0, 0}}, 29 | {0x67, 1, X86InstInfo{"", TYPE_LEGACY_PREFIX, FLAGS_NONE, 0, 0}}, 30 | {0x2E, 1, X86InstInfo{"CS", TYPE_LEGACY_PREFIX, FLAGS_NONE, 0, 0}}, 31 | {0x3E, 1, X86InstInfo{"DS", TYPE_LEGACY_PREFIX, FLAGS_NONE, 0, 0}}, 32 | {0x26, 1, X86InstInfo{"ES", TYPE_LEGACY_PREFIX, FLAGS_NONE, 0, 0}}, 33 | {0x64, 1, X86InstInfo{"FS", TYPE_LEGACY_PREFIX, FLAGS_NONE, 0, 0}}, 34 | {0x65, 1, X86InstInfo{"GS", TYPE_LEGACY_PREFIX, FLAGS_NONE, 0, 0}}, 35 | {0x36, 1, X86InstInfo{"SS", TYPE_LEGACY_PREFIX, FLAGS_NONE, 0, 0}}, 36 | {0xF0, 1, X86InstInfo{"LOCK", TYPE_LEGACY_PREFIX, FLAGS_NONE, 0, 0}}, 37 | {0xF2, 1, X86InstInfo{"REP", TYPE_LEGACY_PREFIX, FLAGS_NONE, 0, 0}}, 38 | {0xF3, 1, X86InstInfo{"REPNZ", TYPE_LEGACY_PREFIX, FLAGS_NONE, 0, 0}}, 39 | 40 | // REX 41 | {0x40, 16, X86InstInfo{"", TYPE_REX_PREFIX, FLAGS_NONE, 0, 0}}, 42 | 43 | // Instructions 44 | {0x01, 1, X86InstInfo{"ADD", TYPE_INST, FLAGS_DST_MODRM | FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 0, 0}}, 45 | {0x03, 1, X86InstInfo{"ADD", TYPE_INST, FLAGS_SRC_MODRM | 
FLAGS_HAS_MODRM, 0, 0}}, 46 | {0x05, 1, X86InstInfo{"ADD", TYPE_INST, FLAGS_SRC_IMM | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 47 | {0x08, 4, X86InstInfo{"OR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 48 | {0x20, 4, X86InstInfo{"AND", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 49 | {0x25, 1, X86InstInfo{"AND", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 50 | {0x29, 1, X86InstInfo{"SUB", TYPE_INST, FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 0, 0}}, 51 | {0x2B, 1, X86InstInfo{"SUB", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 52 | {0x2C, 1, X86InstInfo{"SUB", TYPE_INST, FLAGS_NONE, 1, 0}}, 53 | {0x2D, 1, X86InstInfo{"SUB", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 54 | {0x30, 1, X86InstInfo{"XOR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 55 | {0x31, 1, X86InstInfo{"XOR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 56 | {0x32, 1, X86InstInfo{"XOR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 57 | {0x33, 1, X86InstInfo{"XOR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 58 | {0x38, 4, X86InstInfo{"CMP", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 59 | {0x3C, 1, X86InstInfo{"CMP", TYPE_INST, FLAGS_NONE, 1, 0}}, 60 | {0x3D, 1, X86InstInfo{"CMP", TYPE_INST, FLAGS_REX_IN_BYTE | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 61 | {0x50, 8, X86InstInfo{"PUSH", TYPE_INST, FLAGS_REX_IN_BYTE, 0, 0}}, 62 | {0x58, 8, X86InstInfo{"POP", TYPE_INST, FLAGS_REX_IN_BYTE, 0, 0}}, 63 | {0x63, 1, X86InstInfo{"MOVSXD", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 64 | {0x69, 1, X86InstInfo{"IMUL", TYPE_INST, FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 65 | {0x70, 1, X86InstInfo{"JO", TYPE_INST, FLAGS_NONE, 1, 0}}, 66 | {0x71, 1, X86InstInfo{"JNO", TYPE_INST, FLAGS_NONE, 1, 0}}, 67 | {0x72, 1, X86InstInfo{"JB", TYPE_INST, FLAGS_NONE, 1, 0}}, 68 | {0x73, 1, X86InstInfo{"JNB", TYPE_INST, FLAGS_NONE, 1, 0}}, 69 | {0x74, 1, X86InstInfo{"JZ", TYPE_INST, FLAGS_NONE, 1, 0}}, 70 | {0x75, 1, X86InstInfo{"JNZ", TYPE_INST, FLAGS_NONE, 1, 0}}, 71 | {0x76, 1, X86InstInfo{"JBE", TYPE_INST, FLAGS_NONE, 1, 0}}, 72 | {0x77, 1, X86InstInfo{"JNBE", TYPE_INST, FLAGS_NONE, 1, 0}}, 73 | {0x78, 1, X86InstInfo{"JS", TYPE_INST, FLAGS_NONE, 1, 0}}, 74 | {0x79, 1, X86InstInfo{"JNS", TYPE_INST, FLAGS_NONE, 1, 0}}, 75 | {0x7A, 1, X86InstInfo{"JP", TYPE_INST, FLAGS_NONE, 1, 0}}, 76 | {0x7B, 1, X86InstInfo{"JNP", TYPE_INST, FLAGS_NONE, 1, 0}}, 77 | {0x7C, 1, X86InstInfo{"JL", TYPE_INST, FLAGS_NONE, 1, 0}}, 78 | {0x7D, 1, X86InstInfo{"JNL", TYPE_INST, FLAGS_NONE, 1, 0}}, 79 | {0x7E, 1, X86InstInfo{"JLE", TYPE_INST, FLAGS_NONE, 1, 0}}, 80 | {0x7F, 1, X86InstInfo{"JNLE", TYPE_INST, FLAGS_NONE, 1, 0}}, 81 | {0x82, 1, X86InstInfo{"[INV]", TYPE_INVALID, FLAGS_NONE, 0, 0}}, 82 | {0x84, 2, X86InstInfo{"TEST", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 83 | {0x88, 5, X86InstInfo{"MOV", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 84 | {0x8E, 1, X86InstInfo{"MOV", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 85 | {0x8D, 1, X86InstInfo{"LEA", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 86 | {0x90, 1, X86InstInfo{"NOP", TYPE_INST, FLAGS_NONE, 0, 0}}, 87 | {0x98, 1, X86InstInfo{"CDQE", TYPE_INST, FLAGS_NONE, 0, 0}}, 88 | {0x99, 1, X86InstInfo{"CQO", TYPE_INST, FLAGS_NONE, 0, 0}}, 89 | {0xA0, 4, X86InstInfo{"MOV", TYPE_INST, FLAGS_NONE, 0, 0}}, 90 | {0xA8, 1, X86InstInfo{"TEST", TYPE_INST, FLAGS_NONE, 1, 0}}, 91 | {0xA9, 1, X86InstInfo{"TEST", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 92 | {0xAA, 2, X86InstInfo{"STOS", TYPE_INST, FLAGS_NONE, 0, 0}}, 93 | 94 | {0xB0, 8, X86InstInfo{"MOV", TYPE_INST, FLAGS_REX_IN_BYTE, 1, 0}}, 95 | {0xB8, 8, X86InstInfo{"MOV", TYPE_INST, FLAGS_REX_IN_BYTE | FLAGS_DISPLACE_SIZE_DIV_2 | FLAGS_DISPLACE_SIZE_MUL_2, 4, 0}}, 96 
| {0xC2, 2, X86InstInfo{"RET", TYPE_INST, FLAGS_SETS_RIP | FLAGS_BLOCK_END, 0, 0}}, 97 | {0xC4, 2, X86InstInfo{"[INV]", TYPE_INVALID, FLAGS_NONE, 0, 0}}, 98 | {0xC6, 1, X86InstInfo{"MOV", TYPE_INST, FLAGS_HAS_MODRM, 1, 0}}, 99 | {0xC7, 1, X86InstInfo{"MOV", TYPE_INST, FLAGS_HAS_MODRM, 4, 0}}, 100 | {0xD4, 3, X86InstInfo{"[INV]", TYPE_INVALID, FLAGS_NONE, 0, 0}}, 101 | 102 | {0xE8, 1, X86InstInfo{"CALL", TYPE_INST, FLAGS_SETS_RIP | FLAGS_DISPLACE_SIZE_DIV_2 | FLAGS_BLOCK_END, 4, 0}}, 103 | {0xE9, 1, X86InstInfo{"JMP", TYPE_INST, FLAGS_SETS_RIP | FLAGS_DISPLACE_SIZE_DIV_2 | FLAGS_BLOCK_END, 4, 0}}, 104 | {0xEB, 1, X86InstInfo{"JMP", TYPE_INST, FLAGS_SETS_RIP | FLAGS_BLOCK_END, 1, 0}}, 105 | 106 | // ModRM table 107 | {0x80, 1, X86InstInfo{"", TYPE_MODRM_TABLE_PREFIX, FLAGS_HAS_MODRM, 3, 0}}, 108 | {0x81, 1, X86InstInfo{"", TYPE_MODRM_TABLE_PREFIX, FLAGS_HAS_MODRM, 3, 0}}, 109 | {0x83, 1, X86InstInfo{"", TYPE_MODRM_TABLE_PREFIX, FLAGS_HAS_MODRM, 3, 0}}, 110 | {0xC0, 1, X86InstInfo{"", TYPE_MODRM_TABLE_PREFIX, FLAGS_HAS_MODRM, 3, 0}}, 111 | {0xC1, 1, X86InstInfo{"", TYPE_MODRM_TABLE_PREFIX, FLAGS_HAS_MODRM, 3, 0}}, 112 | {0xD0, 4, X86InstInfo{"", TYPE_MODRM_TABLE_PREFIX, FLAGS_HAS_MODRM, 3, 0}}, 113 | {0xD8, 1, X86InstInfo{"", TYPE_MODRM_TABLE_PREFIX, FLAGS_HAS_MODRM, 3, 0}}, 114 | {0xF6, 2, X86InstInfo{"", TYPE_MODRM_TABLE_PREFIX, FLAGS_HAS_MODRM, 3, 0}}, 115 | {0xFF, 1, X86InstInfo{"", TYPE_MODRM_TABLE_PREFIX, FLAGS_HAS_MODRM, 3, 0}}, 116 | }; 117 | 118 | const std::vector> TwoByteOpTable = { 119 | // Instructions 120 | {0x05, 1, X86InstInfo{"SYSCALL", TYPE_INST, FLAGS_BLOCK_END, 0, 0}}, 121 | {0x1f, 1, X86InstInfo{"NOP", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 122 | {0x31, 1, X86InstInfo{"RDTSC", TYPE_INST, FLAGS_NONE, 0, 0}}, 123 | {0x40, 1, X86InstInfo{"CMOVO", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 124 | {0x41, 1, X86InstInfo{"CMOVNO", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 125 | {0x42, 1, X86InstInfo{"CMOVB", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 126 | {0x43, 1, X86InstInfo{"CMOVNB", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 127 | {0x44, 1, X86InstInfo{"CMOVZ", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 128 | {0x45, 1, X86InstInfo{"CMOVNZ", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 129 | {0x46, 1, X86InstInfo{"CMOVBE", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 130 | {0x47, 1, X86InstInfo{"CMOVNBE", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 131 | {0x48, 1, X86InstInfo{"CMOVS", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 132 | {0x49, 1, X86InstInfo{"CMOVNS", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 133 | {0x4A, 1, X86InstInfo{"CMOVP", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 134 | {0x4B, 1, X86InstInfo{"CMOVNP", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 135 | {0x4C, 1, X86InstInfo{"CMOVL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 136 | {0x4D, 1, X86InstInfo{"CMOVNL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 137 | {0x4E, 1, X86InstInfo{"CMOVLE", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 138 | {0x4F, 1, X86InstInfo{"CMOVNLE", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 139 | 140 | {0x6E, 1, X86InstInfo{"MOVD", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 141 | {0x6F, 1, X86InstInfo{"MOVDQU", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 142 | 143 | {0x7E, 1, X86InstInfo{"MOVD", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 144 | {0x7F, 1, X86InstInfo{"MOVDQU", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 145 | {0x80, 1, X86InstInfo{"JO", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 146 | {0x81, 1, X86InstInfo{"JNO", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 147 | {0x82, 1, X86InstInfo{"JB", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 148 | {0x83, 1, X86InstInfo{"JNB", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 
0}}, 149 | {0x84, 1, X86InstInfo{"JZ", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 150 | {0x85, 1, X86InstInfo{"JNZ", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 151 | {0x86, 1, X86InstInfo{"JBE", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 152 | {0x87, 1, X86InstInfo{"JNBE", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 153 | {0x88, 1, X86InstInfo{"JS", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 154 | {0x89, 1, X86InstInfo{"JNS", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 155 | {0x8A, 1, X86InstInfo{"JP", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 156 | {0x8B, 1, X86InstInfo{"JNP", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 157 | {0x8C, 1, X86InstInfo{"JL", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 158 | {0x8D, 1, X86InstInfo{"JNL", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 159 | {0x8E, 1, X86InstInfo{"JLE", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 160 | {0x8F, 1, X86InstInfo{"JNLE", TYPE_INST, FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 161 | {0x90, 1, X86InstInfo{"SETO", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 162 | {0x91, 1, X86InstInfo{"SETNO", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 163 | {0x92, 1, X86InstInfo{"SETB", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 164 | {0x93, 1, X86InstInfo{"SETNB", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 165 | {0x94, 1, X86InstInfo{"SETZ", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 166 | {0x95, 1, X86InstInfo{"SETNZ", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 167 | {0x96, 1, X86InstInfo{"SETBE", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 168 | {0x97, 1, X86InstInfo{"SETNBE", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 169 | {0x98, 1, X86InstInfo{"SETS", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 170 | {0x99, 1, X86InstInfo{"SETNS", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 171 | {0x9A, 1, X86InstInfo{"SETP", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 172 | {0x9B, 1, X86InstInfo{"SETNP", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 173 | {0x9C, 1, X86InstInfo{"SETL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 174 | {0x9D, 1, X86InstInfo{"SETNL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 175 | {0x9E, 1, X86InstInfo{"SETLE", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 176 | {0x9F, 1, X86InstInfo{"SETNLE", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 177 | {0xA2, 1, X86InstInfo{"CPUID", TYPE_INST, FLAGS_NONE, 0, 0}}, 178 | {0xA3, 1, X86InstInfo{"BT", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 179 | {0xAF, 1, X86InstInfo{"IMUL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 180 | {0xB0, 2, X86InstInfo{"CMPXCHG", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 181 | {0xBA, 1, X86InstInfo{"BT", TYPE_INST, FLAGS_HAS_MODRM, 1, 0}}, 182 | {0xB6, 2, X86InstInfo{"MOVZX", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 183 | {0xBC, 2, X86InstInfo{"BSF", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 184 | {0xBE, 2, X86InstInfo{"MOVSX", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 185 | 186 | // SSE 187 | {0x10, 2, X86InstInfo{"MOVUPS", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 188 | {0x16, 2, X86InstInfo{"MOVHPS", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 189 | {0x29, 1, X86InstInfo{"MOVAPS", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 190 | {0xEB, 1, X86InstInfo{"POR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 191 | 192 | // SSE2 193 | {0x60, 1, X86InstInfo{"PUNPCKLBW", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 194 | {0x61, 1, X86InstInfo{"PUNPCKLWD", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 195 | {0x62, 1, X86InstInfo{"PUNPCKLDQ", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 196 | {0x66, 1, X86InstInfo{"PCMPGTD", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 197 | {0x6A, 1, X86InstInfo{"PUNPCKHDQ", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 198 | {0x6C, 1, X86InstInfo{"PUNPCKLQDQ", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 199 | {0x6D, 1, X86InstInfo{"PUNPCKHQDQ", 
TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 200 | {0x70, 1, X86InstInfo{"PSHUFD", TYPE_INST, FLAGS_HAS_MODRM, 1, 0}}, 201 | {0x73, 1, X86InstInfo{"PSLLQ", TYPE_INST, FLAGS_HAS_MODRM, 1, 0}}, 202 | {0x74, 1, X86InstInfo{"PCMPEQB", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 203 | {0x72, 1, X86InstInfo{"PSLLD", TYPE_INST, FLAGS_HAS_MODRM, 1, 0}}, 204 | {0x76, 1, X86InstInfo{"PCMPEQD", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 205 | {0xD4, 1, X86InstInfo{"PADDQ", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 206 | {0xD6, 1, X86InstInfo{"MOVQ", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 207 | {0xD7, 1, X86InstInfo{"PMOVMSKB", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 208 | {0xEF, 1, X86InstInfo{"PXOR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 209 | {0xFE, 1, X86InstInfo{"PADDD", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 210 | }; 211 | 212 | const std::vector> ModRMOpTable = { 213 | {0x8000, 1, X86InstInfo{"ADD", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 214 | {0x8001, 1, X86InstInfo{"OR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 215 | {0x8002, 1, X86InstInfo{"ADC", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 216 | {0x8003, 1, X86InstInfo{"SBB", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 217 | {0x8004, 1, X86InstInfo{"AND", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 218 | {0x8005, 1, X86InstInfo{"SUB", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 219 | {0x8006, 1, X86InstInfo{"XOR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 220 | {0x8007, 1, X86InstInfo{"CMP", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 221 | 222 | {0x8100, 1, X86InstInfo{"ADD", TYPE_INST, FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 223 | {0x8101, 1, X86InstInfo{"OR", TYPE_INST, FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 224 | {0x8102, 1, X86InstInfo{"ADC", TYPE_INST, FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 225 | {0x8103, 1, X86InstInfo{"SBB", TYPE_INST, FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 226 | {0x8104, 1, X86InstInfo{"AND", TYPE_INST, FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 227 | {0x8105, 1, X86InstInfo{"SUB", TYPE_INST, FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 228 | {0x8106, 1, X86InstInfo{"XOR", TYPE_INST, FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 229 | {0x8107, 1, X86InstInfo{"CMP", TYPE_INST, FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 230 | 231 | {0x8300, 1, X86InstInfo{"ADD", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 232 | {0x8301, 1, X86InstInfo{"OR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 233 | {0x8302, 1, X86InstInfo{"ADC", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 234 | {0x8303, 1, X86InstInfo{"SBB", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 235 | {0x8304, 1, X86InstInfo{"AND", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 236 | {0x8305, 1, X86InstInfo{"SUB", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 237 | {0x8306, 1, X86InstInfo{"XOR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 238 | {0x8307, 1, X86InstInfo{"CMP", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 239 | 240 | {0xC000, 1, X86InstInfo{"ROL", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 241 | {0xC001, 1, X86InstInfo{"ROR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | 
FLAGS_HAS_MODRM, 1, 0}}, 242 | {0xC002, 1, X86InstInfo{"RCL", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 243 | {0xC003, 1, X86InstInfo{"RCR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 244 | {0xC004, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 245 | {0xC005, 1, X86InstInfo{"SHR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 246 | {0xC006, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 247 | {0xC007, 1, X86InstInfo{"SAR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 248 | 249 | {0xC100, 1, X86InstInfo{"ROL", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 250 | {0xC101, 1, X86InstInfo{"ROR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 251 | {0xC102, 1, X86InstInfo{"RCL", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 252 | {0xC103, 1, X86InstInfo{"RCR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 253 | {0xC104, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 254 | {0xC105, 1, X86InstInfo{"SHR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 255 | {0xC106, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 256 | {0xC107, 1, X86InstInfo{"SAR", TYPE_INST, FLAGS_DST_MODRM | FLAGS_SRC_IMM | FLAGS_HAS_MODRM, 1, 0}}, 257 | 258 | {0xD000, 1, X86InstInfo{"ROL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 259 | {0xD001, 1, X86InstInfo{"ROR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 260 | {0xD002, 1, X86InstInfo{"RCL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 261 | {0xD003, 1, X86InstInfo{"RCR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 262 | {0xD004, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 263 | {0xD005, 1, X86InstInfo{"SHR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 264 | {0xD006, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 265 | {0xD007, 1, X86InstInfo{"SAR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 266 | 267 | {0xD100, 1, X86InstInfo{"ROL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 268 | {0xD101, 1, X86InstInfo{"ROR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 269 | {0xD102, 1, X86InstInfo{"RCL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 270 | {0xD103, 1, X86InstInfo{"RCR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 271 | {0xD104, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 272 | {0xD105, 1, X86InstInfo{"SHR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 273 | {0xD106, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 274 | {0xD107, 1, X86InstInfo{"SAR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 275 | 276 | {0xD200, 1, X86InstInfo{"ROL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 277 | {0xD201, 1, X86InstInfo{"ROR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 278 | {0xD202, 1, X86InstInfo{"RCL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 279 | {0xD203, 1, X86InstInfo{"RCR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 280 | {0xD204, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 281 | {0xD205, 1, X86InstInfo{"SHR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 282 | {0xD206, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 283 | {0xD207, 1, X86InstInfo{"SAR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 284 | 285 | {0xD300, 1, X86InstInfo{"ROL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 286 | {0xD301, 1, X86InstInfo{"ROR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 287 | {0xD302, 1, X86InstInfo{"RCL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 288 | {0xD303, 1, 
X86InstInfo{"RCR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 289 | {0xD304, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 290 | {0xD305, 1, X86InstInfo{"SHR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 291 | {0xD306, 1, X86InstInfo{"SHL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 292 | {0xD307, 1, X86InstInfo{"SAR", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 293 | 294 | {0xF600, 2, X86InstInfo{"TEST", TYPE_INST, FLAGS_HAS_MODRM, 1, 0}}, 295 | {0xF604, 1, X86InstInfo{"MUL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 296 | {0xF606, 1, X86InstInfo{"DIV", TYPE_INST, FLAGS_HAS_MODRM, 1, 0}}, 297 | {0xF700, 2, X86InstInfo{"TEST", TYPE_INST, FLAGS_HAS_MODRM | FLAGS_DISPLACE_SIZE_DIV_2, 4, 0}}, 298 | {0xF702, 1, X86InstInfo{"NOT", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 299 | {0xF703, 1, X86InstInfo{"NEG", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 300 | {0xF704, 1, X86InstInfo{"MUL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 301 | {0xF705, 1, X86InstInfo{"IMUL", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 302 | {0xF706, 1, X86InstInfo{"DIV", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 303 | {0xF707, 1, X86InstInfo{"IDIV", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 304 | 305 | {0xFF00, 1, X86InstInfo{"INC", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 306 | {0xFF01, 1, X86InstInfo{"DEC", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 307 | {0xFF02, 1, X86InstInfo{"CALL", TYPE_INST, FLAGS_SETS_RIP | FLAGS_HAS_MODRM | FLAGS_BLOCK_END, 0, 0}}, 308 | {0xFF03, 1, X86InstInfo{"CALLF", TYPE_INST, FLAGS_SETS_RIP | FLAGS_HAS_MODRM | FLAGS_BLOCK_END, 0, 0}}, 309 | {0xFF04, 1, X86InstInfo{"JMP", TYPE_INST, FLAGS_SETS_RIP | FLAGS_HAS_MODRM | FLAGS_BLOCK_END, 0, 0}}, 310 | {0xFF05, 1, X86InstInfo{"JMPF", TYPE_INST, FLAGS_SETS_RIP | FLAGS_HAS_MODRM | FLAGS_BLOCK_END, 0, 0}}, 311 | {0xFF06, 1, X86InstInfo{"PUSH", TYPE_INST, FLAGS_HAS_MODRM, 0, 0}}, 312 | }; 313 | 314 | auto GenerateTable = [](auto& FinalTable, auto& LocalTable) { 315 | for (auto Op : LocalTable) { 316 | auto OpNum = std::get<0>(Op); 317 | auto Info = std::get<2>(Op); 318 | for (uint8_t i = 0; i < std::get<1>(Op); ++i) { 319 | LogMan::Throw::A(FinalTable[OpNum + i].Type == TYPE_UNKNOWN, "Duplicate Entry"); 320 | FinalTable[OpNum + i] = Info; 321 | } 322 | } 323 | }; 324 | 325 | GenerateTable(BaseOps, BaseOpTable); 326 | GenerateTable(SecondBaseOps, TwoByteOpTable); 327 | GenerateTable(ModRMOps, ModRMOpTable); 328 | } 329 | 330 | DecodedOp GetInstInfo(uint8_t const *Inst) { 331 | X86InstInfo *Info{nullptr}; 332 | X86InstDecodeFlags Flags{}; 333 | uint8_t InstructionSize = 0; 334 | std::array Instruction; 335 | 336 | bool DoAgain = false; 337 | uint8_t REXOptions = 0; 338 | bool HasWideningDisplacement = false; 339 | bool HasNarrowingDisplacement = false; 340 | OnceMore: 341 | 342 | uint8_t AdditionalBytes = 0; 343 | Instruction[InstructionSize] = Inst[InstructionSize]; InstructionSize++; 344 | uint8_t Op = Inst[InstructionSize - 1]; 345 | 346 | auto NormalOp = [&](auto &Table, auto Op) { 347 | Info = &Table[Op]; 348 | 349 | // We hit a prefix, go again 350 | if (Info->Type == TYPE_PREFIX || Info->Type == TYPE_LEGACY_PREFIX) { 351 | DoAgain = true; 352 | return; 353 | } 354 | 355 | bool HasMODRM = (Info->Flags & FLAGS_HAS_MODRM) || (REXOptions & 1); 356 | 357 | if (Info->Flags & FLAGS_REX_IN_BYTE) 358 | HasMODRM = false; 359 | 360 | bool HasSIB = REXOptions & 2; 361 | uint8_t Disp = 0; 362 | 363 | if (HasMODRM) { 364 | union { 365 | uint8_t Hex; 366 | struct { 367 | uint8_t rm : 3; 368 | uint8_t reg : 3; 369 | uint8_t mod : 2; 370 | }; 371 | } ModRM; 372 | ModRM.Hex = Inst[InstructionSize]; 373 | 374 | // if ModRM.Mod != 0b11 
375 | // AND 376 | // ModRM.rm == 0b100 377 | HasSIB = HasSIB || 378 | ((ModRM.mod != 0b11) && 379 | (ModRM.rm == 0b100)); 380 | 381 | // Do we have an offset? 382 | if (ModRM.mod == 0b01) 383 | Disp = 1; 384 | else if (ModRM.mod == 0b10) 385 | Disp = 4; 386 | else if (ModRM.mod == 0 && ModRM.rm == 0b101) 387 | Disp = 4; 388 | 389 | ;//printf("ModRM: 0x%02x HasSIB? %s Disp: %d %d %d %d\n", ModRM.Hex, HasSIB ? "Yes" : "No", Disp, ModRM.mod, ModRM.reg, ModRM.rm); 390 | if (HasSIB) { 391 | union { 392 | uint8_t Hex; 393 | struct { 394 | uint8_t base : 3; 395 | uint8_t index : 3; 396 | uint8_t scale : 2; 397 | }; 398 | } SIB; 399 | SIB.Hex = Inst[InstructionSize+1]; 400 | ;//printf("\tSIB: 0x%02x %d %d %d\n", SIB.Hex, SIB.base, SIB.index, SIB.scale); 401 | 402 | // If the SIB base is 0b101, aka BP or R13 then we have a 32bit displacement 403 | if (SIB.base == 0b101 && Disp == 0) { 404 | ;//printf("\tDisp now 4\n"); 405 | Disp = 4; 406 | } 407 | } 408 | } 409 | 410 | if (HasMODRM) { // MODRM 411 | AdditionalBytes++; 412 | Flags.Flags |= DECODE_FLAG_MODRM; 413 | } 414 | 415 | if (HasSIB) { 416 | AdditionalBytes++; 417 | Flags.Flags |= DECODE_FLAG_SIB; 418 | } 419 | 420 | AdditionalBytes += Disp; 421 | 422 | uint8_t Bytes = Info->MoreBytes; 423 | if ((Info->Flags & FLAGS_DISPLACE_SIZE_MUL_2) && HasWideningDisplacement) { 424 | ;//printf("Cool, nondefault displacement\n"); 425 | Bytes *= 2; 426 | } 427 | if ((Info->Flags & FLAGS_DISPLACE_SIZE_DIV_2) && HasNarrowingDisplacement) { 428 | ;//printf("Cool, nondefault displacement\n"); 429 | Bytes /= 2; 430 | } 431 | 432 | AdditionalBytes += Bytes; 433 | }; 434 | 435 | auto ModRMOp = [&]() { 436 | uint8_t Prefix = Inst[InstructionSize - 1]; 437 | uint8_t ModRM = Inst[InstructionSize]; 438 | 439 | // Get the Prefix Info 440 | auto PrefixInfo = &BaseOps[Prefix]; 441 | uint8_t ValidModRMMask = (1 << PrefixInfo->MoreBytes) - 1; 442 | uint16_t Op = (Prefix << 8) | (((ModRM & 0b111000) >> 3) & (ValidModRMMask)); 443 | ;//printf("ModRM Op: 0x%04x\n", Op); 444 | 445 | // Find the instruction Info 446 | NormalOp(ModRMOps, Op); 447 | }; 448 | 449 | 450 | switch (Op) { 451 | case 0x0F: { // Escape 452 | Flags.PrefixBytes++; 453 | Instruction[InstructionSize] = Inst[InstructionSize]; InstructionSize++; 454 | 455 | switch (Inst[InstructionSize]) { 456 | case 0x0F: // Escape 457 | ;//printf("3DNow!\n"); 458 | Flags.PrefixBytes++; 459 | // This turns in to ModRM 460 | Instruction[InstructionSize] = Inst[InstructionSize]; InstructionSize++; 461 | break; 462 | case 0x38: // Escape 463 | ;//printf("F38 Table!\n"); 464 | Flags.PrefixBytes++; 465 | break; 466 | case 0x3A: // Escape 467 | ;//printf("F3A Table!\n"); 468 | Flags.PrefixBytes++; 469 | break; 470 | default: // Two Byte op table 471 | ;//printf("TwoByte Op Table!\n"); 472 | NormalOp(SecondBaseOps, Inst[InstructionSize - 1]); 473 | break; 474 | } 475 | } 476 | break; 477 | 478 | case 0x66: // Operand size prefix 479 | ;//printf("Operand size prefix!\n"); 480 | // 66 83 78 36 38 cmp WORD PTR [rax+0x36],0x38 481 | HasNarrowingDisplacement = true; 482 | Flags.PrefixBytes++; 483 | Flags.Flags |= DECODE_FLAG_OPSIZE; 484 | DoAgain = true; 485 | break; 486 | case 0x67: // Address size override prefix 487 | ;//printf("Address size override prefix\n"); 488 | Flags.PrefixBytes++; 489 | Flags.Flags |= DECODE_FLAG_ADSIZE; 490 | DoAgain = true; 491 | break; 492 | case 0x40: // REX - 0x40-0x4F 493 | case 0x41: 494 | case 0x42: 495 | case 0x43: 496 | case 0x44: 497 | case 0x45: 498 | case 0x46: 499 | case 0x47: 500 | case 0x48: 
501 | case 0x49: 502 | case 0x4A: 503 | case 0x4B: 504 | case 0x4C: 505 | case 0x4D: 506 | case 0x4E: 507 | case 0x4F: { 508 | ;//printf("REX Op\n"); 509 | uint8_t REXValue = Inst[InstructionSize - 1]; 510 | Info = &BaseOps[Inst[InstructionSize - 1]]; 511 | 512 | REXOptions |= 0b100; 513 | // ModRM 514 | if (REXValue & 0b0101) 515 | REXOptions |= 0b001; 516 | 517 | // SIB 518 | if (REXValue & 0b0010) 519 | REXOptions |= 0b010; 520 | 521 | // XXX: Throw in to REXOptions 522 | if (REXValue & 0b1000) 523 | HasWideningDisplacement = true; 524 | 525 | Flags.Flags |= DECODE_FLAG_REX; 526 | ;//printf("REX 0x%01x\n", REXValue & 0xF); 527 | 528 | DoAgain = true; 529 | } 530 | break; 531 | case 0xF0: { 532 | Flags.Flags |= DECODE_FLAG_LOCK; 533 | DoAgain = true; 534 | } 535 | break; 536 | default: // Default Base Op 537 | ;//printf("Default op! 0x%02x\n", Op); 538 | Info = &BaseOps[Inst[InstructionSize - 1]]; 539 | if (Info->Type == TYPE_MODRM_TABLE_PREFIX) { 540 | ModRMOp(); 541 | } 542 | else { 543 | NormalOp(BaseOps, Inst[InstructionSize - 1]); 544 | } 545 | break; 546 | } 547 | 548 | // Read any additional bytes specified 549 | for (uint32_t i = 0; i < AdditionalBytes; ++i) { 550 | Instruction[InstructionSize] = Inst[InstructionSize]; InstructionSize++; 551 | } 552 | 553 | if (DoAgain) { 554 | DoAgain = false; 555 | goto OnceMore; 556 | } 557 | 558 | // printf("Instruction: "); 559 | // if (Info) { 560 | // printf("%s :", Info->Name); 561 | // } 562 | // 563 | // for (uint8_t i = 0; i < InstructionSize; ++i) { 564 | // printf("%02x ", Instruction[i]); 565 | // } 566 | // printf("\n"); 567 | Flags.Size = InstructionSize; 568 | return std::make_pair(Info ? Info->Type == TYPE_UNKNOWN ? nullptr : Info : nullptr, Flags); 569 | } 570 | } 571 | } 572 | -------------------------------------------------------------------------------- /Source/Core/CPU/X86Tables.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace Emu { 6 | namespace IR { 7 | struct OpDispatchBuilder; 8 | } 9 | namespace X86Tables { 10 | 11 | enum InstType { 12 | TYPE_UNKNOWN, 13 | TYPE_LEGACY_PREFIX, 14 | TYPE_PREFIX, 15 | TYPE_REX_PREFIX, 16 | TYPE_MODRM_TABLE_PREFIX, 17 | TYPE_INST, 18 | TYPE_INVALID, 19 | }; 20 | enum InstFlags { 21 | FLAGS_NONE = 0, 22 | FLAGS_HAS_MODRM = (1 << 0), 23 | FLAGS_DISPLACE_SIZE_MUL_2 = (1 << 1), 24 | FLAGS_DISPLACE_SIZE_DIV_2 = (1 << 2), 25 | FLAGS_REX_IN_BYTE = (1 << 3), 26 | FLAGS_SRC_MODRM = (1 << 4), 27 | FLAGS_DST_MODRM = (1 << 5), 28 | FLAGS_SRC_IMM = (1 << 6), 29 | FLAGS_BLOCK_END = (1 << 7), 30 | FLAGS_SETS_RIP = (1 << 8), 31 | }; 32 | 33 | enum DecodeFlags { 34 | DECODE_FLAG_NONE = 0, 35 | DECODE_FLAG_REX = (1 << 0), 36 | DECODE_FLAG_OPSIZE = (1 << 1), 37 | DECODE_FLAG_ADSIZE = (1 << 2), 38 | DECODE_FLAG_MODRM = (1 << 3), 39 | DECODE_FLAG_SIB = (1 << 4), 40 | DECODE_FLAG_LOCK = (1 << 5), 41 | 42 | }; 43 | struct X86InstDecodeFlags { 44 | uint8_t Size : 4; 45 | uint8_t PrefixBytes : 4; 46 | uint8_t Flags; ///< Must be larger that DecodeFlags enum 47 | }; 48 | 49 | struct X86InstInfo; 50 | using DecodedOp = std::pair; 51 | using OpDispatchPtr = void (IR::OpDispatchBuilder::*)(Emu::X86Tables::DecodedOp, uint8_t const *Code); 52 | 53 | struct X86InstInfo { 54 | char const *Name; 55 | InstType Type; 56 | uint16_t Flags; ///< Must be larger than InstFlags enum 57 | uint8_t MoreBytes; 58 | OpDispatchPtr OpcodeDispatcher; 59 | }; 60 | extern std::array BaseOps; 61 | extern std::array SecondBaseOps; 62 | extern std::array 
ModRMOps; 63 | 64 | void InitializeInfoTables(); 65 | DecodedOp GetInstInfo(uint8_t const *Inst); 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /Source/Core/Core.cpp: -------------------------------------------------------------------------------- 1 | #include "Bootloader/Bootloader.h" 2 | #include "Core.h" 3 | #include "ELFLoader.h" 4 | #include "LogManager.h" 5 | #include 6 | #include 7 | 8 | namespace Emu { 9 | 10 | Core::Core() 11 | : CPU{&MemoryMapper} { 12 | } 13 | 14 | bool Core::Load(std::string const &File, std::vector const &Args) { 15 | bool Result = true; 16 | // Allocate a 64GB SHM region for fun 17 | Result &= MemoryMapper.AllocateSHMRegion(1ULL << 33); 18 | 19 | CPU.Init(File); 20 | CPU.RunLoop(); 21 | 22 | return Result; 23 | } 24 | 25 | } 26 | -------------------------------------------------------------------------------- /Source/Core/Core.h: -------------------------------------------------------------------------------- 1 | #include "Bootloader/Bootloader.h" 2 | #include "CPU/CPUCore.h" 3 | #include "Memmap.h" 4 | #include 5 | #include 6 | 7 | namespace Emu { 8 | class Core { 9 | public: 10 | enum State { 11 | STATE_UNLOADED = 0, 12 | STATE_PAUSED, 13 | STATE_RUNNING, 14 | }; 15 | 16 | Core(); 17 | bool Load(std::string const &File, std::vector const &Args); 18 | 19 | private: 20 | Bootloader BL{}; 21 | CPUCore CPU; 22 | Memmap MemoryMapper{}; 23 | }; 24 | } 25 | -------------------------------------------------------------------------------- /Source/Core/HLE/Syscalls/FileManagement.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "Core/CPU/CPUCore.h" 4 | #include "FileManagement.h" 5 | 6 | namespace Emu { 7 | ssize_t FD::writev(int fd, void *iov, int iovcnt) { 8 | return -1; 9 | ssize_t FinalSize = 0; 10 | printf("writev: %d 0x%p %d\n", fd, iov, iovcnt); 11 | for (int i = 0; i < iovcnt; ++i) { 12 | struct iovStruct { 13 | uint64_t iov_base; 14 | size_t iov_len; 15 | } *iovObject; 16 | 17 | iovObject = (iovStruct*)iov; 18 | const char *DataString = cpu->MemoryMapper->GetPointer(iovObject->iov_base); 19 | printf("\t 0x%zx Size: 0x%zx\n", iovObject->iov_base, iovObject->iov_len); 20 | printf("\t %s\n", DataString); 21 | FinalSize += iovObject->iov_len; 22 | 23 | } 24 | return FinalSize; 25 | } 26 | 27 | uint64_t FileManager::Read(int fd, void *buf, size_t count) { 28 | printf("XXX: Implement Read\n"); 29 | return 0; 30 | } 31 | 32 | uint64_t FileManager::Write(int fd, void *buf, size_t count) { 33 | return write(fd, buf, count); 34 | } 35 | 36 | uint64_t FileManager::Open(const char* pathname, int flags, uint32_t mode) { 37 | printf("XXX: Implement Open\n"); 38 | return 0; 39 | } 40 | 41 | uint64_t FileManager::Close(int fd) { 42 | printf("XXX: Implement Close\n"); 43 | return 0; 44 | } 45 | 46 | uint64_t FileManager::Fstat(int fd, void *buf) { 47 | if (fd == STDOUT_FILENO || fd == STDERR_FILENO) { 48 | return fstat(fd, (struct stat*)buf); 49 | } 50 | else { 51 | printf("Attempting to stat: %d\n", fd); 52 | return -1LL; 53 | } 54 | } 55 | 56 | uint64_t FileManager::Lseek(int fd, uint64_t offset, int whence) { 57 | printf("XXX: Implement Lseek\n"); 58 | return 0; 59 | } 60 | 61 | uint64_t FileManager::Writev(int fd, void *iov, int iovcnt) { 62 | auto fdp = FDMap.find(fd); 63 | if (fdp == FDMap.end()) { 64 | printf("XXX: Trying to open unknown fd: %d\n", fd); 65 | return -1LL; 66 | } 67 | return fdp->second->writev(fd, iov, iovcnt); 68 | } 69 | 
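// ---------------------------------------------------------------------------
// [Editor's note -- illustrative sketch, not part of the original file]
// FD::writev above bails out with `return -1;` before its loop runs, and the
// loop re-reads the first iovec entry on every iteration instead of advancing
// through the array. A hedged sketch of the intended walk, assuming the guest
// hands over a packed array of {iov_base, iov_len} pairs in guest address
// space and that Memmap's templated GetPointer<T>() accessor resolves guest
// pointers to host pointers:
//
//   struct GuestIOV { uint64_t iov_base; uint64_t iov_len; };
//   auto const *Entries = reinterpret_cast<GuestIOV const *>(iov);
//   ssize_t FinalSize = 0;
//   for (int i = 0; i < iovcnt; ++i) {
//     const char *Data = cpu->MemoryMapper->GetPointer<const char *>(Entries[i].iov_base);
//     printf("\t0x%lx Size: 0x%lx -> %s\n", Entries[i].iov_base, Entries[i].iov_len, Data);
//     FinalSize += Entries[i].iov_len;
//   }
//   return FinalSize;
// ---------------------------------------------------------------------------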
70 | uint64_t FileManager::Access(const char *pathname, int mode) { 71 | printf("Trying to read access of : %s\n", pathname); 72 | return -1LL; 73 | } 74 | 75 | uint64_t FileManager::Readlink(const char *path, char *buf, size_t bufsiz) { 76 | printf("Trying to readlink of : %s\n", path); 77 | return -1LL; 78 | } 79 | 80 | uint64_t FileManager::Openat(int dirfd, const char *pathname, int flags, uint32_t mode) { 81 | int32_t fdNum = CurrentFDOffset; 82 | printf("Opened file: %s with fd: %d\n", pathname, fdNum); 83 | FD FDObject{cpu, fdNum, pathname, flags, mode}; 84 | auto fd = &FDs.emplace_back(FDObject); 85 | FDMap[CurrentFDOffset] = fd; 86 | CurrentFDOffset++; 87 | return fdNum; 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /Source/Core/HLE/Syscalls/FileManagement.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | namespace Emu { 8 | class CPUCore; 9 | class FD { 10 | public: 11 | FD(CPUCore *CPU, int fd, const char *pathname, int flags, mode_t mode) 12 | : cpu{CPU} 13 | , FDesc{fd} 14 | , Name{pathname} 15 | , Flags{flags} 16 | , Mode{mode} { 17 | } 18 | 19 | ssize_t writev(int fd, void *iov, int iovcnt); 20 | CPUCore *cpu; 21 | int FDesc{0}; 22 | std::string Name; 23 | int Flags; 24 | mode_t Mode; 25 | }; 26 | 27 | class FileManager { 28 | public: 29 | FileManager(CPUCore *CPU) : cpu {CPU} {} 30 | uint64_t Read(int fd, void *buf, size_t count); 31 | uint64_t Write(int fd, void *buf, size_t count); 32 | uint64_t Open(const char* pathname, int flags, uint32_t mode); 33 | uint64_t Close(int fd); 34 | uint64_t Fstat(int fd, void *buf); 35 | uint64_t Lseek(int fd, uint64_t offset, int whence); 36 | uint64_t Writev(int fd, void *iov, int iovcnt); 37 | uint64_t Access(const char *pathname, int mode); 38 | uint64_t Readlink(const char *path, char *buf, size_t bufsiz); 39 | uint64_t Openat(int dirfd, const char *pathname, int flags, uint32_t mode); 40 | 41 | private: 42 | CPUCore *cpu; 43 | 44 | int32_t CurrentFDOffset{3}; 45 | std::vector FDs; 46 | std::unordered_map FDMap; 47 | }; 48 | } 49 | -------------------------------------------------------------------------------- /Source/Core/HLE/Syscalls/Syscalls.cpp: -------------------------------------------------------------------------------- 1 | #include "Core/CPU/CPUCore.h" 2 | #include 3 | #include "LogManager.h" 4 | #include "Syscalls.h" 5 | #include 6 | #include 7 | 8 | static uint64_t AlignUp(uint64_t value, uint64_t size) { 9 | return value + (size - value % size) % size; 10 | }; 11 | static uint64_t AlignDown(uint64_t value, uint64_t size) { 12 | return value - value % size; 13 | }; 14 | 15 | constexpr uint64_t PAGE_SIZE = 4096; 16 | 17 | namespace Emu { 18 | 19 | uint64_t SyscallHandler::HandleSyscall(SyscallArguments *Args) { 20 | uint64_t Result = 0; 21 | 22 | uint64_t TID = cpu->GetTLSThread()->threadmanager.GetTID(); 23 | printf("%ld: syscall: %ld\n",cpu->GetTLSThread()->threadmanager.GetTID(), Args->Argument[0]); 24 | switch (Args->Argument[0]) { 25 | default: 26 | Result = -1; 27 | LogMan::Msg::A("Unknown syscall: ", Args->Argument[0]); 28 | break; 29 | // ID Management 30 | case SYSCALL_GETUID: 31 | Result = cpu->GetTLSThread()->threadmanager.GetUID(); 32 | printf("UID: 0x%zx\n", Result); 33 | break; 34 | case SYSCALL_GETGID: 35 | Result = cpu->GetTLSThread()->threadmanager.GetGID(); 36 | printf("GID: 0x%zx\n", Result); 37 | break; 38 | case SYSCALL_GETEUID: 39 | 
Result = cpu->GetTLSThread()->threadmanager.GetEUID(); 40 | printf("EUID: 0x%zx\n", Result); 41 | break; 42 | case SYSCALL_GETEGID: 43 | Result = cpu->GetTLSThread()->threadmanager.GetEGID(); 44 | printf("EGID: 0x%zx\n", Result); 45 | break; 46 | case SYSCALL_GETTID: 47 | Result = cpu->GetTLSThread()->threadmanager.GetTID(); 48 | printf("TID: 0x%zx\n", Result); 49 | break; 50 | case SYSCALL_GETPID: 51 | Result = cpu->GetTLSThread()->threadmanager.GetPID(); 52 | printf("PID: 0x%zx\n", Result); 53 | break; 54 | 55 | // File Management 56 | case SYSCALL_READ: 57 | Result = filemanager.Read(Args->Argument[1], cpu->MemoryMapper->GetPointer(Args->Argument[2]), Args->Argument[3]); 58 | break; 59 | case SYSCALL_WRITE: 60 | Result = filemanager.Write(Args->Argument[1], cpu->MemoryMapper->GetPointer(Args->Argument[2]), Args->Argument[3]); 61 | break; 62 | case SYSCALL_OPEN: 63 | Result = filemanager.Open(cpu->MemoryMapper->GetPointer(Args->Argument[1]), Args->Argument[2], Args->Argument[3]); 64 | break; 65 | case SYSCALL_CLOSE: 66 | Result = filemanager.Close(Args->Argument[1]); 67 | break; 68 | case SYSCALL_FSTAT: 69 | Result = filemanager.Fstat(Args->Argument[1], cpu->MemoryMapper->GetPointer(Args->Argument[2])); 70 | break; 71 | case SYSCALL_LSEEK: 72 | Result = filemanager.Lseek(Args->Argument[1], Args->Argument[2], Args->Argument[3]); 73 | break; 74 | case SYSCALL_WRITEV: 75 | Result = filemanager.Writev(Args->Argument[1], cpu->MemoryMapper->GetPointer(Args->Argument[2]), Args->Argument[3]); 76 | break; 77 | case SYSCALL_ACCESS: 78 | Result = filemanager.Access(cpu->MemoryMapper->GetPointer(Args->Argument[1]), Args->Argument[2]); 79 | break; 80 | case SYSCALL_READLINK: 81 | Result = filemanager.Readlink(cpu->MemoryMapper->GetPointer(Args->Argument[1]), cpu->MemoryMapper->GetPointer(Args->Argument[2]), Args->Argument[3]); 82 | break; 83 | case SYSCALL_OPENAT: 84 | Result = filemanager.Openat(Args->Argument[1], cpu->MemoryMapper->GetPointer(Args->Argument[2]), Args->Argument[3], Args->Argument[4]); 85 | break; 86 | case SYSCALL_UNAME: { 87 | struct _utsname { 88 | char sysname[65]; 89 | char nodename[65]; 90 | char release[65]; 91 | char version[65]; 92 | char machine[65]; 93 | }; 94 | _utsname *local = cpu->MemoryMapper->GetPointer<_utsname*>(Args->Argument[1]); 95 | 96 | strcpy(local->sysname, "Linux"); 97 | strcpy(local->nodename, "Emu"); 98 | strcpy(local->release, "4.19"); 99 | strcpy(local->version, "#1"); 100 | strcpy(local->machine, "x86_64"); 101 | Result = 0; 102 | break; 103 | } 104 | 105 | case SYSCALL_EXIT: 106 | printf("Thread exited with: %zd\n", Args->Argument[1]); 107 | cpu->GetTLSThread()->StopRunning = true; 108 | if (cpu->GetTLSThread()->threadmanager.child_tid) { 109 | // If we are participating in child_tid then we must clear some things 110 | uint64_t *tidptr = cpu->MemoryMapper->GetPointer(cpu->GetTLSThread()->threadmanager.child_tid); 111 | *tidptr = 0; 112 | // Also wake up the futex for this 113 | Futex *futex = GetFutex(cpu->GetTLSThread()->threadmanager.child_tid); 114 | if (futex) { 115 | futex->cv.notify_one(); 116 | } 117 | } 118 | break; 119 | case SYSCALL_ARCH_PRCTL: 120 | switch (Args->Argument[1]) { 121 | case 0x1002: // ARCH_SET_FS 122 | cpu->SetFS(cpu->GetTLSThread(), Args->Argument[2]); 123 | break; 124 | default: 125 | LogMan::Msg::E("Unknown prctl\n"); 126 | cpu->StopRunning = true; 127 | break; 128 | } 129 | Result = 0; 130 | break; 131 | case SYSCALL_CLOCK_GETTIME: { 132 | timespec *res = cpu->MemoryMapper->GetPointer(Args->Argument[2]); 133 | Result = 
clock_gettime(Args->Argument[1], res); 134 | break; 135 | } 136 | 137 | // XXX: Improve BRK handling 138 | case SYSCALL_BRK: { 139 | constexpr uint64_t DataOffset = 0xa000'0000; 140 | 141 | if (Args->Argument[1] == 0) { 142 | // Just map a GB of space in, if they need more than that in the memory base then screw you 143 | cpu->MapRegion(cpu->GetTLSThread(), DataOffset, 0x10000000); 144 | dataspace = DataOffset; 145 | Result = DataOffset; 146 | } 147 | else { 148 | uint64_t addedSize = Args->Argument[1] - dataspace; 149 | 150 | dataspacesize += addedSize; 151 | 152 | printf("Adding Size: %ld to Space 0x%zx base 0x%lx\n", addedSize, dataspace, Args->Argument[1]); 153 | Result = dataspace + dataspacesize; 154 | } 155 | break; 156 | } 157 | case SYSCALL_MMAP: { 158 | // 0: addr 159 | // 1: len 160 | // 2: prot 161 | // 3: flags 162 | // 4: fd 163 | // 5: offset 164 | static uint64_t LastMMAP = 0xd000'0000; 165 | uint64_t BasePtr = AlignDown(LastMMAP, PAGE_SIZE); 166 | uint64_t BaseSize = AlignUp(Args->Argument[2], PAGE_SIZE); 167 | LastMMAP += BaseSize; 168 | #if 0 169 | cpu->MapRegion(cpu->GetTLSThread(), BasePtr, BaseSize); 170 | #else 171 | cpu->MapRegionOnAll(BasePtr, BaseSize); 172 | #endif 173 | Result = BasePtr; 174 | break; 175 | } 176 | case SYSCALL_CLONE: { 177 | // 0: clone_flags 178 | // 1: newsp 179 | // 2: parent_tidptr 180 | // 3: child_tidptr 181 | // 4: tls 182 | 183 | uint64_t *res = cpu->MemoryMapper->GetPointer(Args->Argument[2]); 184 | 185 | printf("clone(%lx, %lx, %lx, %lx, %lx)\n", 186 | Args->Argument[1], 187 | Args->Argument[2], 188 | Args->Argument[3], 189 | Args->Argument[4], 190 | Args->Argument[5]); 191 | uint32_t flags = Args->Argument[1]; 192 | #define TPRINT(x, y) \ 193 | if (flags & y) printf("\tFlag " #x "\n") 194 | TPRINT(CSIGNAL , 0x000000ff); 195 | TPRINT(CLONE_VM , 0x00000100); 196 | TPRINT(CLONE_FS , 0x00000200); 197 | TPRINT(CLONE_FILES , 0x00000401); 198 | TPRINT(CLONE_SIGHAND , 0x00801); 199 | TPRINT(CLONE_PTRACE , 0x00002001); 200 | TPRINT(CLONE_VFORK , 0x00004001); 201 | TPRINT(CLONE_PARENT , 0x00008001); 202 | TPRINT(CLONE_THREAD , 0x00010001); 203 | TPRINT(CLONE_NEWNS , 0x00020001); 204 | TPRINT(CLONE_SYSVSEM , 0x00040001); 205 | TPRINT(CLONE_SETTLS , 0x00080001); 206 | TPRINT(CLONE_PARENT_SETTID , 0x00100001); 207 | TPRINT(CLONE_CHILD_CLEARTID , 0x00200001); 208 | TPRINT(CLONE_DETACHED , 0x00400001); 209 | TPRINT(CLONE_UNTRACED , 0x800001); 210 | TPRINT(CLONE_CHILD_SETTID , 0x01000001); 211 | TPRINT(CLONE_NEWCGROUP , 0x02000001); 212 | TPRINT(CLONE_NEWUTS , 0x04000001); 213 | TPRINT(CLONE_NEWIPC , 0x08000001); 214 | TPRINT(CLONE_NEWUSER , 0x10000001); 215 | TPRINT(CLONE_NEWPID , 0x20000001); 216 | TPRINT(CLONE_NEWNET , 0x40000001); 217 | TPRINT(CLONE_IO , 0x80000001); 218 | 219 | X86State NewState; 220 | memcpy(&NewState, &cpu->GetTLSThread()->CPUState, sizeof(X86State)); 221 | NewState.gregs[REG_RAX] = 0; 222 | NewState.gregs[REG_RSP] = Args->Argument[2]; 223 | NewState.fs = Args->Argument[5]; 224 | 225 | // XXX: Hack to offset past the syscall instruction 226 | NewState.rip += 2; 227 | 228 | auto threadstate = cpu->NewThread(&NewState, Args->Argument[3], Args->Argument[4]); 229 | 230 | // For some reason the kernel does this 231 | threadstate->CPUState.gregs[REG_RBX] = 0; 232 | threadstate->CPUState.gregs[REG_RBP] = 0; 233 | Result = threadstate->threadmanager.GetTID(); 234 | if (flags & CLONE_PARENT_SETTID) { 235 | uint64_t *tidptr = cpu->MemoryMapper->GetPointer(Args->Argument[3]); 236 | *tidptr = threadstate->threadmanager.GetTID(); 237 | } 
238 | // Time to kick off the thread actually 239 | std::lock_guard lk(threadstate->StartRunningMutex); 240 | threadstate->ShouldStart = true; 241 | threadstate->StartRunning.notify_all(); 242 | } 243 | break; 244 | case SYSCALL_FUTEX: { 245 | // futex(0x7fa5d28aa9d0, FUTEX_WAIT, 76036, NULL 246 | // 0: uaddr 247 | // 1: op 248 | // 2: val 249 | // 3: utime 250 | // 4: uaddr2 251 | // 5: val3 252 | 253 | uint64_t *res = cpu->MemoryMapper->GetPointer(Args->Argument[1]); 254 | uint8_t Command = Args->Argument[2] & 0x0F; 255 | printf("%ld: futex(%lx,\n\t%lx,\n\t%lx,\n\t%lx,\n\t%lx,\n\t%lx)\n", 256 | TID, 257 | Args->Argument[1], 258 | Args->Argument[2], 259 | Args->Argument[3], 260 | Args->Argument[4], 261 | Args->Argument[5], 262 | Args->Argument[6] 263 | ); 264 | 265 | switch (Command) { 266 | case 0: { // WAIT 267 | LogMan::Throw::A(!Args->Argument[4], "Can't handle timed futexes"); 268 | Futex *futex = new Futex{}; 269 | futex->addr = cpu->MemoryMapper->GetPointer*>(Args->Argument[1]); 270 | futex->val = Args->Argument[3]; 271 | EmplaceFutex(Args->Argument[1], futex); 272 | std::unique_lock lk(futex->mute); 273 | futex->cv.wait(lk, [futex] { return futex->addr->load() != futex->val; }); 274 | } 275 | break; 276 | case 1: { // WAKE 277 | Futex *futex = GetFutex(Args->Argument[1]); 278 | for (uint32_t i = 0; i < Args->Argument[3]; ++i) 279 | futex->cv.notify_one(); 280 | } 281 | break; 282 | default: 283 | LogMan::Msg::A("Unknown futex command: ", Command); 284 | break; 285 | } 286 | break; 287 | } 288 | case SYSCALL_SET_TID_ADDRESS: 289 | cpu->GetTLSThread()->threadmanager.child_tid = Args->Argument[1]; 290 | Result = cpu->GetTLSThread()->threadmanager.GetTID(); 291 | break; 292 | case SYSCALL_SET_ROBUST_LIST: 293 | cpu->GetTLSThread()->threadmanager.robust_list_head = Args->Argument[1]; 294 | Result = 0; 295 | break; 296 | case SYSCALL_NANOSLEEP: { 297 | const struct timespec *req = cpu->MemoryMapper->GetPointer(Args->Argument[1]); 298 | struct timespec *rem = cpu->MemoryMapper->GetPointer(Args->Argument[2]); 299 | printf("Time: %ld %ld\n", req->tv_sec, req->tv_nsec); 300 | Result = nanosleep(req, rem); 301 | } 302 | break; 303 | // XXX: Currently unhandled bit hit 304 | case SYSCALL_MPROTECT: 305 | case SYSCALL_RT_SIGACTION: 306 | case SYSCALL_RT_SIGPROCMASK: 307 | case SYSCALL_EXIT_GROUP: 308 | case SYSCALL_TGKILL: 309 | case SYSCALL_PRLIMIT64: 310 | Result = 0; 311 | break; 312 | } 313 | return Result; 314 | } 315 | } 316 | -------------------------------------------------------------------------------- /Source/Core/HLE/Syscalls/Syscalls.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "FileManagement.h" 9 | #include "ThreadManagement.h" 10 | 11 | namespace Emu { 12 | class CPUCore; 13 | 14 | struct Futex { 15 | std::mutex mute; 16 | std::condition_variable cv; 17 | std::atomic *addr; 18 | uint32_t val; 19 | }; 20 | // Enum containing all support x86-64 linux syscalls 21 | enum Syscalls { 22 | SYSCALL_READ = 0, ///< __NR_read 23 | SYSCALL_WRITE = 1, ///< __NR_write 24 | SYSCALL_OPEN = 2, ///< __NR_open 25 | SYSCALL_CLOSE = 3, ///< __NR_close 26 | SYSCALL_FSTAT = 5, ///< __NR_fstat 27 | SYSCALL_LSEEK = 8, ///< __NR_lseek 28 | SYSCALL_MMAP = 9, ///< __NR_mmap 29 | SYSCALL_MPROTECT = 10, ///< __NR_mprotect 30 | SYSCALL_BRK = 12, ///< __NR_brk 31 | SYSCALL_RT_SIGACTION = 13, ///< __NR_rt_sigaction 32 | SYSCALL_RT_SIGPROCMASK = 14, ///< 
__NR_rt_sigprocmask 33 | SYSCALL_WRITEV = 20, ///< __NR_writev 34 | SYSCALL_ACCESS = 21, ///< __NR_access 35 | SYSCALL_NANOSLEEP = 35, ///< __NR_nanosleep 36 | SYSCALL_GETPID = 39, ///< __NR_getpid 37 | SYSCALL_CLONE = 56, ///< __NR_clone 38 | SYSCALL_EXIT = 60, ///< __NR_exit 39 | SYSCALL_UNAME = 63, ///< __NR_uname 40 | SYSCALL_READLINK = 89, ///< __NR_readlink 41 | SYSCALL_GETUID = 102, ///< __NR_getuid 42 | SYSCALL_GETGID = 104, ///< __NR_getgid 43 | SYSCALL_GETEUID = 107, ///< __NR_geteuid 44 | SYSCALL_GETEGID = 108, ///< __NR_getegid 45 | SYSCALL_ARCH_PRCTL = 158, ///< __NR_arch_prctl 46 | SYSCALL_GETTID = 186, ///< __NR_gettid 47 | SYSCALL_FUTEX = 202, ///< __NR_futex 48 | SYSCALL_SET_TID_ADDRESS = 218, ///< __NR_set_tid_address 49 | SYSCALL_CLOCK_GETTIME = 228, ///< __NR_clock_gettime 50 | SYSCALL_EXIT_GROUP = 231, ///< __NR_exit_group 51 | SYSCALL_TGKILL = 234, ///< __NR_tgkill 52 | SYSCALL_OPENAT = 257, ///< __NR_openat 53 | SYSCALL_SET_ROBUST_LIST = 273, ///< __NR_set_robust_list 54 | SYSCALL_PRLIMIT64 = 302, ///< __NR_prlimit64 55 | }; 56 | 57 | class SyscallHandler final { 58 | public: 59 | struct SyscallArguments { 60 | static constexpr std::size_t MAX_ARGS = 7; 61 | uint64_t Argument[MAX_ARGS]; 62 | }; 63 | SyscallHandler(CPUCore *CPU) : cpu {CPU}, filemanager {CPU} {} 64 | uint64_t HandleSyscall(SyscallArguments *Args); 65 | 66 | void EmplaceFutex(uint64_t Addr, Futex *futex) { 67 | std::scoped_lock lk(FutexMutex); 68 | Futexes[Addr] = futex; 69 | } 70 | Futex *GetFutex(uint64_t Addr) { 71 | std::scoped_lock lk(FutexMutex); 72 | return Futexes[Addr]; 73 | } 74 | private: 75 | CPUCore *cpu; 76 | FileManager filemanager; 77 | std::map Futexes; 78 | std::mutex FutexMutex; 79 | 80 | // BRK management 81 | uint64_t dataspace {}; 82 | uint64_t dataspacesize{}; 83 | }; 84 | } 85 | -------------------------------------------------------------------------------- /Source/Core/HLE/Syscalls/ThreadManagement.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | namespace Emu { 5 | // XXX: This should map multiple IDs correctly 6 | // Tracking relationships between thread IDs and such 7 | class ThreadManagement { 8 | public: 9 | uint64_t GetUID() { return UID; } 10 | uint64_t GetGID() { return GID; } 11 | uint64_t GetEUID() { return EUID; } 12 | uint64_t GetEGID() { return EGID; } 13 | uint64_t GetTID() { return TID; } 14 | uint64_t GetPID() { return PID; } 15 | 16 | uint64_t UID{1000}; 17 | uint64_t GID{1000}; 18 | uint64_t EUID{1000}; 19 | uint64_t EGID{1000}; 20 | uint64_t TID{1}; 21 | uint64_t PID{1}; 22 | uint64_t child_tid{0}; 23 | uint64_t parent_tid{0}; 24 | uint64_t robust_list_head{0}; 25 | }; 26 | } 27 | -------------------------------------------------------------------------------- /Source/Core/Memmap.cpp: -------------------------------------------------------------------------------- 1 | #include "Memmap.h" 2 | #include "LogManager.h" 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | namespace Emu { 10 | bool Memmap::AllocateSHMRegion(size_t Size) { 11 | const std::string SHMName = "EmuSHM"; 12 | SHMfd = shm_open(SHMName.c_str(), O_RDWR | O_CREAT | O_EXCL, 0600); 13 | if (SHMfd == -1) { 14 | LogMan::Msg::E("Couldn't open SHM"); 15 | return false; 16 | } 17 | 18 | // Unlink the shm file immediately to not leave it around 19 | shm_unlink(SHMName.c_str()); 20 | 21 | // Extend the SHM to the size we requested 22 | if (ftruncate(SHMfd, Size) != 0) { 23 | LogMan::Msg::E("Couldn't set SHM 
size"); 24 | return false; 25 | } 26 | 27 | SHMSize = Size; 28 | Base = MapRegion(0, SHMSize, false); 29 | printf("Base Ptr: %p\n", Base); 30 | return true; 31 | } 32 | 33 | void *Memmap::MapRegion(size_t Offset, size_t Size, bool Fixed) { 34 | void *Ptr = mmap((void*)((uintptr_t)Base+Offset), Size, PROT_READ | PROT_WRITE, 35 | MAP_SHARED | (Fixed ? MAP_FIXED : 0), SHMfd, Offset); 36 | 37 | if (Ptr == MAP_FAILED) { 38 | LogMan::Msg::A("Failed to map memory region"); 39 | return nullptr; 40 | } 41 | printf("Mapped region: 0x%zx -> %p %zx\n", Offset, Ptr, Size); 42 | MappedRegions.emplace_back(MemRegion{Ptr, Offset, Size}); 43 | return Ptr; 44 | } 45 | 46 | void Memmap::UnmapRegion(void *Ptr, size_t Size) { 47 | auto it = std::find(MappedRegions.begin(), MappedRegions.end(), Ptr); 48 | if (it != MappedRegions.end()) { 49 | munmap(Ptr, Size); 50 | MappedRegions.erase(it); 51 | } 52 | } 53 | 54 | void Memmap::DeallocateSHMRegion() { 55 | close(SHMfd); 56 | } 57 | 58 | 59 | void *Memmap::GetPointer(uint64_t Offset) { 60 | for (const auto &Region : MappedRegions) { 61 | if (Offset >= Region.Offset && 62 | Offset < (Region.Offset + Region.Size)) 63 | return reinterpret_cast(reinterpret_cast(Region.Ptr) + (Offset - Region.Offset)); 64 | } 65 | return nullptr; 66 | } 67 | 68 | } 69 | -------------------------------------------------------------------------------- /Source/Core/Memmap.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | 6 | namespace Emu { 7 | 8 | class Memmap { 9 | public: 10 | bool AllocateSHMRegion(size_t Size); 11 | void DeallocateSHMRegion(); 12 | 13 | void *MapRegion(size_t Offset, size_t Size, bool Fixed = true); 14 | void UnmapRegion(void *Ptr, size_t Size); 15 | 16 | void *GetPointer(uint64_t Offset); 17 | void *GetMemoryBase() { return Base; } 18 | 19 | template 20 | T GetPointer(uint64_t Offset) { 21 | return reinterpret_cast(GetPointer(Offset)); 22 | } 23 | 24 | template 25 | T GetBaseOffset(uint64_t Offset) { 26 | return reinterpret_cast((reinterpret_cast(GetMemoryBase()) + Offset)); 27 | } 28 | 29 | struct MemRegion { 30 | void *Ptr; 31 | size_t Offset; 32 | size_t Size; 33 | 34 | bool operator==(void* rhs) { return Ptr == rhs; } 35 | }; 36 | 37 | std::vector MappedRegions; 38 | 39 | private: 40 | int SHMfd; 41 | size_t SHMSize; 42 | void *Base{}; 43 | }; 44 | } 45 | -------------------------------------------------------------------------------- /Source/UI/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(NAME Test) 2 | set(SRCS TestHarness.cpp) 3 | 4 | add_executable(${NAME} ${SRCS}) 5 | target_link_libraries(${NAME} Core SonicUtils unicorn pthread LLVM) 6 | 7 | set(NAME HostInterface) 8 | set(SRCS HostInterface.cpp) 9 | 10 | add_executable(${NAME} ${SRCS}) 11 | target_link_libraries(${NAME} Core SonicUtils unicorn pthread LLVM) 12 | -------------------------------------------------------------------------------- /Source/UI/HostInterface.cpp: -------------------------------------------------------------------------------- 1 | #include "Core/Core.h" 2 | #include "ELFLoader.h" 3 | #include "LogManager.h" 4 | 5 | void MsgHandler(LogMan::DebugLevels Level, std::string const &Message) { 6 | const char *CharLevel{nullptr}; 7 | 8 | switch (Level) { 9 | case LogMan::NONE: 10 | CharLevel = "NONE"; 11 | break; 12 | case LogMan::ASSERT: 13 | CharLevel = "ASSERT"; 14 | break; 15 | case LogMan::ERROR: 16 | CharLevel = "ERROR"; 17 | 
break; 18 | case LogMan::DEBUG: 19 | CharLevel = "DEBUG"; 20 | break; 21 | case LogMan::INFO: 22 | CharLevel = "INFO"; 23 | break; 24 | default: 25 | CharLevel = "???"; 26 | break; 27 | } 28 | printf("[%s] %s\n", CharLevel, Message.c_str()); 29 | } 30 | 31 | void AssertHandler(std::string const &Message) { 32 | printf("[ASSERT] %s\n", Message.c_str()); 33 | } 34 | 35 | int main(int argc, char **argv) { 36 | LogMan::Throw::InstallHandler(AssertHandler); 37 | LogMan::Msg::InstallHandler(MsgHandler); 38 | 39 | LogMan::Throw::A(argc > 1, "Not enough arguments"); 40 | 41 | Emu::Core localCore; 42 | bool Result = localCore.Load(argv[1], {}); 43 | printf("Managed to load? %s\n", Result ? "Yes" : "No"); 44 | 45 | return 0; 46 | } 47 | -------------------------------------------------------------------------------- /Source/UI/TestHarness.cpp: -------------------------------------------------------------------------------- 1 | #include "Core/CPU/IntrusiveIRList.h" 2 | #include "Core/CPU/CPUBackend.h" 3 | #include "Core/CPU/CPUCore.h" 4 | #include "ELFLoader.h" 5 | #include "LogManager.h" 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | void MsgHandler(LogMan::DebugLevels Level, std::string const &Message) { 26 | const char *CharLevel{nullptr}; 27 | 28 | switch (Level) { 29 | case LogMan::NONE: 30 | CharLevel = "NONE"; 31 | break; 32 | case LogMan::ASSERT: 33 | CharLevel = "ASSERT"; 34 | break; 35 | case LogMan::ERROR: 36 | CharLevel = "ERROR"; 37 | break; 38 | case LogMan::DEBUG: 39 | CharLevel = "DEBUG"; 40 | break; 41 | case LogMan::INFO: 42 | CharLevel = "INFO"; 43 | break; 44 | default: 45 | CharLevel = "???"; 46 | break; 47 | } 48 | printf("[%s] %s\n", CharLevel, Message.c_str()); 49 | } 50 | 51 | void AssertHandler(std::string const &Message) { 52 | printf("[ASSERT] %s\n", Message.c_str()); 53 | } 54 | 55 | class LLVMIRVisitor final { 56 | public: 57 | LLVMIRVisitor(); 58 | ~LLVMIRVisitor(); 59 | void *Visit(Emu::IR::IntrusiveIRList const *ir); 60 | 61 | private: 62 | llvm::LLVMContext *con; 63 | LLVMContextRef conref; 64 | llvm::Module *mainmodule; 65 | llvm::IRBuilder<> *builder; 66 | llvm::Function *func; 67 | std::vector<llvm::ExecutionEngine*> functions; 68 | }; 69 | 70 | LLVMIRVisitor::LLVMIRVisitor() { 71 | using namespace llvm; 72 | InitializeNativeTarget(); 73 | InitializeNativeTargetAsmPrinter(); 74 | conref = LLVMContextCreate(); 75 | con = *llvm::unwrap(&conref); 76 | mainmodule = new llvm::Module("Main Module", *con); 77 | builder = new IRBuilder<>(*con); 78 | } 79 | 80 | LLVMIRVisitor::~LLVMIRVisitor() { 81 | delete builder; 82 | for (auto module : functions) 83 | delete module; 84 | LLVMContextDispose(conref); 85 | } 86 | 87 | void *LLVMIRVisitor::Visit(Emu::IR::IntrusiveIRList const *ir) { 88 | using namespace llvm; 89 | 90 | std::string FunctionName = "Function"; 91 | auto testmodule = new llvm::Module("Main Module", *con); 92 | 93 | SmallVector<std::string, 0> attrs; 94 | std::string arch = ""; 95 | std::string cpu = "haswell"; 96 | 97 | auto engine_builder = EngineBuilder(std::unique_ptr<llvm::Module>(testmodule)); 98 | engine_builder.setEngineKind(EngineKind::JIT); 99 | TargetOptions opts; 100 | 101 | Triple test("x86_64", "None", "Unknown"); 102 | 103 | TargetMachine* target = engine_builder.selectTarget( 104 | test, 105 | arch, cpu, attrs); 106 | if (target == nullptr) { 107 | printf("Couldn't select target\n"); 108 | return 
nullptr; 109 | } 110 | auto engine = engine_builder.create(target); 111 | 112 | auto functype = FunctionType::get(Type::getInt64Ty(*con), {Type::getInt64Ty(*con)}, false); 113 | func = Function::Create(functype, 114 | Function::ExternalLinkage, 115 | FunctionName, 116 | testmodule); 117 | 118 | func->setCallingConv(CallingConv::C); 119 | 120 | size_t Size = ir->GetOffset(); 121 | size_t i = 0; 122 | BasicBlock *curblock = nullptr; 123 | BasicBlock *entryblock = nullptr; 124 | std::unordered_map<size_t, BasicBlock*> block_locations; 125 | std::unordered_map<size_t, Value*> Values; 126 | 127 | struct FixupData { 128 | size_t From; 129 | BasicBlock *block; 130 | }; 131 | // Destination 132 | std::unordered_map<size_t, FixupData> RequiredFixups; 133 | Value *ContextData; 134 | while (i != Size) { 135 | auto op = ir->GetOp(i); 136 | using namespace Emu::IR; 137 | switch (op->Op) { 138 | case OP_BEGINFUNCTION: 139 | case OP_ENDFUNCTION: 140 | break; 141 | case OP_BEGINBLOCK: { 142 | LogMan::Throw::A(curblock == nullptr, "Oops, hit begin block without ending the other one."); 143 | curblock = BasicBlock::Create(*con, "block", func); 144 | block_locations[i] = curblock; 145 | if (entryblock == nullptr) { 146 | entryblock = curblock; 147 | } 148 | auto it = RequiredFixups.find(i); 149 | if (it != RequiredFixups.end()) { 150 | // Something is trying to jump to this location 151 | // This only happens on forward jumps! 152 | FixupData Fixup = it->second; RequiredFixups.erase(it); 153 | builder->SetInsertPoint(Fixup.block); 154 | auto FromOp = ir->GetOp(Fixup.From); 155 | switch (FromOp->Op) { 156 | // case OP_COND_JUMP: { 157 | // auto Op = op->C(); 158 | // 159 | // } 160 | // break; 161 | case OP_JUMP: { 162 | auto Op = op->C(); 163 | builder->CreateBr(curblock); 164 | } 165 | break; 166 | default: 167 | LogMan::Msg::A("Unknown Source inst type!"); 168 | } 169 | } 170 | builder->SetInsertPoint(curblock); 171 | } 172 | break; 173 | case OP_ENDBLOCK: { 174 | curblock = nullptr; 175 | } 176 | break; 177 | case OP_ALLOCATE_CONTEXT: { 178 | auto Op = op->C(); 179 | ContextData = builder->CreateAlloca(ArrayType::get(Type::getInt64Ty(*con), Op->Size / 8)); 180 | } 181 | break; 182 | case OP_STORECONTEXT: { 183 | auto Op = op->C(); 184 | auto location = builder->CreateGEP(ContextData, 185 | { 186 | builder->getInt32(0), 187 | builder->getInt32(Op->Offset / 8), 188 | }, 189 | "ContextLoad"); 190 | builder->CreateStore(Values[Op->Arg], location); 191 | } 192 | break; 193 | case OP_LOADCONTEXT: { 194 | auto Op = op->C(); 195 | auto location = builder->CreateGEP(ContextData, 196 | { 197 | builder->getInt32(0), 198 | builder->getInt32(Op->Offset / 8), 199 | }, 200 | "ContextLoad"); 201 | Values[i] = builder->CreateLoad(location); 202 | } 203 | break; 204 | case OP_GETARGUMENT: { 205 | auto Op = op->C(); 206 | Values[i] = func->arg_begin() + Op->Argument; 207 | } 208 | break; 209 | case OP_ADD: { 210 | auto Op = op->C(); 211 | Values[i] = builder->CreateAdd(Values[Op->Args[0]], Values[Op->Args[1]]); 212 | } 213 | break; 214 | case OP_SUB: { 215 | auto Op = op->C(); 216 | Values[i] = builder->CreateSub(Values[Op->Args[0]], Values[Op->Args[1]]); 217 | } 218 | break; 219 | case OP_SHL: { 220 | auto Op = op->C(); 221 | Values[i] = builder->CreateShl(Values[Op->Args[0]], Values[Op->Args[1]]); 222 | } 223 | break; 224 | case OP_CONSTANT: { 225 | auto Op = op->C(); 226 | Values[i] = builder->getInt64(Op->Constant); 227 | } 228 | break; 229 | case OP_LOAD_MEM: { 230 | auto Op = op->C(); 231 | 232 | Value *Src; 233 | switch (Op->Size) { 234 | case 4: 235 | Src = 
builder->CreateIntToPtr(Values[Op->Arg[0]], Type::getInt32PtrTy(*con)); 236 | break; 237 | case 8: 238 | Src = builder->CreateIntToPtr(Values[Op->Arg[0]], Type::getInt64PtrTy(*con)); 239 | break; 240 | default: 241 | printf("Unknown LoadSize: %d\n", Op->Size); 242 | std::abort(); 243 | break; 244 | } 245 | Values[i] = builder->CreateLoad(Src); 246 | } 247 | break; 248 | case OP_COND_JUMP: { 249 | auto Op = op->C(); 250 | if (i > Op->Target) { 251 | // Conditional backwards jump 252 | // IR doesn't split the block on the false path 253 | // LLVM splits the block at all branches 254 | auto FalsePath = BasicBlock::Create(*con, "false", func); 255 | 256 | auto Comp = builder->CreateICmpNE(Values[Op->Cond], builder->getInt64(0)); 257 | builder->CreateCondBr(Comp, block_locations[Op->Target], FalsePath); 258 | builder->SetInsertPoint(FalsePath); 259 | curblock = FalsePath; 260 | block_locations[i] = curblock; 261 | } 262 | else { 263 | // Conditional forward jump 264 | FixupData data; 265 | data.From = i; 266 | data.block = curblock; 267 | RequiredFixups[Op->Target] = data; 268 | } 269 | } 270 | break; 271 | case OP_JUMP: { 272 | auto Op = op->C(); 273 | if (i > Op->Target) { 274 | // Backwards jump 275 | builder->CreateBr(block_locations[Op->Target]); 276 | } 277 | else { 278 | // Forward jump 279 | FixupData data; 280 | data.From = i; 281 | data.block = curblock; 282 | RequiredFixups[Op->Target] = data; 283 | } 284 | } 285 | break; 286 | case OP_RETURN: { 287 | auto Op = op->C(); 288 | builder->CreateRet(Values[Op->Arg]); 289 | } 290 | break; 291 | default: 292 | LogMan::Msg::A("Unknown Op: ", Emu::IR::GetName(op->Op).data()); 293 | break; 294 | } 295 | i += Emu::IR::GetSize(op->Op); 296 | } 297 | legacy::PassManager PM; 298 | PassManagerBuilder PMBuilder; 299 | PMBuilder.OptLevel = 2; 300 | raw_ostream& out = outs(); 301 | PM.add(createPrintModulePass(out)); 302 | 303 | verifyModule(*testmodule, &out); 304 | PMBuilder.populateModulePassManager(PM); 305 | PM.run(*testmodule); 306 | engine->finalizeObject(); 307 | 308 | functions.emplace_back(engine); 309 | void *ptr = (void*)engine->getFunctionAddress(FunctionName); 310 | return ptr; 311 | } 312 | 313 | int main(int argc, char **argv) { 314 | LogMan::Throw::InstallHandler(AssertHandler); 315 | LogMan::Msg::InstallHandler(MsgHandler); 316 | 317 | using namespace Emu::IR; 318 | Emu::IR::IntrusiveIRList ir{8 * 1024 * 1024}; 319 | #if 1 320 | auto func = ir.AllocateOp(); 321 | func.first->Arguments = 1; 322 | func.first->HasReturn = true; 323 | 324 | uint64_t ResultLocation = 0; 325 | uint64_t DecLocation = 8; 326 | 327 | 328 | { 329 | auto header = ir.AllocateOp(); 330 | IntrusiveIRList::IRPair header_jump; 331 | { 332 | auto result = ir.AllocateOp(); 333 | result.first->Constant = 0; 334 | 335 | auto context = ir.AllocateOp(); 336 | context.first->Size = 16; 337 | 338 | auto resultstore = ir.AllocateOp(); 339 | resultstore.first->Size = 8; 340 | resultstore.first->Offset = ResultLocation; 341 | resultstore.first->Arg = result.second; 342 | 343 | auto loop_store = ir.AllocateOp(); 344 | loop_store.first->Size = 8; 345 | loop_store.first->Offset = DecLocation; 346 | loop_store.first->Arg = result.second; 347 | 348 | header_jump = ir.AllocateOp(); 349 | } 350 | ir.AllocateOp(); 351 | 352 | auto loop_body = ir.AllocateOp(); 353 | header_jump.first->Target = loop_body.second; 354 | 355 | IntrusiveIRList::IRPair body_jump; 356 | { 357 | auto loop_load = ir.AllocateOp(); 358 | loop_load.first->Size = 8; 359 | loop_load.first->Offset = DecLocation; 360 
| 361 | { 362 | auto scale_amt = ir.AllocateOp(); 363 | scale_amt.first->Constant = 3; 364 | 365 | auto arg1 = ir.AllocateOp(); 366 | arg1.first->Argument = 0; 367 | 368 | auto loop_multiply = ir.AllocateOp(); 369 | loop_multiply.first->Args[0] = loop_load.second; 370 | loop_multiply.first->Args[1] = scale_amt.second; 371 | 372 | auto mem_offset = ir.AllocateOp(); 373 | mem_offset.first->Args[0] = loop_multiply.second; 374 | mem_offset.first->Args[1] = arg1.second; 375 | 376 | auto loadmem = ir.AllocateOp(); 377 | loadmem.first->Size = 8; 378 | loadmem.first->Arg[0] = mem_offset.second; 379 | loadmem.first->Arg[1] = ~0; 380 | 381 | auto res_load = ir.AllocateOp(); 382 | res_load.first->Size = 8; 383 | res_load.first->Offset = ResultLocation; 384 | 385 | auto local_result = ir.AllocateOp(); 386 | local_result.first->Args[0] = loadmem.second; 387 | local_result.first->Args[1] = res_load.second; 388 | 389 | auto res_store = ir.AllocateOp(); 390 | res_store.first->Size = 8; 391 | res_store.first->Offset = ResultLocation; 392 | res_store.first->Arg = local_result.second; 393 | } 394 | 395 | auto inc_amt = ir.AllocateOp(); 396 | inc_amt.first->Constant = 1; 397 | 398 | auto loop_index = ir.AllocateOp(); 399 | loop_index.first->Args[0] = loop_load.second; 400 | loop_index.first->Args[1] = inc_amt.second; 401 | 402 | auto loop_store = ir.AllocateOp(); 403 | loop_store.first->Size = 8; 404 | loop_store.first->Offset = DecLocation; 405 | loop_store.first->Arg = loop_index.second; 406 | 407 | auto loop_count = ir.AllocateOp(); 408 | loop_count.first->Constant = 10000; 409 | 410 | auto loop_remaining = ir.AllocateOp(); 411 | loop_remaining.first->Args[0] = loop_count.second; 412 | loop_remaining.first->Args[1] = loop_index.second; 413 | 414 | auto cond_jump = ir.AllocateOp(); 415 | cond_jump.first->Cond = loop_remaining.second; 416 | cond_jump.first->Target = loop_body.second; 417 | 418 | body_jump = ir.AllocateOp(); 419 | } 420 | ir.AllocateOp(); 421 | 422 | auto loop_end = ir.AllocateOp(); 423 | body_jump.first->Target = loop_end.second; 424 | { 425 | auto loop_load = ir.AllocateOp(); 426 | loop_load.first->Size = 8; 427 | loop_load.first->Offset = ResultLocation; 428 | 429 | auto return_op = ir.AllocateOp(); 430 | return_op.first->Arg = loop_load.second; 431 | } 432 | ir.AllocateOp(); 433 | } 434 | ir.AllocateOp(); 435 | #else 436 | auto func = ir.AllocateOp(); 437 | func.first->Arguments = 1; 438 | func.first->HasReturn = true; 439 | ir.AllocateOp(); 440 | auto result = ir.AllocateOp(); 441 | result.first->Constant = 0xDEADBEEFBAD0DAD1ULL; 442 | auto return_op = ir.AllocateOp(); 443 | return_op.first->Arg = result.second; 444 | ir.AllocateOp(); 445 | ir.AllocateOp(); 446 | #endif 447 | 448 | ir.Dump(); 449 | 450 | LLVMIRVisitor visit; 451 | 452 | using PtrType = uint64_t (*)(uint64_t*); 453 | PtrType ptr = (PtrType)visit.Visit(&ir); 454 | 455 | #define LOOP_SIZE 10000 456 | std::vector<uint64_t> vals; 457 | vals.reserve(LOOP_SIZE); 458 | for (uint64_t i = 0; i < LOOP_SIZE; ++i) { 459 | vals.emplace_back(i); 460 | } 461 | 462 | if (ptr) { 463 | uint64_t ret = ptr(&vals.at(0)); 464 | printf("Ret: %zd\n", ret); 465 | printf("ptr: 0x%p\n", ptr); 466 | std::abort(); 467 | } 468 | else { 469 | printf("No return pointer passed\n"); 470 | } 471 | return 0; 472 | } 473 | --------------------------------------------------------------------------------
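
For reference, a minimal self-contained sketch of the SHM-backed guest-memory scheme that Memmap.cpp implements above: one shared-memory object backs the guest address space, it is mapped at a host base address, and a guest offset resolves to Base + Offset the same way Memmap::GetPointer computes it. The SHM name, sizes, and offset below are illustrative assumptions and are not code from this repository.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>

int main() {
  // One SHM object backs the whole guest address space (hypothetical 64MiB here).
  const size_t GuestSize = 64 * 1024 * 1024;
  int fd = shm_open("/EmuSHM-sketch", O_RDWR | O_CREAT | O_EXCL, 0600);
  if (fd == -1) { perror("shm_open"); return 1; }
  // Unlink immediately so nothing is left behind; the open fd stays valid.
  shm_unlink("/EmuSHM-sketch");
  if (ftruncate(fd, GuestSize) != 0) { perror("ftruncate"); close(fd); return 1; }

  // Map the object once; guest pointers are then derived additively from Base.
  void *Base = mmap(nullptr, GuestSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (Base == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

  // Translate a (hypothetical) guest offset to a host pointer, as in Memmap::GetPointer.
  const size_t Offset = 0x10000;
  uint64_t *Host = reinterpret_cast<uint64_t *>(reinterpret_cast<uintptr_t>(Base) + Offset);
  *Host = 0xDEADBEEF; // a write through the host pointer lands at guest offset 0x10000
  printf("guest offset 0x%zx -> host %p, value 0x%lx\n",
         Offset, (void *)Host, (unsigned long)*Host);

  munmap(Base, GuestSize);
  close(fd);
  return 0;
}

On older glibc this sketch may need to be linked with -lrt for shm_open/shm_unlink; recent glibc provides them in libc directly.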