├── rustfmt.toml ├── .github ├── scripts │ ├── package_version.py │ ├── release_notes.py │ └── build_dist_archive.py └── workflows │ ├── release.yml │ ├── release-preview.yml │ └── ci.yml ├── test_utils ├── Cargo.toml └── src │ └── lib.rs ├── src ├── lib.rs ├── disabler │ ├── util.rs │ ├── call_hook.rs │ ├── result.rs │ ├── game.rs │ ├── slist.rs │ ├── code_buffer.rs │ ├── ffi.rs │ ├── lazy_global.rs │ ├── entry_point.rs │ ├── steamstub.rs │ └── mod.rs ├── analysis │ ├── vm │ │ ├── util.rs │ │ ├── registers.rs │ │ ├── image.rs │ │ └── memory.rs │ ├── mod.rs │ ├── entry_point.rs │ ├── cfg.rs │ └── encryption.rs └── patch.rs ├── test_launcher ├── Cargo.toml └── src │ └── main.rs ├── test_dll ├── Cargo.toml └── src │ └── lib.rs ├── .gitignore ├── LICENSE-MIT ├── Cargo.toml ├── examples └── analysis.rs ├── CHANGELOG.md ├── README.md ├── LICENSE-APACHE └── include └── dearxan.h /rustfmt.toml: -------------------------------------------------------------------------------- 1 | indent_style = "Block" 2 | reorder_imports = true 3 | group_imports = "StdExternalCrate" 4 | control_brace_style = "ClosingNextLine" 5 | chain_width = 80 6 | merge_derives = true 7 | single_line_if_else_max_width = 80 8 | wrap_comments = true 9 | comment_width = 100 -------------------------------------------------------------------------------- /.github/scripts/package_version.py: -------------------------------------------------------------------------------- 1 | import os, json 2 | 3 | def get_version() -> str: 4 | cargo_meta = json.loads(os.popen("cargo metadata --format-version 1 -q").read()) 5 | return next(p["version"] for p in cargo_meta["packages"] if p["name"] == "dearxan") 6 | 7 | if __name__ == '__main__': 8 | print(get_version()) -------------------------------------------------------------------------------- /.github/scripts/release_notes.py: -------------------------------------------------------------------------------- 1 | import package_version, re 2 | 3 | version = 
package_version.get_version() 4 | 5 | with open("CHANGELOG.md", "r") as f: 6 | changelog = f.read() 7 | 8 | regex = re.compile(r"\[v" + version.replace(".", "\\.") + r"\][^\n]*\n+(.*?)\n(\#\# |\Z)", re.S) 9 | changes = next(regex.finditer(changelog)).group(1) 10 | 11 | print(changes) -------------------------------------------------------------------------------- /test_utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dearxan-test-utils" 3 | description = "Utilities for dearxan's integration tests" 4 | version.workspace = true 5 | authors.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | 10 | [dependencies] 11 | pelite.workspace = true 12 | log.workspace = true 13 | simplelog.workspace = true -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(docsrs, feature(doc_auto_cfg))] 2 | #![deny(unsafe_op_in_unsafe_fn)] 3 | #![doc = include_str!("../README.md")] 4 | 5 | #[cfg(feature = "disabler")] 6 | pub mod disabler; 7 | 8 | pub mod analysis; 9 | pub mod patch; 10 | 11 | /// Re-export of the `iced_x86` crate. 12 | #[cfg(feature = "internal_api")] 13 | pub use iced_x86; 14 | #[cfg(feature = "internal_api")] 15 | /// Re-export of the `pelite` crate. 
16 | pub use pelite; 17 | -------------------------------------------------------------------------------- /test_launcher/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dearxan-test-launcher" 3 | description = "simple launcher to inject dearxan-dll in FromSoftware games" 4 | authors.workspace = true 5 | edition.workspace = true 6 | license.workspace = true 7 | 8 | [dependencies] 9 | log.workspace = true 10 | simplelog.workspace = true 11 | clap.workspace = true 12 | dll-syringe = { version = "0.17", default-features = false, features = ["syringe"] } 13 | dotenvy_macro = "0.15" 14 | steamlocate = "2.0" 15 | 16 | [dependencies.windows] 17 | version = "0.61" 18 | features = [ 19 | "Win32_System_Threading" 20 | ] -------------------------------------------------------------------------------- /test_dll/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dearxan-test-dll" 3 | authors.workspace = true 4 | edition.workspace = true 5 | license.workspace = true 6 | 7 | [lib] 8 | crate-type = ["cdylib"] 9 | 10 | [dependencies] 11 | log.workspace = true 12 | simplelog.workspace = true 13 | 14 | [features] 15 | instrument_stubs = ["dearxan/instrument_stubs"] 16 | 17 | [dependencies.dearxan] 18 | version = "*" 19 | path = ".." 
20 | features = ["disabler", "rayon"] 21 | 22 | [dependencies.windows] 23 | version = "0.61" 24 | features = [ 25 | "Win32_System_Console", 26 | "Win32_System_LibraryLoader", 27 | "Win32_System_SystemServices", 28 | ] 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # These are backup files generated by rustfmt 7 | **/*.rs.bk 8 | 9 | # MSVC Windows builds of rustc generate these, which store debugging information 10 | *.pdb 11 | 12 | # VSCode configuration 13 | .vscode/ 14 | 15 | # RustRover 16 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 17 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 18 | # and can be added to the global gitignore or merged into this file. For a more nuclear 19 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
20 | #.idea/ 21 | 22 | # Log files 23 | *.log 24 | 25 | # Compiled Python scripts 26 | __pycache__ -------------------------------------------------------------------------------- /src/disabler/util.rs: -------------------------------------------------------------------------------- 1 | use windows_sys::Win32::System::Memory::{PAGE_EXECUTE_READWRITE, VirtualProtect}; 2 | 3 | unsafe fn with_rwx_inner(addr: *const (), size: usize, fun: impl FnOnce() -> R) -> R { 4 | let mut old_protect = Default::default(); 5 | let addr = addr as *const _; 6 | 7 | if unsafe { VirtualProtect(addr, size, PAGE_EXECUTE_READWRITE, &mut old_protect) } == 0 { 8 | panic!("VirtualProtect failed to make memory RWX"); 9 | } 10 | 11 | let ret = fun(); 12 | 13 | if unsafe { VirtualProtect(addr, size, old_protect, &mut old_protect) } == 0 { 14 | panic!("VirtualProtect failed to restore memory protection flags"); 15 | } 16 | 17 | ret 18 | } 19 | 20 | pub unsafe fn with_rwx_ptr(ptr: *mut T, fun: impl FnOnce(*mut T) -> R) -> R { 21 | let size = std::mem::size_of_val(unsafe { &*ptr }); 22 | unsafe { with_rwx_inner(ptr.cast(), size, || fun(ptr)) } 23 | } 24 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright © 2025 William Tremblay 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the “Software”), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 
12 | 13 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. -------------------------------------------------------------------------------- /src/disabler/call_hook.rs: -------------------------------------------------------------------------------- 1 | use std::mem::transmute_copy; 2 | 3 | use crate::disabler::util; 4 | 5 | pub struct CallHook { 6 | imm_ptr: *mut i32, 7 | original: F, 8 | } 9 | 10 | unsafe impl Send for CallHook {} 11 | unsafe impl Sync for CallHook {} 12 | 13 | impl CallHook { 14 | pub unsafe fn new(call_ptr: *mut u8) -> Self { 15 | const { 16 | assert!( 17 | size_of::() == size_of::(), 18 | "Call hook generic parameter must be pointer-sized" 19 | ); 20 | } 21 | 22 | let imm_ptr = call_ptr.wrapping_add(1) as *mut i32; 23 | let imm = unsafe { imm_ptr.read_unaligned() }; 24 | let target = (imm_ptr.addr() + 4).wrapping_add_signed(imm as isize); 25 | 26 | Self { 27 | imm_ptr, 28 | original: unsafe { transmute_copy(&target) }, 29 | } 30 | } 31 | 32 | pub fn original(&self) -> F { 33 | self.original 34 | } 35 | 36 | pub unsafe fn hook_with(&self, new_target: F) { 37 | let address: isize = unsafe { transmute_copy(&new_target) }; 38 | let imm: i32 = address.wrapping_sub_unsigned(self.imm_ptr.addr() + 4).try_into().unwrap(); 39 | unsafe { util::with_rwx_ptr(self.imm_ptr, |p| p.write_unaligned(imm)) } 40 | } 41 | 42 | pub unsafe fn unhook(&self) { 43 | unsafe { self.hook_with(self.original) } 44 | } 45 | } 46 | 47 | impl Drop for CallHook { 48 | fn drop(&mut self) { 49 | unsafe { self.unhook() } 50 | } 51 | } 52 | 
-------------------------------------------------------------------------------- /test_dll/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | 3 | use dearxan::disabler::neuter_arxan; 4 | use windows::Win32::{ 5 | Foundation::HMODULE, 6 | System::{ 7 | Console::{ATTACH_PARENT_PROCESS, AllocConsole, AttachConsole}, 8 | LibraryLoader::DisableThreadLibraryCalls, 9 | SystemServices::DLL_PROCESS_ATTACH, 10 | }, 11 | }; 12 | 13 | #[allow(non_snake_case)] 14 | #[unsafe(no_mangle)] 15 | unsafe extern "system" fn DllMain( 16 | h_inst_dll: HMODULE, 17 | fdw_reason: u32, 18 | _lpv_reserved: *const (), 19 | ) -> i32 { 20 | if fdw_reason == DLL_PROCESS_ATTACH { 21 | unsafe { 22 | DisableThreadLibraryCalls(h_inst_dll).ok(); 23 | AttachConsole(ATTACH_PARENT_PROCESS).or_else(|_| AllocConsole()).unwrap(); 24 | }; 25 | simplelog::CombinedLogger::init(vec![ 26 | simplelog::TermLogger::new( 27 | simplelog::LevelFilter::Debug, 28 | simplelog::Config::default(), 29 | simplelog::TerminalMode::Stdout, 30 | simplelog::ColorChoice::Auto, 31 | ), 32 | simplelog::WriteLogger::new( 33 | simplelog::LevelFilter::Debug, 34 | simplelog::Config::default(), 35 | File::options() 36 | .create(true) 37 | .write(true) 38 | .truncate(true) 39 | .open("test-dearxan-dll.log") 40 | .unwrap(), 41 | ), 42 | ]) 43 | .unwrap(); 44 | 45 | unsafe { 46 | neuter_arxan(|result| { 47 | log::info!("arxan detected: {result:?}"); 48 | }) 49 | }; 50 | } 51 | 1 52 | } 53 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | branches: [main] 5 | pull_request: 6 | branches: [main] 7 | 8 | jobs: 9 | check-release-readiness: 10 | if: github.event_name == 'pull_request' 11 | name: Check release readiness 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | 
- uses: dtolnay/rust-toolchain@stable 16 | 17 | - name: Ensure changelog has notes for release 18 | run: python ./.github/scripts/release_notes.py 19 | 20 | - name: Publish dry-run 21 | run: cargo publish --dry-run -p dearxan -p dearxan-test-utils 22 | 23 | publish: 24 | if: github.event_name == 'push' 25 | name: cargo publish and create release 26 | runs-on: windows-latest 27 | permissions: 28 | contents: write 29 | 30 | steps: 31 | - uses: actions/checkout@v4 32 | - uses: dtolnay/rust-toolchain@stable 33 | 34 | - name: Make static library dist 35 | run: python ./.github/scripts/build_dist_archive.py 36 | 37 | - name: crates.io publish 38 | run: cargo publish -p dearxan -p dearxan-test-utils --token ${{ secrets.CRATES_IO_TOKEN }} 39 | 40 | - name: Get package version 41 | id: get_version 42 | run: | 43 | $version = python ./.github/scripts/package_version.py 44 | echo "version=$version" >> "$env:GITHUB_OUTPUT" 45 | 46 | - name: Generate release notes 47 | run: python ./.github/scripts/release_notes.py > release-notes.md 48 | 49 | - name: GitHub release 50 | uses: softprops/action-gh-release@v2 51 | with: 52 | tag_name: v${{ steps.get_version.outputs.version }} 53 | files: target/dearxan-${{ steps.get_version.outputs.version }}.zip 54 | body_path: release-notes.md -------------------------------------------------------------------------------- /.github/workflows/release-preview.yml: -------------------------------------------------------------------------------- 1 | name: Preview static library distribution 2 | on: pull_request 3 | 4 | jobs: 5 | build_static_library_artifact: 6 | name: Build and package static library into artifact 7 | runs-on: windows-latest 8 | steps: 9 | - uses: actions/checkout@v4 10 | - uses: dtolnay/rust-toolchain@stable 11 | - run: python ./.github/scripts/build_dist_archive.py 12 | - id: upload_dist 13 | uses: actions/upload-artifact@v4 14 | with: 15 | name: dearxan-static-${{ github.event.pull_request.head.sha }} 16 | path: target/dist/ 17 
| outputs: 18 | artifact-url: ${{ steps.upload_dist.outputs.artifact-url }} 19 | 20 | comment_static_library_artifact: 21 | name: Create or update PR comment with static library build 22 | if: github.event.pull_request.head.repo.full_name == github.repository 23 | runs-on: ubuntu-latest 24 | permissions: 25 | pull-requests: write 26 | needs: 27 | - build_static_library_artifact 28 | steps: 29 | - uses: peter-evans/find-comment@v3 30 | id: find_comment 31 | with: 32 | issue-number: ${{ github.event.pull_request.number }} 33 | comment-author: 'github-actions[bot]' 34 | body-includes: Static library release preview 35 | - uses: peter-evans/create-or-update-comment@v4 36 | with: 37 | comment-id: ${{ steps.find_comment.outputs.comment-id }} 38 | issue-number: ${{ github.event.pull_request.number }} 39 | edit-mode: replace 40 | body: |- 41 | ## Static library release preview 42 | 43 | [dearxan-static-${{ github.event.pull_request.head.sha }}](${{ needs.build_static_library_artifact.outputs.artifact-url }}) 44 | 45 | Note that this build artifact is only retained for 90 days. 
-------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Checks 2 | on: 3 | push: 4 | branches: [dev] 5 | pull_request: 6 | branches-ignore: [main] 7 | 8 | jobs: 9 | clippy: 10 | name: cargo clippy 11 | 12 | runs-on: windows-latest 13 | env: 14 | RUSTFLAGS: "-Dwarnings" 15 | steps: 16 | - uses: actions/checkout@v4 17 | - uses: dtolnay/rust-toolchain@nightly 18 | with: 19 | components: clippy 20 | - run: cargo clippy --workspace --all-targets --all-features 21 | 22 | rustfmt: 23 | name: cargo fmt 24 | 25 | runs-on: ubuntu-latest 26 | steps: 27 | - uses: actions/checkout@v4 28 | - uses: dtolnay/rust-toolchain@nightly 29 | with: 30 | components: rustfmt 31 | - run: cargo fmt --check --all 32 | 33 | rustdoc: 34 | name: cargo doc 35 | 36 | runs-on: ubuntu-latest 37 | steps: 38 | - uses: actions/checkout@v4 39 | - uses: dtolnay/rust-toolchain@nightly 40 | - run: cargo doc --all-features 41 | 42 | test_stable: 43 | name: build and test dearxan library on stable 44 | 45 | runs-on: windows-latest 46 | steps: 47 | - uses: actions/checkout@v4 48 | - uses: dtolnay/rust-toolchain@stable 49 | - run: cargo test -p dearxan --all-features 50 | 51 | test_nightly: 52 | name: build and test all utilities on nightly 53 | 54 | runs-on: windows-latest 55 | steps: 56 | - uses: actions/checkout@v4 57 | - uses: dtolnay/rust-toolchain@nightly 58 | - run: cargo test --workspace --all-features 59 | 60 | 61 | all_checks_pass: 62 | needs: 63 | - rustfmt 64 | - rustdoc 65 | - clippy 66 | - test_stable 67 | - test_nightly 68 | 69 | runs-on: ubuntu-latest 70 | steps: 71 | - name: Decide whether the needed jobs succeeded or failed 72 | uses: re-actors/alls-green@release/v1 73 | with: 74 | jobs: ${{ toJSON(needs) }} 75 | -------------------------------------------------------------------------------- /.github/scripts/build_dist_archive.py: 
-------------------------------------------------------------------------------- 1 | import os, shutil, json, package_version 2 | 3 | print("clearing dist directory") 4 | 5 | shutil.rmtree("target\\dist", ignore_errors=True) 6 | os.makedirs("target\\dist\\lib", exist_ok=True) 7 | shutil.copytree("include", "target\\dist\\include") 8 | 9 | print("building static library") 10 | 11 | os.environ["RUSTFLAGS"] = "--print=native-static-libs " + (os.environ.get("RUSTFLAGS") or "") 12 | 13 | os.system("cargo build --release -F ffi") 14 | shutil.copy("target\\release\\dearxan.lib", "target\\dist\\lib\\dearxan.lib") 15 | 16 | build_output = os.popen("cargo build --release -F ffi --message-format json") 17 | json_outputs = [json.loads(line) for line in build_output.readlines()] 18 | messages = [j["message"]["message"] for j in json_outputs if j["reason"] == "compiler-message"] 19 | libs = [lib for m in messages for lib in m.split(" ")[1:] if m.startswith("native-static-libs:")] 20 | 21 | print("linked static libraries:", libs) 22 | 23 | WIN_SDKS_PATH = "C:\\Program Files (x86)\\Windows Kits\\10\\Lib" 24 | latest_sdk_ver = max(d for d in os.listdir(WIN_SDKS_PATH) if d.startswith("10.")) 25 | print("pulling static libraries from Windows SDK:", latest_sdk_ver) 26 | 27 | libs_folder = os.path.join(WIN_SDKS_PATH, latest_sdk_ver, "um\\x64") 28 | lib_paths = {p for p in map(lambda l: os.path.join(libs_folder, l), libs) if os.path.exists(p)} 29 | 30 | for lib in lib_paths: 31 | print(lib) 32 | shutil.copy(lib, "target\\dist\\lib") 33 | 34 | with open("target\\dist\\lib\\readme.txt", "w") as f: 35 | f.write("'dearxan.lib' depends on the following Windows SDK import libraries:\n") 36 | f.writelines(f"- {os.path.basename(p)}\n" for p in lib_paths) 37 | f.write( 38 | "\nWhile they have been copied to this folder, if building with MSVC it is recommended " \ 39 | "to link to the ones that ship with your Windows SDK." 
)

print("Zipping static libraries")
shutil.make_archive(f"target\\dearxan-{package_version.get_version()}", format="zip", root_dir="target\\dist")
--------------------------------------------------------------------------------
/src/disabler/result.rs:
--------------------------------------------------------------------------------
use std::{any::Any, fmt, panic::UnwindSafe};

/// Result type handed to disabler callbacks: a [`Status`] on success, an [`Error`] otherwise.
pub type DearxanResult = std::result::Result<Status, Error>;

/// The informational part of a [`DearxanResult`].
#[derive(Clone, Debug)]
pub struct Status {
    /// Whether Arxan was applied to the program.
    pub is_arxan_detected: bool,
    /// If true, the callback execution is blocking the program entry point. If false, the callback
    /// is being executed in a separate thread.
    ///
    /// In either case, it is guaranteed that the Arxan entry point stub has finished initializing
    /// once the callback runs.
    pub is_executing_entrypoint: bool,
}

/// Errors that prevented dearxan from finishing.
///
/// Either a `dyn` [`std::error::Error`] error or a payload panic as a string.
#[derive(Debug)]
pub enum Error {
    Error(Box<dyn std::error::Error + Send + Sync>),
    Panic(String),
}

impl Error {
    /// Converts a payload captured by [`std::panic::catch_unwind`] into an [`Error::Panic`].
    ///
    /// `panic!` invoked with a bare string literal yields a `&'static str` payload, while a
    /// formatted `panic!("... {}", x)` yields a `String`; both are recovered here. Any other
    /// payload type (e.g. from `std::panic::panic_any`) falls back to a generic message.
    pub(crate) fn from_panic_payload(payload: Box<dyn Any + Send>) -> Self {
        let payload = match payload.downcast::<&'static str>() {
            Ok(panic_msg) => return Self::Panic(panic_msg.to_string()),
            Err(other) => other,
        };
        match payload.downcast::<String>() {
            Ok(panic_msg) => Self::Panic(*panic_msg),
            Err(_) => Self::Panic("panicked, but failed to retrieve panic message".to_owned()),
        }
    }
}

/// Runs `fun`, mapping a returned `Err` to [`Error::Error`] and an unwinding panic to
/// [`Error::Panic`], so callers receive a single uniform error channel.
pub(crate) fn from_maybe_panic<T, E>(
    fun: impl FnOnce() -> Result<T, E> + UnwindSafe,
) -> Result<T, Error>
where
    E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
    std::panic::catch_unwind(fun)
        .map(|res| res.map_err(|e| Error::Error(e.into())))
        .unwrap_or_else(|e| Err(Error::from_panic_payload(e)))
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Error(err) => err.fmt(f),
            Self::Panic(msg) => f.write_str(msg),
        }
    }
}

impl std::error::Error for Error {}
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[workspace]
resolver = "2"
members = ["test_dll", "test_launcher", "test_utils"]

[workspace.package]
version = "0.5.2"
authors = ["William Tremblay"]
edition = "2024"
license = "MIT OR Apache-2.0"
repository = "https://github.com/tremwil/dearxan"

[workspace.dependencies]
log = "0.4"
simplelog = "0.12.2"
clap = { version = "4.5.32", features = ["derive"] }
pelite = { version = "0.10", default-features = false }

[package]
name = "dearxan"
authors.workspace = true
edition.workspace = true
license.workspace = true
version.workspace = true
repository.workspace = true
description = "Static analyzer and patcher for the Arxan anti-debug/DRM as found in FromSoftware titles"
keywords = ["arxan", "guardit", "reverse-engineering", "modding", "fromsoftware"]
categories = ["security"]

[lib]
crate-type = ["rlib", "staticlib"]

[features]
default = ["rayon", "disabler"]
disabler
= ["dep:closure-ffi", "dep:windows-sys"] 35 | ffi = ["disabler"] 36 | instrument_stubs = [] 37 | internal_api = [] 38 | 39 | [dependencies] 40 | pelite.workspace = true 41 | log.workspace = true 42 | iced-x86 = "1.21" 43 | memchr = "2.7.4" 44 | fxhash = "0.2.1" 45 | indexmap = "2.7.1" 46 | bitvec = "1.0" 47 | bytemuck = "1.22" 48 | bitfield-struct = "0.12.1" 49 | closure-ffi = { version = "5.0.1", optional = true } 50 | rayon = { version = "1.10", optional = true } 51 | atomic-wait = "1.1.0" 52 | 53 | [dependencies.windows-sys] 54 | optional = true 55 | version = "0.61.2" 56 | features = [ 57 | "Win32_Security", 58 | "Win32_System_Kernel", 59 | "Win32_System_LibraryLoader", 60 | "Win32_System_Memory", 61 | "Win32_System_Diagnostics_Debug", 62 | "Win32_System_SystemInformation", 63 | "Win32_System_Threading", 64 | ] 65 | 66 | [dev-dependencies] 67 | simplelog.workspace = true 68 | clap.workspace = true 69 | clap-num = "1.2" 70 | dearxan-test-utils = { version = "0.5.1", path = "test_utils" } 71 | iced-x86 = { version = "1.21", features = ["code_asm"] } 72 | pretty-hex = "0.4.1" 73 | 74 | [profile.release] 75 | opt-level = 3 76 | codegen-units = 1 77 | lto = "fat" 78 | 79 | [package.metadata.docs.rs] 80 | features = ["disabler", "internal_api", "ffi"] 81 | default-target = "x86_64-pc-windows-msvc" 82 | rustdoc-args = ["--cfg", "docsrs"] 83 | -------------------------------------------------------------------------------- /src/disabler/game.rs: -------------------------------------------------------------------------------- 1 | use pelite::pe64::{ 2 | PeObject, PeView, 3 | image::{IMAGE_DOS_HEADER, IMAGE_NT_HEADERS64}, 4 | }; 5 | use windows_sys::Win32::System::LibraryLoader::{GetModuleFileNameW, GetModuleHandleW}; 6 | 7 | use crate::disabler::code_buffer::CodeBuffer; 8 | 9 | pub struct CurrentGame { 10 | pub pe: PeView<'static>, 11 | pub preferred_base: u64, 12 | pub hook_buffer: CodeBuffer, 13 | } 14 | 15 | #[cfg(target_os = "windows")] 16 | pub fn game() -> 
&'static CurrentGame { 17 | use std::{ffi::OsString, io::Read, os::windows::ffi::OsStringExt, sync::LazyLock}; 18 | 19 | static GAME: LazyLock = LazyLock::new(|| unsafe { 20 | let handle = GetModuleHandleW(std::ptr::null()); 21 | if handle.is_null() { 22 | panic!("GetModuleHandleW(NULL) failed"); 23 | } 24 | let pe = PeView::module(handle as *const _); 25 | 26 | let mut game_path = vec![0u16; 0x1000]; 27 | let path_size = GetModuleFileNameW(handle, game_path.as_mut_ptr(), game_path.len() as u32); 28 | if path_size == 0 || path_size == game_path.len() as u32 { 29 | panic!("GetModuleFileNameW failed for game module"); 30 | } 31 | let game_path = OsString::from_wide(&game_path[..path_size as usize]); 32 | log::debug!("game path: {:?}", game_path); 33 | 34 | let mut game_file = match std::fs::File::open(&game_path) { 35 | Err(err) => panic!("failed to read game executable at {game_path:?} due to {err}"), 36 | Ok(f) => f, 37 | }; 38 | let mut first_page = [0; 0x1000]; 39 | let _ = game_file.read_exact(&mut first_page).inspect_err(|err| { 40 | panic!("failed to read first page of game executable {game_path:?} due to {err}") 41 | }); 42 | 43 | let dos_header = first_page.as_ptr().cast::().read_unaligned(); 44 | let nt_headers = first_page 45 | .as_ptr() 46 | .add(dos_header.e_lfanew as usize) 47 | .cast::() 48 | .read_unaligned(); 49 | 50 | let preferred_base = nt_headers.OptionalHeader.ImageBase; 51 | log::debug!("preferred base address: {preferred_base:x}"); 52 | 53 | let hook_buffer = CodeBuffer::alloc_near(pe.image().as_ptr_range(), 0x100_0000, 1 << 31) 54 | .expect("failed to create hook buffer near the game module"); 55 | 56 | CurrentGame { 57 | pe, 58 | preferred_base, 59 | hook_buffer, 60 | } 61 | }); 62 | &GAME 63 | } 64 | 65 | // Hack to get docs-rs (which runs on linux only) to build the disabler module 66 | #[cfg(not(target_os = "windows"))] 67 | pub fn game() -> &'static CurrentGame { 68 | unimplemented!("unsupported platform") 69 | } 70 | 
-------------------------------------------------------------------------------- /src/disabler/slist.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | alloc::{GlobalAlloc, Layout, System}, 3 | cell::UnsafeCell, 4 | marker::PhantomData, 5 | ptr, 6 | }; 7 | 8 | use windows_sys::Win32::System::{ 9 | Kernel::{SLIST_ENTRY, SLIST_HEADER}, 10 | Threading::{InitializeSListHead, InterlockedFlushSList, InterlockedPushEntrySList}, 11 | }; 12 | 13 | /// Lightweight FFI friendly atomic singly linked list. 14 | /// 15 | /// Used together with [`lazy_global`] to guarantee a particular collection layout, 16 | /// whereas using [`Vec`] would not be (say, between Rust versions). 17 | #[repr(C, align(16))] 18 | pub struct SList { 19 | inner: UnsafeCell, 20 | _marker: PhantomData, 21 | } 22 | 23 | #[repr(C, align(16))] 24 | struct SListEntry { 25 | inner: SLIST_ENTRY, 26 | value: T, 27 | } 28 | 29 | impl SList { 30 | /// Creates a new list without allocating. 31 | pub fn new() -> Self { 32 | let new = Self { 33 | inner: Default::default(), 34 | _marker: PhantomData, 35 | }; 36 | 37 | unsafe { 38 | InitializeSListHead(new.inner.get()); 39 | } 40 | 41 | new 42 | } 43 | 44 | /// Prepends a new entry with value `value` to the front of the list atomically. 45 | pub fn push(&self, value: T) { 46 | unsafe { 47 | let layout = Layout::new::>(); 48 | let entry = System.alloc(layout) as *mut SListEntry; 49 | 50 | if entry.is_null() { 51 | std::alloc::handle_alloc_error(layout) 52 | } 53 | 54 | ptr::write(&raw mut (*entry).value, value); 55 | 56 | InterlockedPushEntrySList(self.inner.get(), &raw mut (*entry).inner); 57 | } 58 | } 59 | 60 | /// Atomically takes the contents of the linked list and returns them as a [`Vec`]. 61 | /// 62 | /// The elements are ordered in FIFO order. 
63 | pub fn flush(&self) -> Vec { 64 | let mut entries = vec![]; 65 | 66 | unsafe { 67 | let mut next_entry = InterlockedFlushSList(self.inner.get()); 68 | 69 | while !next_entry.is_null() { 70 | let entry = ptr::read(next_entry as *mut SListEntry); 71 | entries.push(entry.value); 72 | 73 | System.dealloc(next_entry as _, Layout::new::>()); 74 | 75 | next_entry = entry.inner.Next; 76 | } 77 | } 78 | 79 | // From LIFO to FIFO order: 80 | entries.reverse(); 81 | entries 82 | } 83 | } 84 | 85 | impl Drop for SList { 86 | fn drop(&mut self) { 87 | let _flushed = self.flush(); 88 | } 89 | } 90 | 91 | unsafe impl Send for SList {} 92 | 93 | unsafe impl Sync for SList {} 94 | -------------------------------------------------------------------------------- /src/analysis/vm/util.rs: -------------------------------------------------------------------------------- 1 | //! Utility functions used by the forking emulator. 2 | 3 | use iced_x86::{Instruction, Mnemonic, OpAccess, OpKind}; 4 | 5 | /// Check whether the given instruction mnemonic is a CMOV instruction. 6 | pub fn is_cmov(mnemonic: Mnemonic) -> bool { 7 | mnemonic >= Mnemonic::Cmova && mnemonic <= Mnemonic::Cmovs 8 | } 9 | 10 | /// Debugging helper taking a [`RunStep`](super::RunStep) to create a string showing: 11 | /// - the current branch and fork depth; 12 | /// - the instruction and stack pointer values; 13 | /// - the dissassembly of the current instruction; 14 | /// - all register and memory accesses performed by the current instruction. 
15 | #[allow(dead_code)] // Public when internal_api is enabled 16 | pub fn format_step_state(step: &super::RunStep<'_, I, D>) -> String { 17 | use std::fmt::Write; 18 | 19 | use iced_x86::{FastFormatter, InstructionInfoFactory}; 20 | 21 | let mut formatter = FastFormatter::new(); 22 | let mut instr_factory = InstructionInfoFactory::new(); 23 | let mut step_info = String::new(); 24 | 25 | formatter.format(step.instruction, &mut step_info); 26 | step_info = format!( 27 | "B{:02} F{:02} {:x} RSP = {:08x}\t{step_info}\n\t", 28 | step.branch_count, 29 | step.past_forks.len(), 30 | step.instruction.ip(), 31 | step.state.registers.rsp().unwrap_or(0), 32 | ); 33 | 34 | let instr_info = instr_factory.info(step.instruction); 35 | for used_reg in instr_info.used_registers() { 36 | let code = op_access_code(used_reg.access()); 37 | let reg_value = (used_reg.register().is_gpr()) 38 | .then(|| step.state.registers.read_gpr(used_reg.register())) 39 | .flatten(); 40 | step_info += &format!("[{code}] {:?} = {reg_value:x?}\t", used_reg.register()); 41 | } 42 | for used_mem in instr_info.used_memory() { 43 | let code = op_access_code(used_mem.access()); 44 | let Some(va) = used_mem.virtual_address(0, |reg, _, _| step.state.virtual_address_cb(reg)) 45 | else { 46 | continue; 47 | }; 48 | 49 | let mem_value = (used_mem.memory_size().size() <= 8) 50 | .then(|| step.state.memory.read_int(va, used_mem.memory_size().size())) 51 | .flatten(); 52 | 53 | step_info 54 | .write_fmt(format_args!("[{code}] mem[{va:x}] = {mem_value:x?}\t")) 55 | .unwrap(); 56 | } 57 | 58 | step_info 59 | } 60 | 61 | pub(crate) fn op_size(instr: &Instruction, op: u32) -> usize { 62 | match instr.op_kind(op) { 63 | OpKind::Register => instr.op_register(op).size(), 64 | OpKind::Memory => instr.memory_size().size(), 65 | OpKind::Immediate8 => 1, 66 | OpKind::Immediate16 | OpKind::Immediate8to16 => 2, 67 | OpKind::Immediate32 | OpKind::Immediate8to32 => 4, 68 | OpKind::Immediate64 | OpKind::Immediate8to64 | 
OpKind::Immediate32to64 => 8, 69 | _ => unimplemented!(), 70 | } 71 | } 72 | 73 | pub(crate) fn reinterpret_unsigned(val: u64, size_bytes: usize) -> u64 { 74 | let mask = u64::MAX >> (64 - 8 * size_bytes); 75 | val & mask 76 | } 77 | 78 | pub(crate) fn reinterpret_signed(val: u64, size_bytes: usize) -> i64 { 79 | let sign_bit = 1u64 << (8 * size_bytes - 1); 80 | (val | (val & sign_bit).wrapping_neg()) as i64 81 | } 82 | 83 | #[allow(dead_code)] 84 | pub(crate) fn op_access_code(op_access: OpAccess) -> &'static str { 85 | match op_access { 86 | OpAccess::Read => "R", 87 | OpAccess::Write => "W", 88 | OpAccess::CondRead => "R?", 89 | OpAccess::CondWrite => "W?", 90 | OpAccess::ReadWrite => "RW", 91 | OpAccess::ReadCondWrite => "RW?", 92 | _ => "-", 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/analysis/mod.rs: -------------------------------------------------------------------------------- 1 | mod cfg; 2 | pub mod encryption; 3 | pub mod entry_point; 4 | mod stub_info; 5 | mod vm; 6 | 7 | /// Internal analysis API. 8 | /// 9 | /// # Warning 10 | /// 11 | ///
12 | /// 13 | /// Breaking changes to this module are *not* considered as breaking for the purpose of semantic 14 | /// versioning! 15 | /// 16 | ///
17 | #[cfg(feature = "internal_api")] 18 | pub mod internal { 19 | pub mod cfg { 20 | #[doc(inline)] 21 | pub use super::super::cfg::*; 22 | } 23 | #[deprecated( 24 | since = "0.5.0", 25 | note = "dearxan::analysis::encryption has been stabilized" 26 | )] 27 | pub mod encryption { 28 | #[doc(inline)] 29 | pub use super::super::encryption::*; 30 | } 31 | pub mod stub_info { 32 | #[doc(inline)] 33 | pub use super::super::stub_info::*; 34 | } 35 | pub mod vm { 36 | #[doc(inline)] 37 | pub use super::super::vm::*; 38 | } 39 | } 40 | 41 | pub use self::{ 42 | encryption::{EncryptedRegion, EncryptedRegionList}, 43 | stub_info::{ReturnGadget, StubAnalysisError, StubAnalyzer, StubInfo}, 44 | vm::image::{BadRelocsError, ImageView, WithBase}, 45 | }; 46 | 47 | fn find_test_rsp_instructions(image: &I) -> Vec { 48 | use memchr::memmem::find_iter; 49 | #[cfg(feature = "rayon")] 50 | use rayon::iter::{IntoParallelIterator, ParallelBridge, ParallelIterator}; 51 | 52 | const TEST_RSP_15: &[u8] = b"\x48\xf7\xc4\x0f\x00\x00\x00"; 53 | 54 | let sections = image.sections().collect::>(); 55 | 56 | #[cfg(not(feature = "rayon"))] 57 | return sections 58 | .into_iter() 59 | .flat_map(|(va, slice)| find_iter(slice, TEST_RSP_15).map(move |offset| va + offset as u64)) 60 | .collect(); 61 | 62 | #[cfg(feature = "rayon")] 63 | sections 64 | .into_par_iter() 65 | .flat_map(|(va, slice)| { 66 | find_iter(slice, TEST_RSP_15).map(move |offset| va + offset as u64).par_bridge() 67 | }) 68 | .collect() 69 | } 70 | 71 | /// Analyze all Arxan stubs found in the executable image using the provided [`StubAnalyzer`]. 72 | /// 73 | /// If you do not need to configure the analyzer, consider using [`analyze_all_stubs`] instead. 
74 | pub fn analyze_all_stubs_with< 75 | #[cfg(feature = "rayon")] I: ImageView + Sync, 76 | #[cfg(not(feature = "rayon"))] I: ImageView, 77 | >( 78 | image: I, 79 | analyzer: StubAnalyzer, 80 | ) -> Vec> { 81 | #[cfg(feature = "rayon")] 82 | use rayon::iter::{IntoParallelIterator, ParallelIterator}; 83 | 84 | let test_rsp_vas = find_test_rsp_instructions(&image); 85 | 86 | log::debug!("Found {} potential Arxan stubs", test_rsp_vas.len()); 87 | 88 | #[cfg(feature = "rayon")] 89 | let iter = test_rsp_vas.into_par_iter(); 90 | #[cfg(not(feature = "rayon"))] 91 | let iter = test_rsp_vas.into_iter(); 92 | 93 | // Exclude don't report `NotAStub` errors as errors, just filter them out 94 | iter.filter_map(|va| match analyzer.analyze(&image, va) { 95 | Err(StubAnalysisError::NotAStub(_)) => None, 96 | other => Some(other), 97 | }) 98 | .collect() 99 | } 100 | 101 | /// Analyze all Arxan stubs found in the executable image using a default [`StubAnalyzer`]. 102 | /// 103 | /// If you need to configure the analyzer, consider using [`analyze_all_stubs_with`] instead. 104 | pub fn analyze_all_stubs< 105 | #[cfg(feature = "rayon")] I: ImageView + Sync, 106 | #[cfg(not(feature = "rayon"))] I: ImageView, 107 | >( 108 | image: I, 109 | ) -> Vec> { 110 | analyze_all_stubs_with(image, StubAnalyzer::default()) 111 | } 112 | -------------------------------------------------------------------------------- /src/analysis/entry_point.rs: -------------------------------------------------------------------------------- 1 | //! Tools for analyzing the entry point of an executable protected by Arxan. 2 | 3 | use iced_x86::{Code, Decoder, DecoderOptions, Register}; 4 | 5 | use super::ImageView; 6 | use crate::analysis::vm::{MemoryStore, ProgramState, Registers, StepKind}; 7 | 8 | /// Information about the structure of the entry point of an executable compiled with MSVC on which 9 | /// Arxan was possibly applied. 
10 | /// 11 | /// The first few instructions of a MSVC entry point are as follows: 12 | /// 13 | /// ```text 14 | /// sub rsp, 28 15 | /// call __security_init_cookie 16 | /// add rsp, 28 17 | /// call __scrt_common_main_seh 18 | /// ``` 19 | /// 20 | /// If Arxan was applied to the executable, a sequence of chained Arxan stubs will be inserted at 21 | /// the beginning of `__security_init_cookie`. 22 | pub struct MsvcEntryPoint { 23 | /// Virtual address of the `__security_init_cookie` function. 24 | pub security_init_cookie_va: u64, 25 | /// Virtual address of the `__scrt_common_main_seh` function. 26 | pub scrt_common_main_seh_va: u64, 27 | /// If true, the entry point was hooked by Arxan. 28 | /// 29 | /// This is done by inserting Arxan stubs at the start of `__security_init_cookie`. 30 | pub is_arxan_hooked: bool, 31 | } 32 | 33 | impl MsvcEntryPoint { 34 | pub fn try_from_va(image: impl ImageView, entry_point_va: u64) -> Option { 35 | // Parse the msvc crt entry point structure 36 | 37 | const EXPECTED_CODES: &[&[Code]] = &[ 38 | &[Code::Sub_rm64_imm8], 39 | &[Code::Call_rel32_64], 40 | &[Code::Add_rm64_imm8], 41 | &[Code::Jmp_rel32_64, Code::Jmp_rel8_64], 42 | ]; 43 | 44 | let mut decoder = Decoder::with_ip( 45 | 64, 46 | image.read(entry_point_va, 15)?, 47 | entry_point_va, 48 | DecoderOptions::NONE, 49 | ); 50 | 51 | let mut security_init_cookie_va = 0; 52 | let mut scrt_common_main_seh_va = 0; 53 | for (i, &codes) in EXPECTED_CODES.iter().enumerate() { 54 | let instr = decoder.decode(); 55 | if !codes.contains(&instr.code()) { 56 | return None; 57 | } 58 | match i { 59 | 1 => security_init_cookie_va = instr.near_branch_target(), 60 | 3 => scrt_common_main_seh_va = instr.near_branch_target(), 61 | _ => (), 62 | }; 63 | } 64 | 65 | // Arxan inserts stubs into `security_init_cookie_va`. 
66 | // Inspect it and try to find a `TEST RSP, 0xF` instruction after a bit 67 | 68 | let state = ProgramState { 69 | rip: Some(security_init_cookie_va), 70 | registers: Registers::new([(Register::RSP, 0x10000)]), 71 | memory: MemoryStore::new(&image), 72 | user_data: (), 73 | }; 74 | 75 | let mut num_steps = 0; 76 | let is_arxan_hooked = state 77 | .run(|mut step| { 78 | num_steps += 1; 79 | // We should get to the TEST RSP, 0xF instruction quite quickly. 80 | if num_steps > 0x100 { 81 | return StepKind::Stop(false); 82 | } 83 | 84 | if step.instruction.code() == Code::Test_rm64_imm32 85 | && step.instruction.op0_register() == Register::RSP 86 | && step.instruction.immediate32() == 0xF 87 | { 88 | return StepKind::Stop(true); 89 | } 90 | // Don't take any forks 91 | let _maybe_fork = step.single_step(); 92 | StepKind::Custom(None) 93 | }) 94 | .unwrap_or_default(); 95 | 96 | Some(Self { 97 | security_init_cookie_va, 98 | scrt_common_main_seh_va, 99 | is_arxan_hooked, 100 | }) 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /examples/analysis.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use clap::Parser; 4 | use dearxan::analysis::{StubAnalyzer, analyze_all_stubs_with, encryption}; 5 | use dearxan_test_utils::{FsExe, fsbins, init_log}; 6 | 7 | #[derive(Parser)] 8 | #[command(version, about = "Analyze a single Arxan stub", long_about = None)] 9 | struct CliArgs { 10 | #[arg(value_name = "GAME", help = "Game executable to analyze")] 11 | game: String, 12 | 13 | #[arg( 14 | short, 15 | long, 16 | help = "Version of the game executable to analyze (default latest)" 17 | )] 18 | ver: Option, 19 | 20 | #[arg( 21 | short, long, 22 | value_name = "PTR", 23 | help = "Address of stub's TEST RSP, 15 instruction, or none for all stubs", 24 | value_parser=clap_num::maybe_hex:: 25 | )] 26 | address: Option, 27 | 28 | #[arg( 29 | short, 30 | long, 31 
| help = "Enable trace logs, including full program CFG visit" 32 | )] 33 | trace: bool, 34 | } 35 | 36 | fn main() { 37 | init_log(log::LevelFilter::Trace); 38 | 39 | let args = CliArgs::parse(); 40 | let game = if matches!(std::fs::exists(&args.game), Ok(true)) { 41 | let path = PathBuf::from(&args.game); 42 | log::info!("assuming game is the executable at '{}'", args.game); 43 | FsExe { 44 | game: path.file_stem().unwrap().to_string_lossy().to_string(), 45 | ver: "0".to_string(), 46 | path, 47 | } 48 | } 49 | else { 50 | fsbins() 51 | .iter() 52 | .find(|g| g.name.eq_ignore_ascii_case(&args.game)) 53 | .and_then(|g| { 54 | args.ver 55 | .map(|tgt| g.versions.iter().find(|exe| exe.ver == tgt)) 56 | .unwrap_or(g.versions.last()) 57 | }) 58 | .expect("game or version not found") 59 | .clone() 60 | }; 61 | 62 | if !args.trace { 63 | log::set_max_level(log::LevelFilter::Debug); 64 | } 65 | 66 | let mapped_game = game.load_64().expect("failed to load the game's executable image"); 67 | let pe = mapped_game.pe_view(); 68 | 69 | let analyzer = StubAnalyzer::new().trace_execution(args.trace); 70 | 71 | let stub_infos = match args.address { 72 | Some(addr) => { 73 | log::info!("analyzing stub {:x} of {} v{}", addr, game.game, game.ver); 74 | vec![analyzer.analyze(&pe, addr)] 75 | } 76 | None => { 77 | log::info!("analyzing all stubs of {} v{}", game.game, game.ver); 78 | analyze_all_stubs_with(pe, analyzer) 79 | } 80 | }; 81 | 82 | log::info!("found {} Arxan stubs", stub_infos.len()); 83 | 84 | for stub_info_result in &stub_infos { 85 | let stub_info = match stub_info_result { 86 | Ok(si) => si, 87 | Err(e) => { 88 | log::warn!("{e}"); 89 | continue; 90 | } 91 | }; 92 | 93 | println!("\nSTUB {:x}:", stub_info.test_rsp_va); 94 | println!("context_pop_va: {:x}", stub_info.context_pop_va); 95 | println!("return_gadget : {:x?}", stub_info.return_gadget); 96 | 97 | if let Some(region_list) = &stub_info.encrypted_regions { 98 | println!( 99 | "writes {} contiguous {:?} 
encrypted regions", 100 | region_list.len(), 101 | region_list.kind 102 | ); 103 | } 104 | } 105 | 106 | let final_patches = encryption::apply_relocs_and_resolve_conflicts( 107 | stub_infos 108 | .iter() 109 | .filter_map(|si| si.as_ref().ok()) 110 | .filter_map(|si| si.encrypted_regions.as_ref()), 111 | pe, 112 | None, 113 | ) 114 | .unwrap(); 115 | 116 | for rlist in final_patches { 117 | println!( 118 | "\n{} contiguous {:?} encrypted regions", 119 | rlist.len(), 120 | rlist.kind 121 | ); 122 | 123 | for r in &rlist.regions { 124 | println!( 125 | "rva = {:x} {}", 126 | r.rva, 127 | pretty_hex::pretty_hex(&r.decrypted_slice(&rlist).unwrap()) 128 | ); 129 | } 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
7 | 8 | ## [v0.5.2] - 2025-10-28 9 | 10 | ## Changed 11 | - Bumped closure-ffi to 5.0 12 | 13 | ## [v0.5.1] - 2025-09-20 14 | 15 | ### Fixed 16 | - Compile error when not using the `rayon` feature 17 | 18 | ## [v0.5.0] - 2025-09-19 19 | 20 | ### Breaking Changes 21 | - Added `kind` field to `dearxan::analysis::encryption::EncryptedRegionList` 22 | - Methods of `dearxan::analysis::encryption::EncryptedRegion` 23 | - `Sync` bound now present on `image` parameter of `dearxan::patch::ArxanPatch::build_from_stubs` if the `rayon` feature is enabled 24 | 25 | ### Added 26 | - Region extraction analyses for Arxan's rotate-mulitply-xor and constant subtraction pseudo-encryption algorithms 27 | 28 | ### Changed 29 | - Greatly improved robustness of encrypted region conflict resolution and elimination 30 | 31 | ### Fixed 32 | - Sound partially broken in DS3 due to missing Arxan encrypted region for sound binder encryption keys 33 | 34 | ### Stabilized 35 | - `dearxan::analysis::encryption` APIs, deprecating its usage through the `internal_api` feature 36 | 37 | ## [v0.4.1] - 2025-09-01 38 | 39 | ### Fixed 40 | - `is_created_suspended` falsely returning `false` due to thread hijacking performed by the Steam Overlay 41 | - Thread handle leakage in `iter_threads` 42 | 43 | ## [v0.4.0] - 2025-08-29 44 | 45 | ### Breaking Changes 46 | - `dearxan::analysis::is_arxan_hooked_entry_point` has been removed, and replaced with `dearxan::analysis::entry_point::MsvcEntryPoint::try_from_va`. 47 | 48 | ### Changed 49 | - While not recommended, `neuter_arxan` and `schedule_after_arxan` APIs now support being called after the game's entry point has run. 50 | - The static library releases now contain the Windows import libraries that `dearxan.lib` depends on to make linking easier. 
51 | 52 | ## [v0.3.1] - 2025-08-19 53 | 54 | ### Fixed 55 | - `ArxanPatch::build_from_stubs` emitting patches in an incorrect order by @Dasaav-dsv 56 | 57 | ## [v0.3.0] - 2025-08-17 58 | 59 | ### Breaking Changes 60 | - the FFI API and most `dearxan::disabler` functions have had major API changes to support process-wide synchronization. Refer to the docs for updated usage examples. 61 | 62 | ### Changed 63 | - Process-wide synchronization of `dearxan::disabler::neuter_arxan` by @Dasaav-dsv. This ensures that your callback will be invoked after Arxan has been patched, no matter if other DLL mods using the library have already called it. 64 | 65 | ### Fixed 66 | - Relocs being applied over disabler patches even when unnecessary 67 | 68 | ## [v0.2.2] - 2025-08-12 69 | 70 | ### Fixed 71 | - Missing overflow checks in memory emulation code causing panics in debug builds by @Dasaav-dsv 72 | 73 | ## [v0.2.1] - 2025-08-08 74 | 75 | ### Changed 76 | - Improved parallelization of stub analysis and patch generation by @Dasaav-dsv 77 | 78 | ### Fixed 79 | - Missing `--check` argument to `cargo fmt` in CI 80 | - Incorrect signature `dearxan_neuter_arxan` in the provided include file 81 | - Fix UB on the rust side of `dearxan_neuter_arxan` when the user passes a null callback by @Dasaav-dsv 82 | 83 | ## [v0.2.0] - 2025-08-07 84 | 85 | ### Added 86 | - `dearxan::analysis::is_arxan_hooked_entry_point` to check if the entry point of the executable image is hooked by Arxan 87 | 88 | ### Fixed 89 | - Missing text in readme 90 | - `dearxan::disabler::schedule_after_arxan` not properly checking if Arxan was applied to the entry point (thanks @Dasaav-dsv for raising this issue) 91 | 92 | ### Removed 93 | - `dearxan::disabler::is_arxan_entry`. Use `dearxan::analysis::is_arxan_hooked_entry_point` instead. 94 | 95 | ### Changed 96 | - Hardcoded game executable paths for game aliases in `test_launcher` to prevent it detecting a different executable if many are present. 
97 | 98 | ## [v0.1.2] - 2025-08-07 99 | 100 | ### Fixed 101 | - Added missing credits to @Dasaav-dsv for helping reverse engineer how Arxan decrypts game functions at runtime 102 | 103 | ## [v0.1.1] - 2025-08-07 104 | 105 | ### Fixed 106 | - CI release workflow 107 | - Missing `internal_api` feature in readme 108 | 109 | ## [v0.1.0] - 2025-08-06 110 | 111 | Initial release. 112 | -------------------------------------------------------------------------------- /src/analysis/vm/registers.rs: -------------------------------------------------------------------------------- 1 | use iced_x86::Register; 2 | 3 | use super::util; 4 | 5 | /// Struct holding the values of x64 general-purpose registers (RAX-R15). 6 | #[derive(Default, Clone)] 7 | pub struct Registers([Option; Self::GPR_COUNT]); 8 | 9 | impl Registers { 10 | pub const GPR_COUNT: usize = 16; 11 | 12 | /// Create a [`Registers`] instance with initial values given by an iterator. 13 | /// 14 | /// Registers without an initial value are set to [`None`]. 15 | pub fn new(values: impl IntoIterator) -> Self { 16 | let mut s = Self::default(); 17 | for (reg, val) in values { 18 | s.write_gpr(reg, Some(val)); 19 | } 20 | s 21 | } 22 | 23 | /// Get the value of the 64-bit general-purpose register `r` if it is known. 24 | /// 25 | /// # Panics 26 | /// If `r` is not a 64-bit general-purpose register (RAX-R15). 27 | #[inline] 28 | pub fn gpr64(&self, r: Register) -> Option { 29 | assert!(r.is_gpr64()); // This assertion will make the bounds check below go away 30 | self.0[r as usize - Register::RAX as usize] 31 | } 32 | 33 | /// Get a mutable reference to the 64-bit general-purpose register `r`. 34 | /// 35 | /// # Panics 36 | /// If `r` is not a 64-bit general-purpose register (RAX-R15). 
37 | #[inline] 38 | pub fn gpr64_mut(&mut self, r: Register) -> &mut Option { 39 | assert!(r.is_gpr64()); // This assertion will make the bounds check below go away 40 | &mut self.0[r as usize - Register::RAX as usize] 41 | } 42 | 43 | /// Read zero-extended general-purpose register. Value is assumed to be unsigned. 44 | /// 45 | /// # Panics 46 | /// If `r` is a not a general-purpose register. 47 | pub fn read_gpr(&self, r: Register) -> Option { 48 | Some(util::reinterpret_unsigned( 49 | self.gpr64(r.full_register())?, 50 | r.size(), 51 | )) 52 | } 53 | 54 | /// Write a zero-exetended general-purpose register. Value is assumed to be unsigned. 55 | /// 56 | /// # Panics 57 | /// If `r` is a not a general-purpose register. 58 | pub fn write_gpr(&mut self, r: Register, val: Option) { 59 | match (self.gpr64_mut(r.full_register()), val, r.size()) { 60 | // 64-bit write 61 | (old, Some(new), 8) => *old = Some(new), 62 | // 32-bit write (clears upper bits) 63 | (old, Some(new), 4) => *old = Some(new & (u32::MAX as u64)), 64 | // 16 or 8-bit write (doesn't affect upper bits) 65 | (Some(old), Some(new), 2) => *old = (*old & !0xFFFF) | (new & 0xFFFF), 66 | (Some(old), Some(new), 1) => *old = (*old & !0xFF) | (new & 0xFF), 67 | // Writing None (clears known register value) 68 | (old, None, _) => *old = None, 69 | // Writing to None with non-clobbering size (full register still unknown) 70 | (None, _, _) => (), 71 | _ => unreachable!(), 72 | } 73 | } 74 | } 75 | 76 | macro_rules! 
register_impl { 77 | ($reg:ident, $name:ident, $name_mut:ident) => { 78 | #[inline] 79 | pub fn $name(&self) -> Option { 80 | self.gpr64(Register::$reg) 81 | } 82 | #[inline] 83 | pub fn $name_mut(&mut self) -> &mut Option { 84 | self.gpr64_mut(Register::$reg) 85 | } 86 | }; 87 | } 88 | 89 | #[allow(dead_code)] 90 | impl Registers { 91 | register_impl!(RAX, rax, rax_mut); 92 | register_impl!(RCX, rcx, rcx_mut); 93 | register_impl!(RDX, rdx, rdx_mut); 94 | register_impl!(RBX, rbx, rbx_mut); 95 | register_impl!(RSP, rsp, rsp_mut); 96 | register_impl!(RBP, rbp, rbp_mut); 97 | register_impl!(RSI, rsi, rsi_mut); 98 | register_impl!(RDI, rdi, rdi_mut); 99 | register_impl!(R8, r8, r8_mut); 100 | register_impl!(R9, r9, r9_mut); 101 | register_impl!(R10, r10, r10_mut); 102 | register_impl!(R11, r11, r11_mut); 103 | register_impl!(R12, r12, r12_mut); 104 | register_impl!(R13, r13, r13_mut); 105 | register_impl!(R14, r14, r14_mut); 106 | register_impl!(R15, r15, r15_mut); 107 | } 108 | 109 | impl std::fmt::Debug for Registers { 110 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 111 | let mut ds = f.debug_struct("Registers"); 112 | for (i, val) in self.0.iter().enumerate().filter(|(_, r)| r.is_some()) { 113 | ds.field(&format!("{:?}", Register::RAX + i as u32), &val.unwrap()); 114 | } 115 | ds.finish()?; 116 | Ok(()) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/analysis/vm/image.rs: -------------------------------------------------------------------------------- 1 | use pelite::pe64::{Pe, PeView}; 2 | 3 | /// Opaque error type returned by [`ImageView::relocs64`] when image relocations cannot be read. 
4 | #[derive(Debug, Clone, Copy)] 5 | pub struct BadRelocsError; 6 | 7 | impl std::fmt::Display for BadRelocsError { 8 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 9 | f.write_str("failed to read image relocations") 10 | } 11 | } 12 | 13 | impl std::error::Error for BadRelocsError {} 14 | 15 | /// Abstraction over an immutable view of a mapped executable image. 16 | pub trait ImageView: Clone { 17 | /// The actual base address of the image. 18 | fn base_va(&self) -> u64; 19 | 20 | /// Iterate over the virtual address and bytes of each section of the image. 21 | fn sections(&self) -> impl Iterator; 22 | 23 | /// Iterate over the RVAs of all 64-bit relative relocations of the image. 24 | /// 25 | /// May fail with an opaque error if the relocations section of the image is corrupted. 26 | fn relocs64(&self) -> Result, BadRelocsError>; 27 | 28 | /// Attempt to read at least `min_size` bytes at the virtual address `va`. 29 | /// 30 | /// Returns the longest possible contiguous readable slice, and [`None`] if the address is 31 | /// out-of-bounds or less than `min_size` bytes can be read. 
32 | fn read(&self, va: u64, min_size: usize) -> Option<&[u8]>; 33 | } 34 | 35 | impl ImageView for &I { 36 | fn base_va(&self) -> u64 { 37 | (*self).base_va() 38 | } 39 | 40 | fn sections(&self) -> impl Iterator { 41 | (*self).sections() 42 | } 43 | 44 | fn relocs64(&self) -> Result, BadRelocsError> { 45 | (*self).relocs64() 46 | } 47 | 48 | fn read(&self, va: u64, min_size: usize) -> Option<&[u8]> { 49 | (*self).read(va, min_size) 50 | } 51 | } 52 | 53 | impl ImageView for PeView<'_> { 54 | fn base_va(&self) -> u64 { 55 | Pe::optional_header(*self).ImageBase 56 | } 57 | 58 | fn sections(&self) -> impl Iterator { 59 | use pelite::pe64::Pe; 60 | self.section_headers().iter().filter_map(|s| { 61 | self.get_section_bytes(s) 62 | .ok() 63 | .map(|slice| (self.base_va() + s.VirtualAddress as u64, slice)) 64 | }) 65 | } 66 | 67 | #[allow(clippy::filter_map_bool_then)] 68 | fn relocs64(&self) -> Result, BadRelocsError> { 69 | let maybe_relocs = match self.base_relocs() { 70 | Ok(relocs) => Some(relocs), 71 | Err(pelite::Error::Null) => None, 72 | Err(_) => return Err(BadRelocsError), 73 | }; 74 | Ok(maybe_relocs 75 | .into_iter() 76 | .flat_map(|relocs| relocs.iter_blocks()) 77 | .flat_map(|block| { 78 | block.words().iter().filter_map(move |w| { 79 | // IMAGE_REL_BASED_DIR64 = 10 80 | (block.type_of(w) == 10).then(|| block.rva_of(w)) 81 | }) 82 | })) 83 | } 84 | 85 | fn read(&self, va: u64, min_size: usize) -> Option<&[u8]> { 86 | pelite::pe64::Pe::read(self, va, min_size, 1).ok() 87 | } 88 | } 89 | 90 | /// Wrapper around an [`AsRef<[u8]>`](AsRef) type which implements [`ImageView`] over a single 91 | /// section. 92 | /// 93 | /// The base address of this "image" is arbitrary and can be set during construction with 94 | /// [`WithBase::new`]. 
95 | #[derive(Debug, Clone, Copy)] 96 | pub struct WithBase + Clone> { 97 | bytes: T, 98 | base: u64, 99 | } 100 | 101 | impl + Clone> WithBase { 102 | /// Construct a [`WithBase`] from a u8 slice-like type and a base virtual address. 103 | #[allow(dead_code)] // To be used in tests 104 | pub fn new(bytes: T, base: u64) -> Self { 105 | Self { bytes, base } 106 | } 107 | } 108 | 109 | impl + Clone> ImageView for WithBase { 110 | fn base_va(&self) -> u64 { 111 | self.base 112 | } 113 | 114 | fn sections(&self) -> impl Iterator { 115 | std::iter::once((self.base, self.bytes.as_ref())) 116 | } 117 | 118 | fn relocs64(&self) -> Result, BadRelocsError> { 119 | Ok(std::iter::empty()) 120 | } 121 | 122 | fn read(&self, va: u64, min_size: usize) -> Option<&[u8]> { 123 | va.checked_sub(self.base) 124 | .and_then(|offset| offset.try_into().ok()) 125 | .and_then(|offset| self.bytes.as_ref().get(offset..)) 126 | .and_then(|bytes| (bytes.len() >= min_size).then_some(bytes)) 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /src/disabler/code_buffer.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | ffi::c_void, 3 | ops::Range, 4 | ptr::slice_from_raw_parts_mut, 5 | sync::atomic::{AtomicPtr, Ordering}, 6 | }; 7 | 8 | use closure_ffi::{JitAlloc, JitAllocError}; 9 | use windows_sys::Win32::System::{ 10 | Memory::{ 11 | MEM_COMMIT, MEM_FREE, MEM_RELEASE, MEM_RESERVE, MEMORY_BASIC_INFORMATION, 12 | PAGE_EXECUTE_READWRITE, VirtualAlloc, VirtualFree, VirtualQuery, 13 | }, 14 | SystemInformation::{GetSystemInfo, SYSTEM_INFO}, 15 | }; 16 | 17 | #[derive(Debug)] 18 | #[repr(align(64))] 19 | pub struct CodeBuffer { 20 | cursor: AtomicPtr, 21 | alloc_base: *mut c_void, 22 | end: *mut u8, 23 | } 24 | 25 | unsafe impl Send for CodeBuffer {} 26 | unsafe impl Sync for CodeBuffer {} 27 | 28 | impl CodeBuffer { 29 | pub fn alloc_near(region: Range<*const u8>, size: usize, max_sep: usize) -> 
Option { 30 | let region = region.start.addr()..region.end.addr(); 31 | 32 | // Get allocation granularity (typically 64KB) 33 | let mut si = SYSTEM_INFO::default(); 34 | unsafe { GetSystemInfo(&mut si) }; 35 | let gran = si.dwAllocationGranularity as usize; 36 | 37 | // compute lowest possible allocation address (note that the first block cannot be 38 | // allocated) 39 | let lowest_base = region.end.saturating_sub(max_sep).max(gran).next_multiple_of(gran); 40 | 41 | // Search free region closest to target module to allocate our hook memory at, 42 | // starting at lowest possible address that admits a REL32 jmp 43 | let mut minfo = MEMORY_BASIC_INFORMATION::default(); 44 | let mut query_base = lowest_base; 45 | while unsafe { 46 | VirtualQuery( 47 | query_base as *const _, 48 | &mut minfo, 49 | size_of::(), 50 | ) != 0 51 | } { 52 | // Compute portion of block that is aligned to allocation boundaries 53 | let block_start = (minfo.BaseAddress as usize).next_multiple_of(gran); 54 | let block_end = (minfo.BaseAddress as usize + minfo.RegionSize) & !(gran - 1); 55 | let block_size = block_end - block_start; 56 | 57 | // block end would be too far from region start 58 | if (block_size + size).saturating_sub(region.start) > max_sep { 59 | break; 60 | } 61 | // block is not free or not enough space 62 | else if minfo.State != MEM_FREE || size > block_size { 63 | query_base = minfo.BaseAddress as usize + minfo.RegionSize; 64 | continue; 65 | } 66 | 67 | // Otherwise, block satisfies all requirements 68 | let alloc_base = unsafe { 69 | VirtualAlloc( 70 | block_start as *const _, 71 | size, 72 | MEM_RESERVE | MEM_COMMIT, 73 | PAGE_EXECUTE_READWRITE, 74 | ) 75 | }; 76 | assert!(!alloc_base.is_null(), "VirtualAlloc failed"); 77 | return Some(Self { 78 | alloc_base, 79 | cursor: AtomicPtr::new(alloc_base as *mut _), 80 | end: unsafe { (alloc_base as *mut u8).add(size) }, 81 | }); 82 | } 83 | None 84 | } 85 | 86 | pub fn reserve(&self, size: usize) -> Option<*mut [u8]> { 87 | 
self.cursor 88 | .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |c| { 89 | let new_cursor = c.with_addr(c.addr().checked_add(size)?); 90 | (c < self.end).then_some(new_cursor) 91 | }) 92 | .ok() 93 | // SAFETY: 94 | // - Slice is atomically reserved 95 | // - VirtualAlloc zero-initializes the bytes 96 | .map(|c| slice_from_raw_parts_mut(c, size)) 97 | } 98 | 99 | pub fn write(&self, bytes: &[u8]) -> Option<*mut [u8]> { 100 | self.reserve(bytes.len()).inspect(|&buf| unsafe { 101 | (buf as *mut u8).copy_from_nonoverlapping(bytes.as_ptr(), bytes.len()); 102 | }) 103 | } 104 | } 105 | 106 | impl Drop for CodeBuffer { 107 | fn drop(&mut self) { 108 | if unsafe { VirtualFree(self.alloc_base, 0, MEM_RELEASE) } == 0 { 109 | log::error!("VirtualFree failed") 110 | } 111 | } 112 | } 113 | 114 | impl JitAlloc for CodeBuffer { 115 | fn alloc(&self, size: usize) -> Result<(*const u8, *mut u8), JitAllocError> { 116 | self.reserve(size).map(|p| (p as *const u8, p as *mut u8)).ok_or(JitAllocError) 117 | } 118 | 119 | // CodeBuffer is a simple arena without the ability to free individual blocks 120 | #[allow(unused_variables)] 121 | unsafe fn release(&self, rx_ptr: *const u8) -> Result<(), JitAllocError> { 122 | Ok(()) 123 | } 124 | 125 | // Not needed on modern AMD64 processors 126 | #[allow(unused_variables)] 127 | unsafe fn flush_instruction_cache(&self, rx_ptr: *const u8, size: usize) {} 128 | 129 | // Not needed on Windows 130 | #[allow(unused_variables)] 131 | unsafe fn protect_jit_memory( 132 | &self, 133 | ptr: *const u8, 134 | size: usize, 135 | access: closure_ffi::jit_alloc::ProtectJitAccess, 136 | ) { 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /test_utils/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::{ffi::OsStr, path::PathBuf, sync::LazyLock}; 2 | 3 | use pelite::pe64::{Pe, PeFile, PeObject, PeView}; 4 | 5 | // Re-implementation of 
`PeFile::to_view` as the original is broken 6 | pub fn pe_file_to_view(pe: pelite::pe64::PeFile) -> Vec { 7 | let (sizeof_headers, sizeof_image) = { 8 | let optional_header = pe.optional_header(); 9 | (optional_header.SizeOfHeaders, optional_header.SizeOfImage) 10 | }; 11 | 12 | // Zero fill the underlying image 13 | let mut vec = vec![0u8; sizeof_image as usize]; 14 | 15 | // Start by copying the headers 16 | let image = pe.image(); 17 | unsafe { 18 | // Validated by constructor 19 | let dest_headers = vec.get_unchecked_mut(..sizeof_headers as usize); 20 | let src_headers = image.get_unchecked(..sizeof_headers as usize); 21 | dest_headers.copy_from_slice(src_headers); 22 | } 23 | 24 | // Copy the section file data 25 | for section in pe.section_headers() { 26 | let dest = vec.get_mut( 27 | section.VirtualAddress as usize 28 | ..u32::wrapping_add(section.VirtualAddress, section.VirtualSize) as usize, 29 | ); 30 | let src = image.get( 31 | section.PointerToRawData as usize 32 | ..u32::wrapping_add(section.PointerToRawData, section.SizeOfRawData) as usize, 33 | ); 34 | // Skip invalid sections... 
35 | if let (Some(dest), Some(src)) = (dest, src) { 36 | let write_sz = src.len().min(dest.len()); 37 | dest[..write_sz].copy_from_slice(&src[..write_sz]); 38 | } 39 | } 40 | 41 | vec 42 | } 43 | 44 | #[derive(Debug, Clone)] 45 | pub struct FsExe { 46 | pub game: String, 47 | pub ver: String, 48 | pub path: PathBuf, 49 | } 50 | 51 | impl FsExe { 52 | pub fn load_64(&self) -> pelite::Result { 53 | let disk_view = std::fs::read(&self.path).unwrap(); 54 | let pe_file = PeFile::from_bytes(&disk_view)?; 55 | let mem_view = pe_file_to_view(pe_file); 56 | 57 | Ok(MappedFsExe { 58 | exe: self.clone(), 59 | disk_view, 60 | mem_view, 61 | }) 62 | } 63 | } 64 | 65 | #[derive(Debug, Clone)] 66 | pub struct MappedFsExe { 67 | exe: FsExe, 68 | disk_view: Vec, 69 | mem_view: Vec, 70 | } 71 | 72 | impl MappedFsExe { 73 | pub fn exe_info(&self) -> &FsExe { 74 | &self.exe 75 | } 76 | 77 | pub fn game(&self) -> &str { 78 | &self.exe.game 79 | } 80 | 81 | pub fn ver(&self) -> &str { 82 | &self.exe.ver 83 | } 84 | 85 | pub fn pe_file(&self) -> PeFile<'_> { 86 | PeFile::from_bytes(&self.disk_view).unwrap() 87 | } 88 | 89 | pub fn pe_view(&self) -> PeView<'_> { 90 | PeView::from_bytes(&self.mem_view).unwrap() 91 | } 92 | } 93 | 94 | pub struct FsGame { 95 | pub name: String, 96 | pub versions: Vec, 97 | } 98 | 99 | pub fn fsbins() -> &'static [FsGame] { 100 | static FS_BINS: LazyLock> = LazyLock::new(|| { 101 | let fsbins_root = std::env::var("FSBINS_PATH").expect( 102 | "This test requires fsbins to be installed and the FSBINS_PATH environment 103 | variable to point to its root.", 104 | ); 105 | 106 | std::fs::read_dir(fsbins_root) 107 | .unwrap() 108 | .filter_map(|game_folder| { 109 | let game_folder = game_folder.ok()?; 110 | let game_name = game_folder.file_name().to_string_lossy().to_string(); 111 | if !game_folder.file_type().ok()?.is_dir() { 112 | return None; 113 | } 114 | 115 | let mut versions: Vec<_> = std::fs::read_dir(game_folder.path()) 116 | .ok()? 
117 | .filter_map(|ver_folder| { 118 | let ver_folder = ver_folder.ok()?; 119 | let ver_name = ver_folder.file_name().to_string_lossy().to_string(); 120 | if !ver_folder.file_type().ok()?.is_dir() { 121 | return None; 122 | } 123 | 124 | let exe = std::fs::read_dir(ver_folder.path()) 125 | .ok()? 126 | .filter_map(|f| f.ok()) 127 | .find(|f| f.path().extension() == Some(OsStr::new("exe")))?; 128 | 129 | Some(FsExe { 130 | game: game_name.clone(), 131 | ver: ver_name.clone(), 132 | path: exe.path(), 133 | }) 134 | }) 135 | .collect(); 136 | 137 | if versions.is_empty() { 138 | return None; 139 | } 140 | 141 | // permissive semver sorting: if dot-delimited versions are numbers, compare them as 142 | // such otherwise compare them as strings 143 | versions.sort_by(|a, b| { 144 | let mut a_split = a.ver.split('.'); 145 | let mut b_split = b.ver.split('.'); 146 | for (a_seg, b_seg) in (&mut a_split).zip(&mut b_split) { 147 | let ordering = match (a_seg.parse::(), b_seg.parse::()) { 148 | (Ok(a_num), Ok(b_num)) => a_num.cmp(&b_num), 149 | _ => a_seg.cmp(b_seg), 150 | }; 151 | if ordering != std::cmp::Ordering::Equal { 152 | return ordering; 153 | } 154 | } 155 | a_split.cmp(b_split) 156 | }); 157 | Some(FsGame { 158 | name: game_name.to_string(), 159 | versions, 160 | }) 161 | }) 162 | .collect() 163 | }); 164 | &FS_BINS 165 | } 166 | 167 | pub fn latest_fsbins() -> Vec<&'static FsExe> { 168 | fsbins().iter().map(|g| g.versions.last().unwrap()).collect() 169 | } 170 | 171 | pub fn init_log(level: log::LevelFilter) { 172 | simplelog::SimpleLogger::init(level, simplelog::Config::default()).unwrap(); 173 | } 174 | -------------------------------------------------------------------------------- /src/patch.rs: -------------------------------------------------------------------------------- 1 | //! Structs detailing code patches to indivitual Arxan stubs. 
2 | 3 | use iced_x86::IcedError; 4 | 5 | use crate::analysis::{BadRelocsError, ImageView, StubInfo, encryption}; 6 | 7 | /// An individual patch to the executable image. 8 | #[derive(Debug, Clone)] 9 | pub enum ArxanPatch { 10 | /// Install a 32-bit jmp hook at virtual address `target` pointing to executable memory where 11 | /// the position-independent code `pic` has been written. 12 | JmpHook { target: u64, pic: Vec }, 13 | /// Write the contents of `bytes` to the virtual address `va`. 14 | Write { va: u64, bytes: Vec }, 15 | } 16 | 17 | /// Opaque inner type for [`PatchGenError::AssemblerError`]. 18 | #[derive(Debug, Clone)] 19 | pub struct AssemblerErrorInner(IcedError); 20 | 21 | /// Different ways that generation of an [`ArxanPatch`] can fail. 22 | #[derive(Debug, Clone)] 23 | #[non_exhaustive] 24 | pub enum PatchGenError { 25 | /// An error occurred when assembling machine code for an [`ArxanPatch::JmpHook`]. 26 | AssemblerError(AssemblerErrorInner), 27 | /// The patch would be writing to memory outside of the executable image. 28 | OutOfBounds { rva: usize, size: usize }, 29 | /// The executable image's .reloc section is required but could not be read.
30 | RelocsCorrupted, 31 | } 32 | 33 | impl From for PatchGenError { 34 | fn from(value: IcedError) -> Self { 35 | Self::AssemblerError(AssemblerErrorInner(value)) 36 | } 37 | } 38 | 39 | impl From for PatchGenError { 40 | fn from(_value: BadRelocsError) -> Self { 41 | Self::RelocsCorrupted 42 | } 43 | } 44 | 45 | impl std::fmt::Display for PatchGenError { 46 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 47 | match self { 48 | Self::AssemblerError(AssemblerErrorInner(iced)) => { 49 | write!(f, "assembly error: {iced}") 50 | } 51 | Self::OutOfBounds { rva, size } => { 52 | write!( 53 | f, 54 | "write of {size} bytes to RVA 0x{rva:x} is not within the executable image" 55 | ) 56 | } 57 | Self::RelocsCorrupted => { 58 | write!(f, ".relocs section is corrupted and could not be read") 59 | } 60 | } 61 | } 62 | } 63 | 64 | impl std::error::Error for PatchGenError {} 65 | 66 | impl ArxanPatch { 67 | /// Generate the required patches to disable Arxan given `analyzed_stubs` extracted from 68 | /// the executable image `image`, e.g. through 69 | /// [`analyze_all_stubs`](super::analysis::analyze_all_stubs) 70 | /// 71 | /// If `image` was mapped at a different address to its preferred base address, 72 | /// relocations may need to be applied to some of the patches. In that case `preferred_base` 73 | /// must be provided. 
74 | pub fn build_from_stubs< 75 | 'a, 76 | #[cfg(feature = "rayon")] I: ImageView + Sync, 77 | #[cfg(not(feature = "rayon"))] I: ImageView, 78 | >( 79 | image: I, 80 | preferred_base: Option, 81 | analyzed_stubs: impl IntoIterator, 82 | ) -> Result, PatchGenError> { 83 | let analyzed_stubs = analyzed_stubs.into_iter(); 84 | 85 | let mut hooks = Vec::with_capacity(analyzed_stubs.size_hint().0); 86 | let mut error = None; 87 | let final_rlists = encryption::apply_relocs_and_resolve_conflicts( 88 | analyzed_stubs.filter_map(|si| { 89 | hooks.push(ArxanPatch::JmpHook { 90 | target: si.test_rsp_va, 91 | pic: match assemble_stub_patch(si) { 92 | Ok(pic) => pic, 93 | Err(e) => { 94 | error = Some(e); 95 | return None; 96 | } 97 | }, 98 | }); 99 | si.encrypted_regions.as_ref() 100 | }), 101 | &image, 102 | preferred_base, 103 | )?; 104 | if let Some(e) = error { 105 | return Err(e.into()); 106 | } 107 | 108 | let base_va = image.base_va(); 109 | 110 | let patches = final_rlists 111 | .into_iter() 112 | .flat_map(|rlist| { 113 | rlist.regions.into_iter().map(move |r| ArxanPatch::Write { 114 | va: base_va + r.rva as u64, 115 | bytes: rlist.decrypted_stream[r.stream_offset..r.stream_offset + r.size] 116 | .to_owned(), 117 | }) 118 | }) 119 | .chain(hooks) 120 | .collect(); 121 | 122 | Ok(patches) 123 | } 124 | } 125 | 126 | fn assemble_stub_patch(stub: &StubInfo) -> Result, IcedError> { 127 | use iced_x86::{Code, Instruction, MemoryOperand, Register::*}; 128 | 129 | let mut instructions = Vec::with_capacity(8); 130 | 131 | // Write a pointer to our own return gadget to the low slot 132 | if let Some(rg) = &stub.return_gadget { 133 | let rg_low = MemoryOperand::with_base_displ(RSP, rg.stack_offset as i64 - 16); 134 | // Will point to the our own return gadget 135 | let ret_stub_ref = MemoryOperand::with_base_displ(RIP, 1); 136 | instructions.extend([ 137 | Instruction::with2(Code::Lea_r64_m, RAX, ret_stub_ref)?, 138 | Instruction::with2(Code::Mov_rm64_r64, rg_low, RAX)?, 139 
| ]); 140 | } 141 | 142 | // Adjust stack and jump to context restore 143 | instructions.extend([ 144 | Instruction::with2(Code::Sub_rm64_imm8, RSP, 8)?, 145 | Instruction::with2(Code::Mov_r64_imm64, RAX, stub.context_pop_va)?, 146 | Instruction::with1(Code::Jmp_rm64, RAX)?, 147 | ]); 148 | 149 | // Write our own return gadget -- adds 16 to rsp and jumps to the top one directly 150 | if let Some(rg) = &stub.return_gadget { 151 | let mut add_rsp = Instruction::with2(Code::Add_rm64_imm8, RSP, 16)?; 152 | add_rsp.set_ip(1); 153 | 154 | let mut dq_rg_address = Instruction::with_declare_qword_1(rg.address); 155 | dq_rg_address.set_ip(2); 156 | 157 | let rg_address_ref = MemoryOperand::with_base_displ(RIP, 2); 158 | let jmp_rg = Instruction::with1(Code::Jmp_rm64, rg_address_ref)?; 159 | 160 | instructions.extend([add_rsp, jmp_rg, dq_rg_address]); 161 | } 162 | 163 | let block = iced_x86::InstructionBlock::new(&instructions, 0); 164 | Ok(iced_x86::BlockEncoder::encode(64, block, iced_x86::BlockEncoderOptions::NONE)?.code_buffer) 165 | } 166 | -------------------------------------------------------------------------------- /src/disabler/ffi.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "ffi")] 2 | use std::ffi::c_void; 3 | use std::{ 4 | ffi::{c_char, c_int}, 5 | mem, ptr, 6 | }; 7 | 8 | use crate::disabler::{ 9 | lazy_global::LazyGlobal, 10 | result::{DearxanResult as Result, Error, Status}, 11 | }; 12 | 13 | /// DearxanResult as seen across FFI boundaries. 14 | /// 15 | /// Take utmost care when modifying its layout to maintain ABI compatibility: 16 | /// 1. No fields may be removed. 17 | /// 2. No fields may be reordered. 18 | /// 3. New fields must be added before `_last_for_offsetof`. 19 | /// 20 | /// Likewise, don't forget to update the C layout in `include/dearxan.h`. 
21 | #[repr(C)] 22 | #[derive(Clone)] 23 | pub struct DearxanResult { 24 | result_size: usize, 25 | status: c_int, 26 | error_msg: *const c_char, 27 | error_msg_size: usize, 28 | is_arxan_detected: bool, 29 | is_executing_entrypoint: bool, 30 | _last_for_offsetof: c_char, 31 | } 32 | 33 | #[repr(C)] 34 | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 35 | pub enum DearxanStatus { 36 | Success = 1, 37 | Error, 38 | Panic, 39 | } 40 | 41 | /// Callback invoked once arxan has been disabled (or if it wasn't detected). 42 | #[cfg(feature = "ffi")] 43 | pub type DearxanUserCallback = extern "C" fn(result: *const DearxanResult, context: *mut c_void); 44 | 45 | /// Single function to neuter all of Arxan's checks. 46 | /// 47 | /// The callback will be invoked with the true entry point of the program once patching 48 | /// is complete, and a bool indicating whether Arxan was detected. It can be used to initialize 49 | /// hooks/etc. 50 | /// 51 | /// Handles SteamStub 3.1 possibly being applied on top of Arxan. 52 | /// 53 | /// # Safety 54 | /// This function must be called before the game's entry point runs. It is generally safe to call 55 | /// from within DllMain. 
56 | #[cfg(feature = "ffi")] 57 | #[unsafe(no_mangle)] 58 | pub unsafe extern "C" fn dearxan_neuter_arxan( 59 | callback: Option, 60 | context: *mut c_void, 61 | ) { 62 | let context_send = context.addr(); 63 | // SAFETY: Send'ness of context is asserted by caller 64 | unsafe { 65 | super::neuter_arxan(move |result| { 66 | if let Some(callback) = callback { 67 | callback(&DearxanResult::from(result), context_send as *mut c_void); 68 | } 69 | }) 70 | }; 71 | } 72 | 73 | impl DearxanResult { 74 | const UNPADDED_SIZE: usize = mem::offset_of!(Self, _last_for_offsetof); 75 | 76 | fn new_without_status() -> Self { 77 | Self { 78 | result_size: Self::UNPADDED_SIZE, 79 | status: 0, 80 | is_arxan_detected: false, 81 | is_executing_entrypoint: false, 82 | error_msg: c"".as_ptr(), 83 | error_msg_size: 0, 84 | _last_for_offsetof: 0, 85 | } 86 | } 87 | 88 | pub(crate) fn new(status: DearxanStatus) -> Self { 89 | Self { 90 | status: status.into(), 91 | ..Self::new_without_status() 92 | } 93 | } 94 | 95 | #[track_caller] 96 | pub(crate) unsafe fn from_global(global: &LazyGlobal) -> Self { 97 | let (ptr, global_size) = **global; 98 | if ptr.is_null() || !ptr.is_aligned() || global_size < mem::size_of::() { 99 | panic!("lazy_global variable was incorrectly initialized"); 100 | } 101 | 102 | let result_size = unsafe { ptr::read(&raw const (*ptr).result_size) }; 103 | if global_size < result_size { 104 | panic!("lazy_global variable self-reported size mismatch"); 105 | } 106 | 107 | let mut result = Self::new_without_status(); 108 | 109 | unsafe { 110 | let result_size = Self::UNPADDED_SIZE.min(result_size); 111 | ptr::copy_nonoverlapping(ptr as *const u8, &raw mut result as *mut u8, result_size); 112 | result.result_size = result_size; 113 | } 114 | 115 | result 116 | } 117 | } 118 | 119 | unsafe impl Send for DearxanResult {} 120 | 121 | unsafe impl Sync for DearxanResult {} 122 | 123 | impl From for DearxanResult { 124 | fn from(result: Result) -> Self { 125 | match result { 126 | 
Ok(status) => Self { 127 | is_arxan_detected: status.is_arxan_detected, 128 | is_executing_entrypoint: status.is_executing_entrypoint, 129 | ..Self::new(DearxanStatus::Success) 130 | }, 131 | Err(Error::Error(err)) => { 132 | let msg = { 133 | let mut msg = err.to_string(); 134 | msg.push('\0'); 135 | msg.leak() 136 | }; 137 | 138 | Self { 139 | error_msg: msg.as_ptr() as *const c_char, 140 | error_msg_size: msg.len() - 1, 141 | ..Self::new(DearxanStatus::Error) 142 | } 143 | } 144 | Err(Error::Panic(mut msg)) => { 145 | let msg = { 146 | msg.push('\0'); 147 | msg.leak() 148 | }; 149 | 150 | Self { 151 | error_msg: msg.as_ptr() as *const c_char, 152 | error_msg_size: msg.len() - 1, 153 | ..Self::new(DearxanStatus::Panic) 154 | } 155 | } 156 | } 157 | } 158 | } 159 | 160 | impl From for Result { 161 | fn from(result: DearxanResult) -> Self { 162 | if result.status == DearxanStatus::Success { 163 | Ok(Status { 164 | is_arxan_detected: result.is_arxan_detected, 165 | is_executing_entrypoint: result.is_executing_entrypoint, 166 | }) 167 | } 168 | else if result.status == DearxanStatus::Error || result.status == DearxanStatus::Panic { 169 | let bytes = unsafe { 170 | std::slice::from_raw_parts(result.error_msg as *const u8, result.error_msg_size) 171 | }; 172 | 173 | Err(match str::from_utf8(bytes) { 174 | Ok(str) => { 175 | if result.status == DearxanStatus::Error { 176 | Error::Error(str.into()) 177 | } 178 | else { 179 | Error::Panic(str.to_owned()) 180 | } 181 | } 182 | Err(err) => Error::Error(Box::new(err)), 183 | }) 184 | } 185 | else { 186 | Err(Error::Error("result was uninitialized or invalid".into())) 187 | } 188 | } 189 | } 190 | 191 | impl From for c_int { 192 | fn from(value: DearxanStatus) -> Self { 193 | value as c_int 194 | } 195 | } 196 | 197 | impl PartialEq for c_int { 198 | fn eq(&self, other: &DearxanStatus) -> bool { 199 | *self == *other as c_int 200 | } 201 | } 202 | 203 | impl PartialEq for DearxanStatus { 204 | fn eq(&self, other: &c_int) -> 
bool { 205 | *self as c_int == *other 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /test_launcher/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | env::current_dir, 3 | error::Error, 4 | ffi::CString, 5 | os::windows::io::{FromRawHandle, OwnedHandle}, 6 | time::Duration, 7 | }; 8 | 9 | use clap::Parser; 10 | use dll_syringe::{ 11 | Syringe, 12 | process::{OwnedProcess, Process}, 13 | }; 14 | use windows::{ 15 | Win32::System::Threading::{ 16 | CREATE_SUSPENDED, CreateProcessA, INFINITE, PROCESS_INFORMATION, ResumeThread, 17 | STARTUPINFOA, WaitForSingleObject, 18 | }, 19 | core::PCSTR, 20 | }; 21 | 22 | struct Game { 23 | alias: &'static str, 24 | appid: u32, 25 | exe_path: &'static str, 26 | } 27 | 28 | impl Game { 29 | const fn new(alias: &'static str, appid: u32, exe_path: &'static str) -> Self { 30 | Self { 31 | alias, 32 | appid, 33 | exe_path, 34 | } 35 | } 36 | } 37 | 38 | const GAMES: &[Game] = &[ 39 | Game::new("ds2s", 335300, "Game/DarkSoulsII.exe"), 40 | Game::new("ds3", 374320, "Game/DarkSoulsIII.exe"), 41 | Game::new("dsr", 570940, "DarkSoulsRemastered.exe"), 42 | Game::new("sdt", 814380, "sekiro.exe"), 43 | Game::new("er", 1245620, "Game/eldenring.exe"), 44 | Game::new("ac6", 1888160, "Game/armoredcore6.exe"), 45 | Game::new("nr", 2622380, "Game/nightreign.exe"), 46 | ]; 47 | 48 | #[derive(Parser)] 49 | #[command(version, about, long_about = None)] 50 | struct CliArgs { 51 | #[arg( 52 | value_name = "GAME", 53 | help = "Game alias or executable path to start and inject the arxan disabler into.", 54 | long_help = "Game alias or executable path to start and inject the arxan disabler into. 
The valid aliases are: 55 | \t- ds2s (Dark Souls II SOTFS) 56 | \t- ds3 (Dark Souls III) 57 | \t- dsr (Dark Souls Remastered) 58 | \t- sdt (Sekiro) 59 | \t- er (Elden Ring) 60 | \t- nr (Elden Ring: Nightreign) 61 | " 62 | )] 63 | game: String, 64 | 65 | #[arg( 66 | short, 67 | long, 68 | value_name = "SECONDS", 69 | help = "Time to wait before resuming the game process." 70 | )] 71 | delay: Option, 72 | 73 | #[arg( 74 | short, 75 | long, 76 | action = clap::ArgAction::SetTrue, 77 | help = "Wait for user input before resuming the game process." 78 | )] 79 | wait_for_input: bool, 80 | 81 | #[arg( 82 | long, 83 | action = clap::ArgAction::SetTrue, 84 | help = "Skip injecting the arxan disabler, just launch the game." 85 | )] 86 | no_inject: bool, 87 | 88 | #[arg( 89 | long, 90 | value_name = "APPID", 91 | help = "Optionally override the appid given to the game on launch." 92 | )] 93 | env_app_id: Option, 94 | 95 | #[arg(short, long, help = "Instrument Arxan stub invocations")] 96 | instrument_stubs: bool, 97 | 98 | #[arg(long, help = "Do not create the process as suspended")] 99 | nosuspend: bool, 100 | } 101 | 102 | fn main() -> Result<(), Box> { 103 | simplelog::TermLogger::init( 104 | simplelog::LevelFilter::Debug, 105 | simplelog::Config::default(), 106 | simplelog::TerminalMode::Stdout, 107 | simplelog::ColorChoice::Auto, 108 | )?; 109 | 110 | let args = CliArgs::parse(); 111 | let lowercase_game_alias = args.game.to_lowercase(); 112 | let (game_path, appid) = match GAMES.iter().find(|game| game.alias == lowercase_game_alias) { 113 | Some(game) => { 114 | let (game_app, game_lib) = 115 | steamlocate::SteamDir::locate()?.find_app(game.appid)?.ok_or(format!( 116 | "Game '{lowercase_game_alias}' (app ID {}) not found in local Steam libraries", 117 | game.appid 118 | ))?; 119 | 120 | ( 121 | game_lib.resolve_app_dir(&game_app).join(game.exe_path), 122 | args.env_app_id.unwrap_or(game.appid), 123 | ) 124 | } 125 | None => { 126 | log::info!("unknown game alias, 
assuming path to executable"); 127 | let appid = args 128 | .env_app_id 129 | .ok_or("--env_app_id must be specified when using an explicit executable path")?; 130 | (args.game.into(), appid) 131 | } 132 | }; 133 | 134 | let game_dir = game_path.parent().unwrap(); 135 | let game_path_cstr = CString::new(game_path.as_os_str().to_str().unwrap())?; 136 | let game_dir_cstr = CString::new(game_dir.as_os_str().to_str().unwrap())?; 137 | 138 | let dll_path = if !args.no_inject { 139 | let mut build_args = vec!["build", "--release", "-p", "dearxan-test-dll"]; 140 | if args.instrument_stubs { 141 | build_args.extend_from_slice(&["-F", "instrument_stubs"]); 142 | } 143 | log::info!("Building test DLL"); 144 | std::process::Command::new("cargo").args(build_args).status()?; 145 | 146 | let dll_path = current_dir()?.join("target/release/dearxan_test_dll.dll"); 147 | log::info!("DLL path: {}", dll_path.display()); 148 | Some(dll_path) 149 | } 150 | else { 151 | None 152 | }; 153 | 154 | log::info!("Game path: {}", game_path.display()); 155 | 156 | log::info!("Launching with app ID: {}", appid); 157 | unsafe { std::env::set_var("SteamAppId", appid.to_string()) }; 158 | 159 | let startup = STARTUPINFOA { 160 | cb: size_of::().try_into()?, 161 | ..Default::default() 162 | }; 163 | let mut proc_info = PROCESS_INFORMATION::default(); 164 | 165 | let proc = unsafe { 166 | CreateProcessA( 167 | PCSTR(game_path_cstr.as_ptr() as *const _), 168 | None, 169 | None, 170 | None, 171 | true, 172 | if args.nosuspend { Default::default() } else { CREATE_SUSPENDED }, 173 | None, 174 | PCSTR(game_dir_cstr.as_ptr() as *const _), 175 | &startup, 176 | &mut proc_info, 177 | )?; 178 | 179 | let handle = OwnedHandle::from_raw_handle(proc_info.hProcess.0); 180 | OwnedProcess::from_handle_unchecked(handle).kill_on_drop() 181 | }; 182 | 183 | log::info!( 184 | "Created suspended game process. 
PID = {}", 185 | proc_info.dwProcessId 186 | ); 187 | 188 | if let Some(dll_path) = dll_path { 189 | log::info!("Injecting DLL"); 190 | let syringe = Syringe::for_process(proc.try_clone()?); 191 | let _ = syringe.inject(dll_path)?; 192 | log::info!("DLL injected"); 193 | } 194 | 195 | if let Some(delay) = args.delay { 196 | log::info!("Waiting {delay:.2} seconds before resuming process"); 197 | std::thread::sleep(Duration::from_secs_f64(delay)); 198 | } 199 | 200 | if args.wait_for_input && !args.nosuspend { 201 | log::info!("Press enter to resume process. Output will appear below."); 202 | let _ = std::io::stdin().read_line(&mut String::new()); 203 | } 204 | else { 205 | log::info!("Resuming process. Output will appear below"); 206 | } 207 | 208 | unsafe { 209 | ResumeThread(proc_info.hThread); 210 | WaitForSingleObject(proc_info.hProcess, INFINITE); 211 | } 212 | 213 | Ok(()) 214 | } 215 | -------------------------------------------------------------------------------- /src/disabler/lazy_global.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | alloc::{GlobalAlloc, Layout, System}, 3 | mem, 4 | ops::Deref, 5 | ptr, 6 | sync::LazyLock, 7 | }; 8 | 9 | use windows_sys::Win32::{ 10 | Foundation::GetLastError, 11 | System::{ 12 | Memory::{CreateFileMappingW, FILE_MAP_ALL_ACCESS, MapViewOfFile, PAGE_READWRITE}, 13 | Threading::{AcquireSRWLockExclusive, ReleaseSRWLockExclusive, SRWLOCK}, 14 | }, 15 | }; 16 | 17 | /// [`LazyLock`] wrapper for process-wide global variables created with [`lazy_global`]. 18 | pub struct LazyGlobal(LazyLock<(*const T, usize)>); 19 | 20 | /// Defines a process-wide global variable that manages a named file mapping. It is 21 | /// guaranteed to only be assigned once. 22 | /// 23 | /// The name of the file mapping is the name of the static variable. In that way, 24 | /// it is globally defined for the entire process. 
Only ASCII alphanumerics and the 25 | /// underscore are allowed to be used in the name of the identifier. 26 | /// 27 | /// # Safety 28 | /// 29 | /// Obtaining a pointer to the shared memory is safe, but using it is extremely unsafe. 30 | /// Different modules may have different ideas about the layout of `T`. It is *highly* 31 | /// recommended to use `repr(C)` and types with a stable ABI, as well as verify 32 | /// the size returned by derefencing. 33 | /// 34 | /// # Panics 35 | /// 36 | /// Dereferencing will panic if the initializer panics, if the identifier contains 37 | /// disallowed characters or if one of the OS routines fails. 38 | macro_rules! lazy_global { 39 | ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr;) => { 40 | $(#[$attr])* $vis static $name: $crate::disabler::lazy_global::LazyGlobal<$t> = 41 | $crate::disabler::lazy_global::LazyGlobal::<$t>::new(|| { 42 | $crate::disabler::lazy_global::get_ptr::<$t, _>(stringify!($name), || $init) 43 | }); 44 | } 45 | } 46 | pub(crate) use lazy_global; 47 | 48 | impl LazyGlobal { 49 | #[doc(hidden)] 50 | pub const fn new(f: fn() -> (*const T, usize)) -> Self { 51 | Self(LazyLock::new(f)) 52 | } 53 | } 54 | 55 | impl Deref for LazyGlobal { 56 | type Target = (*const T, usize); 57 | 58 | #[track_caller] 59 | fn deref(&self) -> &Self::Target { 60 | &self.0 61 | } 62 | } 63 | 64 | /// Process-wide named file mapping. 65 | /// 66 | /// `repr(C)` for ABI compatibility. Likewise, instead of storing `T` directly, 67 | /// it stores a pointer to `T` to not invoke immediate UB when the layout of `T` 68 | /// is different between callers and to preserve its natural alignment. 69 | #[repr(C)] 70 | struct LazyGlobalMapping { 71 | value: *const T, 72 | size: usize, 73 | init_lock: SRWLOCK, 74 | is_poisoned: bool, 75 | } 76 | 77 | /// Accesses a process-wide global variable that manages a named file mapping. It is 78 | /// guaranteed to only be assigned once. 
79 | /// 80 | /// Only ASCII alphanumerics and the underscore are allowed to be used in the name of the 81 | /// identifier. 82 | /// 83 | /// # Safety 84 | /// 85 | /// Obtaining a pointer to the shared memory is safe, but using it is extremely unsafe. 86 | /// Different modules may have different ideas about the layout of `T`. It is *highly* 87 | /// recommended to use `repr(C)` and types with a stable ABI, as well as verify 88 | /// the size returned by derefencing. 89 | /// 90 | /// # Panics 91 | /// 92 | /// Will panic if the initializer panics, if the name contains disallowed characters 93 | /// or if one of the OS routines fails. 94 | #[track_caller] 95 | pub fn get_ptr T>(name: &str, init: F) -> (*const T, usize) { 96 | unsafe { 97 | // Filter invalid names and prepend the local (process-wide) namespace prefix. 98 | if name.chars().any(|c| !c.is_ascii_alphanumeric() && c != '_') { 99 | panic!("{name} is not a valid file mapping name"); 100 | } 101 | 102 | // Note: backslashes are not permitted after the prefix, but they are already 103 | // filtered out above. 104 | let name = format!("Local\\{name}\0").encode_utf16().collect::>(); 105 | 106 | // Create or open the named file mapping backed by the paging file (no file handle). 107 | let mapping_handle = CreateFileMappingW( 108 | ptr::null_mut(), 109 | ptr::null(), 110 | PAGE_READWRITE, 111 | 0, 112 | mem::size_of::>() as u32, 113 | name.as_ptr(), 114 | ); 115 | 116 | if mapping_handle.is_null() { 117 | let last_error = GetLastError(); 118 | panic!("CreateFileMappingW failed with code {last_error:08x}"); 119 | } 120 | 121 | // Map the file mapping memory. It is zero initialized, which is already a valid state for 122 | // all members of `LazyGlobalMapping` (for `SRWLOCK` see `SRWLOCK_INIT`). 
123 | // 124 | // https://learn.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-mapviewoffile 125 | // 126 | // "The initial contents of the pages in a file mapping object backed by the paging file 127 | // are 0 (zero)." 128 | let mapping = MapViewOfFile( 129 | mapping_handle, 130 | FILE_MAP_ALL_ACCESS, 131 | 0, 132 | 0, 133 | mem::size_of::>(), 134 | ) 135 | .Value as *mut LazyGlobalMapping; 136 | 137 | if mapping.is_null() { 138 | let last_error = GetLastError(); 139 | panic!("MapViewOfFile failed with code {last_error:08x}"); 140 | }; 141 | 142 | // Get an exclusive lock in case initialization is required. 143 | AcquireSRWLockExclusive(&raw mut (*mapping).init_lock); 144 | 145 | // RAII guard that releases the lock when it goes out of scope or poisons it in case of 146 | // a panic. 147 | struct LockGuard(*mut LazyGlobalMapping); 148 | 149 | impl Drop for LockGuard { 150 | fn drop(&mut self) { 151 | unsafe { 152 | if std::thread::panicking() { 153 | (*self.0).is_poisoned = true; 154 | } 155 | 156 | ReleaseSRWLockExclusive(&raw mut (*self.0).init_lock); 157 | } 158 | } 159 | } 160 | 161 | let _lock_guard = LockGuard(mapping); 162 | let mapping = &mut *mapping; 163 | 164 | // Check for poisoning (panic while initializing). 165 | if mapping.is_poisoned { 166 | panic!("variable initialization failed and the lock is poisoned"); 167 | } 168 | 169 | // Read or initialize the mapping contents. The exclusive lock serializes accesses and 170 | // initialization. 171 | if mapping.value.is_null() { 172 | // *Actual* size of `T`, which may be a ZST. 173 | let size = mem::size_of::(); 174 | 175 | // Allocate at least one byte as per the contract of `GlobalAlloc`. 176 | let layout = Layout::from_size_align_unchecked(size.max(1), mem::align_of::()); 177 | let value = System.alloc(layout) as *mut T; 178 | 179 | if value.is_null() { 180 | std::alloc::handle_alloc_error(layout); 181 | }; 182 | 183 | // Populate the value. 
`init` is only ever called once globally, since a panic poisons 184 | // the lock. 185 | value.write(init()); 186 | 187 | mapping.value = value; 188 | mapping.size = size; 189 | } 190 | 191 | (mapping.value, mapping.size) 192 | } 193 | } 194 | 195 | unsafe impl Send for LazyGlobal {} 196 | 197 | unsafe impl Sync for LazyGlobal {} 198 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # dearxan 2 | 3 | `dearxan` is a library for performing static and runtime analysis/patching of the checks Arxan (now GuardIT) inserts in a binary. 4 | 5 | It is currently able to fully[^1] neuter Arxan in all the FromSoftware games using it. In particular, once patches are applied absolutely zero Arxan code (e.g. anti-debug checks and integrity checks) will run and all encrypted functions will be forever decrypted. 6 | 7 | [^1]: This is not quite true at the moment, since we still have to let the Arxan entry point stubs run. This is not really a problem as the entry point stub does not do any anti-debug checks. In some games (e.g. Dark Souls Remastered) these stubs perform integrity checks and crash the game if modifications are detected, which can be mitigated by performing your hooks in the callback function provided to `neuter_arxan`. Once the entry point stubs are fully reverse engineered, this will no longer be necessary. 8 | 9 | Note that this crate is only tested against the variants of Arxan present in the latest versions of FromSoftware games, which is all of the following: 10 | - Dark Souls Remastered 11 | - Dark Souls II SOTFS 12 | - Dark Souls III 13 | - Elden Ring 14 | - Armored Core VI 15 | - Elden Ring: Nightreign 16 | 17 | It may not work with the Arxan configurations used by other game developers. That said, contributions are welcome. 18 | 19 |
20 | 21 | Many DLL injectors or mod launchers do not suspend the process upon creation or otherwise provide a method to execute your code before the game's entry point is invoked. The crate supports these loaders on a best-effort basis, but it is **strongly** recommended to use one that loads mods before the game's entry point runs. 22 | 23 | For Souls games compatible with [me3](https://github.com/garyttierney/me3), this crate is already integrated into the mod loader itself through the `disable_arxan` profile option. If your mod already depends on me3, it is recommended to make use of this instead of using this crate from your DLL mod. 24 | 25 |
26 | 27 | # Usage 28 | 29 | ## From Rust, using the `disabler` feature 30 | 31 | Add the following to your `Cargo.toml`: 32 | ```toml 33 | dearxan = "0.5.1" 34 | ``` 35 | 36 | Then, simply call the `dearxan::disabler::neuter_arxan` function once, ideally before the entry point of the game is executed: 37 | ```rust, 38 | unsafe fn runs_before_entry_point() { 39 | use dearxan::disabler::neuter_arxan; 40 | 41 | neuter_arxan(|result| { 42 | match result { 43 | Ok(_status) => println!("Arxan disabled!"), 44 | Err(e) => println!("{e}\nFailed to disable Arxan!"), 45 | } 46 | // This is a good place to do your hooks. 47 | // Once this callback returns, the game's true entry point 48 | // will be invoked. 49 | }); 50 | } 51 | ``` 52 | 53 | ## From C++ 54 | 55 | Download the static library from the [Releases](https://github.com/tremwil/dearxan/releases) page and link to it along with the included Windows import libraries. Include `include/dearxan.h` and call `dearxan::neuter_arxan`, ideally before the game's entry point runs: 56 | 57 | ```C++ 58 | #include 59 | 60 | #include "include/dearxan.h" 61 | 62 | void runs_before_entry_point() { 63 | dearxan::neuter_arxan([](const dearxan::DearxanResult& result) { 64 | if (result.status() == dearxan::DearxanStatus::DearxanSuccess) { 65 | std::cout << "Arxan disabled!\n"; 66 | } else { 67 | std::cout << result.error_msg() << '\n'; 68 | std::cout << "Failed to disable Arxan!\n"; 69 | } 70 | }); 71 | } 72 | ``` 73 | 74 | Note that the minimal supported C++ standard is C++14, although C++17 and above are recommended. 75 | 76 | ## From C 77 | 78 | Download the static library from the [Releases](https://github.com/tremwil/dearxan/releases) page and link to it along with the included Windows import libraries. 
Include `include/dearxan.h` and call `dearxan_neuter_arxan`, ideally before the game's entry point runs: 79 | 80 | ```C 81 | #include 82 | 83 | #include "include/dearxan.h" 84 | 85 | void my_callback(const DearxanResult* result, void* opaque) { 86 | if (result->status == DearxanSuccess) { 87 | printf("Arxan disabled!\n"); 88 | } else { 89 | printf( 90 | "%.*s\nFailed to disable Arxan!\n", 91 | (int)result->error_msg_size, 92 | result->error_msg 93 | ); 94 | } 95 | } 96 | 97 | void runs_before_entry_point() { 98 | dearxan_neuter_arxan(my_callback, NULL); 99 | } 100 | ``` 101 | 102 | ## From another language 103 | 104 | Download the static library from the [Releases](https://github.com/tremwil/dearxan/releases) page and link to it along with the included Windows import libraries. Generate C bindings according to `include/dearxan.h` and call `dearxan_neuter_arxan`, ideally before the game's entry point runs. 105 | 106 | ## Writing your own patcher 107 | 108 | If you want to patch an executable on disk, for example, you will need to write your own disabler. This will involve analyzing the Arxan stubs in the binary with `dearxan::analysis::analyze_all_stubs` or equivalent APIs, then passing the resulting `StubInfo` values to `dearxan::patch::ArxanPatch::build_from_stubs`. From there you will have to iterate over the patches and apply them to the executable manually. 109 | 110 | Note that currently, for this to work on a live executable image it is important to make sure that the Arxan entry point stub has been invoked. For FromSoftware games, beware that binaries may be wrapped in SteamStub as well. 111 | 112 | # About Arxan 113 | 114 | Arxan is an anti debug and tampering product often applied to games. 
Some features of Arxan include: 115 | - Instruction mutations and control flow obfuscation to confuse decompilers and make reverse engineering harder 116 | - Obfuscation by encrypting sensitive functions at rest and decrypting them only when they are being executed 117 | - A varied suite of anti-debug checks 118 | - Integrity checks on functions marked as sensitive by the game developer, with the ability to perform a combination of the following when tampering is detected: 119 | - Silently writing flags to a buffer that the game developer can read to integrate with their anti-cheat solution 120 | - Crashing the game by corrupting the stack or control flow in a way that is difficult to debug 121 | - Repairing the function's code 122 | 123 | Every bit of new logic (e.g. not just instruction mutations) that Arxan adds to the game is contained within an *Arxan stub* that is inserted into an arbitrary game function. These stubs perform a context save by pushing the registers they will be using on the stack before executing some Arxan logic, restoring the context and jumping back to the function's original code. 124 | 125 | This crate disables Arxan by searching for these stubs and visiting their control flow graphs. Using partial instruction emulation and forking the program state when branches are hit, it is possible to work through Arxan's control flow obfuscation. From there, the structure of the stubs is analyzed to extract the patches required to neuter it. This is usually a jump hook to a special trampoline that fixes up the stub's stack, and sometimes includes extracting the code regions decrypted by the stub to write their contents directly. 126 | 127 | # Feature flags 128 | 129 | The crate comes with the following feature flags: 130 | - `disabler` (default): Provides an implementation of a patcher capable of fully disabling Arxan by calling the `neuter_arxan` function. 131 | - `rayon` (default): Parallelizes Arxan stub analysis using the `rayon` crate.
132 | - `ffi`: Exports a C function `dearxan_neuter_arxan` to use the Arxan disabler from another language. 133 | - `instrument_stubs`: Adds upon `disabler` by instrumenting each Arxan stub to log a message the first time it is called. **CAREFUL**: This feature currently crashes for games other than Dark Souls Remastered due to register clobbering! 134 | - `internal_api`: Make most of the internal binary analysis APIs public through `dearxan::analysis::internal`. These APIs are *not* stabilized yet and may break between minor crate versions. 135 | 136 | # Credits 137 | 138 | Many thanks to [dasaav](https://github.com/Dasaav-dsv/) for implementing global synchronization for `neuter_arxan` as well as helping me reverse engineer how Arxan stores the regions of memory to decrypt and for finding the encryption algorithm they used (32-round TEA). -------------------------------------------------------------------------------- /src/analysis/vm/memory.rs: -------------------------------------------------------------------------------- 1 | //! Basic emulation of a program's virtual address space.

use std::ops::ControlFlow;
use std::{
    borrow::Cow,
    io::{Read, Write},
};

use bitvec::{BitArr, array::BitArray, bitarr};
use fxhash::FxHashMap;

use super::image::ImageView;

/// A fixed-size chunk of emulated memory together with a per-byte bitmask
/// tracking which bytes hold known (initialized) values.
#[derive(Debug, Clone)]
struct MemoryBlock {
    bytes: [u8; Self::SIZE],
    is_known: BitArr!(for Self::SIZE),
}

impl MemoryBlock {
    /// Size of a block in bytes.
    const SIZE: usize = 64;
}

impl Default for MemoryBlock {
    fn default() -> Self {
        Self {
            bytes: [0; Self::SIZE],
            is_known: BitArray::ZERO,
        }
    }
}

impl MemoryBlock {
    /// Invokes `cb` on every maximal run of known bytes within
    /// `offset..max_offset`, passing the run's starting offset within the
    /// block and the corresponding bytes. Short-circuits if `cb` breaks.
    #[inline(always)]
    fn known_slices<B>(
        &self,
        mut offset: usize,
        max_offset: usize,
        mut cb: impl FnMut(u64, &[u8]) -> ControlFlow<B>,
    ) -> ControlFlow<B> {
        while offset < max_offset {
            let start = offset + self.is_known[offset..max_offset].leading_zeros();
            let end = start + self.is_known[start..max_offset].leading_ones();
            if start != end {
                cb(start as u64, &self.bytes[start..end])?;
            }
            offset = end;
        }
        ControlFlow::Continue(())
    }
}

/// Sparse emulated address space layered on top of a backing program image.
///
/// Written bytes are stored in fixed-size [`MemoryBlock`]s; reads of blocks
/// that were never written fall back to the image, and every byte tracks
/// whether its value is known.
#[derive(Clone)]
pub struct MemoryStore<I> {
    blocks: FxHashMap<usize, MemoryBlock>,
    image: I,
}

impl<I: ImageView> MemoryStore<I> {
    /// Creates an empty store backed by `image`.
    pub fn new(image: I) -> Self {
        Self {
            blocks: Default::default(),
            image,
        }
    }

    /// Creates a store backed by `image`, pre-populated with the given
    /// `(address, bytes)` pairs.
    pub fn new_initialized<M: AsRef<[u8]>>(
        image: I,
        known_memory: impl IntoIterator<Item = (u64, M)>,
    ) -> Self {
        let mut s = Self::new(image);
        for (addr, mem) in known_memory {
            s.write(addr, mem.as_ref());
        }
        s
    }

    /// Returns a reference to the backing image.
    pub fn image(&self) -> &I {
        &self.image
    }

    /// Reads `out_buf.len()` bytes starting at `addr`.
    ///
    /// Returns `None` (possibly leaving `out_buf` partially written) if any
    /// byte in the requested range is unknown or the range overflows the
    /// address space.
    pub fn read<'b>(&self, addr: u64, out_buf: &'b mut [u8]) -> Option<&'b mut [u8]> {
        if out_buf.is_empty() {
            return Some(out_buf);
        }

        let (i_start_block, start_ofs) = Self::block_and_offset(addr);
        // `end_ofs` is the *inclusive* offset of the last requested byte.
        let (i_end_block, end_ofs) =
            Self::block_and_offset(addr.checked_add(out_buf.len() as u64 - 1)?);

        if i_start_block == i_end_block {
            let block = self.get_block(i_start_block)?;
            block.is_known[start_ofs..=end_ofs].all().then(|| {
                out_buf.copy_from_slice(&block.bytes[start_ofs..=end_ofs]);
            })?;
        }
        else {
            let mut out_cursor = &mut *out_buf;

            let start_block = self.get_block(i_start_block)?;
            start_block.is_known[start_ofs..]
                .all()
                .then(|| out_cursor.write_all(&start_block.bytes[start_ofs..]).unwrap())?;

            // Middle blocks are every index strictly between the first and
            // last block. (The previous `..(i_end_block - 1)` bound skipped
            // the final middle block and desynchronized the output cursor.)
            for i_mid_block in (i_start_block + 1)..i_end_block {
                let mid_block = self.get_block(i_mid_block)?;
                mid_block.is_known.all().then(|| {
                    out_cursor.write_all(&mid_block.bytes).unwrap();
                })?;
            }

            let end_block = self.get_block(i_end_block)?;
            // `end_ofs` is inclusive, so the known-bitmask check must include
            // it (previously only `..end_ofs` was checked while `..=end_ofs`
            // bytes were copied out).
            end_block.is_known[..=end_ofs]
                .all()
                .then(|| out_cursor.write_all(&end_block.bytes[..=end_ofs]).unwrap())?;
        }

        Some(out_buf)
    }

    /// Writes `buf` at `addr`, marking every written byte as known.
    pub fn write(&mut self, addr: u64, mut buf: &[u8]) {
        if buf.is_empty() {
            return;
        }

        let (i_start_block, start_ofs) = Self::block_and_offset(addr);
        let (i_end_block, end_ofs) =
            Self::block_and_offset(addr.saturating_add(buf.len() as u64 - 1));

        if i_start_block == i_end_block {
            let block = self.get_block_mut(i_start_block);
            block.is_known[start_ofs..=end_ofs].fill(true);
            block.bytes[start_ofs..=end_ofs].copy_from_slice(buf);
        }
        else {
            let start_block = self.get_block_mut(i_start_block);
            start_block.is_known[start_ofs..].fill(true);
            buf.read_exact(&mut start_block.bytes[start_ofs..]).unwrap();

            // Every block strictly between the first and last is overwritten
            // in full. (`..i_end_block`, not `..(i_end_block - 1)`, which
            // skipped one block and misaligned the input cursor.)
            for i_mid_block in (i_start_block + 1)..i_end_block {
                let mid_block = self.get_block_mut(i_mid_block);
                mid_block.is_known.fill(true);
                buf.read_exact(&mut mid_block.bytes).unwrap();
            }

            let end_block = self.get_block_mut(i_end_block);
            // Mark the same inclusive range that is written below as known
            // (previously the byte at `end_ofs` was written but left unknown).
            end_block.is_known[..=end_ofs].fill(true);
            buf.read_exact(&mut end_block.bytes[..=end_ofs]).unwrap();
        }
    }

    /// Marks `count` bytes starting at `addr` as unknown.
    pub fn invalidate(&mut self, addr: u64, count: usize) {
        if count == 0 {
            return;
        }

        let (i_start_block, start_ofs) = Self::block_and_offset(addr);
        let (i_end_block, end_ofs) = Self::block_and_offset(addr.saturating_add(count as u64 - 1));

        if i_start_block == i_end_block {
            let block = self.get_block_mut(i_start_block);
            block.is_known[start_ofs..=end_ofs].fill(false);
        }
        else {
            let start_block = self.get_block_mut(i_start_block);
            start_block.is_known[start_ofs..].fill(false);

            // See `write`: the middle range must reach `i_end_block`, and the
            // final block's cleared range is inclusive of `end_ofs`.
            for i_mid_block in (i_start_block + 1)..i_end_block {
                let mid_block = self.get_block_mut(i_mid_block);
                mid_block.is_known.fill(false);
            }

            let end_block = self.get_block_mut(i_end_block);
            end_block.is_known[..=end_ofs].fill(false);
        }
    }

    /// Reads a little-endian unsigned integer of `size` bytes.
    ///
    /// # Panics
    /// Panics if `size > 8`.
    pub fn read_int(&self, addr: u64, size: usize) -> Option<u64> {
        if size > 8 {
            panic!("integers of size >8 not supported by read_int");
        }

        let mut read_buf = [0u8; 8];
        self.read(addr, &mut read_buf[..size])?;
        Some(u64::from_le_bytes(read_buf))
    }

    /// Writes a little-endian unsigned integer of `size` bytes, or invalidates
    /// the range when `val` is `None`.
    ///
    /// # Panics
    /// Panics if `size > 8` and `val` is `Some`.
    pub fn write_int(&mut self, addr: u64, val: Option<u64>, size: usize) {
        match (val, size) {
            (None, _) => self.invalidate(addr, size),
            (Some(val), ..=8) => self.write(addr, &val.to_le_bytes()[..size]),
            (Some(_), 9..) => panic!("integers of size >8 not supported by write_int"),
        }
    }

    /// Splits an address into a block index and a byte offset within it.
    fn block_and_offset(addr: u64) -> (usize, usize) {
        (
            addr as usize / MemoryBlock::SIZE,
            addr as usize % MemoryBlock::SIZE,
        )
    }

    /// Fetches a block for reading, falling back to a fully-known copy of the
    /// backing image's bytes when the block was never written.
    fn get_block(&self, i_block: usize) -> Option<Cow<'_, MemoryBlock>> {
        self.blocks.get(&i_block).map(Cow::Borrowed).or_else(|| {
            let bytes = self.image.read((i_block * MemoryBlock::SIZE) as u64, MemoryBlock::SIZE)?
                [..MemoryBlock::SIZE]
                .try_into()
                .unwrap();

            Some(Cow::Owned(MemoryBlock {
                bytes,
                is_known: bitarr![1; MemoryBlock::SIZE],
            }))
        })
    }

    /// Fetches a block for writing, seeding it from the backing image, or as
    /// fully unknown when outside the image.
    fn get_block_mut(&mut self, i_block: usize) -> &mut MemoryBlock {
        self.blocks.entry(i_block).or_insert_with(|| {
            self.image
                .read((i_block * MemoryBlock::SIZE) as u64, MemoryBlock::SIZE)
                .map(|bytes| MemoryBlock {
                    bytes: bytes[..MemoryBlock::SIZE].try_into().unwrap(),
                    is_known: bitarr![1; MemoryBlock::SIZE],
                })
                .unwrap_or_default()
        })
    }
}

impl<I> std::fmt::Debug for MemoryStore<I> {
    /// Debug-prints only the explicitly tracked blocks, as a map from start
    /// address to each known run of bytes, in ascending address order.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut blocks_sorted = self.blocks.iter().collect::<Vec<_>>();
        blocks_sorted.sort_by_key(|(b, _)| **b);

        let mut map = f.debug_map();
        for (block_id, block) in blocks_sorted {
            let _ = block.known_slices::<()>(0, MemoryBlock::SIZE, |ofs, bytes| {
                map.entry(&((block_id * MemoryBlock::SIZE) as u64 + ofs), &bytes);
                ControlFlow::Continue(())
            });
        }
        map.finish()?;
        Ok(())
    }
}
--------------------------------------------------------------------------------
/src/disabler/entry_point.rs:
--------------------------------------------------------------------------------
use std::{
    ptr::{NonNull, null_mut},
    sync::{
        LazyLock,
        atomic::{AtomicU64, Ordering::Relaxed},
    },
    time::{Duration, Instant},
};

use pelite::pe64::Pe;
use windows_sys::Win32::{
    Foundation::{
        CloseHandle, DUPLICATE_SAME_ACCESS, DuplicateHandle, GetLastError, HANDLE, NTSTATUS,
    },
    System::{
        Diagnostics::Debug::{CONTEXT, CONTEXT_FULL_AMD64, GetThreadContext},
        LibraryLoader::{GetModuleHandleA, GetProcAddress},
        Threading::{
            GetCurrentProcess, GetCurrentThreadId, GetThreadId, ResumeThread, SuspendThread,
            THREAD_ACCESS_RIGHTS,
THREAD_ALL_ACCESS, THREAD_QUERY_INFORMATION, 21 | THREAD_SUSPEND_RESUME, 22 | }, 23 | }, 24 | }; 25 | 26 | #[allow( 27 | non_camel_case_types, 28 | non_snake_case, 29 | non_upper_case_globals, 30 | dead_code 31 | )] 32 | mod ntdll { 33 | use super::*; 34 | 35 | #[cfg_attr(target_os = "windows", link(name = "ntdll", kind = "raw-dylib"))] 36 | unsafe extern "C" { 37 | pub fn NtGetNextThread( 38 | process_handle: HANDLE, 39 | thread_handle: HANDLE, 40 | desired_access: THREAD_ACCESS_RIGHTS, 41 | handle_attributes: u32, 42 | flags: u32, 43 | new_thread_handle: *mut HANDLE, 44 | ) -> NTSTATUS; 45 | 46 | pub fn NtQueryInformationThread( 47 | thread_handle: HANDLE, 48 | thread_information_class: u32, 49 | thread_information: *mut (), 50 | thread_information_length: usize, 51 | return_length: *mut usize, 52 | ) -> NTSTATUS; 53 | } 54 | 55 | pub const ThreadSuspendCount: u32 = 35; 56 | pub const ThreadQuerySetWin32StartAddress: u32 = 9; 57 | } 58 | 59 | use fxhash::FxHashMap; 60 | 61 | use crate::{analysis::ImageView, disabler::game::game}; 62 | 63 | #[derive(Debug, PartialEq, Eq)] 64 | pub struct OwnedHandle(NonNull); 65 | 66 | impl OwnedHandle { 67 | pub fn new(handle: HANDLE) -> Option { 68 | NonNull::new(handle).map(Self) 69 | } 70 | 71 | pub fn raw(&self) -> HANDLE { 72 | self.0.as_ptr() 73 | } 74 | } 75 | 76 | impl Clone for OwnedHandle { 77 | fn clone(&self) -> Self { 78 | let mut handle_out = null_mut(); 79 | unsafe { 80 | let cproc = GetCurrentProcess(); 81 | let success = DuplicateHandle( 82 | cproc, 83 | self.raw(), 84 | cproc, 85 | &mut handle_out, 86 | 0, 87 | 0, 88 | DUPLICATE_SAME_ACCESS, 89 | ); 90 | if success == 0 { 91 | panic!("DuplicateHandle failed: {}", GetLastError()); 92 | } 93 | } 94 | handle_out.into() 95 | } 96 | } 97 | 98 | impl Drop for OwnedHandle { 99 | fn drop(&mut self) { 100 | unsafe { CloseHandle(self.raw()) }; 101 | } 102 | } 103 | 104 | impl From for OwnedHandle { 105 | fn from(value: HANDLE) -> Self { 106 | 
Self(NonNull::new(value).unwrap()) 107 | } 108 | } 109 | 110 | /// Find the virtual address of the global security cookie. 111 | /// 112 | /// This is designed to work for MSVC binaries, but the pattern used might work on code 113 | /// generated by other compilers. 114 | /// 115 | /// The binary must have at least a few 10s of stack-protected functions for the analysis to be 116 | /// successful. 117 | fn find_gs_cookie_va(image: impl ImageView) -> Option { 118 | use memchr::memmem; 119 | 120 | const XOR_RAX_RSP: &[u8; 3] = b"\x48\x33\xc4"; 121 | const MIN_COUNT: usize = 16; 122 | 123 | let mut va_counts: FxHashMap = FxHashMap::default(); 124 | 125 | image 126 | .sections() 127 | .flat_map(|(va, slice)| memmem::find_iter(slice, XOR_RAX_RSP).map(move |o| va + o as u64)) 128 | .filter_map(|xor_va| { 129 | let offset = i32::from_le_bytes(image.read(xor_va - 4, 4)?[..4].try_into().unwrap()); 130 | xor_va.checked_add_signed(offset.into()) 131 | }) 132 | .find(|&gs_cookie| { 133 | let count = va_counts.entry(gs_cookie).or_default(); 134 | *count += 1; 135 | *count >= MIN_COUNT 136 | }) 137 | } 138 | 139 | pub unsafe fn wait_for_gs_cookie(timeout: Option) -> Result<(), &'static str> { 140 | static GS_COOKIE_ADDR: LazyLock> = LazyLock::new(|| find_gs_cookie_va(game().pe)); 141 | 142 | const UNITNIT_GS_COOKIE: u64 = 0x2b992ddfa232; 143 | 144 | let gs_cookie_addr = GS_COOKIE_ADDR.ok_or("global security cookie not found")?; 145 | let gs_cookie_ptr = unsafe { AtomicU64::from_ptr(gs_cookie_addr as *mut u64) }; 146 | 147 | let ts = Instant::now(); 148 | 149 | // Poll GS cookie every 10ms until it is no longer equal to the uninitialized value 150 | while timeout.is_none_or(|timeout| ts.elapsed() < timeout) { 151 | if gs_cookie_ptr.load(Relaxed) != UNITNIT_GS_COOKIE { 152 | return Ok(()); 153 | } 154 | 155 | std::thread::sleep(Duration::from_millis(10)); 156 | } 157 | Err("timed out waiting for __security_init_cookie") 158 | } 159 | 160 | /// Iterate over all threads in the 
current process, getting handles with the requested access 161 | /// rights. 162 | /// 163 | /// If `access` is `None`, the default rights of [`THREAD_ALL_ACCESS`] are used. 164 | pub fn iter_threads(access: Option) -> impl Iterator { 165 | let access = access.unwrap_or(THREAD_ALL_ACCESS); 166 | let proc = unsafe { GetCurrentProcess() }; 167 | let mut thread: Option = None; 168 | 169 | std::iter::from_fn(move || unsafe { 170 | let mut raw_thread = thread.as_ref().map(|t| t.raw()).unwrap_or_default(); 171 | let status = ntdll::NtGetNextThread(proc, raw_thread, access, 0, 0, &mut raw_thread); 172 | thread = OwnedHandle::new(raw_thread); 173 | thread.as_ref().filter(|_| status >= 0).cloned() 174 | }) 175 | } 176 | 177 | pub struct SuspendGuard { 178 | suspended: Vec, 179 | } 180 | 181 | impl SuspendGuard { 182 | pub unsafe fn suspend_all_threads() -> Self { 183 | log::debug!("suspending all threads"); 184 | 185 | unsafe { 186 | let current_thread = GetCurrentThreadId(); 187 | 188 | let suspended = iter_threads(Some(THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION)) 189 | .filter(|h| { 190 | GetThreadId(h.raw()) != current_thread && SuspendThread(h.raw()) != u32::MAX 191 | }) 192 | .collect(); 193 | 194 | Self { suspended } 195 | } 196 | } 197 | } 198 | 199 | impl Drop for SuspendGuard { 200 | fn drop(&mut self) { 201 | log::debug!("resuming threads"); 202 | for thread in std::mem::take(&mut self.suspended) { 203 | unsafe { ResumeThread(thread.raw()) }; 204 | } 205 | } 206 | } 207 | 208 | pub fn process_main_thread() -> Option { 209 | let pe = game().pe; 210 | let pe_ep = pe.optional_header().ImageBase + pe.optional_header().AddressOfEntryPoint as u64; 211 | 212 | iter_threads(None).find(|thread| unsafe { 213 | let mut thread_ep = 0; 214 | let status = ntdll::NtQueryInformationThread( 215 | thread.raw(), 216 | ntdll::ThreadQuerySetWin32StartAddress, 217 | (&raw mut thread_ep).cast(), 218 | size_of_val(&thread_ep), 219 | null_mut(), 220 | ); 221 | status >= 0 && 
thread_ep == pe_ep 222 | }) 223 | } 224 | 225 | pub fn is_created_suspended(thread: HANDLE) -> bool { 226 | static RTL_USER_THREAD_START: LazyLock = LazyLock::new(|| unsafe { 227 | let ntdll = GetModuleHandleA(c"ntdll.dll".as_ptr().cast()); 228 | GetProcAddress(ntdll, c"RtlUserThreadStart".as_ptr().cast()) 229 | .expect("RtlUserThreadStart not found") as usize 230 | }); 231 | 232 | // Check if the thread is suspended 233 | let mut suspend_count: std::ffi::c_ulong = 0; 234 | let info_status = unsafe { 235 | ntdll::NtQueryInformationThread( 236 | thread, 237 | ntdll::ThreadSuspendCount, 238 | (&raw mut suspend_count).cast(), 239 | size_of_val(&suspend_count), 240 | null_mut(), 241 | ) 242 | }; 243 | if info_status < 0 { 244 | log::error!( 245 | "NtQueryInformationThread failed: {:x}", 246 | info_status.cast_unsigned() 247 | ); 248 | return false; 249 | } 250 | if suspend_count == 0 { 251 | return false; 252 | } 253 | 254 | // Check the context to verify that it hasn't started executing its entry point yet 255 | let mut context = CONTEXT { 256 | ContextFlags: CONTEXT_FULL_AMD64, 257 | ..Default::default() 258 | }; 259 | if unsafe { GetThreadContext(thread, &mut context) } == 0 { 260 | log::error!("GetThreadContext failed"); 261 | return false; 262 | } 263 | 264 | let pe = game().pe; 265 | let pe_ep = pe.optional_header().ImageBase + pe.optional_header().AddressOfEntryPoint as u64; 266 | 267 | // We check if either are true instead of both to account for thread hijacking techniques 268 | // In particular, the Steam game overlay initializes itself using RIP thread hijacking 269 | // But another thread hijacking technique for suspended threads is overwriting RCX 270 | context.Rip == *RTL_USER_THREAD_START as u64 || context.Rcx == pe_ep 271 | } 272 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 
2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 
175 | 176 | END OF TERMS AND CONDITIONS -------------------------------------------------------------------------------- /include/dearxan.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #ifndef _DEARXAN_H 4 | #define _DEARXAN_H 5 | 6 | #ifdef __cplusplus 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #if __cplusplus >= 201703 || _MSVC_LANG >= 201703 15 | #include 16 | #else 17 | #include 18 | #endif 19 | 20 | namespace dearxan { 21 | namespace detail { 22 | extern "C" { 23 | #else 24 | #include 25 | #include 26 | #include 27 | 28 | /// The size of `DearxanResult` in bytes, WITHOUT the trailing padding. 29 | #define DEARXAN_RESULT_SIZE offsetof(DearxanResult, _last_for_offsetof) 30 | 31 | /// Verify field `field` is in bounds in a `DearxanResult`. 32 | /// 33 | /// If the member field is in bounds, `then_expr` is executed, otherwise 34 | /// `else_expr` is executed. 35 | #define DEARXAN_RESULT_FIELD(ptr, field, then_expr, else_expr) do { \ 36 | if (offsetof(DearxanResult, field) < ptr->result_size) { \ 37 | then_expr; \ 38 | } else { \ 39 | else_expr; \ 40 | } \ 41 | } while(0) 42 | #endif 43 | 44 | /// Possible values of the field `status` inside `DearxanResult`. 45 | /// 46 | /// `DearxanError` and `DearxanPanic` may mean the result contains and error message. 47 | typedef enum DearxanStatus { 48 | DearxanInvalid, 49 | DearxanSuccess, 50 | DearxanError, 51 | DearxanPanic, 52 | DearxanMaxStatus 53 | } DearxanStatus; 54 | 55 | /// The outcome of a call to `dearxan_neuter_arxan`. 56 | /// 57 | /// Contains its own size in bytes as the first member field for the purpose 58 | /// of versioning when another instance of `dearxan` handles the call. 59 | /// 60 | /// To maintain ABI stability, future `dearxan` versions are not permitted to 61 | /// remove or reorder fields, any new fields must be added before `_last_for_offsetof`. 
62 | typedef struct DearxanResult { 63 | /// Size of this `DearxanResult` struct. 64 | /// 65 | /// Only fields whose end offset fit within this size are valid for reads. Consider using 66 | /// the `DEARXAN_RESULT_FIELD` macro to check this automatically. 67 | size_t result_size; 68 | /// The status of `dearxan_neuter_arxan`. 69 | /// 70 | /// This is expected to be a value of `DearxanStatus`, but is non-exhaustive as other statuses 71 | /// may be added in the future. As such it is typed as an `int`. 72 | int status; 73 | /// The error or panic message, if `status` is `DearxanError` or `DearxanPanic`. 74 | const char* error_msg; 75 | /// The size of the error or panic message. 76 | size_t error_msg_size; 77 | /// Whether Arxan was detected or not. 78 | bool is_arxan_detected; 79 | /// If true, the callback execution is blocking the program entry point. If false, the callback 80 | /// that received this `DearxanResult` is being executed in a separate thread. 81 | /// 82 | /// In either case, it is guaranteed that the Arxan entry point stub has finished initializing 83 | /// once the callback runs. 84 | bool is_executing_entrypoint; 85 | char _last_for_offsetof; 86 | } DearxanResult; 87 | 88 | /// Callback invoked once arxan has been disabled (or if it wasn't detected). 89 | typedef void (*DearxanUserCallback)(const DearxanResult* result, void* opaque); 90 | 91 | /// Single function to neuter all of Arxan's checks. 92 | /// 93 | /// The callback will be invoked with a pointer to a `DearxanResult` containing 94 | /// fields indicating whether Arxan was detected and whether entry point execution is being 95 | /// blocked while the callback is running. Modulo any reported error, it is safe to assume 96 | /// that Arxan has been disabled once it executes. 97 | /// 98 | /// Handles SteamStub 3.1 possibly being applied on top of Arxan. 
99 | extern void dearxan_neuter_arxan(DearxanUserCallback callback, void* opaque); 100 | 101 | #ifdef __cplusplus 102 | } 103 | } // namespace detail 104 | 105 | /// Possible values of `status` inside `DearxanResult`. 106 | /// 107 | /// `DearxanError` and `DearxanPanic` may mean the result contains and error message. 108 | using DearxanStatus = detail::DearxanStatus; 109 | 110 | /// The size of `DearxanResult` in bytes, WITHOUT the trailing padding. 111 | /// 112 | /// Internal macro for use in `DearxanResult` below. 113 | #define DEARXAN_RESULT_SIZE \ 114 | offsetof(detail::DearxanResult, _last_for_offsetof) 115 | 116 | /// Verify field `field` is in bounds in a `DearxanResult`. 117 | /// 118 | /// Internal macro for use in `DearxanResult` below. 119 | /// 120 | /// If the member field is in bounds, `then_expr` is executed, otherwise 121 | /// `else_expr` is executed. 122 | #define DEARXAN_RESULT_FIELD(ptr, field, then_expr, else_expr) do { \ 123 | if (offsetof(detail::DearxanResult, field) < ptr->result_size) { \ 124 | then_expr; \ 125 | } else { \ 126 | else_expr; \ 127 | } \ 128 | } while(0) 129 | 130 | #if __cplusplus >= 201703 || _MSVC_LANG >= 201703 131 | /// Declare a getter for field `field` that performs bounds checking. 132 | /// 133 | /// Internal macro for use in `DearxanResult` below. 134 | /// 135 | /// The field must exist in `detail::DearxanResult`. 
136 | #define DEARXAN_DECLARE_FIELD(field) auto field() const { \ 137 | DEARXAN_RESULT_FIELD( \ 138 | static_cast(this), \ 139 | field, \ 140 | return std::optional(detail::DearxanResult::field), \ 141 | return std::optional() \ 142 | ); \ 143 | } 144 | #else 145 | #define DEARXAN_DECLARE_FIELD(field) auto field() const { \ 146 | DEARXAN_RESULT_FIELD( \ 147 | static_cast(this), \ 148 | field, \ 149 | return detail::DearxanResult::field, \ 150 | throw std::length_error( \ 151 | "old DearxanResult layout lacks field " #field) \ 152 | ); \ 153 | } 154 | #endif 155 | 156 | /// The outcome of a call to `dearxan_neuter_arxan`. 157 | /// 158 | /// Performs bounds checking utilizing its own size in bytes for the purpose 159 | /// of versioning when another instance of `dearxan` handles the call. 160 | /// 161 | /// To maintain ABI stability, future `dearxan` versions are not permitted to 162 | /// remove or reorder fields. 163 | struct DearxanResult : private detail::DearxanResult { 164 | explicit DearxanResult(const detail::DearxanResult* ptr) : detail::DearxanResult{} { 165 | if (ptr == nullptr) { 166 | return; 167 | } 168 | 169 | size_t result_size = DEARXAN_RESULT_SIZE < ptr->result_size 170 | ? DEARXAN_RESULT_SIZE : ptr->result_size; 171 | 172 | std::memcpy( 173 | static_cast(static_cast(this)), 174 | static_cast(ptr), 175 | result_size 176 | ); 177 | 178 | this->result_size = result_size; 179 | } 180 | 181 | /// The status of `dearxan_neuter_arxan`. 182 | /// 183 | /// This is expected to be a valid value of `DearxanStatus`, but is non-exhaustive as other statuses 184 | /// may be added in the future. Hence it is recommended to compare it against `DearxanMaxStatus`. 185 | DearxanStatus status() const noexcept { 186 | return static_cast(detail::DearxanResult::status); 187 | } 188 | 189 | /// The error or panic message, if `status` is `DearxanError` or `DearxanPanic`. 
190 | std::string error_msg() const noexcept { 191 | return std::string(detail::DearxanResult::error_msg, 192 | detail::DearxanResult::error_msg_size); 193 | } 194 | 195 | /// Whether Arxan was detected or not. 196 | bool is_arxan_detected() const noexcept { 197 | return detail::DearxanResult::is_arxan_detected; 198 | } 199 | 200 | /// If true, the callback execution is blocking the program entry point. If false, the callback 201 | /// that received this `DearxanResult` is being executed in a separate thread. 202 | /// 203 | /// In either case, it is guaranteed that the Arxan entry point stub has finished initializing 204 | /// once the callback runs. 205 | bool is_executing_entrypoint() const noexcept { 206 | return detail::DearxanResult::is_executing_entrypoint; 207 | } 208 | }; 209 | 210 | /// Callback invoked once arxan has been disabled (or if it wasn't detected). 211 | using DearxanUserCallback = std::function; 212 | 213 | /// Single function to neuter all of Arxan's checks. 214 | /// 215 | /// The callback will be invoked with a pointer to a `DearxanResult` containing 216 | /// fields indicating whether Arxan was detected and whether entry point execution is being 217 | /// blocked while the callback is running. Modulo any reported error, it is safe to assume 218 | /// that Arxan has been disabled once it executes. 219 | /// 220 | /// Handles SteamStub 3.1 possibly being applied on top of Arxan. 
221 | inline void neuter_arxan(DearxanUserCallback f) { 222 | auto boxed_function = 223 | std::make_unique(std::move(f)).release(); 224 | 225 | auto callback = +[](const detail::DearxanResult* result, void* opaque) { 226 | auto boxed_function = 227 | std::unique_ptr(reinterpret_cast(opaque)); 228 | (*boxed_function.get())(DearxanResult(result)); 229 | }; 230 | 231 | detail::dearxan_neuter_arxan(callback, static_cast(boxed_function)); 232 | } 233 | } // namespace dearxan 234 | 235 | #undef DEARXAN_RESULT_SIZE 236 | #undef DEARXAN_RESULT_FIELD 237 | #undef DEARXAN_DECLARE_FIELD 238 | 239 | #endif 240 | 241 | #endif 242 | -------------------------------------------------------------------------------- /src/analysis/cfg.rs: -------------------------------------------------------------------------------- 1 | //! Implements specialized logic to visit all instructions in an arbitrary Arxan stub, using the 2 | //! forking emulator from the [`vm`](`crate::analysis::vm`) module to work through obfuscated 3 | //! control flow. 
4 | 5 | use std::collections::hash_map::Entry; 6 | 7 | use bitfield_struct::bitfield; 8 | use fxhash::FxHashMap; 9 | use iced_x86::{Code, FlowControl, Register}; 10 | 11 | use super::vm::{ImageView, ProgramState, RunStep, StepKind, util}; 12 | 13 | const VOLATILE_REGS: &[Register] = &[ 14 | Register::RAX, 15 | Register::RCX, 16 | Register::RDX, 17 | Register::R8, 18 | Register::R9, 19 | Register::R10, 20 | Register::R11, 21 | ]; 22 | 23 | #[bitfield(u64)] 24 | struct CfgInfo { 25 | #[bits(63)] 26 | cmov_id: u64, 27 | unresolved_branch: bool, 28 | } 29 | 30 | impl CfgInfo { 31 | const fn detached() -> Self { 32 | Self::new().with_cmov_id(u64::MAX >> 1) 33 | } 34 | 35 | fn detach_cmov_pair(&mut self) { 36 | self.set_cmov_id(u64::MAX >> 1); 37 | } 38 | 39 | fn is_prev_of(&self, other: &Self) -> bool { 40 | self.cmov_id().checked_add(1) == Some(other.cmov_id()) 41 | } 42 | 43 | fn create_pair(&mut self) -> (Self, Self) { 44 | self.set_cmov_id(self.cmov_id() + 3); 45 | ( 46 | Self::new().with_cmov_id(self.cmov_id()), 47 | Self::new().with_cmov_id(self.cmov_id() + 1), 48 | ) 49 | } 50 | } 51 | 52 | /// Wrapper attaching the CFG state information required to by the [`ArxanCfgVisitor`] to an 53 | /// arbitrary type. 54 | #[derive(Clone, Copy)] 55 | pub struct ArxanCfgData { 56 | #[allow(dead_code)] 57 | pub inner: D, 58 | cfg_info: CfgInfo, 59 | } 60 | 61 | impl core::ops::Deref for ArxanCfgData { 62 | type Target = D; 63 | 64 | fn deref(&self) -> &Self::Target { 65 | &self.inner 66 | } 67 | } 68 | 69 | impl core::ops::DerefMut for ArxanCfgData { 70 | fn deref_mut(&mut self) -> &mut Self::Target { 71 | &mut self.inner 72 | } 73 | } 74 | 75 | /// Information about a possibly-obfuscated call instruction. 76 | pub struct CallInfo { 77 | #[allow(dead_code)] 78 | /// The value of RSP after taking the call. 79 | pub target_rsp: u64, 80 | /// The value of RSP after returning from the call. 81 | pub return_rsp: u64, 82 | /// The call's target function. 
83 | pub target_ip: Option, 84 | /// The return address to jump to after returning from the called function. 85 | pub return_ip: Option, 86 | } 87 | 88 | impl CallInfo { 89 | /// Attempt to extract call information from the current execution step. 90 | /// 91 | /// The instruction trigerring this may not necessarily be a call instruction. 92 | /// Heuristics regarding the stack and its alignment make detecting calls obfuscated 93 | /// via jump or return instructions possible. 94 | pub fn from_step(step: &RunStep) -> Option { 95 | // rsp must be known to analyze calls 96 | let rsp = step.state.registers.rsp()?; 97 | 98 | match step.instruction.flow_control() { 99 | // If RSP is call-aligned after a return/indirect branch, assume an obfuscated call. 100 | FlowControl::Return => { 101 | let target_rsp = 102 | rsp.wrapping_add_signed(step.instruction.stack_pointer_increment() as i64); 103 | 104 | (target_rsp & 0xF == 8).then(|| Self { 105 | target_rsp, 106 | return_rsp: target_rsp.wrapping_add(8), 107 | target_ip: step.state.memory.read_int(rsp, 8), 108 | return_ip: step.state.memory.read_int(target_rsp, 8), 109 | }) 110 | } 111 | FlowControl::IndirectBranch => (rsp & 0xF == 8).then(|| Self { 112 | target_rsp: rsp, 113 | return_rsp: rsp.wrapping_add(8), 114 | target_ip: step.state.get_operand_value(step.instruction, 0), 115 | return_ip: step.state.memory.read_int(rsp, 8), 116 | }), 117 | FlowControl::Call | FlowControl::IndirectCall => Some(Self { 118 | target_rsp: rsp 119 | .wrapping_add_signed(step.instruction.stack_pointer_increment() as i64), 120 | return_rsp: rsp, 121 | target_ip: if step.instruction.flow_control() == FlowControl::IndirectCall { 122 | step.state.get_operand_value(step.instruction, 0) 123 | } 124 | else { 125 | Some(step.instruction.near_branch_target()) 126 | }, 127 | return_ip: Some(step.instruction.next_ip()), 128 | }), 129 | _ => None, 130 | } 131 | } 132 | } 133 | 134 | /// Wrapper around a [`ProgramState`] providing a different 
[`run`](ArxanCfgVisitor::run) method 135 | /// which attempts to visit all instructions of an Arxan stub. 136 | pub struct ArxanCfgVisitor(pub ProgramState); 137 | 138 | impl ArxanCfgVisitor { 139 | // Handles repeat calls and obfuscated calls 140 | fn handle_call_like( 141 | step: RunStep>, 142 | visited: &mut FxHashMap, 143 | ) -> StepKind, R> { 144 | let Some(call) = CallInfo::from_step(&step) 145 | else { 146 | return StepKind::SingleStep; 147 | }; 148 | let Some(return_ip) = call.return_ip 149 | else { 150 | return StepKind::SingleStep; 151 | }; 152 | 153 | let oob_or_visited = match call.target_ip { 154 | Some(t) => step.state.memory.image().read(t, 1).is_none() || visited.contains_key(&t), 155 | None => true, 156 | }; 157 | 158 | // make sure we visit instructions after returning, skip to the return immediately 159 | // if the target is oob or already visited 160 | if oob_or_visited { 161 | log::trace!("skipping detected call at {:x}", step.instruction.ip()); 162 | *step.state.registers.rsp_mut() = Some(call.return_rsp); 163 | step.state.rip = Some(return_ip); 164 | 165 | // Clear volatile registers, since we don't know what the function did 166 | for &r in VOLATILE_REGS { 167 | *step.state.registers.gpr64_mut(r) = None; 168 | } 169 | StepKind::Custom(None) 170 | } 171 | else { 172 | StepKind::SingleStep 173 | } 174 | } 175 | 176 | /// Visits the control flow graph of the provided [`ProgramState`] while resolving obfuscated 177 | /// branches and preserving partial register and memory state information along the way. 178 | /// 179 | /// The `on_step` function can be used to modify the state of the emulator and/or stop visting 180 | /// certain branches. 181 | /// 182 | /// Unlike [`ProgramState::run`], this function will always halt, usually taking `O(n)` 183 | /// steps to visit all instructions of the stub. Note that pathological worst cases may take 184 | /// `O(n^2)` time to halt, but such cases will not be encountered in practice. 
185 | /// 186 | /// # Requirements 187 | /// The [`ProgramState`] must be initialized to the `TEST RSP, 0xF` instruction of the Arxan 188 | /// stub. 189 | pub fn run(self, mut on_step: F) -> Option 190 | where 191 | F: FnMut(RunStep<'_, I, ArxanCfgData>) -> StepKind, R>, 192 | { 193 | // Ignore the RSP-aligning first branch path that doesn't correspond to the 194 | // actual RSP value 195 | let ignored_test_rsp_branch = match self.0.registers.rsp() { 196 | Some(rsp) if rsp.is_multiple_of(16) => 1, 197 | _ => 0, 198 | }; 199 | let mut bad_cmp_rax_branch = None; 200 | 201 | let mut is_double_stepping = false; 202 | let mut info_pair_gen = CfgInfo::new(); 203 | let mut visited: FxHashMap = Default::default(); 204 | let init_state = ProgramState { 205 | rip: self.0.rip, 206 | registers: self.0.registers, 207 | memory: self.0.memory, 208 | user_data: ArxanCfgData { 209 | inner: self.0.user_data, 210 | cfg_info: CfgInfo::detached(), 211 | }, 212 | }; 213 | init_state.run(move |mut step| { 214 | // Don't execute the incorrect RSP alignment branch 215 | if (step.branch_count, step.past_forks.len()) == (1, ignored_test_rsp_branch) { 216 | log::trace!("Ignoring unreachable RSP alignment branch"); 217 | return StepKind::StopFork; 218 | } 219 | 220 | // Obfuscated stub call routines will first check if we pushed 18 earlier. 
221 | // We need to make sure to take the correct branch here too 222 | if step.branch_count == 1 223 | && step.instruction.code() == Code::Cmp_rm64_imm8 224 | && step.instruction.op0_register() == Register::RAX 225 | && step.instruction.immediate8() == 0x18 226 | { 227 | bad_cmp_rax_branch = Some(2 * ignored_test_rsp_branch); 228 | } 229 | if step.branch_count == 2 && Some(step.past_forks.len()) == bad_cmp_rax_branch { 230 | log::trace!("Ignoring unreachable RSP alignment return fixup branch"); 231 | return StepKind::StopFork; 232 | } 233 | 234 | // Keep track of visited instructions 235 | match visited.entry(step.instruction.ip()) { 236 | Entry::Occupied(mut e) => { 237 | let cfg_info = e.get_mut(); 238 | 239 | // Clear the unresolved branch flag if set 240 | let mut allow_visited = cfg_info.unresolved_branch(); 241 | if allow_visited { 242 | cfg_info.set_unresolved_branch(false); 243 | } 244 | 245 | // Only double step instructions when: 246 | // - instruction immediately follows the latest cmov branch 247 | // - no conditional branch instruction has been invoked yet 248 | let is_cond = step.instruction.flow_control() == FlowControl::ConditionalBranch; 249 | if !is_cond && step.state.user_data.cfg_info.is_prev_of(cfg_info) { 250 | // Since we already visited the instruction, don't call the user step 251 | // function 252 | log::trace!("double stepping"); 253 | is_double_stepping = true; 254 | allow_visited = true; 255 | } 256 | else if allow_visited && is_double_stepping { 257 | log::trace!("double stepping path diverged"); 258 | is_double_stepping = false; 259 | step.state.user_data.cfg_info.detach_cmov_pair(); 260 | } 261 | 262 | if !allow_visited { 263 | return StepKind::StopFork; 264 | } 265 | } 266 | Entry::Vacant(e) => { 267 | if is_double_stepping { 268 | log::trace!("double stepping path diverged"); 269 | is_double_stepping = false; 270 | step.state.user_data.cfg_info.detach_cmov_pair(); 271 | } 272 | e.insert(step.state.user_data.cfg_info); 273 | } 274 
| }; 275 | 276 | // Run the user step function 277 | // TODO: Don't do this when double stepping, so each instruction is only seen once by 278 | // the user code 279 | match on_step(step.reborrow()) { 280 | StepKind::SingleStep => (), 281 | handled => return handled, 282 | } 283 | 284 | // If the instruction is a int 0x2D, skip it instead of stopping 285 | // Without this, Arxan anti-debug checks will not be fully visited 286 | if step.instruction.code() == Code::Int_imm8 && step.instruction.immediate8() == 0x2D { 287 | step.state.rip = Some(step.instruction.next_ip()); 288 | return StepKind::Custom(None); 289 | } 290 | 291 | // If instruction is a conditional move, fork the cmov path too 292 | if util::is_cmov(step.instruction.mnemonic()) { 293 | let maybe_fork = step.single_step().map(|mut fork| { 294 | (step.state.user_data.cfg_info, fork.user_data.cfg_info) = 295 | info_pair_gen.create_pair(); 296 | fork 297 | }); 298 | return StepKind::Custom(maybe_fork); 299 | } 300 | 301 | match Self::handle_call_like(step.reborrow(), &mut visited) { 302 | StepKind::SingleStep => (), 303 | handled => return handled, 304 | }; 305 | 306 | // If we didn't fork and somehow ended up at no rip while single-stepping an indirect 307 | // branch or return, set the unresolved flag on all instructions in the basic block 308 | let maybe_fork = step.single_step(); 309 | let indirect = matches!( 310 | step.instruction.flow_control(), 311 | FlowControl::IndirectBranch | FlowControl::Return 312 | ); 313 | if maybe_fork.is_none() && indirect && step.state.rip.is_none() { 314 | let last_blocks = step.basic_block(); 315 | 316 | log::trace!( 317 | "Unresolved jump/ret at {:x}, allowing revisits from {:x?}", 318 | step.instruction.ip(), 319 | last_blocks.first() 320 | ); 321 | 322 | for ip in last_blocks { 323 | visited.get_mut(ip).unwrap().set_unresolved_branch(true); 324 | } 325 | } 326 | 327 | StepKind::Custom(maybe_fork) 328 | }) 329 | } 330 | } 331 | 
--------------------------------------------------------------------------------
/src/disabler/steamstub.rs:
--------------------------------------------------------------------------------
//! Provides utilities to hook around SteamStub V3.1 (as versioned by Steamless).
//!
//! SteamStub is used in DS2, DS3 and SDT. We must hook around it to be able to extract
//! the binary's original entry point (which in DS3's case will be the Arxan entry point)
//! to then apply the arxan patches on that one.
//!
//! Neutering SteamStub 3.1 is fairly straightforward. It stores its context in a global header
//! under the executable's entry point. This context is obfuscated using a simple running XOR
//! encryption scheme. Among other things, this context includes the original entry point of the
//! executable, offsets to a table of null-terminated function and module string names, offsets to
//! an encrypted manually-mapped DLL called `steam_drmp.dll`, a set of DRM configuration flags and
//! an integrity hash.
//!
//! To detour SteamStub and simultaneously neuter its capabilities, we clear all anti tamper/debug
//! flags from the header, replace the original entry point field with our own, and recompute the
//! integrity hash.

use std::{
    sync::atomic::{AtomicBool, Ordering},
    time::Duration,
};

use closure_ffi::BareFnOnce;
use pelite::pe64::{Pe, PeView};

use super::{game::game, util};
use crate::disabler::entry_point::{is_created_suspended, process_main_thread, wait_for_gs_cookie};

/// Running implementation of the CRC-style hash SteamStub uses for its integrity check.
#[derive(Default, Debug, Clone, Copy)]
struct SteamDrmHasher {
    // Current hash state; updated byte by byte in `write`.
    hash: u32,
}

impl SteamDrmHasher {
    /// Folds `bytes` into the running hash, MSB-first, one bit at a time,
    /// XORing in the `SCRAMBLE` polynomial whenever the top bit is set
    /// (the classic bitwise CRC update loop).
    fn write(&mut self, bytes: &[u8]) {
        const SCRAMBLE: u32 = 0x488781ed;

        for &b in bytes {
            self.hash ^= (b as u32) << 0x18;
            for _ in 0..8 {
                if self.hash & 0x8000_0000 == 0 {
                    self.hash <<= 1;
                }
                else {
                    self.hash = (self.hash << 1) ^ SCRAMBLE;
                }
            }
        }
    }

    /// Returns the hash accumulated so far.
    fn finish(&self) -> u32 {
        self.hash
    }
}

/// DRM configuration flags stored in the SteamStub header.
///
/// Bit positions are protocol-defined; do not reorder.
#[bitfield_struct::bitfield(u32)]
struct SteamDrmFlags {
    _unused_0: bool,
    no_module_verification: bool,
    no_encryption: bool,
    _unused_1: bool,
    no_ownership_check: bool,
    no_debugger_check: bool,
    no_error_dialog: bool,
    #[bits(25)]
    _unused_2: u32,
}

impl SteamDrmFlags {
    /// Sets every anti-tamper/anti-debug "skip" flag so SteamStub bypasses those checks.
    pub fn clear_protection_flags(&mut self) {
        self.set_no_module_verification(true);
        self.set_no_ownership_check(true);
        self.set_no_debugger_check(true);
    }
}

/// SteamStub 3.1 header data.
///
/// Derived from atom0s's [Steamless](https://github.com/atom0s/Steamless/blob/master/Steamless.Unpacker.Variant31.x64/Classes/SteamStubHeader.cs)
/// source code, with additional fields reversed.
#[repr(C)]
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
struct SteamStubHeader {
    xor_key: u32,
    signature: u32,
    image_base: u64,
    steamstub_entry_point: u64,
    bind_section_ep_offset: u32,
    steamstub_ep_code_size: u32,
    original_entry_point: u64,
    strings_bind_offset: u32,
    strings_data_size: u32,
    drmp_dll_bind_offset: u32,
    drmp_dll_size: u32,
    steam_app_id: u32,
    drm_flags: SteamDrmFlags,
    bind_section_virtual_size: u32,
    integrity_hash: u32,
    code_section_virtual_address: u64,
    code_section_size: u64,
    aes_key: [u8; 32],
    aes_iv: [u8; 16],
    code_section_bytes: [u8; 16],
    drmp_xtea_key: [u32; 4],
    unk_a8: [u32; 8],
    get_module_handle_a_rva: u64,
    get_module_handle_w_rva: u64,
    load_library_a_rva: u64,
    load_library_w_rva: u64,
    get_proc_address_rva: u64,
}

// SAFETY: `SteamStubHeader` is `#[repr(C)]`, contains only integer fields/arrays
// (`SteamDrmFlags` is a `u32` bitfield) with no padding, and any bit pattern is valid.
unsafe impl bytemuck::Zeroable for SteamStubHeader {}
unsafe impl bytemuck::Pod for SteamStubHeader {}
unsafe impl pelite::Pod for SteamStubHeader {}

/// Decrypted view of a SteamStub-protected PE, together with references to the
/// encrypted in-image data that will be patched in place.
struct SteamStubContext<'a> {
    header: SteamStubHeader,
    decrypted_drmp_dll: Vec<u8>,
    decrypted_strings: Vec<u8>,
    encrypted_header: &'a SteamStubHeader,
    encrypted_strings: &'a [u8],
    pe: PeView<'a>,
}

impl<'a> SteamStubContext<'a> {
    fn from_pe_inner(
        pe: PeView<'a>,
        encrypted_header: &'a SteamStubHeader,
    ) -> pelite::Result<Self> {
        let mut header = *encrypted_header;
        let strings_table_key = header.decrypt();

        Ok(Self {
            encrypted_strings: header.strings(pe)?,
            decrypted_strings: header.decrypt_strings(pe, strings_table_key)?,
            decrypted_drmp_dll: header.decrypt_drmp_dll(pe)?,
            encrypted_header,
            header,
            pe,
        })
    }

    /// Returns `None` when SteamStub is not present in the PE, and the decryption
    /// result otherwise.
    pub fn from_pe(pe: PeView<'a>) -> Option<pelite::Result<Self>> {
        Some(Self::from_pe_inner(
            pe,
            SteamStubHeader::from_pe_encrypted(pe)?,
        ))
    }

    /// Recompute the steamstub integrity hash.
    pub fn recompute_hash(&mut self) -> pelite::Result<u32> {
        let code_rva = self.pe.optional_header().AddressOfEntryPoint;
        let aligned_ep_code_size = self.header.steamstub_ep_code_size.next_multiple_of(16) as usize;

        let mut hasher = SteamDrmHasher::default();
        // The hash field is zeroed while hashing the header itself.
        self.header.integrity_hash = 0;

        hasher.write(&self.decrypted_strings);
        hasher.write(bytemuck::bytes_of(&self.header));
        hasher.write(self.pe.derva_slice(code_rva, aligned_ep_code_size)?);
        hasher.write(&self.decrypted_drmp_dll);

        self.header.integrity_hash = hasher.finish();
        Ok(self.header.integrity_hash)
    }

    /// Re-encrypt the header and strings table.
    pub fn re_encrypt(&self) -> (SteamStubHeader, Vec<u8>) {
        let mut encrypted = self.header;
        let mut encrypted_strings = self.decrypted_strings.clone();

        // Running XOR: each block is XORed with the previous *encrypted* block;
        // the key carries over from the header into the strings table.
        let mut key = 0;
        for block in bytemuck::cast_slice_mut(bytemuck::bytes_of_mut(&mut encrypted)) {
            *block ^= key;
            key = *block;
        }
        for block in encrypted_strings.chunks_exact_mut(4) {
            let new_block = bytemuck::pod_read_unaligned::<u32>(block) ^ key;
            block.copy_from_slice(bytemuck::bytes_of(&new_block));
            key = new_block;
        }

        (encrypted, encrypted_strings)
    }
}

impl SteamStubHeader {
    const EXPECTED_SIGNATURE: u32 = 0xC0DEC0DF;

    /// Reads the encrypted SteamStub header from a PE file SteamStub was applied to.
    ///
    /// To decrypt the header, make a copy and call [`Self::decrypt`].
195 | pub fn from_pe_encrypted(pe: PeView<'_>) -> Option<&Self> { 196 | const HEADER_SIZE: u32 = size_of::() as u32; 197 | 198 | let entry_rva = pe.optional_header().AddressOfEntryPoint; 199 | let encrypted: &Self = pe.derva(entry_rva - HEADER_SIZE).ok()?; 200 | (encrypted.xor_key ^ encrypted.signature == Self::EXPECTED_SIGNATURE).then_some(encrypted) 201 | } 202 | 203 | /// Decrypts the header, leaving the original XOR key in place. 204 | /// 205 | /// Returns the last 4-byte encrypted block, which is the key to use for 206 | /// [`Self::decrypt_strings`]. 207 | pub fn decrypt(&mut self) -> u32 { 208 | let mut key = 0; 209 | for block in bytemuck::cast_slice_mut(bytemuck::bytes_of_mut(self)) { 210 | let new_key = *block; 211 | *block ^= key; 212 | key = new_key; 213 | } 214 | key 215 | } 216 | 217 | pub fn drmp_dll<'a>(&self, pe: PeView<'a>) -> pelite::Result<&'a [u8]> { 218 | let entry_rva = pe.optional_header().AddressOfEntryPoint; 219 | let drmp_dll_rva = entry_rva - self.bind_section_ep_offset + self.drmp_dll_bind_offset; 220 | pe.derva_slice(drmp_dll_rva, self.drmp_dll_size as usize) 221 | } 222 | 223 | /// Decrypts the Steam DRMP dll payload. 224 | /// 225 | /// This algorithm used here is a [XTEA](https://en.wikipedia.org/wiki/XTEA) 226 | /// variant augmented with a running XOR key. 
227 | pub fn decrypt_drmp_dll(&self, pe: PeView<'_>) -> pelite::Result> { 228 | let mut drmp_dll = self.drmp_dll(pe)?.to_owned(); 229 | 230 | let key = self.drmp_xtea_key; 231 | let mut xor_key = [0x5555_5555u32; 2]; 232 | for block in drmp_dll.chunks_exact_mut(8) { 233 | const DELTA: u32 = 0x9E3779B9; 234 | let mut sum: u32 = DELTA.wrapping_mul(32); 235 | 236 | let [mut v0, mut v1]: [u32; 2] = bytemuck::pod_read_unaligned(block); 237 | let next_xor_key = [v0, v1]; 238 | 239 | for _ in 0..32 { 240 | let v1_diff = v0.wrapping_add((v0 << 4) ^ (v0 >> 5)) 241 | ^ sum.wrapping_add(key[(sum as usize >> 11) & 3]); 242 | v1 = v1.wrapping_sub(v1_diff); 243 | 244 | sum = sum.wrapping_sub(DELTA); 245 | 246 | let v0_diff = v1.wrapping_add((v1 << 4) ^ (v1 >> 5)) 247 | ^ sum.wrapping_add(key[sum as usize & 3]); 248 | v0 = v0.wrapping_sub(v0_diff); 249 | } 250 | 251 | v0 ^= xor_key[0]; 252 | v1 ^= xor_key[1]; 253 | 254 | block.copy_from_slice(bytemuck::bytes_of(&[v0, v1])); 255 | xor_key = next_xor_key; 256 | } 257 | 258 | Ok(drmp_dll) 259 | } 260 | 261 | pub fn strings<'a>(&self, pe: PeView<'a>) -> pelite::Result<&'a [u8]> { 262 | let entry_rva = pe.optional_header().AddressOfEntryPoint; 263 | let strings_rva = entry_rva - self.bind_section_ep_offset + self.strings_bind_offset; 264 | let aligned_strings_size = self.strings_data_size.next_multiple_of(16) as usize; 265 | pe.derva_slice(strings_rva, aligned_strings_size) 266 | } 267 | 268 | pub fn decrypt_strings(&self, pe: PeView<'_>, mut key: u32) -> pelite::Result> { 269 | let mut strings = self.strings(pe)?.to_owned(); 270 | 271 | for block in strings.chunks_exact_mut(4) { 272 | let next_key: u32 = bytemuck::pod_read_unaligned(block); 273 | block.copy_from_slice(bytemuck::bytes_of(&(key ^ next_key))); 274 | key = next_key; 275 | } 276 | 277 | Ok(strings) 278 | } 279 | } 280 | 281 | #[derive(Debug, Clone, Copy)] 282 | pub struct SteamstubStatus { 283 | pub original_entry_point: u64, 284 | pub blocking_entry_point: bool, 285 | 
    #[allow(dead_code)]
    pub is_present: bool,
}

/// If present, patches the SteamStub 3.1 header so that all anti-tamper protections are disabled,
/// then invokes `callback` once SteamStub finishes unpacking the game.
///
/// The callback runs immediately if SteamStub is not detected.
///
/// If SteamStub is detected, the callback is *almost* guaranteed to execute after the unpacking
/// routine has finished. When the function is called before the process entry point runs, this
/// blocks said entry point until the callback has run. When called after, it runs in another thread
/// which synchronizes with the entry point using the value of the GS cookie. On non-MSVC
/// toolchains, the analysis for identifying the cookie value may fail and result in the program
/// taking a conservative, arbitrary wait instead.
///
/// The callback receives a [`SteamstubStatus`] struct which it can use to determine the original
/// program entry point before SteamStub was applied, whether SteamStub is present, and whether
/// entry point execution is being blocked.
///
/// # Panics
/// If called more than once.
///
/// # Safety
/// When run before the process entry point, this function patches the SteamStub headers and
/// replaces the OEP value in said header. As such, it can race with code that attempts to do the
/// same thing.
312 | pub unsafe fn neuter_steamstub(callback: impl FnOnce(SteamstubStatus) + Send + 'static) { 313 | static CALLED: AtomicBool = AtomicBool::new(false); 314 | if CALLED.swap(true, Ordering::Relaxed) { 315 | panic!("schedule_after_steamstub must not be called more than once"); 316 | } 317 | 318 | let blocking = process_main_thread().is_none_or(|t| is_created_suspended(t.raw())); 319 | 320 | let game = game(); 321 | let base = game.pe.optional_header().ImageBase; 322 | let opt_header = game.pe.optional_header(); 323 | let entry_point = opt_header.ImageBase + opt_header.AddressOfEntryPoint as u64; 324 | 325 | let mut steamstub_ctx = match SteamStubContext::from_pe(game.pe) { 326 | None => { 327 | log::debug!("SteamStub not detected, running callback immediately"); 328 | callback(SteamstubStatus { 329 | is_present: false, 330 | original_entry_point: entry_point, 331 | blocking_entry_point: blocking, 332 | }); 333 | return; 334 | } 335 | Some(Ok(ctx)) => ctx, 336 | Some(Err(err)) => panic!("got pelite error while evaluating steamstub ctx: {err}"), 337 | }; 338 | 339 | log::debug!("SteamStub detected"); 340 | 341 | let original_entry_point = base + steamstub_ctx.header.original_entry_point; 342 | if !blocking { 343 | std::thread::spawn(move || { 344 | if let Err(err) = unsafe { wait_for_gs_cookie(None) } { 345 | log::warn!("failed to wait for entry point initialization: {err}"); 346 | log::warn!("sleeping for an arbitrary period instead"); 347 | std::thread::sleep(Duration::from_secs(1)); 348 | } 349 | callback(SteamstubStatus { 350 | original_entry_point, 351 | blocking_entry_point: false, 352 | is_present: true, 353 | }) 354 | }); 355 | return; 356 | } 357 | 358 | log::debug!( 359 | "clearing SteamStub protection flags, original values: {:#?}", 360 | steamstub_ctx.header.drm_flags 361 | ); 362 | steamstub_ctx.header.drm_flags.clear_protection_flags(); 363 | 364 | log::debug!("swapping steamstub header OEP with user callback"); 365 | 366 | let bare_callback = 
BareFnOnce::new_c(move || { 367 | callback(SteamstubStatus { 368 | original_entry_point, 369 | blocking_entry_point: blocking, 370 | is_present: true, 371 | }); 372 | let ep_call: extern "C" fn() -> u64 = unsafe { std::mem::transmute(original_entry_point) }; 373 | ep_call() 374 | }) 375 | .leak(); 376 | 377 | steamstub_ctx.header.original_entry_point = (bare_callback as usize as u64).wrapping_sub(base); 378 | 379 | steamstub_ctx.recompute_hash().unwrap(); 380 | let (new_header, new_strings) = steamstub_ctx.re_encrypt(); 381 | 382 | // writing to immutable refs is bad, but pelite's PeView already borrows all that memory 383 | // so writing to the game's memory is UB no matter what :) 384 | unsafe { 385 | util::with_rwx_ptr( 386 | steamstub_ctx.encrypted_header as *const _ as *mut SteamStubHeader, 387 | |p| p.write(new_header), 388 | ); 389 | util::with_rwx_ptr(steamstub_ctx.encrypted_strings.as_ptr().cast_mut(), |p| { 390 | std::ptr::copy_nonoverlapping(new_strings.as_ptr(), p, new_strings.len()); 391 | }); 392 | } 393 | } 394 | -------------------------------------------------------------------------------- /src/disabler/mod.rs: -------------------------------------------------------------------------------- 1 | //! Provides utilities for neutering Arxan. 2 | //! 3 | //!
4 | //! 5 | //! Many DLL injectors or mod launchers do not suspend the process upon creation or otherwise 6 | //! provide a method to execute your code before the game's entry point is invoked. The crate 7 | //! supports these loaders on a best-effort basis, but it is **strongly** recommended to use 8 | //! one that loads mods before the game's entry point runs. 9 | //! 10 | //!
11 | //! 12 | //! Example usage: 13 | //! ```no_run 14 | //! use dearxan::disabler::neuter_arxan; 15 | //! 16 | //! unsafe fn my_entry_point() { 17 | //! unsafe { 18 | //! neuter_arxan(|result| { 19 | //! println!("Arxan disabled!"); 20 | //! // This is a good place to do your hooks. 21 | //! // Once this callback returns, the game's true entry point 22 | //! // will be invoked. 23 | //! }); 24 | //! } 25 | //! } 26 | //! ``` 27 | //! 28 | //! # Debugging 29 | //! If the `instrument_stubs` feature is enabled, patched Arxan stubs will log their first 30 | //! execution with the [`log::Level::Trace`] severity. 31 | 32 | use std::{ 33 | io::Write, 34 | sync::{ 35 | Once, 36 | atomic::{AtomicBool, AtomicU32, Ordering}, 37 | }, 38 | time::Instant, 39 | }; 40 | 41 | use call_hook::CallHook; 42 | use closure_ffi::BareFnOnce; 43 | use pelite::pe64::{Pe, PeObject, PeView}; 44 | use windows_sys::Win32::System::Memory::{PAGE_EXECUTE_READWRITE, VirtualProtect}; 45 | 46 | use crate::disabler::slist::SList; 47 | use crate::disabler::steamstub::neuter_steamstub; 48 | use crate::disabler::{ 49 | entry_point::wait_for_gs_cookie, 50 | result::{DearxanResult, Status}, 51 | }; 52 | use crate::patch::ArxanPatch; 53 | use crate::{ 54 | analysis::entry_point::MsvcEntryPoint, 55 | disabler::entry_point::{is_created_suspended, process_main_thread}, 56 | }; 57 | 58 | mod call_hook; 59 | mod code_buffer; 60 | mod entry_point; 61 | pub mod ffi; 62 | mod game; 63 | mod lazy_global; 64 | pub mod result; 65 | mod slist; 66 | mod steamstub; 67 | mod util; 68 | 69 | use code_buffer::CodeBuffer; 70 | use game::game; 71 | use lazy_global::lazy_global; 72 | 73 | /// Single function to neuter all of Arxan's checks. 74 | /// 75 | /// The callback will be invoked with a [`DearxanResult`] which contains fields indicating whether 76 | /// Arxan was detected and whether entry point execution is being blocked while the callback is 77 | /// running. 
Modulo any reported error, it is safe to assume that Arxan has been disabled once it is 78 | /// executed. 79 | /// 80 | /// Handles SteamStub 3.1 possibly being applied on top of Arxan. 81 | /// 82 | /// # Safety 83 | /// 84 | /// This function applies code patches derived from imperfect binary analysis to the program. 85 | /// Although extremely unlikely, it is theoretically possible for code to be falsely identified as 86 | /// an Arxan stub and incorrectly patched, which will lead to all kinds of UB. 87 | /// 88 | /// While best-effort synchronization with the entry point is performed when this function is 89 | /// called after it has started executing, it is not perfect and may lead to race conditions. 90 | /// For this reason it is **strongly** recommended to use a mod loader that creates the game process 91 | /// as suspended. 92 | pub unsafe fn neuter_arxan(callback: F) 93 | where 94 | F: FnOnce(DearxanResult) + Send + 'static, 95 | { 96 | // Functions are carefully wrapped in `std::panic::catch_unwind` to avoid entry point panics! 97 | lazy_global! 
{ 98 | static DEARXAN_NEUTER_ARXAN_RESULT: ffi::DearxanResult = unsafe { 99 | result::from_maybe_panic(|| neuter_arxan_inner()).into() 100 | }; 101 | } 102 | 103 | // Backwards compatibility jank -- we should have made `lazy_global` 104 | // function more like a `LazyLock` whose constructor takes an argument 105 | static NEEDS_SUSPEND: AtomicBool = AtomicBool::new(false); 106 | 107 | unsafe fn neuter_arxan_inner() -> Result> { 108 | static CALLED: AtomicBool = AtomicBool::new(false); 109 | if CALLED.swap(true, Ordering::Relaxed) { 110 | panic!("neuter_arxan_inner must not be called more than once"); 111 | } 112 | 113 | let _suspend_guard: Option = NEEDS_SUSPEND 114 | .load(Ordering::SeqCst) 115 | .then(|| unsafe { entry_point::SuspendGuard::suspend_all_threads() }); 116 | 117 | let game = game(); 118 | unsafe { 119 | make_module_rwe(game.pe); 120 | } 121 | 122 | let analysis_time = Instant::now(); 123 | log::info!("analyzing Arxan stubs"); 124 | 125 | let analysis_results = crate::analysis::analyze_all_stubs(game.pe); 126 | let num_found = analysis_results.len(); 127 | log::info!( 128 | "analysis completed in {:.3?}. 
{} stubs found", 129 | analysis_time.elapsed(), 130 | num_found 131 | ); 132 | 133 | let good_stubs: Vec<_> = analysis_results 134 | .into_iter() 135 | .filter_map(|maybe_stub| maybe_stub.inspect_err(|err| log::error!("{err}")).ok()) 136 | .collect(); 137 | 138 | if good_stubs.len() != num_found { 139 | return Err("failed to generate patches for all stubs".into()); 140 | } 141 | 142 | log::info!("generating patches"); 143 | let patch_gen_time = Instant::now(); 144 | let patches = crate::patch::ArxanPatch::build_from_stubs( 145 | game.pe, 146 | Some(game.preferred_base), 147 | good_stubs.iter(), 148 | )?; 149 | 150 | log::info!( 151 | "generated {} patches in {:.3?}", 152 | patches.len(), 153 | patch_gen_time.elapsed() 154 | ); 155 | 156 | log::info!("applying patches"); 157 | let patch_apply_time = Instant::now(); 158 | for patch in &patches { 159 | unsafe { 160 | apply_patch(patch, &game.hook_buffer); 161 | } 162 | } 163 | log::info!( 164 | "all patches applied in {:.3?}. Arxan is now neutered", 165 | patch_apply_time.elapsed() 166 | ); 167 | 168 | Ok(Status { 169 | is_arxan_detected: true, 170 | is_executing_entrypoint: true, 171 | }) 172 | } 173 | 174 | unsafe { 175 | schedule_after_arxan(move |is_present, is_executing_entrypoint: bool| { 176 | NEEDS_SUSPEND.store(!is_executing_entrypoint, Ordering::SeqCst); 177 | let result = if is_present { 178 | result::from_maybe_panic(|| { 179 | ffi::DearxanResult::from_global(&DEARXAN_NEUTER_ARXAN_RESULT).into() 180 | }) 181 | } 182 | else { 183 | Ok(Status { 184 | is_arxan_detected: false, 185 | is_executing_entrypoint, 186 | }) 187 | }; 188 | 189 | log::debug!("invoking user callback"); 190 | callback(result.map(|s| Status { 191 | is_executing_entrypoint, 192 | ..s 193 | })); 194 | }) 195 | }; 196 | } 197 | 198 | /// Schedule a callback to run right after the Arxan entry point stub terminates, in lockstep with 199 | /// the executable's main entry point. 
200 | /// 201 | /// If Arxan is not present, will try to run the callback after the executable's 202 | /// `__security_init_cookie` has finished running, which is right before the main entry point. 203 | /// This may fail if the executable was built with a non-MSVC CRT, in which case the callback 204 | /// will be run immediately in a separate thread. 205 | /// 206 | /// The callback receives the following: 207 | /// - a boolean indicating whether Arxan was detected 208 | /// - a boolean indicating whether execution is blocking the entry point. 209 | /// 210 | /// Handles SteamStub 3.1 possibly being applied on top of Arxan. 211 | /// 212 | /// # Safety 213 | /// 214 | /// This function may apply code and memory patches to the program depending on various checks, 215 | /// such as patching the SteamStub headers if it is present. Although it is extremely unlikely for a 216 | /// patch to be incorrectly applied, this is a fundamentally unsafe operation and may lead to all 217 | /// kinds of UB. 218 | pub unsafe fn schedule_after_arxan(callback: F) 219 | where 220 | F: FnOnce(bool, bool) + Send + 'static, 221 | { 222 | #[repr(C)] 223 | struct Ctx { 224 | callbacks: SList, 225 | wait_done: AtomicU32, 226 | is_present: AtomicBool, 227 | } 228 | 229 | lazy_global! 
{ 230 | static DEARXAN_SCHEDULED_AFTER_ARXAN: Ctx = { 231 | unsafe { schedule_after_arxan_inner(); } 232 | Ctx { 233 | callbacks: SList::new(), 234 | wait_done: AtomicU32::new(0), 235 | is_present: AtomicBool::new(false) 236 | } 237 | }; 238 | } 239 | static CALLBACK_PUSHED: Once = Once::new(); 240 | 241 | fn first_callback_flush(is_present: bool, is_blocking: bool) { 242 | let ctx = unsafe { &*DEARXAN_SCHEDULED_AFTER_ARXAN.0 }; 243 | 244 | ctx.is_present.store(false, Ordering::SeqCst); 245 | 246 | let callbacks = ctx.callbacks.flush(); 247 | for callback in callbacks { 248 | unsafe { callback(is_present, is_blocking) }; 249 | } 250 | 251 | ctx.wait_done.store(1, Ordering::SeqCst); 252 | atomic_wait::wake_all(&ctx.wait_done); 253 | } 254 | 255 | unsafe fn schedule_after_arxan_inner() { 256 | static CALLED: AtomicBool = AtomicBool::new(false); 257 | if CALLED.swap(true, Ordering::Relaxed) { 258 | panic!("schedule_after_arxan_inner must not be called more than once"); 259 | } 260 | 261 | unsafe { 262 | neuter_steamstub(move |result| { 263 | let Some(msvc_ep) = 264 | MsvcEntryPoint::try_from_va(game().pe, result.original_entry_point) 265 | else { 266 | log::warn!( 267 | "non-msvc entry point detected. 
Assuming Arxan was not applied to this binary" 268 | ); 269 | log::warn!("callbacks will *not* be synchronized with the entry point"); 270 | 271 | std::thread::spawn(move || { 272 | // Avoid potential race condition where callback is pushed after the flush 273 | CALLBACK_PUSHED.wait(); 274 | first_callback_flush(false, false); 275 | }); 276 | return; 277 | }; 278 | 279 | log::info!("arxan detected: {}", msvc_ep.is_arxan_hooked); 280 | if !result.blocking_entry_point { 281 | log::warn!("schedule_after_arxan run after the process entry point"); 282 | log::warn!("callbacks will race with game initialization"); 283 | std::thread::spawn(move || { 284 | // This shouldn't panic, as we already know we have a MSVC entry point 285 | wait_for_gs_cookie(None).unwrap(); 286 | 287 | log::debug!("arxan entry point finished, flushing callback functions"); 288 | // Note: No CALLBACK_PUSHED race condition here: `blocking_entry_point` is 289 | // false, so the same check after pushing the callback will be true and 290 | // another flush will be triggered 291 | first_callback_flush(msvc_ep.is_arxan_hooked, false); 292 | }); 293 | return; 294 | } 295 | 296 | // Call hook `__security_init_cookie`, which is where Arxan inserted its entry stubs 297 | let security_init_cookie_hook = 298 | &*Box::leak(Box::new(CallHook::::new( 299 | (result.original_entry_point + 4) as *mut u8, 300 | ))); 301 | 302 | let detour = BareFnOnce::new_c_in( 303 | move || { 304 | log::debug!("removing __security_init_cookie entry point hook"); 305 | security_init_cookie_hook.unhook(); 306 | // TODO: Fully reverse the entry point so this is not necessary 307 | log::debug!( 308 | "calling __security_init_cookie (will run Arxan initialization routines)" 309 | ); 310 | security_init_cookie_hook.original()(); 311 | log::debug!("flushing callback functions"); 312 | 313 | first_callback_flush(msvc_ep.is_arxan_hooked, true); 314 | }, 315 | &game().hook_buffer, 316 | ); 317 | 318 | log::debug!("detouring entry point via 
__security_init_cookie call hook"); 319 | security_init_cookie_hook.hook_with(detour.leak()); 320 | }) 321 | } 322 | } 323 | 324 | // Only use callbacks here, as older versions of DEARXAN_SCHEDULED_AFTER_ARXAN may not have the 325 | // is_present and wait_done fields 326 | let ctx = unsafe { &*DEARXAN_SCHEDULED_AFTER_ARXAN.0 }; 327 | let bare_callback = BareFnOnce::new_c(callback); 328 | ctx.callbacks.push(bare_callback.leak()); 329 | CALLBACK_PUSHED.call_once(|| {}); 330 | 331 | if !process_main_thread().is_none_or(|t| is_created_suspended(t.raw())) { 332 | if DEARXAN_SCHEDULED_AFTER_ARXAN.1 < size_of::() { 333 | log::error!( 334 | "module that initialized the schedule_after_arxan state does not support post-entry-point calls" 335 | ); 336 | log::error!("the schedule_after_arxan callback might never be run!"); 337 | return; 338 | } 339 | 340 | log::warn!("schedule_after_arxan run after the process entry point"); 341 | log::warn!("callbacks will race with game initialization"); 342 | 343 | std::thread::spawn(|| { 344 | while ctx.wait_done.load(Ordering::SeqCst) != 1 { 345 | atomic_wait::wait(&ctx.wait_done, 0); 346 | } 347 | log::debug!("flushing callback functions"); 348 | 349 | let is_present = ctx.is_present.load(Ordering::SeqCst); 350 | let callbacks = ctx.callbacks.flush(); 351 | for callback in callbacks { 352 | unsafe { callback(is_present, false) }; 353 | } 354 | }); 355 | } 356 | } 357 | 358 | unsafe fn apply_patch(patch: &ArxanPatch, code_buf: &CodeBuffer) { 359 | match patch { 360 | ArxanPatch::JmpHook { target, pic } => { 361 | #[cfg(feature = "instrument_stubs")] 362 | let instrumented: Vec<_> = stub_instrumentation::emit_log_call(*target) 363 | .unwrap() 364 | .into_iter() 365 | .chain(pic.iter().copied()) 366 | .collect(); 367 | #[cfg(feature = "instrument_stubs")] 368 | let pic = &instrumented; 369 | 370 | let hook = code_buf.write(pic).unwrap().addr() as i64; 371 | let jmp_immediate: i32 = hook.wrapping_sub(*target as i64 + 5).try_into().unwrap(); 
372 | 373 | let mut hook_site = unsafe { std::slice::from_raw_parts_mut(*target as *mut u8, 5) }; 374 | hook_site.write_all(&[0xE9]).unwrap(); 375 | hook_site.write_all(&jmp_immediate.to_le_bytes()).unwrap(); 376 | 377 | log::trace!("patched arxan stub at {:016x}", *target); 378 | } 379 | ArxanPatch::Write { va, bytes } => { 380 | unsafe { 381 | core::ptr::copy_nonoverlapping(bytes.as_ptr(), *va as *mut u8, bytes.len()); 382 | } 383 | log::trace!("wrote {} bytes to {va:x}", bytes.len()) 384 | } 385 | } 386 | } 387 | 388 | #[cfg(feature = "instrument_stubs")] 389 | mod stub_instrumentation { 390 | use std::option::Option::None; 391 | use std::sync::Mutex; 392 | 393 | use fxhash::FxHashSet; 394 | use iced_x86::{ 395 | BlockEncoder, BlockEncoderOptions, Code, IcedError, Instruction, InstructionBlock, 396 | MemoryOperand, Register::*, 397 | }; 398 | 399 | unsafe extern "C" fn log_arxan_stub(hook_addr: u64, rsp: u64) { 400 | static CALLED_STUBS: Mutex>> = Mutex::new(None); 401 | let mut maybe_map = CALLED_STUBS.lock().unwrap(); 402 | if maybe_map.get_or_insert_default().insert(hook_addr) { 403 | log::debug!("Stub for {hook_addr:016x} called | RSP = {rsp:016x}"); 404 | } 405 | } 406 | 407 | pub fn emit_log_call(hook_addr: u64) -> Result, IcedError> { 408 | #[allow(clippy::fn_to_numeric_cast)] 409 | let log_stub_instructions = [ 410 | Instruction::with2(Code::Mov_r64_rm64, RDX, RSP)?, 411 | Instruction::with2(Code::And_rm64_imm8, RSP, -0x10i64)?, 412 | Instruction::with1(Code::Push_rm64, RDX)?, 413 | Instruction::with2(Code::Sub_rm64_imm8, RSP, 0x28)?, 414 | Instruction::with2(Code::Mov_r64_imm64, RCX, hook_addr)?, 415 | Instruction::with2(Code::Mov_r64_imm64, RAX, log_arxan_stub as u64)?, 416 | Instruction::with1(Code::Call_rm64, RAX)?, 417 | Instruction::with2( 418 | Code::Mov_r64_rm64, 419 | RSP, 420 | MemoryOperand::with_base_displ(RSP, 0x28), 421 | )?, 422 | ]; 423 | let encoded = BlockEncoder::encode( 424 | 64, 425 | InstructionBlock::new(&log_stub_instructions, 0), 
426 | BlockEncoderOptions::NONE, 427 | )?; 428 | Ok(encoded.code_buffer) 429 | } 430 | } 431 | 432 | unsafe fn make_module_rwe(pe: PeView<'_>) { 433 | log::debug!("setting game executable page protection flags to RWX"); 434 | 435 | let base = pe.image().as_ptr().addr(); 436 | for section in pe.section_headers() { 437 | let rva_range = section.virtual_range(); 438 | let len = (rva_range.end - rva_range.start) as usize; 439 | 440 | let mut protect = Default::default(); 441 | if 0 == unsafe { 442 | VirtualProtect( 443 | (base + rva_range.start as usize) as *const _, 444 | len, 445 | PAGE_EXECUTE_READWRITE, 446 | &mut protect, 447 | ) 448 | } { 449 | panic!( 450 | "VirtualProtect failed on address {:x} and length {len}", 451 | base + rva_range.start as usize 452 | ); 453 | } 454 | } 455 | } 456 | -------------------------------------------------------------------------------- /src/analysis/encryption.rs: -------------------------------------------------------------------------------- 1 | //! Algorithms and data structures used to deal with Arxan's at-rest encryption of game functions 2 | //! and data. 3 | //! 4 | //! Given a a function or some other not-necessarily-contiguous static data, Arxan may 5 | //! be used to encrypt it at rest to make reverse engineering harder. Such encrypted regions can 6 | //! then be dynamically decrypted whenever the data/code is needed and re-"encrypted" immediately 7 | //! afterwards. 8 | //! 9 | //! The result of this process is the creation of two function-like Arxan stubs: one is called 10 | //! before the code/data needs to be accessed, and the other after to replace it with garbage bytes. 11 | //! 12 | //! There are two types of Arxan encryption: TEA and RMX (rotate-multiply-xor). 13 | //! 14 | //! Stubs of both types first recover a list of (offset, size) pairs, each 15 | //! representing a contiguous region to be decrypted. These pairs are encoded as 7-bit 16 | //! 
//! variable-length integers (varints) where the high bit is used as a terminator, and the initial
//! offset is the base of the executable image. A running offset of [`u32::MAX`] indicates the end
//! of the list. TEA stubs encrypt this offset list using TEA with a per-stub hardcoded key.
//!
//!
//! The ciphertext for these regions is stored as a single contiguous blob, encrypted with either
//! TEA or RMX (in both cases using a hardcoded key). After a region is parsed from the varint list,
//! the corresponding ciphertext bytes will be decrypted and copied to it.
//!
//! The "encryption" process is exactly the same, except that the ciphertext used decrypts to random
//! garbage bytes. These bytes seem to be uniformly distributed and can thus be effectively
//! identified by calculating their Shannon entropy. In fact, when an "encryption" stub is
//! instantiated across multiple translation units, different random bytes are used.
//!
//! The static data structures described above are modeled through the [`EncryptedRegion`] and
//! [`EncryptedRegionList`] types.

use std::{
    io::{self, Read},
    marker::PhantomData,
};

use crate::analysis::{ImageView, vm::image::BadRelocsError};

/// Abstraction over a decryption algorithm operating in fixed-size blocks.
pub trait Decryptor {
    /// The cipher's block type (e.g. `[u32; 2]` for TEA).
    ///
    /// Must be plain-old-data so a block can be reinterpreted as raw bytes.
    type Block: bytemuck::Pod;

    /// Decrypt a single block in place, updating the decryptor's state.
    fn decrypt(&mut self, block: &mut Self::Block);
}

/// [`Decryptor`] wrapper which decrypts an arbitrary [`io::Read`] stream.
50 | pub struct DecryptReader { 51 | reader: R, 52 | decryptor: D, 53 | block_buffer: D::Block, 54 | consumed: usize, 55 | } 56 | 57 | impl DecryptReader { 58 | const BLOCK_SIZE: usize = size_of::(); 59 | 60 | /// Create a [`DecryptReader`] from an [`io::Read`] implementationa and a decryptor. 61 | pub fn new(reader: R, decryptor: D) -> Self { 62 | Self { 63 | reader, 64 | decryptor, 65 | block_buffer: bytemuck::Zeroable::zeroed(), 66 | consumed: size_of::(), 67 | } 68 | } 69 | } 70 | 71 | impl Read for DecryptReader { 72 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 73 | if self.consumed == Self::BLOCK_SIZE { 74 | self.reader.read_exact(bytemuck::bytes_of_mut(&mut self.block_buffer))?; 75 | self.decryptor.decrypt(&mut self.block_buffer); 76 | self.consumed = 0; 77 | } 78 | 79 | let to_read = (Self::BLOCK_SIZE - self.consumed).min(buf.len()); 80 | buf[..to_read].copy_from_slice( 81 | &bytemuck::bytes_of(&self.block_buffer)[self.consumed..self.consumed + to_read], 82 | ); 83 | self.consumed += to_read; 84 | Ok(to_read) 85 | } 86 | } 87 | 88 | /// wrapper around a block decrypt function that implements [`Decryptor`]. 89 | pub struct FnDecryptor(F, PhantomData); 90 | 91 | impl FnDecryptor { 92 | pub fn new(fun: F) -> Self { 93 | Self(fun, PhantomData) 94 | } 95 | } 96 | 97 | impl Decryptor for FnDecryptor { 98 | type Block = B; 99 | 100 | fn decrypt(&mut self, block: &mut Self::Block) { 101 | self.0(block) 102 | } 103 | } 104 | 105 | /// Encryption algorithms used by Arxan to obfuscate static data. 106 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 107 | pub enum ArxanDecryptionKind { 108 | /// Standard 32-round TEA. 109 | Tea, 110 | /// Custom rotate-multiply-xor algorithm. 111 | Rmx, 112 | /// Simple subtraction from a constant. 113 | Sub, 114 | } 115 | 116 | /// 32-round TEA (Tiny Encryption Algorithm) decryptor. 
117 | pub fn tea_decryptor(key: &[u8; 16]) -> impl Decryptor { 118 | let key: [u32; 4] = bytemuck::pod_read_unaligned(key); 119 | FnDecryptor::new(move |block: &mut [u32; 2]| { 120 | const NUM_ROUNDS: u32 = 32; 121 | const DELTA: u32 = 0x9E3779B9; 122 | let mut sum = 0xC6EF3720; 123 | 124 | fn fiestel_round(b1: u32, b2: &mut u32, k1: u32, k2: u32, sum: u32) { 125 | let k1_term = (b1 << 4).wrapping_add(k1) ^ b1.wrapping_add(sum); 126 | let k2_term = (b1 >> 5).wrapping_add(k2); 127 | *b2 = b2.wrapping_sub(k1_term ^ k2_term); 128 | } 129 | 130 | for _ in 0..NUM_ROUNDS { 131 | fiestel_round(block[0], &mut block[1], key[2], key[3], sum); 132 | fiestel_round(block[1], &mut block[0], key[0], key[1], sum); 133 | sum = sum.wrapping_sub(DELTA); 134 | } 135 | }) 136 | } 137 | 138 | /// Rotate-multiply-xor decryptor. 139 | /// 140 | /// This algorithm seems to have been invented by the Arxan developers. It is reminiscent of ARX 141 | /// ciphers, but uses multiplication. Its cryptographic security seems poor. 142 | pub fn rmx_decryptor(mut key: u32) -> impl Decryptor { 143 | let mut key_rot = key & 0x1f; 144 | 145 | FnDecryptor::new(move |block: &mut u32| { 146 | key = key.rotate_left(key_rot); 147 | *block = block.wrapping_sub(key.wrapping_mul(key_rot)); 148 | key_rot ^= !*block; 149 | }) 150 | } 151 | 152 | /// Subtraction decryptor. 153 | /// 154 | /// This is more obfuscation than encryption. Blocks of 4 bytes are subtracted from a constant 155 | /// "key". 156 | pub fn sub_decryptor(key: u32) -> impl Decryptor { 157 | FnDecryptor::new(move |block| *block = key.wrapping_sub(*block)) 158 | } 159 | 160 | /// Try to parse a 32-bit unsigned integer encoded as a varint. 161 | /// 162 | /// On success, returns the decoded number. 
163 | pub fn try_read_varint(mut reader: impl io::Read) -> io::Result { 164 | let mut result = 0u32; 165 | let mut num_read = 0u32; 166 | 167 | let mut b = 0u8; 168 | loop { 169 | reader.read_exact(std::slice::from_mut(&mut b))?; 170 | 171 | result = (b as u32 & 0x7F) 172 | .checked_shl(7 * num_read) 173 | .and_then(|s| result.checked_add(s)) 174 | .ok_or(io::ErrorKind::InvalidData)?; 175 | 176 | num_read += 1; 177 | 178 | if b < 0x80 { 179 | return Ok(result); 180 | } 181 | } 182 | } 183 | 184 | /// A contiguous region of bytes encrypted by Arxan. 185 | /// 186 | /// See the module-level documentation for more information. 187 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] 188 | pub struct EncryptedRegion { 189 | /// Offset of the plaintext for this region in the decrypted byte stream. 190 | pub stream_offset: usize, 191 | /// The size of the region. 192 | pub size: usize, 193 | /// The relative virtual address of the region. 194 | pub rva: u32, 195 | } 196 | 197 | impl EncryptedRegion { 198 | /// Return the decrypted slice of bytes corresponding to this region, borrowing the bytes 199 | /// from its parent [`EncryptedRegionList`]. 200 | /// 201 | /// Will always return [`Some`] if `list` is the actual parent. 202 | pub fn decrypted_slice<'a>(&self, list: &'a EncryptedRegionList) -> Option<&'a [u8]> { 203 | list.decrypted_stream.get(self.stream_offset..self.stream_offset + self.size) 204 | } 205 | 206 | /// Try to extract a list of encrypted regions from a stream of varint-encoded offset size 207 | /// pairs. 208 | /// 209 | /// See the module-level documentation for more information. 
210 | pub fn try_from_varints(mut reader: impl Read) -> io::Result> { 211 | let mut regions = Vec::new(); 212 | 213 | let mut rva = 0u32; 214 | let mut stream_offset = 0usize; 215 | 216 | // as an optimization to cut down the time before an error 217 | // for false positives, disallow zero offsets/sizes 218 | loop { 219 | let offset = try_read_varint(&mut reader)?; 220 | if offset == 0 { 221 | return Err(io::ErrorKind::InvalidData.into()); 222 | } 223 | 224 | rva = rva.checked_add(offset).ok_or(io::ErrorKind::InvalidData)?; 225 | if rva == u32::MAX { 226 | return Ok(regions); 227 | } 228 | 229 | let size = try_read_varint(&mut reader)?; 230 | if size == 0 { 231 | return Err(io::ErrorKind::InvalidData.into()); 232 | } 233 | 234 | regions.push(Self { 235 | stream_offset, 236 | size: size as usize, 237 | rva, 238 | }); 239 | rva = rva.checked_add(size).ok_or(io::ErrorKind::InvalidData)?; 240 | stream_offset += size as usize; 241 | } 242 | } 243 | 244 | pub fn intersects(&self, other: &EncryptedRegion) -> bool { 245 | let end = self.rva as usize + self.size; 246 | let other_end = other.rva as usize + other.size; 247 | 248 | end.min(other_end) > self.rva.max(other.rva) as usize 249 | } 250 | } 251 | 252 | /// A list of contiguous regions encrypted by Arxan using the same TEA key paired with the decrypted 253 | /// plaintext for said regions. 254 | /// 255 | /// See the module-level documentation for more details. 256 | #[derive(Debug, Clone)] 257 | pub struct EncryptedRegionList { 258 | pub kind: ArxanDecryptionKind, 259 | pub regions: Vec, 260 | pub decrypted_stream: Vec, 261 | } 262 | 263 | impl EncryptedRegionList { 264 | /// Return the number of encrypted regions in this list. 265 | /// 266 | /// Shorthand for `self.regions.len()`. 267 | pub fn len(&self) -> usize { 268 | self.regions.len() 269 | } 270 | 271 | /// Return true if this encrypted region list is empty. 272 | /// 273 | /// Shorthand for `self.regions.is_empty()`. 
274 | pub fn is_empty(&self) -> bool { 275 | self.regions.is_empty() 276 | } 277 | 278 | pub fn try_new( 279 | kind: ArxanDecryptionKind, 280 | regions: Vec, 281 | mut decrypted_stream: impl Read, 282 | ) -> io::Result { 283 | let ctext_len = regions.last().map(|r| r.stream_offset + r.size).unwrap_or(0); 284 | 285 | let mut plaintext = vec![0; ctext_len]; 286 | decrypted_stream.read_exact(&mut plaintext)?; 287 | 288 | Ok(Self { 289 | kind, 290 | regions, 291 | decrypted_stream: plaintext, 292 | }) 293 | } 294 | } 295 | 296 | /// Compute the Shannon entropy of a sequence of bytes. 297 | /// 298 | /// This is useful to discriminate between non-random and random data, provided its length is 299 | /// sufficient. 300 | pub fn shannon_entropy(bytes: impl IntoIterator) -> f64 { 301 | let mut byte_dist = [0usize; 256]; 302 | let mut len = 0; 303 | for b in bytes { 304 | byte_dist[b as usize] += 1; 305 | len += 1; 306 | } 307 | 308 | let len_log2 = (len as f64).log2(); 309 | // -sum b/N * log2(b/N) = 1/N sum b(log2 N - log2 b) 310 | let plogp_sum: f64 = byte_dist 311 | .into_iter() 312 | .filter(|&b| b != 0) 313 | // rust-analyzer reports an error without the type hint (but not rustc) 314 | .map(|b: usize| (b as f64) * (len_log2 - (b as f64).log2())) 315 | .sum(); 316 | 317 | plogp_sum / (len as f64) 318 | } 319 | 320 | /// Apply relocs and resolve conflicts between many [`EncryptedRegionList`]. 321 | /// 322 | /// Conflict resolution is based on Shannon entropy. The region lists with lowest entropy 323 | /// are assumed to represent decrypted bytes, while any conflicting region is assumed to be 324 | /// "encrypted". 
325 | pub fn apply_relocs_and_resolve_conflicts< 326 | 'a, 327 | #[cfg(feature = "rayon")] I: ImageView + Sync, 328 | #[cfg(not(feature = "rayon"))] I: ImageView, 329 | >( 330 | region_lists: impl IntoIterator, 331 | image: I, 332 | preferred_base: Option, 333 | ) -> Result, BadRelocsError> { 334 | #[cfg(feature = "rayon")] 335 | use rayon::iter::{IntoParallelRefMutIterator, ParallelIterator}; 336 | 337 | let base_va = image.base_va(); 338 | 339 | struct ProcessedRegionList { 340 | rlist: EncryptedRegionList, 341 | entropy: f64, 342 | base_entropy: f64, 343 | eliminated: bool, 344 | } 345 | 346 | struct ContiguousRegion { 347 | rlist_index: usize, 348 | region: EncryptedRegion, 349 | } 350 | 351 | let sorted_relocs = { 352 | let mut relocs: Vec<_> = image.relocs64()?.collect(); 353 | relocs.sort(); 354 | relocs 355 | }; 356 | 357 | let region_lists = region_lists.into_iter(); 358 | let mut processed = Vec::with_capacity(region_lists.size_hint().0); 359 | let mut contiguous_regions = Vec::with_capacity(processed.capacity()); 360 | 361 | // Compute the image entropy for each non-empty region list 362 | for rlist in region_lists.filter(|r| !r.is_empty()) { 363 | let index = processed.len(); 364 | processed.push(ProcessedRegionList { 365 | entropy: 0.0, 366 | base_entropy: 0.0, 367 | rlist: rlist.clone(), 368 | eliminated: false, 369 | }); 370 | 371 | contiguous_regions.extend(rlist.regions.iter().map(|r| ContiguousRegion { 372 | rlist_index: index, 373 | region: r.clone(), 374 | })); 375 | } 376 | 377 | // sort contiguous regions by increasing rva and size 378 | // will make applying relocs and handling collisions faster 379 | contiguous_regions.sort_by_key(|r| (r.region.rva, r.region.size)); 380 | 381 | // apply relocs using single pass through the sorted relocs array 382 | // also use relocs to eliminate encrypted regions 383 | let pref_base = preferred_base.unwrap_or(base_va); 384 | let base_diff = base_va.wrapping_sub(pref_base); 385 | let mut crel = 
sorted_relocs.iter().copied().peekable(); 386 | 387 | for r in &contiguous_regions { 388 | let parent = &mut processed[r.rlist_index]; 389 | if parent.eliminated { 390 | continue; 391 | } 392 | 393 | // skip earlier relocs and stop if we exhausted them 394 | while crel.next_if(|&reloc| reloc < r.region.rva).is_some() {} 395 | if crel.peek().is_none() { 396 | break; 397 | } 398 | 399 | let region_end = r.region.rva + r.region.size as u32; 400 | for reloc in crel.clone().take_while(|&r| r + 8 <= region_end) { 401 | let offset = (reloc - r.region.rva) as usize + r.region.stream_offset; 402 | let reloc_area: &mut [u8; 8] = 403 | (&mut parent.rlist.decrypted_stream[offset..offset + 8]).try_into().unwrap(); 404 | 405 | let relocated = u64::from_le_bytes(*reloc_area).wrapping_add(base_diff); 406 | if image.read(relocated, 1).is_none() { 407 | log::trace!("rlist {} eliminated using relocs", r.rlist_index); 408 | parent.eliminated = true; 409 | break; 410 | } 411 | 412 | *reloc_area = relocated.to_le_bytes(); 413 | } 414 | } 415 | 416 | #[cfg(not(feature = "rayon"))] 417 | let not_eliminated = processed.iter_mut().filter(|p| !p.eliminated); 418 | #[cfg(feature = "rayon")] 419 | let not_eliminated = processed.par_iter_mut().filter(|p| !p.eliminated); 420 | 421 | // now that relocs have been applied, compute entropies on non-eliminated rlists 422 | // this is worth doing in parallel 423 | not_eliminated.for_each(|p| { 424 | let base_bytes_iter = p.rlist.regions.iter().flat_map(|r| { 425 | image 426 | .read(base_va + r.rva as u64, r.size) 427 | .map_or(&[] as &[u8], |s| &s[..r.size]) 428 | }); 429 | p.base_entropy = shannon_entropy(base_bytes_iter.copied()); 430 | p.entropy = shannon_entropy(p.rlist.decrypted_stream.iter().copied()); 431 | p.eliminated = p.entropy >= p.base_entropy; 432 | 433 | if !p.eliminated { 434 | log::trace!( 435 | "kind = {:?} rva = {:08x} base_entropy = {:.03} entropy = {:.03} len = {}", 436 | p.rlist.kind, 437 | p.rlist.regions[0].rva, 438 | 
p.base_entropy, 439 | p.entropy, 440 | p.rlist.decrypted_stream.len() 441 | ); 442 | } 443 | }); 444 | 445 | // use sorted contiguous regions to find intersections between region lists and eliminate 446 | // conflicting ones with high shannon entropy 447 | if let Some(i) = contiguous_regions.iter().position(|r| !processed[r.rlist_index].eliminated) { 448 | let mut best = &contiguous_regions[i]; 449 | for r in contiguous_regions.get(i + 1..).unwrap_or(&[]) { 450 | let Ok([r_rlist, best_rlist]) = 451 | processed.get_disjoint_mut([r.rlist_index, best.rlist_index]) 452 | else { 453 | // if not disjoint then they have the same rlist and don't intersect 454 | best = r; 455 | continue; 456 | }; 457 | if r_rlist.eliminated { 458 | continue; 459 | } 460 | if !best.region.intersects(&r.region) { 461 | best = r; 462 | continue; 463 | } 464 | if best_rlist.entropy > r_rlist.entropy { 465 | best_rlist.eliminated = true; 466 | best = r; 467 | } 468 | else { 469 | r_rlist.eliminated = true; 470 | } 471 | } 472 | }; 473 | 474 | Ok(processed 475 | .into_iter() 476 | .filter_map(|p| (!p.eliminated).then_some(p.rlist)) 477 | .collect()) 478 | } 479 | --------------------------------------------------------------------------------