├── .gitattributes
├── .gitignore
├── CMakeLists.txt
├── LICENSE
├── README.md
├── VMPAttack.sln
├── VMPAttack
├── CMakeLists.txt
├── VMPAttack.vcxproj
├── VMPAttack.vcxproj.filters
├── analysis_context.cpp
├── analysis_context.hpp
├── arithmetic_expression.cpp
├── arithmetic_expression.hpp
├── arithmetic_operation.cpp
├── arithmetic_operation.hpp
├── arithmetic_operation_desc.hpp
├── arithmetic_operations.hpp
├── arithmetic_utilities.hpp
├── disassembler.cpp
├── disassembler.hpp
├── flags.hpp
├── instruction.cpp
├── instruction.hpp
├── instruction_stream.cpp
├── instruction_stream.hpp
├── instruction_utilities.hpp
├── main.cpp
├── vm_analysis_context.hpp
├── vm_bridge.cpp
├── vm_bridge.hpp
├── vm_context.hpp
├── vm_handler.cpp
├── vm_handler.hpp
├── vm_instance.cpp
├── vm_instance.hpp
├── vm_instruction.cpp
├── vm_instruction.hpp
├── vm_instruction_desc.hpp
├── vm_instruction_info.hpp
├── vm_instruction_set.hpp
├── vm_state.hpp
├── vmentry.hpp
├── vmpattack.cpp
└── vmpattack.hpp
├── VMPAttack_Tester
├── Assembly.asm
├── VMPAttack_Tester.cpp
├── VMPAttack_Tester.vcxproj
└── VMPAttack_Tester.vcxproj.filters
├── entry_stub.png
└── screenshot.png
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 | ##
4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
5 |
6 | # User-specific files
7 | *.rsuser
8 | *.suo
9 | *.user
10 | *.userosscache
11 | *.sln.docstates
12 |
13 | # User-specific files (MonoDevelop/Xamarin Studio)
14 | *.userprefs
15 |
16 | # Mono auto generated files
17 | mono_crash.*
18 |
19 | # Build results
20 | [Dd]ebug/
21 | [Dd]ebugPublic/
22 | [Rr]elease/
23 | [Rr]eleases/
24 | x64/
25 | x86/
26 | [Aa][Rr][Mm]/
27 | [Aa][Rr][Mm]64/
28 | bld/
29 | [Bb]in/
30 | [Oo]bj/
31 | [Ll]og/
32 | [Ll]ogs/
33 |
34 | # Visual Studio 2015/2017 cache/options directory
35 | .vs/
36 | # Uncomment if you have tasks that create the project's static files in wwwroot
37 | #wwwroot/
38 |
39 | # Visual Studio 2017 auto generated files
40 | Generated\ Files/
41 |
42 | # MSTest test Results
43 | [Tt]est[Rr]esult*/
44 | [Bb]uild[Ll]og.*
45 |
46 | # NUnit
47 | *.VisualState.xml
48 | TestResult.xml
49 | nunit-*.xml
50 |
51 | # Build Results of an ATL Project
52 | [Dd]ebugPS/
53 | [Rr]eleasePS/
54 | dlldata.c
55 |
56 | # Benchmark Results
57 | BenchmarkDotNet.Artifacts/
58 |
59 | # .NET Core
60 | project.lock.json
61 | project.fragment.lock.json
62 | artifacts/
63 |
64 | # StyleCop
65 | StyleCopReport.xml
66 |
67 | # Files built by Visual Studio
68 | *_i.c
69 | *_p.c
70 | *_h.h
71 | *.ilk
72 | *.meta
73 | *.obj
74 | *.iobj
75 | *.pch
76 | *.pdb
77 | *.ipdb
78 | *.pgc
79 | *.pgd
80 | *.rsp
81 | *.sbr
82 | *.tlb
83 | *.tli
84 | *.tlh
85 | *.tmp
86 | *.tmp_proj
87 | *_wpftmp.csproj
88 | *.log
89 | *.vspscc
90 | *.vssscc
91 | .builds
92 | *.pidb
93 | *.svclog
94 | *.scc
95 |
96 | # Chutzpah Test files
97 | _Chutzpah*
98 |
99 | # Visual C++ cache files
100 | ipch/
101 | *.aps
102 | *.ncb
103 | *.opendb
104 | *.opensdf
105 | *.sdf
106 | *.cachefile
107 | *.VC.db
108 | *.VC.VC.opendb
109 |
110 | # Visual Studio profiler
111 | *.psess
112 | *.vsp
113 | *.vspx
114 | *.sap
115 |
116 | # Visual Studio Trace Files
117 | *.e2e
118 |
119 | # TFS 2012 Local Workspace
120 | $tf/
121 |
122 | # Guidance Automation Toolkit
123 | *.gpState
124 |
125 | # ReSharper is a .NET coding add-in
126 | _ReSharper*/
127 | *.[Rr]e[Ss]harper
128 | *.DotSettings.user
129 |
130 | # JustCode is a .NET coding add-in
131 | .JustCode
132 |
133 | # TeamCity is a build add-in
134 | _TeamCity*
135 |
136 | # DotCover is a Code Coverage Tool
137 | *.dotCover
138 |
139 | # AxoCover is a Code Coverage Tool
140 | .axoCover/*
141 | !.axoCover/settings.json
142 |
143 | # Visual Studio code coverage results
144 | *.coverage
145 | *.coveragexml
146 |
147 | # NCrunch
148 | _NCrunch_*
149 | .*crunch*.local.xml
150 | nCrunchTemp_*
151 |
152 | # MightyMoose
153 | *.mm.*
154 | AutoTest.Net/
155 |
156 | # Web workbench (sass)
157 | .sass-cache/
158 |
159 | # Installshield output folder
160 | [Ee]xpress/
161 |
162 | # DocProject is a documentation generator add-in
163 | DocProject/buildhelp/
164 | DocProject/Help/*.HxT
165 | DocProject/Help/*.HxC
166 | DocProject/Help/*.hhc
167 | DocProject/Help/*.hhk
168 | DocProject/Help/*.hhp
169 | DocProject/Help/Html2
170 | DocProject/Help/html
171 |
172 | # Click-Once directory
173 | publish/
174 |
175 | # Publish Web Output
176 | *.[Pp]ublish.xml
177 | *.azurePubxml
178 | # Note: Comment the next line if you want to checkin your web deploy settings,
179 | # but database connection strings (with potential passwords) will be unencrypted
180 | *.pubxml
181 | *.publishproj
182 |
183 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
184 | # checkin your Azure Web App publish settings, but sensitive information contained
185 | # in these scripts will be unencrypted
186 | PublishScripts/
187 |
188 | # NuGet Packages
189 | *.nupkg
190 | # NuGet Symbol Packages
191 | *.snupkg
192 | # The packages folder can be ignored because of Package Restore
193 | **/[Pp]ackages/*
194 | # except build/, which is used as an MSBuild target.
195 | !**/[Pp]ackages/build/
196 | # Uncomment if necessary however generally it will be regenerated when needed
197 | #!**/[Pp]ackages/repositories.config
198 | # NuGet v3's project.json files produces more ignorable files
199 | *.nuget.props
200 | *.nuget.targets
201 |
202 | # Microsoft Azure Build Output
203 | csx/
204 | *.build.csdef
205 |
206 | # Microsoft Azure Emulator
207 | ecf/
208 | rcf/
209 |
210 | # Windows Store app package directories and files
211 | AppPackages/
212 | BundleArtifacts/
213 | Package.StoreAssociation.xml
214 | _pkginfo.txt
215 | *.appx
216 | *.appxbundle
217 | *.appxupload
218 |
219 | # Visual Studio cache files
220 | # files ending in .cache can be ignored
221 | *.[Cc]ache
222 | # but keep track of directories ending in .cache
223 | !?*.[Cc]ache/
224 |
225 | # Others
226 | ClientBin/
227 | ~$*
228 | *~
229 | *.dbmdl
230 | *.dbproj.schemaview
231 | *.jfm
232 | *.pfx
233 | *.publishsettings
234 | orleans.codegen.cs
235 |
236 | # Including strong name files can present a security risk
237 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
238 | #*.snk
239 |
240 | # Since there are multiple workflows, uncomment next line to ignore bower_components
241 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
242 | #bower_components/
243 |
244 | # RIA/Silverlight projects
245 | Generated_Code/
246 |
247 | # Backup & report files from converting an old project file
248 | # to a newer Visual Studio version. Backup files are not needed,
249 | # because we have git ;-)
250 | _UpgradeReport_Files/
251 | Backup*/
252 | UpgradeLog*.XML
253 | UpgradeLog*.htm
254 | ServiceFabricBackup/
255 | *.rptproj.bak
256 |
257 | # SQL Server files
258 | *.mdf
259 | *.ldf
260 | *.ndf
261 |
262 | # Business Intelligence projects
263 | *.rdl.data
264 | *.bim.layout
265 | *.bim_*.settings
266 | *.rptproj.rsuser
267 | *- [Bb]ackup.rdl
268 | *- [Bb]ackup ([0-9]).rdl
269 | *- [Bb]ackup ([0-9][0-9]).rdl
270 |
271 | # Microsoft Fakes
272 | FakesAssemblies/
273 |
274 | # GhostDoc plugin setting file
275 | *.GhostDoc.xml
276 |
277 | # Node.js Tools for Visual Studio
278 | .ntvs_analysis.dat
279 | node_modules/
280 |
281 | # Visual Studio 6 build log
282 | *.plg
283 |
284 | # Visual Studio 6 workspace options file
285 | *.opt
286 |
287 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
288 | *.vbw
289 |
290 | # Visual Studio LightSwitch build output
291 | **/*.HTMLClient/GeneratedArtifacts
292 | **/*.DesktopClient/GeneratedArtifacts
293 | **/*.DesktopClient/ModelManifest.xml
294 | **/*.Server/GeneratedArtifacts
295 | **/*.Server/ModelManifest.xml
296 | _Pvt_Extensions
297 |
298 | # Paket dependency manager
299 | .paket/paket.exe
300 | paket-files/
301 |
302 | # FAKE - F# Make
303 | .fake/
304 |
305 | # CodeRush personal settings
306 | .cr/personal
307 |
308 | # Python Tools for Visual Studio (PTVS)
309 | __pycache__/
310 | *.pyc
311 |
312 | # Cake - Uncomment if you are using it
313 | # tools/**
314 | # !tools/packages.config
315 |
316 | # Tabs Studio
317 | *.tss
318 |
319 | # Telerik's JustMock configuration file
320 | *.jmconfig
321 |
322 | # BizTalk build output
323 | *.btp.cs
324 | *.btm.cs
325 | *.odx.cs
326 | *.xsd.cs
327 |
328 | # OpenCover UI analysis results
329 | OpenCover/
330 |
331 | # Azure Stream Analytics local run output
332 | ASALocalRun/
333 |
334 | # MSBuild Binary and Structured Log
335 | *.binlog
336 |
337 | # NVidia Nsight GPU debugger configuration file
338 | *.nvuser
339 |
340 | # MFractors (Xamarin productivity tool) working folder
341 | .mfractor/
342 |
343 | # Local History for Visual Studio
344 | .localhistory/
345 |
346 | # BeatPulse healthcheck temp database
347 | healthchecksdb
348 |
349 | # Backup folder for Package Reference Convert tool in Visual Studio 2017
350 | MigrationBackup/
351 |
352 | # Ionide (cross platform F# VS Code tools) working folder
353 | .ionide/
354 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.15)
2 | project(VMPAttach)
3 |
4 | include(FetchContent)
5 |
6 | FetchContent_Declare(
7 | VTIL-Core
8 | GIT_REPOSITORY https://github.com/vtil-project/VTIL-Core
9 | GIT_SHALLOW true
10 | )
11 | FetchContent_MakeAvailable(VTIL-Core)
12 |
13 | add_subdirectory(VMPAttack)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 |
635 | Copyright (C)
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see .
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | Copyright (C)
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | .
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | .
675 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # VMPAttack
2 | 
3 | A Work-In-Progress VMP to VTIL lifter.
4 | Works for VMProtect 3.X x64.
5 |
6 | ## Usage
7 | Literally drag + drop the unpacked victim file onto VMPAttack.exe.
8 | Lifted VTIL routines will appear in a folder named "VMPAttack-Output".
9 |
10 | ## Advanced Usage
11 | All lifting functionality depends on the `vmpattack` root class object. This object can easily be constructed using a byte vector of the target image.
12 | You can lift any routine manually by passing the VMEntry **RVA** and entry stub value in a `lifting_job` structure to the `vmpattack::lift` function.
13 |
14 | 
15 |
16 | `lifting_job`s can be automatically generated by providing the **RVA** of the entry stub (see above) to the `vmpattack::analyze_entry_stub` function.
17 |
18 | Example usage:
19 | ```C++
20 | std::vector buffer = read_file( file_path );
21 |
22 | vmpattack instance( buffer );
23 |
24 | if ( auto result = instance.analyze_entry_stub( my_rva ) )
25 | {
26 | if ( auto routine = instance.lift( result->job ) )
27 | {
28 | vtil::optimizer::apply_all_profiled( *routine );
29 | vtil::save_routine( *routine, "C:\\my_routine.vtil" );
30 | }
31 | }
32 | ```
33 |
34 | ## Building
35 | Building in VS is as simple as replacing the include/library directories to VTIL/Keystone/Capstone in the vcxproj.
36 |
37 | The project now also universally supports CMake and platforms other than Windows.
38 |
39 | The project requires C++20.
40 |
41 | ## Issues
42 | Stability is the main issue. Sometimes the lifter or optimizer can hang unexpectedly, or fail to lift certain branches.
43 | The lifter also does not currently handle switch tables.
44 |
45 | ## Licence
46 | Licensed under the GPL-3.0 License. No warranty is provided of any kind.
47 |
--------------------------------------------------------------------------------
/VMPAttack.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 16
4 | VisualStudioVersion = 16.0.30114.105
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "VMPAttack", "VMPAttack\VMPAttack.vcxproj", "{15A0B665-FD76-49BA-A346-AFBA31359144}"
7 | EndProject
8 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "VMPAttack_Tester", "VMPAttack_Tester\VMPAttack_Tester.vcxproj", "{A8B46B1A-78C8-4003-9848-5F02549D0ED1}"
9 | EndProject
10 | Global
11 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
12 | Debug|x64 = Debug|x64
13 | Debug|x86 = Debug|x86
14 | Release|x64 = Release|x64
15 | Release|x86 = Release|x86
16 | EndGlobalSection
17 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
18 | {15A0B665-FD76-49BA-A346-AFBA31359144}.Debug|x64.ActiveCfg = Debug|x64
19 | {15A0B665-FD76-49BA-A346-AFBA31359144}.Debug|x64.Build.0 = Debug|x64
20 | {15A0B665-FD76-49BA-A346-AFBA31359144}.Debug|x86.ActiveCfg = Debug|Win32
21 | {15A0B665-FD76-49BA-A346-AFBA31359144}.Debug|x86.Build.0 = Debug|Win32
22 | {15A0B665-FD76-49BA-A346-AFBA31359144}.Release|x64.ActiveCfg = Release|x64
23 | {15A0B665-FD76-49BA-A346-AFBA31359144}.Release|x64.Build.0 = Release|x64
24 | {15A0B665-FD76-49BA-A346-AFBA31359144}.Release|x86.ActiveCfg = Release|Win32
25 | {15A0B665-FD76-49BA-A346-AFBA31359144}.Release|x86.Build.0 = Release|Win32
26 | {A8B46B1A-78C8-4003-9848-5F02549D0ED1}.Debug|x64.ActiveCfg = Debug|x64
27 | {A8B46B1A-78C8-4003-9848-5F02549D0ED1}.Debug|x64.Build.0 = Debug|x64
28 | {A8B46B1A-78C8-4003-9848-5F02549D0ED1}.Debug|x86.ActiveCfg = Debug|Win32
29 | {A8B46B1A-78C8-4003-9848-5F02549D0ED1}.Debug|x86.Build.0 = Debug|Win32
30 | {A8B46B1A-78C8-4003-9848-5F02549D0ED1}.Release|x64.ActiveCfg = Release|x64
31 | {A8B46B1A-78C8-4003-9848-5F02549D0ED1}.Release|x64.Build.0 = Release|x64
32 | {A8B46B1A-78C8-4003-9848-5F02549D0ED1}.Release|x86.ActiveCfg = Release|Win32
33 | {A8B46B1A-78C8-4003-9848-5F02549D0ED1}.Release|x86.Build.0 = Release|Win32
34 | EndGlobalSection
35 | GlobalSection(SolutionProperties) = preSolution
36 | HideSolutionNode = FALSE
37 | EndGlobalSection
38 | GlobalSection(ExtensibilityGlobals) = postSolution
39 | SolutionGuid = {243D1F4F-DA77-474A-B092-913983BD6899}
40 | EndGlobalSection
41 | EndGlobal
42 |
--------------------------------------------------------------------------------
/VMPAttack/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | add_executable(VMPAttack
2 | analysis_context.cpp
3 | analysis_context.hpp
4 | arithmetic_expression.cpp
5 | arithmetic_expression.hpp
6 | arithmetic_operation.cpp
7 | arithmetic_operation_desc.hpp
8 | arithmetic_operation.hpp
9 | arithmetic_operations.hpp
10 | arithmetic_utilities.hpp
11 | disassembler.cpp
12 | disassembler.hpp
13 | flags.hpp
14 | instruction.cpp
15 | instruction.hpp
16 | instruction_stream.cpp
17 | instruction_stream.hpp
18 | instruction_utilities.hpp
19 | main.cpp
20 | vm_analysis_context.hpp
21 | vm_bridge.cpp
22 | vm_bridge.hpp
23 | vm_context.hpp
24 | vmentry.hpp
25 | vm_handler.cpp
26 | vm_handler.hpp
27 | vm_instance.cpp
28 | vm_instance.hpp
29 | vm_instruction.cpp
30 | vm_instruction_desc.hpp
31 | vm_instruction.hpp
32 | vm_instruction_info.hpp
33 | vm_instruction_set.hpp
34 | vmpattack.cpp
35 | vmpattack.hpp
36 | vm_state.hpp
37 | )
38 |
39 | set(THREADS_PREFER_PTHREAD_FLAG ON)
40 | find_package(Threads REQUIRED)
41 |
42 | target_link_libraries(VMPAttack PRIVATE VTIL Threads::Threads)
--------------------------------------------------------------------------------
/VMPAttack/VMPAttack.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | Win32
7 |
8 |
9 | Release
10 | Win32
11 |
12 |
13 | Debug
14 | x64
15 |
16 |
17 | Release
18 | x64
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 | 16.0
62 | Win32Proj
63 | {15a0b665-fd76-49ba-a346-afba31359144}
64 | VMPAttack
65 | 10.0
66 |
67 |
68 |
69 | Application
70 | true
71 | v142
72 | Unicode
73 |
74 |
75 | Application
76 | false
77 | v142
78 | true
79 | Unicode
80 |
81 |
82 | Application
83 | true
84 | v142
85 | Unicode
86 |
87 |
88 | Application
89 | false
90 | v142
91 | true
92 | Unicode
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 | true
114 | $(SolutionDir)..\dependencies\Capstone\include;$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Architecture\includes;$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Common\includes;$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Optimizer\includes;$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-SymEx\includes;$(IncludePath)
115 | $(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Architecture\$(PlatformShortName)\$(Configuration);$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Common\$(PlatformShortName)\$(Configuration);$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Optimizer\$(PlatformShortName)\$(Configuration);$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-SymEx\$(PlatformShortName)\$(Configuration);$(SolutionDir)..\dependencies\Capstone\msvc\$(PlatformShortName)\Release;$(LibraryPath)
116 |
117 |
118 | false
119 | $(SolutionDir)..\dependencies\Capstone\include;$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Architecture\includes;$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Common\includes;$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Optimizer\includes;$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-SymEx\includes;$(IncludePath)
120 | $(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Architecture\$(PlatformShortName)\$(Configuration);$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Common\$(PlatformShortName)\$(Configuration);$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-Optimizer\$(PlatformShortName)\$(Configuration);$(SolutionDir)..\..\VTIL\VTIL-Core\VTIL-SymEx\$(PlatformShortName)\$(Configuration);$(SolutionDir)..\dependencies\Capstone\msvc\$(PlatformShortName)\Release;$(LibraryPath)
121 |
122 |
123 | true
124 | $(SolutionDir)..\..\VTIL-Core\out\build\x64-Debug (default)\_deps\capstone-src\include;$(SolutionDir)..\..\VTIL-Core\VTIL-Architecture\includes;$(SolutionDir)..\..\VTIL-Core\VTIL-Common\includes;$(SolutionDir)..\..\VTIL-Core\VTIL-Compiler\includes;$(SolutionDir)..\..\VTIL-Core\VTIL-SymEx\includes;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Debug (default)\_deps\keystone-src\include;$(IncludePath)
125 | $(SolutionDir)..\..\VTIL-Core\out\build\x64-Debug\VTIL-Compiler;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Debug\VTIL-Common;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Debug\VTIL-Architecture;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Debug\VTIL-SymEx;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Debug\_deps\capstone-build;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Debug\_deps\keystone-build\llvm\lib;$(LibraryPath)
126 |
127 |
128 | false
129 | $(SolutionDir)..\..\VTIL-Core\out\build\x64-Release\_deps\capstone-src\include;$(SolutionDir)..\..\VTIL-Core\VTIL-Architecture\includes;$(SolutionDir)..\..\VTIL-Core\VTIL-Common\includes;$(SolutionDir)..\..\VTIL-Core\VTIL-Compiler\includes;$(SolutionDir)..\..\VTIL-Core\VTIL-SymEx\includes;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Release\_deps\keystone-src\include;$(IncludePath)
130 | $(SolutionDir)..\..\VTIL-Core\out\build\x64-Release\VTIL-Compiler;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Release\VTIL-Common;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Release\VTIL-Architecture;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Release\VTIL-SymEx;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Release\_deps\capstone-build;$(SolutionDir)..\..\VTIL-Core\out\build\x64-Release\_deps\keystone-build\llvm\lib;$(LibraryPath)
131 |
132 |
133 |
134 | Level3
135 | true
136 | WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
137 | true
138 | stdcpplatest
139 |
140 |
141 | Console
142 | true
143 |
144 |
145 |
146 |
147 | Level3
148 | true
149 | true
150 | true
151 | WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
152 | true
153 | stdcpplatest
154 |
155 |
156 | Console
157 | true
158 | true
159 | true
160 |
161 |
162 |
163 |
164 | Level3
165 | true
166 | _DEBUG;_CONSOLE;NOMINMAX;%(PreprocessorDefinitions);VMPATTACK_VERBOSE_0;VMPATTACK_VERBOSE_1;VTIL_OPT_TRACE_VERBOSE
167 | true
168 | stdcpplatest
169 | MultiThreadedDebugDLL
170 | true
171 |
172 |
173 | Console
174 | true
175 | %(AdditionalDependencies);VTIL-Architecture.lib;VTIL-Common.lib;VTIL-SymEx.lib;VTIL-Compiler.lib;capstone.lib;keystone.lib
176 | 4194304
177 |
178 |
179 |
180 |
181 | Level3
182 | true
183 | true
184 | true
185 | NDEBUG;_CONSOLE;NOMINMAX;%(PreprocessorDefinitions)
186 | true
187 | stdcpplatest
188 | true
189 | true
190 |
191 |
192 | Console
193 | true
194 | true
195 | true
196 | kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);VTIL-Architecture.lib;VTIL-Common.lib;VTIL-SymEx.lib;VTIL-Compiler.lib;capstone.lib;keystone.lib
197 |
198 |
199 |
200 |
201 |
202 |
--------------------------------------------------------------------------------
/VMPAttack/VMPAttack.vcxproj.filters:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Instruction Parser
7 |
8 |
9 | Instruction Parser
10 |
11 |
12 | Arithmetic
13 |
14 |
15 | Analysis
16 |
17 |
18 | Arithmetic
19 |
20 |
21 | VM\Architecture
22 |
23 |
24 | VM\Architecture
25 |
26 |
27 | Instruction Parser
28 |
29 |
30 | VM\Architecture
31 |
32 |
33 | VM\Architecture
34 |
35 |
36 | Lifter
37 |
38 |
39 |
40 |
41 | {d250c107-f315-4d93-b16b-a6d6819e66cb}
42 |
43 |
44 | {34b1ee1c-bce8-477d-ae6b-3c7c6a6f4301}
45 |
46 |
47 | {c7bc42a8-1849-48b4-ac0b-c1c57ef6d345}
48 |
49 |
50 | {7bb3161d-877f-467b-a0dc-0ee84775e037}
51 |
52 |
53 | {e3b96949-8e01-424e-95df-c1a067e2230a}
54 |
55 |
56 | {7e47b311-cfb1-4131-ab7b-426cf4c330d7}
57 |
58 |
59 | {a65f961c-e5ec-4b40-9030-c6248e77b8bd}
60 |
61 |
62 | {aa95b3f0-4684-4867-9d8f-577e044b2003}
63 |
64 |
65 |
66 |
67 | Instruction Parser
68 |
69 |
70 | Instruction Parser
71 |
72 |
73 | Instruction Parser
74 |
75 |
76 | Arithmetic
77 |
78 |
79 | Arithmetic
80 |
81 |
82 | Arithmetic
83 |
84 |
85 | Arithmetic
86 |
87 |
88 | VM\State
89 |
90 |
91 | Analysis
92 |
93 |
94 | Analysis
95 |
96 |
97 | VM\Architecture
98 |
99 |
100 | VM\Architecture
101 |
102 |
103 | VM\Architecture
104 |
105 |
106 | VM\Architecture
107 |
108 |
109 | VM\Architecture
110 |
111 |
112 | VM\State
113 |
114 |
115 | VM\Architecture
116 |
117 |
118 | VM\Architecture
119 |
120 |
121 | Instruction Parser
122 |
123 |
124 | Lifter
125 |
126 |
127 | Arithmetic
128 |
129 |
130 | Lifter\x86
131 |
132 |
133 | Lifter
134 |
135 |
136 |
--------------------------------------------------------------------------------
/VMPAttack/analysis_context.cpp:
--------------------------------------------------------------------------------
1 | #include "analysis_context.hpp"
2 | #include "arithmetic_operations.hpp"
3 | #include
4 |
5 | namespace vmpattack
6 | {
7 | // Processes the instruction, updating any properties that the instruction
8 | // may change.
9 | //
10 | void analysis_context::process( const instruction* instruction )
11 | {
12 | // If the expression is valid, attempt to record the current instruction
13 | // in the expression
14 | //
15 | if ( expression )
16 | {
17 | // Try to get operation descriptor for the current instruction
18 | //
19 | if ( auto operation_desc = operation_desc_from_instruction( instruction ) )
20 | {
21 | uint8_t read_count = 0, write_count = 0;
22 | cs_regs read_regs, write_regs;
23 |
24 | // Fetch registers read/written to by instruction
25 | //
26 | cs_regs_access( disassembler::get().get_handle(), &instruction->ins, read_regs, &read_count, write_regs, &write_count );
27 |
28 | // Flip flag if expression target register is being written to
29 | //
30 | bool writes_to_reg = false;
31 | for ( int i = 0; i < write_count; i++ )
32 | {
33 | if ( register_base_equal( ( x86_reg )write_regs[ i ], expression_register ) )
34 | {
35 | writes_to_reg = true;
36 | break;
37 | }
38 | }
39 |
40 | // If it does write to the register, add it to the expression
41 | //
42 | if ( writes_to_reg )
43 | {
44 | if ( auto operation = arithmetic_operation::from_instruction( instruction ) )
45 | expression->operations.push_back( *operation );
46 |
47 | }
48 | }
49 | }
50 |
51 | // If we are currently tracking any registers (ie. tracked register is not empty) attempt
52 | // to update them using the current instruction.
53 | //
54 | if ( tracked_registers.size() > 0
55 | && ( instruction->ins.id == X86_INS_MOV
56 | || instruction->ins.id == X86_INS_XCHG ) )
57 | {
58 | // If both operands are registers.
59 | //
60 | if ( instruction->operand( 0 ).type == X86_OP_REG
61 | && instruction->operand( 1 ).type == X86_OP_REG )
62 | {
63 | // Loop through tracked registers vector.
64 | //
65 | for ( x86_reg* tracked_reg : tracked_registers )
66 | {
67 | if ( instruction->ins.id == X86_INS_MOV )
68 | {
69 | // operand( 0 ) = operand( 1 )
70 | //
71 | if ( instruction->operand( 1 ).reg == *tracked_reg )
72 | *tracked_reg = instruction->operand( 0 ).reg;
73 | }
74 | else if ( instruction->ins.id == X86_INS_XCHG )
75 | {
76 | // operand ( 0 ) = operand( 1 ) && operand( 1 ) = operand( 0 )
77 | //
78 | if ( instruction->operand( 0 ).reg == *tracked_reg )
79 | *tracked_reg = instruction->operand( 1 ).reg;
80 | else if ( instruction->operand( 1 ).reg == *tracked_reg )
81 | *tracked_reg = instruction->operand( 0 ).reg;
82 | }
83 | }
84 | }
85 | }
86 |
87 | // If we are currently tracking stack pushes, update them.
88 | //
89 | if ( pushed_registers )
90 | {
91 | if ( instruction->ins.id == X86_INS_PUSH
92 | && instruction->operand( 0 ).type == X86_OP_REG )
93 | pushed_registers->push_back( instruction->operand( 0 ).reg );
94 | else if ( instruction->ins.id == X86_INS_PUSHFQ
95 | || instruction->ins.id == X86_INS_PUSHFD
96 | || instruction->ins.id == X86_INS_PUSHF )
97 | pushed_registers->push_back( X86_REG_EFLAGS );
98 | }
99 |
100 | // If we are currently tracking stack pops, update them.
101 | //
102 | if ( popped_registers )
103 | {
104 | if ( instruction->ins.id == X86_INS_POP
105 | && instruction->operand( 0 ).type == X86_OP_REG )
106 | popped_registers->push_back( instruction->operand( 0 ).reg );
107 | else if ( instruction->ins.id == X86_INS_POPFQ
108 | || instruction->ins.id == X86_INS_POPFD
109 | || instruction->ins.id == X86_INS_POPF )
110 | popped_registers->push_back( X86_REG_EFLAGS );
111 | }
112 | }
113 | }
--------------------------------------------------------------------------------
/VMPAttack/arithmetic_expression.cpp:
--------------------------------------------------------------------------------
1 | #include "arithmetic_expression.hpp"
2 | #include "arithmetic_utilities.hpp"
3 |
4 | namespace vmpattack
5 | {
6 | // Compute the output for a given input, by applying each operation on said input.
7 | //
8 | uint64_t arithmetic_expression::compute( uint64_t input, size_t byte_count ) const
9 | {
10 | uint64_t output = input;
11 |
12 | // Loop through each operation in order.
13 | //
14 | for ( auto& operation : operations )
15 | {
16 | // Update the ouput, specifying the previous expression's output as the current input.
17 | //
18 | output = operation.descriptor->transform( output, operation.additional_operands.data() );
19 |
20 | // Size-cast the output.
21 | //
22 | output = dynamic_size_cast( output, byte_count );
23 | }
24 |
25 | return output;
26 | }
27 | }
--------------------------------------------------------------------------------
/VMPAttack/arithmetic_expression.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include "arithmetic_operation.hpp"
4 |
5 | namespace vmpattack
6 | {
7 | // This struct describes an expression instance containing numerous arithmetic_operation's
8 | // in a specific order. It allows for computation of an output given an input value.
9 | //
10 | struct arithmetic_expression
11 | {
12 | // An ordered vector of operations.
13 | //
14 | std::vector operations;
15 |
16 | // Compute the output for a given input, by applying each operation on said input.
17 | //
18 | uint64_t compute( uint64_t input, size_t byte_count = 8 ) const;
19 | };
20 | }
--------------------------------------------------------------------------------
/VMPAttack/arithmetic_operation.cpp:
--------------------------------------------------------------------------------
1 | #include "arithmetic_operation.hpp"
2 | #include "arithmetic_operations.hpp"
3 |
4 | namespace vmpattack
5 | {
6 | // Construct via instruction and descriptor.
7 | // If construction failed, returns empty object.
8 | //
9 | std::optional arithmetic_operation::from_instruction( const arithmetic_operation_desc* descriptor, const instruction* instruction )
10 | {
11 | std::vector imm_operands;
12 |
13 | // The first operand is always the target operand. We need
14 | // to generate the additional operand vector, so we only loop
15 | // through these.
16 | //
17 | for ( int i = 1; i < instruction->operand_count(); i++ )
18 | {
19 | auto operand = instruction->operand( i );
20 |
21 | // Only immediate additional operands are supported, to make
22 | // this process simpler.
23 | //
24 | if ( operand.type != X86_OP_IMM )
25 | return {};
26 |
27 | // Append to vector.
28 | //
29 | imm_operands.push_back( operand.imm );
30 | }
31 |
32 | return arithmetic_operation( descriptor, imm_operands );
33 | }
34 |
35 | // Construct via instruction.
36 | // If construction failed, returns empty object.
37 | //
38 | std::optional arithmetic_operation::from_instruction( const instruction* instruction )
39 | {
40 | if ( auto descriptor = operation_desc_from_instruction( instruction ) )
41 | {
42 | return from_instruction( descriptor, instruction );
43 | }
44 |
45 | return {};
46 | }
47 | }
--------------------------------------------------------------------------------
/VMPAttack/arithmetic_operation.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include "arithmetic_operation_desc.hpp"
4 | #include "instruction.hpp"
5 |
6 | namespace vmpattack
7 | {
8 | // This struct describes an arithmetic operation instance, containing
9 | // a backing descriptor and any operand arguments.
10 | //
11 | struct arithmetic_operation
12 | {
13 | // The backing operation descriptor.
14 | //
15 | const arithmetic_operation_desc* descriptor;
16 |
17 | // Any additional argument operands in order.
18 | //
19 | const std::vector additional_operands;
20 |
21 | // Construct via backing descriptor and additional operand vector.
22 | //
23 | arithmetic_operation( const arithmetic_operation_desc* descriptor, const std::vector& additional_operands )
24 | : descriptor( descriptor ), additional_operands( additional_operands )
25 | {}
26 |
27 | // Construct via instruction and descriptor.
28 | // If construction failed, returns empty object.
29 | //
30 | static std::optional from_instruction( const arithmetic_operation_desc* descriptor, const instruction* instruction );
31 |
32 | // Construct via instruction only.
33 | // If construction failed, returns empty object.
34 | //
35 | static std::optional from_instruction( const instruction* instruction );
36 | };
37 | }
--------------------------------------------------------------------------------
/VMPAttack/arithmetic_operation_desc.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 | #include
5 |
6 | namespace vmpattack
7 | {
8 | // This struct describes an arithmetic operation descriptor, outlining
9 | // any of its semantics.
10 | //
11 | struct arithmetic_operation_desc
12 | {
13 | // Tranform function, taking inputs, transforming them as per
14 | // operation semantic, and returning the final output value.
15 | //
16 | using fn_transform = uint64_t( * )( uint64_t input, const uint64_t additional_operands[] );
17 |
18 | // The instruction correspoinding to the operation.
19 | // NOTE: this is not nessecarily unique per operation.
20 | //
21 | x86_insn insn;
22 |
23 | // The number of additional operands, NOT INCLUDING the main input.
24 | // e.g. `neg rax` = 0
25 | // e.g. `xor rax, 0Ah` = 1
26 | //
27 | uint8_t num_additional_operands;
28 |
29 | // The transformation function.
30 | //
31 | fn_transform transform;
32 |
33 | // The operation input size, in bits, or none if not relevant.
34 | //
35 | std::optional input_size;
36 |
37 | // Constructor.
38 | //
39 | arithmetic_operation_desc( x86_insn insn, uint8_t num_additional_operands, fn_transform transform, std::optional input_size = {} )
40 | : insn( insn ), num_additional_operands( num_additional_operands ), transform( transform ), input_size( input_size )
41 | {}
42 | };
43 | }
--------------------------------------------------------------------------------
/VMPAttack/arithmetic_operations.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #ifdef _WIN32
3 | #include
4 | #else
5 | #include
6 | #endif
7 | #include "arithmetic_operation_desc.hpp"
8 | #include "instruction.hpp"
9 |
10 | namespace vmpattack
11 | {
12 | //
13 | // This file describes all of the arithmetic operations used for mutation by
14 | // VMProtect.
15 | //
16 |
17 | namespace arithmetic_descriptors
18 | {
19 | // Addition / Subtraction.
20 | //
21 | inline const arithmetic_operation_desc add = { X86_INS_ADD, 1, []( uint64_t d, const uint64_t a[] ) -> uint64_t { return d + a[ 0 ]; } };
22 | inline const arithmetic_operation_desc sub = { X86_INS_SUB, 1, []( uint64_t d, const uint64_t a[] ) -> uint64_t { return d - a[ 0 ]; } };
23 |
24 | // Bitwise Byte-Swaps.
25 | //
26 | // inline const arithmetic_operation_desc bswap_64 = { X86_INS_BSWAP, 0, []( uint64_t d, const uint64_t a[] ) -> uint64_t { return __bswap_64( d ); }, 8 };
27 | // inline const arithmetic_operation_desc bswap_32 = { X86_INS_BSWAP, 0, []( uint64_t d, const uint64_t a[] ) -> uint64_t { return __bswap_32( ( uint32_t )d ); }, 4 };
28 | // inline const arithmetic_operation_desc bswap_16 = { X86_INS_BSWAP, 0, []( uint64_t d, const uint64_t a[] ) -> uint64_t { return __bswap_16( ( uint16_t )d ); }, 2 };
29 | inline const arithmetic_operation_desc bswap_64 = { X86_INS_BSWAP, 0, []( uint64_t d, const uint64_t a[] ) -> uint64_t {
30 | #ifdef _WIN32
31 | return _byteswap_uint64( d );
32 | #else
33 | return __bswap_64( d );
34 | #endif
35 | }, 8 };
36 | inline const arithmetic_operation_desc bswap_32 = { X86_INS_BSWAP, 0, []( uint64_t d, const uint64_t a[] ) -> uint64_t {
37 | #ifdef _WIN32
38 | return _byteswap_ulong( ( uint32_t )d );
39 | #else
40 | return __bswap_32( ( uint32_t )d );
41 | #endif
42 | }, 4 };
43 | inline const arithmetic_operation_desc bswap_16 = { X86_INS_BSWAP, 0, []( uint64_t d, const uint64_t a[] ) -> uint64_t {
44 | #ifdef _WIN32
45 | return _byteswap_ushort( ( uint16_t )d );
46 | #else
47 | return __bswap_16( ( uint16_t )d );
48 | #endif
49 | }, 2 };
50 |
51 | // Incement / Decrement.
52 | //
53 | inline const arithmetic_operation_desc inc = { X86_INS_INC, 0, []( uint64_t d, const uint64_t a[] ) -> uint64_t { return ++d; } };
54 | inline const arithmetic_operation_desc dec = { X86_INS_DEC, 0, []( uint64_t d, const uint64_t a[] ) -> uint64_t { return --d; } };
55 |
56 | // Bitwise NOT / NEG / XOR.
57 | //
58 | inline const arithmetic_operation_desc bnot = { X86_INS_NOT, 0, []( uint64_t d, const uint64_t a[] ) -> uint64_t { return ~d; } };
59 | inline const arithmetic_operation_desc bneg = { X86_INS_NEG, 0, []( uint64_t d, const uint64_t a[] ) -> uint64_t { return ( uint64_t )-( int64_t )d; } };
60 | inline const arithmetic_operation_desc bxor = { X86_INS_XOR, 1, []( uint64_t d, const uint64_t a[] ) -> uint64_t { return d ^ a[ 0 ]; } };
61 |
62 | // Left Bitwise Rotations.
63 | //
64 | inline const arithmetic_operation_desc brol_64 = { X86_INS_ROL, 1, []( uint64_t d, const uint64_t a[] ) -> uint64_t {
65 | #ifdef _WIN32
66 | return _rotl64( d, ( int )a[ 0 ] );
67 | #else
68 | return __rolq( d, ( int )a[ 0 ] );
69 | #endif
70 | }, 8 };
71 | inline const arithmetic_operation_desc brol_32 = { X86_INS_ROL, 1, []( uint64_t d, const uint64_t a[] ) -> uint64_t {
72 | #ifdef _WIN32
73 | return _rotl( ( uint32_t )d, ( int )a[ 0 ] );
74 | #else
75 | return __rold( ( uint32_t )d, ( int )a[ 0 ] );
76 | #endif
77 | }, 4 };
78 | inline const arithmetic_operation_desc brol_16 = { X86_INS_ROL, 1, []( uint64_t d, const uint64_t a[] ) -> uint64_t {
79 | #ifdef _WIN32
80 | return _rotl16( ( uint16_t )d, ( uint8_t )a[ 0 ] );
81 | #else
82 | return __rolw( ( uint16_t )d, ( uint8_t )a[ 0 ] );
83 | #endif
84 | }, 2 };
85 | inline const arithmetic_operation_desc brol_8 = { X86_INS_ROL, 1, []( uint64_t d, const uint64_t a[] ) -> uint64_t {
86 | #ifdef _WIN32
87 | return _rotl8( ( uint8_t )d, ( uint8_t )a[ 0 ] );
88 | #else
89 | return __rolb( ( uint8_t )d, ( uint8_t )a[ 0 ] );
90 | #endif
91 | }, 1 };
92 |
93 | // Right Bitwise Rotations.
94 | //
95 | inline const arithmetic_operation_desc bror_64 = { X86_INS_ROR, 1, []( uint64_t d, const uint64_t a[] ) -> uint64_t {
96 | #ifdef _WIN32
97 | return _rotr64( d, ( int )a[ 0 ] );
98 | #else
99 | return __rorq( d, ( int )a[ 0 ] );
100 | #endif
101 | }, 8 };
102 | inline const arithmetic_operation_desc bror_32 = { X86_INS_ROR, 1, []( uint64_t d, const uint64_t a[] ) -> uint64_t {
103 | #ifdef _WIN32
104 | return _rotr( ( uint32_t )d, ( int )a[ 0 ] );
105 | #else
106 | return __rord( ( uint32_t )d, ( int )a[ 0 ] );
107 | #endif
108 | }, 4 };
109 | inline const arithmetic_operation_desc bror_16 = { X86_INS_ROR, 1, []( uint64_t d, const uint64_t a[] ) -> uint64_t {
110 | #ifdef _WIN32
111 | return _rotr16( ( uint16_t )d, ( uint8_t )a[ 0 ] );
112 | #else
113 | return __rorw( ( uint16_t )d, ( uint8_t )a[ 0 ] );
114 | #endif
115 | }, 2 };
116 | inline const arithmetic_operation_desc bror_8 = { X86_INS_ROR, 1, []( uint64_t d, const uint64_t a[] ) -> uint64_t {
117 | #ifdef _WIN32
118 | return _rotr8( ( uint8_t )d, ( uint8_t )a[ 0 ] );
119 | #else
120 | return __rorb( ( uint8_t )d, ( uint8_t )a[ 0 ] );
121 | #endif
122 | }, 1 };
123 |
124 | // List of all operation descriptors.
125 | //
126 | inline const arithmetic_operation_desc* all[] =
127 | {
128 | &add, &sub,
129 | &bswap_64, &bswap_32, &bswap_16,
130 | &inc, &dec,
131 | &bnot, &bneg, &bxor,
132 | &brol_64, &brol_32, &brol_16, &brol_8,
133 | &bror_64, &bror_32, &bror_16, &bror_8
134 | };
135 | }
136 |
137 | // Fetches the appropriate arithmetic operation descriptor for the given instruction, or nullptr otherwise.
138 | //
139 | inline const arithmetic_operation_desc* operation_desc_from_instruction( const instruction* instruction )
140 | {
141 | // Loop through full operation descriptor list.
142 | //
143 | for ( auto descriptor : arithmetic_descriptors::all )
144 | {
145 | // Check if the descriptor target instruction is equal to the instruction given.
146 | //
147 | if ( descriptor->insn == instruction->ins.id )
148 | {
149 | // If the descriptor has a specified input size, ensure it is equal to the input operand, which
150 | // is always the first operand.
151 | //
152 | if ( descriptor->input_size.has_value()
153 | && descriptor->input_size.value() != instruction->ins.detail->x86.operands[ 0 ].size )
154 | {
155 | // If not equal, continue search.
156 | continue;
157 | }
158 |
159 | return descriptor;
160 | }
161 | }
162 |
163 | // Nothing found; return nullptr.
164 | //
165 | return nullptr;
166 | }
167 | }
--------------------------------------------------------------------------------
/VMPAttack/arithmetic_utilities.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | namespace vmpattack
4 | {
5 | // Dynamically casts the integral value to the specified byte count.
6 | //
7 | template
8 | inline T dynamic_size_cast( T value, size_t bytes )
9 | {
10 | if ( bytes == sizeof( T ) )
11 | return value;
12 |
13 | T mask = ( 1ull << ( bytes * 8ull ) ) - 1ull;
14 |
15 | return value & mask;
16 | }
17 | }
--------------------------------------------------------------------------------
/VMPAttack/disassembler.cpp:
--------------------------------------------------------------------------------
1 | #include "disassembler.hpp"
2 |
3 | namespace vmpattack
4 | {
5 | // Disassembles at the effective address, negotating jumps according to the flags.
6 | //
7 | instruction_stream disassembler::disassemble( uint64_t base, uint64_t offset, disassembler_flags flags )
8 | {
9 | // ea = base + offset
10 | //
11 | uint64_t ea = base + offset;
12 |
13 | std::vector> instructions;
14 |
15 | size_t size = 0xFFFFFFFFFFFFFFFFull;
16 |
17 | // While iterative disassembly is successful.
18 | //
19 | while ( cs_disasm_iter( handle, ( const uint8_t** )&ea, &size, &offset, insn ) )
20 | {
21 | // Construct a self-containing instruction.
22 | //
23 | auto ins = std::make_shared( insn );
24 |
25 | // Is the instruction a branch?
26 | //
27 | if ( ins->is_branch() )
28 | {
29 | // If it's unconditional, and we know the destination, and we are specified
30 | // to follow these types of jumps, do so.
31 | //
32 | if ( flags & disassembler_take_unconditional_imm
33 | && ins->is_uncond_jmp() && ins->operand( 0 ).type == X86_OP_IMM )
34 | {
35 | // We must set the offset, otherwise the disassembly will be incorrect.
36 | //
37 | offset = ins->operand( 0 ).imm;
38 |
39 | // Update actual disassembly pointer.
40 | //
41 | ea = offset + base;
42 |
43 | // Don't append the jump to the stream.
44 | //
45 | continue;
46 | }
47 |
48 | // Branch not resolved - simply end disassembly.
49 | //
50 | break;
51 | }
52 |
53 | // Is the instruction a call?
54 | //
55 | if ( ins->ins.id == X86_INS_CALL )
56 | {
57 | // If the pass calls flag is not set, add it and end disassembly.
58 | //
59 | if ( !( flags & disassembler_pass_calls ) )
60 | {
61 | instructions.push_back( ins );
62 | break;
63 | }
64 | }
65 |
66 | // Is the instruction a return?
67 | //
68 | if ( ins->ins.id == X86_INS_RET )
69 | {
70 | // Add the instruction and end disassembly.
71 | //
72 | instructions.push_back( ins );
73 | break;
74 | }
75 |
76 | // Add instruction to list.
77 | //
78 | instructions.push_back( ins );
79 | }
80 |
81 | // Return an instruction stream of said instructions.
82 | //
83 | return { instructions };
84 | }
85 |
86 |
87 | // Disassembles at the offset from the base, simply disassembling every instruction in order.
88 | //
89 | std::vector> disassembler::disassembly_simple( uint64_t base, uint64_t offset, uint64_t end_rva )
90 | {
91 | // ea = base + offset
92 | //
93 | uint64_t ea = base + offset;
94 |
95 | std::vector> instructions;
96 |
97 | size_t size = end_rva - offset;
98 |
99 | // While iterative disassembly is successful.
100 | //
101 | while ( true )
102 | {
103 | // Check if we're within bounds.
104 | //
105 | if ( offset >= size )
106 | break;
107 |
108 | // In case disassembly failed (due to invalid instructions), try to continue by incrementing offset.
109 | //
110 | if ( !cs_disasm_iter( handle, ( const uint8_t** )&ea, &size, &offset, insn ) )
111 | {
112 | offset++;
113 | ea++;
114 |
115 | continue;
116 | }
117 |
118 | instructions.push_back( std::make_unique( insn ) );
119 | }
120 |
121 | return std::move( instructions );
122 | }
123 | }
--------------------------------------------------------------------------------
/VMPAttack/disassembler.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 | #include "instruction_stream.hpp"
5 |
6 | namespace vmpattack
7 | {
8 | // Defaults.
9 | //
10 | const cs_arch cs_default_arch = CS_ARCH_X86;
11 | const cs_mode cs_default_mode = CS_MODE_64;
12 |
13 | // Specifies the desired behaviour of the auto disassembler when a jump condition is
14 | // encountered
15 | //
16 | enum disassembler_flags : uint32_t
17 | {
18 | // When met with a branch, stop dissassembly.
19 | //
20 | disassembler_none = 0,
21 |
22 | // Take all unconditional immediate jumps, ignoring the jump instructions.
23 | //
24 | disassembler_take_unconditional_imm = 1 << 0,
25 |
26 | // Take all conditional jumps.
27 | //
28 | disassembler_take_conditional = 1 << 1,
29 |
30 | // Skip all conditional jumps.
31 | //
32 | disassembler_skip_conditional = 1 << 2,
33 |
34 | // Pass on calls.
35 | //
36 | disassembler_pass_calls = 1 << 3,
37 | };
38 |
39 | // This class provides a very lightweight thread-safe wrapper over capstone.
40 | //
41 | class disassembler
42 | {
43 | private:
44 | // The internal handle.
45 | //
46 | csh handle;
47 |
48 | // The internal instruction allocation memory.
49 | //
50 | cs_insn* insn;
51 |
52 |
53 | public:
54 | // Cannot be copied or moved.
55 | // Only one disassembler can exist per thread.
56 | //
57 | disassembler( const disassembler& ) = delete;
58 | disassembler( disassembler&& ) = delete;
59 | disassembler& operator=( const disassembler&& ) = delete;
60 | disassembler& operator=( disassembler&& ) = delete;
61 |
62 | disassembler( cs_arch arch, cs_mode mode )
63 | {
64 | fassert( cs_open( arch, mode, &handle ) == CS_ERR_OK );
65 | cs_option( handle, CS_OPT_DETAIL, CS_OPT_ON );
66 | insn = cs_malloc( handle );
67 | }
68 |
69 | ~disassembler()
70 | {
71 | cs_close( &handle );
72 | }
73 |
74 | // Getter to the handle.
75 | //
76 | csh get_handle() const { return handle; }
77 |
78 | // Singleton to provide a unique disassembler instance for each thread.
79 | //
80 | inline static disassembler& get( cs_arch arch = cs_default_arch, cs_mode mode = cs_default_mode)
81 | {
82 | thread_local static disassembler instance( arch, mode );
83 |
84 | return instance;
85 | }
86 |
87 | // Disassembles at the offset from the base, negotating jumps according to the flags.
88 | // NOTE: The offset is used for the disassembled instructions' addresses.
89 | //
90 | instruction_stream disassemble( uint64_t base, uint64_t offset, disassembler_flags flags = disassembler_take_unconditional_imm );
91 |
92 | // Disassembles at the offset from the base, simply disassembling every instruction in order.
93 | //
94 | std::vector> disassembly_simple( uint64_t base, uint64_t offset, uint64_t end_rva );
95 | };
96 | }
--------------------------------------------------------------------------------
/VMPAttack/flags.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 |
4 | namespace vmpattack::flags
5 | {
6 | // Individual flag registers
7 | //
8 | inline static const vtil::register_desc CF = { vtil::register_physical | vtil::register_flags, 0, 1, 0 };
9 | inline static const vtil::register_desc PF = { vtil::register_physical | vtil::register_flags, 0, 1, 2 };
10 | inline static const vtil::register_desc AF = { vtil::register_physical | vtil::register_flags, 0, 1, 4 };
11 | inline static const vtil::register_desc ZF = { vtil::register_physical | vtil::register_flags, 0, 1, 6 };
12 | inline static const vtil::register_desc SF = { vtil::register_physical | vtil::register_flags, 0, 1, 7 };
13 | inline static const vtil::register_desc IF = { vtil::register_physical | vtil::register_flags, 0, 1, 9 };
14 | inline static const vtil::register_desc DF = { vtil::register_physical | vtil::register_flags, 0, 1, 10 };
15 | inline static const vtil::register_desc OF = { vtil::register_physical | vtil::register_flags, 0, 1, 11 };
16 | }
--------------------------------------------------------------------------------
/VMPAttack/instruction.cpp:
--------------------------------------------------------------------------------
1 | #include "instruction.hpp"
2 | #include "disassembler.hpp"
3 |
4 | namespace vmpattack
5 | {
6 | // Determines whether this instruction is any type of jump.
7 | //
8 | bool instruction::is_jmp() const
9 | {
10 | // Enumerate instruction groups.
11 | //
12 | for ( int i = 0; i < ins.detail->groups_count; i++ )
13 | {
14 | auto grp = ins.detail->groups[ i ];
15 |
16 | // If group is JMP, return true.
17 | //
18 | if ( grp == X86_GRP_JUMP )
19 | return true;
20 | }
21 |
22 | return false;
23 | }
24 |
25 | // Is the instruction a conditional jump?
26 | //
27 | bool instruction::is_cond_jump() const
28 | {
29 | // Return false if unconditional.
30 | //
31 | if ( ins.id == X86_INS_JMP )
32 | return false;
33 |
34 | // Loop through groups.
35 | //
36 | for ( int i = 0; i < ins.detail->groups_count; i++ )
37 | {
38 | if ( ins.detail->groups[ i ] == X86_GRP_JUMP )
39 | return true;
40 | }
41 |
42 | return false;
43 | }
44 |
45 | // Returns a vector of registers this instruction writes to and reads from.
46 | // Read is returned in the first part of the pair, Written in the second.
47 | //
48 | std::pair, std::vector> instruction::get_regs_accessed() const
49 | {
50 | // Declare C-arrays of the data.
51 | //
52 | cs_regs read, write;
53 | uint8_t readc, writec;
54 |
55 | // Use capstone to get lists of registers read from / written to.
56 | //
57 | if ( cs_regs_access( disassembler::get().get_handle(), &ins, read, &readc, write, &writec ) != CS_ERR_OK )
58 | return {};
59 |
60 | std::vector read_vec, write_vec;
61 |
62 | // Convert raw C style arrays to pretty C++ vectors.
63 | //
64 | for ( int i = 0; i < readc; i++ )
65 | read_vec.push_back( ( x86_reg )read[ i ] );
66 | for ( int i = 0; i < writec; i++ )
67 | write_vec.push_back( ( x86_reg )write[ i ] );
68 |
69 | return { read_vec, write_vec };
70 | }
71 | }
--------------------------------------------------------------------------------
/VMPAttack/instruction.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 | #include
5 |
6 | namespace vmpattack
7 | {
8 | // This class provides a simple wrapper over the cs_insn and cs_detail
9 | // structs to make it self-containing, and to provide some simple utilities.
10 | //
11 | class instruction
12 | {
13 | private:
14 | // This is an internal backing structure that the cs_insn->detail
15 | // points to.
16 | //
17 | cs_detail detail;
18 |
19 | public:
20 | // The wrapped instruction.
21 | //
22 | cs_insn ins;
23 |
24 | // Copy constructor.
25 | //
26 | instruction( const cs_insn* ins )
27 | : ins( *ins ), detail( *ins->detail )
28 | {
29 | // Point ins->detail to copy.
30 | //
31 | this->ins.detail = &detail;
32 | }
33 |
34 | // Determines whether this instruction is any type of jump.
35 | //
36 | bool is_jmp() const;
37 |
38 | // Useful utilities.
39 | //
40 | inline int operand_count() const { return detail.x86.op_count; }
41 | inline const cs_x86_op& operand( int i ) const { return detail.x86.operands[ i ]; }
42 | inline x86_op_type operand_type( int i ) const { return detail.x86.operands[ i ].type; }
43 |
44 | inline bool is_uncond_jmp() const { return ins.id == X86_INS_JMP; };
45 |
46 | inline bool is_branch() const { return is_jmp(); }
47 |
48 | inline x86_prefix prefix( int i ) const { return ( x86_prefix )detail.x86.prefix[ i ]; }
49 |
50 | // Returns a vector of registers this instruction writes to and reads from.
51 | // Read is returned in the first part of the pair, Written in the second.
52 | //
53 | std::pair, std::vector> get_regs_accessed() const;
54 |
55 | // Is the instruction a conditional jump?
56 | //
57 | bool is_cond_jump() const;
58 | };
59 | }
--------------------------------------------------------------------------------
/VMPAttack/instruction_stream.cpp:
--------------------------------------------------------------------------------
1 | #include "instruction_stream.hpp"
2 |
3 | namespace vmpattack
4 | {
5 | // Advances the stream, incrementing index and returning the
6 | // instruction ptr.
7 | //
8 | const instruction* instruction_stream::next()
9 | {
10 | // Check if within bounds.
11 | //
12 | if ( begin + index > end )
13 | return nullptr;
14 |
15 | // Fetch instruction.
16 | //
17 | auto& ins = instructions[ begin + index ];
18 |
19 | // Increment index.
20 | //
21 | index++;
22 |
23 | // Return a non-owning pointer to the instruction.
24 | //
25 | return ins.get();
26 | }
27 | }
--------------------------------------------------------------------------------
/VMPAttack/instruction_stream.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include "instruction.hpp"
4 |
5 | namespace vmpattack
6 | {
7 | // This class spans over an ordered vector of instructions.
8 | // It contains an index to determine the current position in the
9 | // stream.
10 | //
11 | class instruction_stream
12 | {
13 | public:
14 | // The backing instruction vector.
15 | // This is a shared_ptr vector as instruction_streams are copyable
16 | // and thus instruction objects can have multiple owners.
17 | //
18 | std::vector> instructions;
19 |
20 | private:
21 | // Begin index of span.
22 | //
23 | uint32_t begin;
24 |
25 | // End index of span.
26 | //
27 | uint32_t end;
28 |
29 | // Current Index.
30 | //
31 | uint32_t index;
32 |
33 | public:
34 | // Default constructor / move / copy.
35 | //
36 | instruction_stream( instruction_stream&& ) = default;
37 | instruction_stream( const instruction_stream& ) = default;
38 | instruction_stream& operator= ( instruction_stream&& ) = default;
39 | instruction_stream& operator= ( const instruction_stream& ) = default;
40 |
41 | // Construct via copying existing instruction vector
42 | //
43 | instruction_stream( const std::vector>& instructions )
44 | : instructions( instructions ), begin( 0 ), end( instructions.size() - 1 ), index( 0 )
45 | {}
46 |
47 | // Get the stream base
48 | //
49 | inline uint64_t base() const
50 | {
51 | return instructions[ begin ]->ins.address;
52 | }
53 |
54 | // Disassembler bases instructions via RVA, thus base == rva.
55 | //
56 | inline uint64_t rva() const
57 | {
58 | return base();
59 | }
60 |
61 | // Resets index to 0
62 | //
63 | inline void reset()
64 | {
65 | index = 0;
66 | }
67 |
68 | // Advances the stream, incrementing index and returning the
69 | // instruction ptr.
70 | // Non-owning.
71 | //
72 | const instruction* next();
73 | };
74 | }
--------------------------------------------------------------------------------
/VMPAttack/instruction_utilities.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 |
5 | namespace vmpattack
6 | {
7 | // Determines whether or not the register's bases are equal.
8 | // e.g. RAX == AH, as base( RAX ) == AL, and base( AH ) == AL.
9 | //
10 | inline bool register_base_equal( x86_reg first, x86_reg second )
11 | {
12 | return vtil::amd64::registers.remap( first, 0, 1 ) == vtil::amd64::registers.remap( second, 0, 1 );
13 | }
14 |
15 | // Gets the register's largest architecture equivalent.
16 | //
17 | inline x86_reg get_largest_for_arch( x86_reg reg )
18 | {
19 | return vtil::amd64::registers.remap( reg, 0, 8 );
20 | }
21 | }
--------------------------------------------------------------------------------
/VMPAttack/main.cpp:
--------------------------------------------------------------------------------
1 | #ifdef _WIN32
2 | #include
3 | #endif
4 | #include
5 |
6 | #include "vmpattack.hpp"
7 |
8 | #include
9 | #include
10 | #include
11 |
12 | #ifdef _MSC_VER
13 | #pragma comment(linker, "/STACK:34359738368")
14 | #endif
15 |
16 | using namespace vtil;
17 | using namespace vtil::optimizer;
18 | using namespace vtil::logger;
19 |
20 | namespace vmpattack
21 | {
22 | using std::uint8_t;
23 |
24 | template
25 | auto read_file(const char* filepath) -> std::vector
26 | {
27 | std::ifstream file(filepath, std::ios::binary);
28 | std::vector file_buf(std::istreambuf_iterator(file), {});
29 | return file_buf;
30 | }
31 |
32 | extern "C" int main( int argc, const char* args[])
33 | {
34 | std::filesystem::path input_file_path = { args[1] };
35 |
36 | // Create an output directory.
37 | //
38 | std::filesystem::path output_path = input_file_path;
39 | output_path.remove_filename();
40 | output_path /= "VMPAttack-Output";
41 |
42 | // Create the directory if it doesn't exist already.
43 | //
44 | std::filesystem::create_directory( output_path );
45 |
46 | std::vector buffer = read_file( input_file_path.string().c_str() );
47 |
48 | log( "** Loaded raw image buffer @ 0x%p of size 0x%llx\r\n", buffer.data(), buffer.size() );
49 |
50 | vmpattack instance( buffer );
51 |
52 | std::vector scan_results = instance.scan_for_vmentry();
53 |
54 | log( "** Found %u virtualized routines:\r\n", scan_results.size() );
55 |
56 | for ( const scan_result& scan_result : scan_results )
57 | log( "\t** RVA 0x%llx VMEntry 0x%llx Stub 0x%llx\r\n", scan_result.rva, scan_result.job.vmentry_rva, scan_result.job.entry_stub );
58 |
59 | log( "\r\n" );
60 |
61 | std::vector lifted_routines;
62 |
63 | int i = 0;
64 |
65 | for ( const scan_result& scan_result : scan_results )
66 | {
67 | log( "** Devirtualizing routine %i/%i @ 0x%llx...\r\n", i + 1, scan_results.size(), scan_result.rva );
68 |
69 | std::optional routine = instance.lift( scan_result.job );
70 |
71 | if ( routine )
72 | {
73 | log( "\t** Lifting success\r\n" );
74 | lifted_routines.push_back( *routine );
75 |
76 | std::string save_path = output_path / vtil::format::str( "0x%llx.vtil", scan_result.rva );
77 | vtil::save_routine( *routine, save_path );
78 |
79 | log( "\t** Unoptimized Saved to %s\r\n", save_path );
80 |
81 | vtil::optimizer::apply_all_profiled( *routine );
82 |
83 | log( "\t** Optimization success\r\n" );
84 |
85 | #ifdef _DEBUG
86 | vtil::debug::dump( *routine );
87 | #endif
88 |
89 | std::string optimized_save_path = output_path / vtil::format::str( "0x%llx-Optimized.vtil", scan_result.rva );
90 | vtil::save_routine( *routine, optimized_save_path );
91 |
92 | log( "\t** Optimized Saved to %s\r\n", save_path );
93 | }
94 | else
95 | log( "\t** Lifting failed\r\n" );
96 |
97 | i++;
98 | }
99 |
100 | system( "pause" );
101 | }
102 | }
103 |
--------------------------------------------------------------------------------
/VMPAttack/vm_analysis_context.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "analysis_context.hpp"
3 | #include "vm_state.hpp"
4 |
5 | namespace vmpattack
6 | {
7 | // This class provides extra pattern finding templates for VM analysis,
8 | // by matching registers to a vm_state structure.
9 | //
10 | class vm_analysis_context : public analysis_context
11 | {
12 | private:
13 | // Stack alignment used for stack operations.
14 | //
15 | const uint8_t stack_alignment = 2;
16 |
17 | // The vm_state used for providing the analysis context with
18 | // specific registers for different pattern templates.
19 | // Non-owning.
20 | //
21 | const vm_state* state;
22 |
23 | public:
24 | // Consructor via an instruction_stream pointer, and a vm_state
25 | // pointer. Both are non-owned.
26 | //
27 | vm_analysis_context( instruction_stream* stream, const vm_state* state )
28 | : analysis_context( stream ), state( state )
29 | {}
30 |
31 | // Matches for an explicitly matched mov of another register into the vip register.
32 | // Constraints: reg: the register that is mov'ed into vip.
33 | //
34 | vm_analysis_context* set_vip( inout reg )
35 | {
36 | if ( this == nullptr ) return nullptr;
37 |
38 | // Drop const.
39 | //
40 | x86_reg vip_reg = state->vip_reg;
41 |
42 | return generic_reg_reg( X86_INS_MOV, { vip_reg, true }, reg, false )
43 | ->cast();
44 | }
45 |
46 | // Matches for an instruction that adds an immediate value to the VSP register.
47 | // Constraints: imm: the immediate value added.
48 | //
49 | vm_analysis_context* add_vsp( inout imm )
50 | {
51 | if ( this == nullptr ) return nullptr;
52 |
53 | // Drop const.
54 | //
55 | x86_reg stack_reg = state->stack_reg;
56 |
57 | return generic_reg_imm( X86_INS_ADD, { stack_reg, true }, imm, false )
58 | ->cast();
59 | }
60 |
61 | // Matches for instructions that either increment or decrement the VIP
62 | // via ADD or SUB instructions, using a immedaite value.
63 | // Constraints: id: the id of the matched instruction (either ADD or SUB)
64 | // offset: the amount the vip is offseted by.
65 | //
66 | vm_analysis_context* update_vip( inout id, inout offset )
67 | {
68 | if ( this == nullptr ) return nullptr;
69 |
70 | // Drop const.
71 | //
72 | x86_reg vip_reg = state->vip_reg;
73 |
74 | // ADD VIP, %offset
75 | // or
76 | // SUB VIP, %offset
77 | // ^ %id
78 | //
79 | return update_reg( id, { vip_reg, true }, offset )
80 | ->cast();
81 | }
82 |
83 | // Matches for instructions that offset the vip register via either a lea or add instruction.
84 | // Constraints: id: the id of the matched instruction (either ADD or SUB)
85 | // offset: the register the vip is offseted by.
86 | //
87 | vm_analysis_context* offset_vip( inout id, inout offset )
88 | {
89 | if ( this == nullptr ) return nullptr;
90 |
91 | // Drop const.
92 | //
93 | x86_reg vip_reg = state->vip_reg;
94 |
95 | // lea VIP, 8:[%reg + %offset]
96 | // or
97 | // add VIP, %offset
98 | // ^ %id
99 | //
100 | return offset_reg( id, { vip_reg, true }, offset )
101 | ->cast();
102 | }
103 |
104 | // Matches for instructions that fetch memory from the vip stream.
105 | // Constraints: reg: the register the memory is stored in.
106 | // size: the size of the memory that was read.
107 | //
108 | vm_analysis_context* fetch_vip( inout reg, inout size )
109 | {
110 | // MOV(ZX) %reg, %size:[VIP]
111 | //
112 | return match( [&]( const instruction* instruction )
113 | {
114 | if ( instruction->ins.id != X86_INS_MOV
115 | && instruction->ins.id != X86_INS_MOVZX)
116 | return false;
117 |
118 | // %reg == reg
119 | //
120 | if ( reg.second )
121 | if ( instruction->operand( 0 ).reg != reg.first )
122 | return false;
123 |
124 | // Memory base is vip, there's no index.
125 | //
126 | if ( instruction->operand( 1 ).mem.base != state->vip_reg
127 | || instruction->operand( 1 ).mem.index != X86_REG_INVALID )
128 | return false;
129 |
130 | // %size == size
131 | //
132 | if ( size.second )
133 | if ( instruction->operand( 1 ).size != size.first )
134 | return false;
135 |
136 | reg.first = instruction->operand( 0 ).reg;
137 | size.first = instruction->operand( 1 ).size;
138 |
139 | return true;
140 | }, 2, { X86_OP_REG, X86_OP_MEM } )
141 | ->cast();
142 | }
143 |
144 | // Matches for instructions that fetch memory from the virtual stack.
145 | // Constraints: dst: the destination register.
146 | // size: the size of the destination that was read.
147 | // disp: the stack displacement.
148 | //
149 | vm_analysis_context* fetch_vsp( inout dst, inout size, inout disp )
150 | {
151 | // mov(zx) %size:%dst, [VSP + %disp]
152 | //
153 | return match( [&]( const instruction* instruction )
154 | {
155 | if ( instruction->ins.id != X86_INS_MOV
156 | && instruction->ins.id != X86_INS_MOVZX)
157 | return false;
158 |
159 | // %dst == dst
160 | //
161 | if ( dst.second )
162 | if ( instruction->operand( 0 ).reg != dst.first )
163 | return false;
164 |
165 | // %size == size
166 | //
167 | if ( size.second )
168 | if ( instruction->operand( 0 ).size != size.first )
169 | return false;
170 |
171 |
172 | // Memory base is vsp, there's no index.
173 | //
174 | if ( instruction->operand( 1 ).mem.base != state->stack_reg
175 | || instruction->operand( 1 ).mem.index != X86_REG_INVALID )
176 | return false;
177 |
178 | // %disp == disp
179 | //
180 | if ( disp.second )
181 | if ( instruction->operand( 1 ).mem.disp != disp.first )
182 | return false;
183 |
184 | dst.first = instruction->operand( 0 ).reg;
185 | size.first = instruction->operand( 0 ).size;
186 | disp.first = instruction->operand( 1 ).mem.disp;
187 |
188 | return true;
189 | }, 2, { X86_OP_REG, X86_OP_MEM } )
190 | ->cast();
191 | }
192 |
193 | // Matches for instructions that stores memory into the virtual stack.
194 | // Constraints: src: the source register. Comparison via base.
195 | // size: the size of the destination that was written.
196 | //
197 | vm_analysis_context* store_vsp( inout src, inout size )
198 | {
199 | // mov %size:[VSP], %src
200 | //
201 | return match( [&]( const instruction* instruction )
202 | {
203 | if ( instruction->ins.id != X86_INS_MOV )
204 | return false;
205 |
206 | // Memory base is vsp, there's no index, and there's no disp.
207 | //
208 | if ( instruction->operand( 0 ).mem.base != state->stack_reg
209 | || instruction->operand( 0 ).mem.index != X86_REG_INVALID
210 | || instruction->operand( 0 ).mem.disp != 0)
211 | return false;
212 |
213 | // %src == src
214 | //
215 | if ( src.second )
216 | if ( !register_base_equal( instruction->operand( 1 ).reg, src.first ) )
217 | return false;
218 |
219 | // %size == size
220 | //
221 | if ( size.second )
222 | if ( instruction->operand( 0 ).size != size.first )
223 | return false;
224 |
225 | src.first = instruction->operand( 1 ).reg;
226 | size.first = instruction->operand( 0 ).size;
227 |
228 | return true;
229 | }, 2, { X86_OP_MEM, X86_OP_REG } )
230 | ->cast();
231 | }
232 |
233 | // Matches for instructions that fetch memory from the virtual context, optionally displaced by a register.
234 | // Constraints: dst: the destination register.
235 | // size: the size of the virtual context that was read.
236 | // disp: the optional context displacement register. Comparison via base.
237 | //
238 | vm_analysis_context* fetch_ctx( inout dst, inout size, inout disp )
239 | {
240 | // mov(zx) %dst, %size:[VCTX + %disp]
241 | //
242 | return match( [&]( const instruction* instruction )
243 | {
244 | if ( instruction->ins.id != X86_INS_MOV
245 | && instruction->ins.id != X86_INS_MOVZX)
246 | return false;
247 |
248 | // %dst == dst
249 | //
250 | if ( dst.second )
251 | if ( instruction->operand( 0 ).reg != dst.first )
252 | return false;
253 |
254 | // %size == size
255 | //
256 | if ( size.second )
257 | if ( instruction->operand( 1 ).size != size.first )
258 | return false;
259 |
260 |
261 | // Scale is 1, disp is 0, base is vcontext reg.
262 | //
263 | if ( instruction->operand( 1 ).mem.base != state->context_reg
264 | || instruction->operand( 1 ).mem.disp != 0
265 | || instruction->operand( 1 ).mem.scale != 1)
266 | return false;
267 |
268 | // %disp == disp
269 | //
270 | if ( disp.second )
271 | if ( !register_base_equal( instruction->operand( 1 ).mem.index, disp.first ) )
272 | return false;
273 |
274 | dst.first = instruction->operand( 0 ).reg;
275 | size.first = instruction->operand( 1 ).size;
276 | disp.first = instruction->operand( 1 ).mem.index;
277 |
278 | return true;
279 | }, 2, { X86_OP_REG, X86_OP_MEM } )
280 | ->cast();
281 | }
282 |
283 | // Matches for instructions that stores memory into the virtual context, optionally offsetted by a register.
284 | // Constraints: src: the source register. Comparison via base.
285 | // size: the size of the destination that was written.
286 | // disp: the optional context displacement register. Comparison via base.
287 | //
288 | vm_analysis_context* store_ctx( inout src, inout size, inout disp )
289 | {
290 | // mov %size:[VCTX + %disp], %src
291 | //
292 | return match( [&]( const instruction* instruction )
293 | {
294 | if ( instruction->ins.id != X86_INS_MOV )
295 | return false;
296 |
297 | // Memory base is vsp, scale is 1, and there's no disp.
298 | //
299 | if ( instruction->operand( 0 ).mem.base != state->context_reg
300 | || instruction->operand( 0 ).mem.scale != 1
301 | || instruction->operand( 0 ).mem.disp != 0 )
302 | return false;
303 |
304 | // %src == src
305 | //
306 | if ( src.second )
307 | if ( !register_base_equal( instruction->operand( 1 ).reg, src.first ) )
308 | return false;
309 |
310 | // %size == size
311 | //
312 | if ( size.second )
313 | if ( instruction->operand( 0 ).size != size.first )
314 | return false;
315 |
316 | // %disp == disp
317 | //
318 | if ( disp.second )
319 | if ( !register_base_equal( instruction->operand( 0 ).mem.index, disp.first ) )
320 | return false;
321 |
322 | src.first = instruction->operand( 1 ).reg;
323 | size.first = instruction->operand( 0 ).size;
324 | disp.first = instruction->operand( 0 ).mem.index;
325 |
326 | return true;
327 | }, 2, { X86_OP_MEM, X86_OP_REG } )
328 | ->cast();
329 | }
330 |
331 | // Generates an arithmetic expression for the given register, advancing the stream to wherever the encryption sequence ends.
332 | //
333 | vm_analysis_context* record_encryption( x86_reg reg, arithmetic_expression* expression )
334 | {
335 | if ( this == nullptr ) return nullptr;
336 |
337 | // Drop const.
338 | //
339 | x86_reg rolling_key_reg = state->rolling_key_reg;
340 |
341 | return
342 | // Advance stream to where the encryption sequence begins.
343 | //
344 | begin_encryption( { reg, true }, { rolling_key_reg, true } )
345 |
346 | // Record any operations done to the register.
347 | //
348 | ->record_expression( reg, expression, [&]()
349 | {
350 | // Advance stream to where the encryption sequence ends.
351 | //
352 | return end_encryption( { reg, true }, { rolling_key_reg, true } );
353 | } )
354 | ->cast();
355 | }
356 | };
357 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_bridge.cpp:
--------------------------------------------------------------------------------
1 | #include "vm_bridge.hpp"
2 | #include "vm_analysis_context.hpp"
3 |
4 | namespace vmpattack
5 | {
6 | // Computes the next handler from the bridge, updating the context in respect.
7 | // Returns the next handler's rva.
8 | //
9 | uint64_t vm_bridge::advance( vm_context* context ) const
10 | {
11 | // XOR the encrypted next handler offset by the rolling key.
12 | //
13 | uint32_t next_handler = context->fetch( 4 ) ^ ( uint32_t )context->rolling_key;
14 |
15 | // Decrypt the next handler via the arith expression.
16 | //
17 | next_handler = ( uint32_t )handler_expression->compute( next_handler );
18 |
19 | // Update rolling key.
20 | //
21 | context->rolling_key ^= next_handler;
22 |
23 | // Emulate movsxd.
24 | //
25 | struct { int64_t sign : 32; } s;
26 | s.sign = next_handler;
27 |
28 | // Update flow.
29 | //
30 | context->state->flow += s.sign;
31 |
32 | // Flow contains next handler ea.
33 | //
34 | return context->state->flow;
35 | }
36 |
37 | // Construct a vm_bridge from an initial state and its instruction stream.
38 | // If the operation fails, returns empty {}.
39 | //
40 | std::optional> vm_bridge::from_instruction_stream( const vm_state* state, const instruction_stream* stream )
41 | {
42 | // Copy stream to drop the const.
43 | //
44 | instruction_stream copied_stream = *stream;
45 |
46 | // Initialize empty expression.h
47 | //
48 | std::unique_ptr bridge_expression = std::make_unique();
49 |
50 | vm_analysis_context bridge_analysis_context = vm_analysis_context( &copied_stream, state );
51 |
52 | x86_reg fetch_reg;
53 | size_t fetch_reg_size = 4;
54 |
55 | x86_reg rolling_key_reg = state->rolling_key_reg;
56 |
57 | auto result = ( &bridge_analysis_context )
58 | ->fetch_vip( { fetch_reg, false }, { fetch_reg_size, true } )
59 | ->xor_reg_reg( { fetch_reg, true }, { rolling_key_reg, true } )
60 | ->record_expression( fetch_reg, bridge_expression.get(), [&]()
61 | {
62 | return ( &bridge_analysis_context )
63 | ->id( X86_INS_PUSH );
64 | } );
65 |
66 | // If information fetch failed, return empty {}.
67 | //
68 | if ( !result )
69 | return {};
70 |
71 | // Construct actual vm_bridge from the information.
72 | //
73 | return std::make_unique( copied_stream.base(), std::move( bridge_expression ) );
74 | }
75 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_bridge.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 | #include "arithmetic_expression.hpp"
5 | #include "instruction_stream.hpp"
6 | #include "vm_state.hpp"
7 | #include "vm_context.hpp"
8 |
9 | namespace vmpattack
10 | {
11 | struct vm_handler;
12 |
13 | // This struct represents the virtual machine handler and entry "bridge", which
14 | // is responsible for advancing the context by computing the next handler and
15 | // branching to it.
16 | //
17 | struct vm_bridge
18 | {
19 | // The RVA of the bridge in image space
20 | //
21 | const uint64_t rva;
22 |
23 | // The arithmetic chain used to decrypt the next handler's offset
24 | //
25 | const std::unique_ptr handler_expression;
26 |
27 | // Constructor.
28 | //
29 | vm_bridge( uint64_t rva, std::unique_ptr handler_expression )
30 | : rva( rva ), handler_expression( std::move( handler_expression ) )
31 | {}
32 |
33 | // Computes the next handler from the bridge, updating the context in respect.
34 | // Returns the next handler's rva.
35 | //
36 | uint64_t advance( vm_context* context ) const;
37 |
38 | // Construct a vm_bridge from an initial state and its instruction stream.
39 | // If the operation fails, returns empty {}.
40 | //
41 | static std::optional> from_instruction_stream( const vm_state* state, const instruction_stream* stream );
42 | };
43 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_context.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 | #include
5 | #include "vm_state.hpp"
6 |
7 | namespace vmpattack
8 | {
9 | // This class describes the virtual machine's execution at any single moment.
10 | //
11 | class vm_context
12 | {
13 | public:
14 | // An owning pointer to the current state.
15 | //
16 | std::unique_ptr state;
17 |
18 | // The current value of the rolling key.
19 | //
20 | uint64_t rolling_key;
21 |
22 | // The current absolute value of the virtual instruction pointer.
23 | //
24 | uint64_t vip;
25 |
26 | // Constructor. Takes ownership of state.
27 | //
28 | vm_context( std::unique_ptr state, uint64_t rolling_key, uint64_t vip )
29 | : state( std::move( state ) ), rolling_key( rolling_key ), vip( vip )
30 | {}
31 |
32 | // Fetches an arbitrarily-sized value from the current virtual instruction
33 | // pointer, and increments/decrements it by that size.
34 | // Size given in bytes.
35 | //
36 | template
37 | T fetch( size_t size )
38 | {
39 | // Make sure fetched bytes can fit in result
40 | //
41 | fassert( sizeof( T ) >= size && "Provided return type size must be equal or greater than size given in parameter." );
42 |
43 | // If direction is going upwards, we must first decrement the vip
44 | // because we are not on the correct position currently.
45 | //
46 | if ( state->direction == vm_direction_up )
47 | vip -= size;
48 |
49 | // Zero-initialize the read value, then populate it via a copy from the vip stream.
50 | //
51 | T read_value = {};
52 | memcpy( &read_value, ( void* )vip, size );
53 |
54 | // If direction is going downwards, we must update the vip AFTER the read
55 | // is complete.
56 | //
57 | if ( state->direction == vm_direction_down )
58 | vip += size;
59 |
60 | return read_value;
61 | }
62 | };
63 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_handler.cpp:
--------------------------------------------------------------------------------
1 | #include "vm_handler.hpp"
2 | #include "vm_instruction_set.hpp"
3 | #include "vm_bridge.hpp"
4 | #include "arithmetic_utilities.hpp"
5 |
6 | namespace vmpattack
7 | {
8 | // Decodes and updates the context to construct a vm_instruction describing the instruction's details.
9 | //
10 | vm_instruction vm_handler::decode( vm_context* context ) const
11 | {
12 | std::vector operands;
13 |
14 | // Loop through the handler's operand information.
15 | //
16 | for ( auto const& [operand, expression] : instruction_info->operands )
17 | {
18 | uint64_t operand_value = context->fetch( operand.byte_length );
19 |
20 | operand_value ^= dynamic_size_cast( context->rolling_key, operand.byte_length );
21 | operand_value = expression->compute( operand_value, operand.byte_length );
22 | context->rolling_key ^= operand_value;
23 |
24 | // Add the decrypted operand.
25 | //
26 | operands.push_back( operand_value );
27 | }
28 |
29 | return vm_instruction( this, operands );
30 | }
31 |
32 |
33 | // Construct a vm_handler from its instruction stream.
34 | // Updates vm_state if required by the descriptor.
35 | // If the operation fails, returns empty {}.
36 | //
37 | std::optional> vm_handler::from_instruction_stream( vm_state* initial_state, const instruction_stream* stream )
38 | {
39 | const vm_instruction_desc* matched_instruction_desc = nullptr;
40 |
41 | // Allocate the vm_instruction_info.
42 | //
43 | auto instruction_info = std::make_unique();
44 |
45 | // Copy the stream, to ensure we have a fresh query for each match.
46 | //
47 | instruction_stream copied_stream = *stream;
48 |
49 | // Enumerate instruction set.
50 | //
51 | for ( auto instruction_desc : all_virtual_instructions )
52 | {
53 | //
54 | // TODO: Only update vm_state if updates_state in desc flags.
55 | //
56 |
57 | // Attempt to match the instruction.
58 | //
59 | if ( instruction_desc->match( initial_state, &copied_stream, instruction_info.get() ) )
60 | {
61 | // If match successful, save the instruction descriptor and break out of
62 | // the loop.
63 | //
64 | matched_instruction_desc = instruction_desc;
65 | break;
66 | }
67 |
68 | // Refresh stream.
69 | //
70 | copied_stream = *stream;
71 | }
72 |
73 | // If no matching descriptor found, return empty.
74 | //
75 | if ( !matched_instruction_desc )
76 | return {};
77 |
78 | // If the matched instruction updates state and its updated state is non-null, copy it into the current
79 | // VM state.
80 | //
81 | if ( matched_instruction_desc->flags & vm_instruction_updates_state && instruction_info->updated_state )
82 | *initial_state = *instruction_info->updated_state;
83 |
84 | // If the instruction is a VMEXIT, the handler will not have a bridge as it has no
85 | // forward handler to pass execution to. We can just return a handler with a null bridge.
86 | //
87 | if ( matched_instruction_desc->flags & vm_instruction_vmexit )
88 | return std::make_unique( matched_instruction_desc, std::move( instruction_info ), stream->rva(), nullptr );
89 |
90 | // Attempt to construct a bridge from the end of the stream.
91 | // The end of the stream is used as, in VMProtect, the bridge always immediately
92 | // follows the handler, so since we already advanced the stream while matching,
93 | // it should now be at the beginning of the bridge.
94 | //
95 | auto bridge = vm_bridge::from_instruction_stream( initial_state, &copied_stream );
96 |
97 | // If failed to construct bridge, return empty.
98 | //
99 | if ( !bridge )
100 | return {};
101 |
102 | // Everything was successful - we can now construct the actual vm_handler from the
103 | // information extracted.
104 | //
105 | return std::make_unique( matched_instruction_desc, std::move( instruction_info ), stream->rva(), std::move( *bridge ) );
106 | }
107 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_handler.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 | #include "vm_instruction_desc.hpp"
5 | #include "vm_state.hpp"
6 | #include "vm_instruction_info.hpp"
7 | #include "vm_bridge.hpp"
8 |
9 | namespace vmpattack
10 | {
11 | // This struct describes any virtual machine handler, responsible for executing an
12 | // instruction.
13 | //
14 | struct vm_handler
15 | {
16 | // The handler's RVA in the loaded image.
17 | //
18 | const uint64_t rva;
19 |
20 | // The backing instruction descriptor.
21 | //
22 | const vm_instruction_desc* descriptor;
23 |
24 | // The instance's instruction information.
25 | //
26 | const std::unique_ptr instruction_info;
27 |
28 | // The handler's bridge.
29 | //
30 | const std::unique_ptr bridge;
31 |
32 | // Constructor.
33 | //
34 | vm_handler( const vm_instruction_desc* descriptor, std::unique_ptr instruction_info, uint64_t rva, std::unique_ptr bridge )
35 | : descriptor( descriptor ), instruction_info( std::move( instruction_info ) ), rva( rva ), bridge( std::move( bridge ) )
36 | {}
37 |
38 | // Decodes and updates the context to construct a vm_instruction describing the instruction's details.
39 | //
40 | vm_instruction decode( vm_context* context ) const;
41 |
42 | // Construct a vm_handler from its instruction stream.
43 | // Updates vm_state if required by the descriptor.
44 | // If the operation fails, returns empty {}.
45 | //
46 | static std::optional> from_instruction_stream( vm_state* initial_state, const instruction_stream* stream );
47 | };
48 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_instance.cpp:
--------------------------------------------------------------------------------
1 | #include "vm_instance.hpp"
2 | #include "analysis_context.hpp"
3 |
4 | namespace vmpattack
5 | {
6 | // Creates an initial vm_context for this instance, given an entry stub and the image's load delta.
7 | // The created vm_context is initialized at the first handler in the vip stream.
8 | //
9 | std::unique_ptr vm_instance::initialize_context( uint64_t stub, int64_t load_delta ) const
10 | {
11 | // Decrypt the stub to get the unbased (with orig imagebase) vip address.
12 | // Stub EA must always be cast to 32 bit.
13 | // Add the const 0x100000000 to the result.
14 | //
15 | uint64_t vip = ( uint32_t )vip_expression->compute( stub ) + 0x100000000;
16 |
17 | // Get the absolute vip ea by adding the load delta.
18 | //
19 | uint64_t absolute_vip = vip + load_delta;
20 |
21 | // Copy the initial state for vm_context creation.
22 | //
23 | auto copied_initial_state = std::make_unique( *initial_state );
24 |
25 | // Create a new vm_context and return it.
26 | // The rolling key is the pre-offsetted vip.
27 | //
28 | return std::make_unique( std::move( copied_initial_state ), vip, absolute_vip );
29 | }
30 |
31 | // Adds a handler to the vm_instace.
32 | //
33 | void vm_instance::add_handler( std::unique_ptr handler )
34 | {
35 | // Lock the mutex.
36 | //
37 | const std::lock_guard lock( handlers_mutex );
38 |
39 | // Push back the handler.
40 | //
41 | handlers.push_back( std::move( handler ) );
42 | }
43 |
44 | // Attempts to find a handler, given an rva.
45 | //
46 | std::optional vm_instance::find_handler( uint64_t rva )
47 | {
48 | // Lock the mutex.
49 | //
50 | const std::lock_guard lock( handlers_mutex );
51 |
52 | // Loop through owned handlers.
53 | //
54 | for ( auto& handler : handlers )
55 | {
56 | // If the rva matches, return a non-owning pointer to said handler.
57 | //
58 | if ( handler->rva == rva )
59 | return handler.get();
60 | }
61 |
62 | // If not found return empty {}.
63 | //
64 | return {};
65 | }
66 |
67 | // Attempts to construct a vm_instance from the VMEntry instruction stream.
68 | // If fails, returns empty {}.
69 | //
70 | std::optional> vm_instance::from_instruction_stream( const instruction_stream* stream )
71 | {
72 | // Copy the stream to drop the const.
73 | //
74 | instruction_stream copied_stream = *stream;
75 |
76 | // Create analysis context.
77 | //
78 | analysis_context entry_analysis_context = analysis_context( &copied_stream );
79 |
80 | std::unique_ptr vip_expression = std::make_unique();
81 |
82 | x86_insn vip_offset_ins;
83 | x86_reg vip_reg;
84 | x86_reg vip_offset_reg;
85 | uint64_t vip_stack_offset;
86 |
87 | x86_reg rsp = X86_REG_RSP;
88 | x86_reg stack_reg;
89 | uint64_t stack_alloc_size;
90 |
91 | x86_reg flow_reg;
92 | uint64_t flow_rva;
93 |
94 | x86_reg rolling_key_reg;
95 |
96 | std::vector pushed_regs;
97 |
98 | auto result = ( &entry_analysis_context )
99 | ->track_register_pushes( &pushed_regs, [&]()
100 | {
101 | return ( &entry_analysis_context )
102 | ->fetch_encrypted_vip( { vip_reg, false }, { vip_stack_offset, false } );
103 | } )
104 | ->record_expression( vip_reg, vip_expression.get(), [&]()
105 | {
106 | return ( &entry_analysis_context )
107 | ->offset_reg( { vip_offset_ins, false }, { vip_reg, true }, { vip_offset_reg, false } );
108 | } )
109 | ->mov_reg_reg( { stack_reg, false }, { rsp, true }, false )
110 | ->allocate_stack( { stack_alloc_size, false } )
111 | ->mov_reg_reg( { rolling_key_reg, false }, { vip_reg, true } )
112 | ->set_flow( { flow_reg, false }, { flow_rva, false } );
113 |
114 | // If information fetch failed, return empty {}.
115 | //
116 | if ( !result )
117 | return {};
118 |
119 | // We're gonna peek into the bridge instructions to see if the vip goes forwards or backwards.
120 | // So we have to copy the stream to not modify the previous one.
121 | //
122 | instruction_stream peek_stream = copied_stream;
123 |
124 | // Create a new analysis context from the newly copied stream.
125 | //
126 | analysis_context peek_analysis_context = analysis_context( &peek_stream );
127 |
128 | // The VIP is offseted by 4 at each handler; search for this so.
129 | //
130 | uint64_t vip_offset_size = 4;
131 | x86_insn update_vip_ins;
132 |
133 | auto bridge_result = peek_analysis_context
134 | .update_reg( { update_vip_ins, false }, { vip_reg, true }, { vip_offset_size, true } );
135 |
136 | // If nothing found, something went wrong; return empty {}.
137 | //
138 | if ( !bridge_result )
139 | return {};
140 |
141 | // Construct initial state from information.
142 | //
143 | std::unique_ptr initial_state
144 | = std::make_unique( stack_reg, vip_reg, X86_REG_RSP, rolling_key_reg, flow_reg,
145 | update_vip_ins == X86_INS_ADD ? vm_direction_down : vm_direction_up,
146 | flow_rva );
147 |
148 | // At this point we have competed vm_instance construction. But now we must create the vm_bridge
149 | // that appends the vm_instance.
150 | //
151 | auto bridge = vm_bridge::from_instruction_stream( initial_state.get(), &copied_stream );
152 |
153 | // If unsuccessful, return empty {}.
154 | //
155 | if ( !bridge )
156 | return {};
157 |
158 | // Capture the stack order.
159 | //
160 | std::vector stack;
161 | for ( x86_reg reg : pushed_regs )
162 | {
163 | if ( reg == X86_REG_EFLAGS )
164 | {
165 | stack.push_back( vtil::REG_FLAGS );
166 | continue;
167 | }
168 |
169 | stack.push_back( { vtil::register_physical, ( uint64_t )reg, 64 } );
170 | }
171 |
172 | // Last pushed value is the image base offset, which we'll push manually later.
173 | //
174 | stack.pop_back();
175 |
176 | // Otherwise, construct & return vm_instance.
177 | //
178 | return std::make_unique( copied_stream.base(), std::move( initial_state ), stack, std::move( vip_expression ), std::move( *bridge ) );
179 | }
180 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_instance.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include "vm_state.hpp"
7 | #include "arithmetic_expression.hpp"
8 | #include "vm_bridge.hpp"
9 | #include "vm_handler.hpp"
10 |
11 | namespace vmpattack
12 | {
13 | // This class describes a single VMProtect virtual machine instance.
14 | //
15 | class vm_instance
16 | {
17 | public:
18 | // The RVA of the first instruction of the virtual machine's VMEntry.
19 | //
20 | const uint64_t rva;
21 |
22 | // The bridge of the VMEntry.
23 | //
24 | const std::unique_ptr bridge;
25 |
26 | // Specifies the registers that were pushed at VMEntry in what order.
27 | //
28 | const std::vector entry_frame;
29 |
30 | private:
31 | // A mutex used to access the handlers vector.
32 | //
33 | std::mutex handlers_mutex;
34 |
35 | // An vector of all vm_handlers owned by the vm_instance.
36 | //
37 | std::vector> handlers;
38 |
39 | // The initial vm_state as initialized by the vm_instance.
40 | //
41 | const std::unique_ptr initial_state;
42 |
43 | // The arithmetic expression used to decrypt the VMEntry stub to the initial vip.
44 | //
45 | const std::unique_ptr vip_expression;
46 |
47 | public:
48 | // Constructor.
49 | //
50 | vm_instance( uint64_t rva, std::unique_ptr initial_state, const std::vector& entry_frame, std::unique_ptr vip_expression, std::unique_ptr bridge )
51 | : rva( rva ), initial_state( std::move( initial_state ) ), entry_frame( entry_frame ), vip_expression( std::move( vip_expression ) ), bridge( std::move( bridge ) )
52 | {}
53 |
54 | // Creates an initial vm_context for this instance, given an entry stub and the image's load delta.
55 | // The vm_context is initialized at just before this vm_instance's VMEntry bridge.
56 | //
57 | std::unique_ptr initialize_context( uint64_t stub, int64_t load_delta ) const;
58 |
59 | // Adds a handler to the vm_instace.
60 | //
61 | void add_handler( std::unique_ptr handler );
62 |
63 | // Attempts to find a handler, given an rva.
64 | //
65 | std::optional find_handler( uint64_t rva );
66 |
67 | // Attempts to construct a vm_instance from the VMEntry instruction stream.
68 | // If fails, returns empty {}.
69 | //
70 | static std::optional> from_instruction_stream( const instruction_stream* stream );
71 | };
72 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_instruction.cpp:
--------------------------------------------------------------------------------
1 | #include "vm_instruction.hpp"
2 | #include "vm_handler.hpp"
3 | #include
4 | #include
5 |
6 | namespace vmpattack
7 | {
8 | // Converts the instruction to human-readable format.
9 | //
10 | std::string vm_instruction::to_string() const
11 | {
12 | std::stringstream name_stream;
13 |
14 | name_stream << handler->descriptor->name;
15 | name_stream << "\t";
16 |
17 | // Loop through each of the handler's operands.
18 | //
19 | for ( int i = 0; i < operands.size(); i++ )
20 | {
21 | // Fetch operand and its value.
22 | //
23 | vm_operand& operand = handler->instruction_info->operands[ i ].first;
24 | uint64_t operand_value = operands[ i ];
25 |
26 | switch ( operand.type )
27 | {
28 | case vm_operand_imm:
29 | name_stream << std::hex << operand.size << ":0x" << operand_value;
30 | break;
31 | case vm_operand_reg:
32 | name_stream << "REG:" << operand.size << ":0x" << std::hex << operand_value;
33 | break;
34 | }
35 |
36 | // Is last operand?
37 | //
38 | if ( i != operands.size() - 1 )
39 | name_stream << ",\t";
40 | }
41 |
42 | // Form string from stringstream.
43 | //
44 | return name_stream.str();
45 | }
46 |
47 | // Construct vm_instruction from its handler and a context.
48 | //
49 | std::unique_ptr vm_instruction::from_context( const vm_handler* handler, vm_context* context )
50 | {
51 | std::vector decrypted_operands;
52 |
53 | // Loop through each of the handler's operands.
54 | //
55 | for ( auto& operand : handler->instruction_info->operands )
56 | {
57 | // Fetch the byte_length from the context.
58 | //
59 | uint64_t fetched_operand = context->fetch( operand.first.byte_length );
60 |
61 | // Decrypt the fetched operand bytes with its expression.
62 | //
63 | fetched_operand = operand.second->compute( fetched_operand );
64 |
65 | // Add to vector.
66 | //
67 | decrypted_operands.push_back( fetched_operand );
68 | }
69 |
70 | // Construct object.
71 | //
72 | return std::make_unique( handler, decrypted_operands );
73 | }
74 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_instruction.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "vm_instruction_info.hpp"
3 | #include "vm_context.hpp"
4 |
5 | namespace vmpattack
6 | {
7 | struct vm_handler;
8 |
9 | // This struct represents a fully-formed virtual instruction instance, containing all decoded
10 | // information required for full execution, including VIP-derived information.
11 | //
12 | struct vm_instruction
13 | {
14 | // A non-owning pointer to the instruction's fully-formed handler.
15 | //
16 | const vm_handler* handler;
17 |
18 | // A vector containing the instruction's operands.
19 | // NOTE: even though this is just a vector of uint64_t's, these can represent any size
20 | // (e.g. 1/2/4 bytes) and can be register offsets or immediate values depending on the
21 | // vm_instruction_info in the handler.
22 | //
23 | const std::vector operands;
24 |
25 | // Constructor.
26 | //
27 | vm_instruction( const vm_handler* handler, const std::vector& operands )
28 | : handler( handler ), operands( operands )
29 | {}
30 |
31 | // Converts the instruction to human-readable format.
32 | //
33 | std::string to_string() const;
34 |
35 | // Construct vm_instruction from its handler and a context.
36 | //
37 | static std::unique_ptr from_context( const vm_handler* handler, vm_context* context );
38 | };
39 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_instruction_desc.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include "vm_instruction.hpp"
4 |
5 | namespace vmpattack
6 | {
7 | class instruction_stream;
8 | struct vm_state;
9 | struct vm_instruction_info;
10 |
11 | // Describes flags for information required by the instruction parser.
12 | //
13 | enum vm_instruction_flags : uint32_t
14 | {
15 | // None.
16 | //
17 | vm_instruction_none = 0,
18 |
19 | // The virtual instruction causes the VIP to be modified.
20 | //
21 | vm_instruction_branch = 1 << 0,
22 |
23 | // The virtual instruction causes the VM to exit the virtual context.
24 | //
25 | vm_instruction_vmexit = 1 << 1,
26 |
27 | // The virtual instruction updates the vm state.
28 | //
29 | vm_instruction_updates_state = 1 << 3,
30 |
31 | // The virtual instruction acts creates a new basic block, but does not branch.
32 | //
33 | vm_instruction_creates_basic_block = 1 << 4,
34 | };
35 |
36 | // This struct describes a virtual machine instruction and its
37 | // semantics.
38 | //
39 | struct vm_instruction_desc
40 | {
41 | // Function prototype used to match an instruction stream to a virtual instruction.
42 | // Returns whether or not the match succeeded, and if so, updates the vm_state to
43 | // the state after instruction execution, and sets vm_instruction_info based on the
44 | // instruction instance information.
45 | //
46 | using fn_match = bool( * )( const vm_state* state, instruction_stream* stream, vm_instruction_info* info );
47 |
48 | // Function prototype used to generate VTIL given a virtual instruction.
49 | //
50 | using fn_generate = void( * )( vtil::basic_block* block, const vm_instruction* instruction );
51 |
52 | // The user-friendly name of the instruction.
53 | //
54 | const std::string name;
55 |
56 | // The number of operands the instruction takes in.
57 | //
58 | const uint32_t operand_count;
59 |
60 | // Any flags depicting special instruction behaviours.
61 | //
62 | const uint32_t flags;
63 |
64 | // The match delegate.
65 | //
66 | const fn_match match;
67 |
68 | // The generate delegate.
69 | //
70 | const fn_generate generate;
71 |
72 | // Constructor.
73 | //
74 | vm_instruction_desc( const std::string& name, uint32_t operand_count, uint32_t flags, fn_match match, fn_generate generate )
75 | : name( name ), operand_count( operand_count ), flags( flags ), match( match ), generate( generate )
76 | {}
77 | };
78 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_instruction_info.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include "vm_state.hpp"
7 | #include "arithmetic_expression.hpp"
8 |
9 | namespace vmpattack
10 | {
11 | // The type of the operand.
12 | //
13 | enum vm_operand_type
14 | {
15 | // Immediate.
16 | //
17 | vm_operand_imm,
18 |
19 | // Register (context offset).
20 | //
21 | vm_operand_reg,
22 | };
23 |
24 | // Describes a single virtual instruction operand.
25 | //
26 | struct vm_operand
27 | {
28 | // The type of this operand.
29 | //
30 | vm_operand_type type;
31 |
32 | // The execution size of this operand.
33 | // e.g. an 8 byte register would be 8.
34 | //
35 | size_t size;
36 |
37 | // The byte length of this operand ie. how many vip bytes it consumes.
38 | // e.g. an 8 byte register would be 2, as the index occupies 2 bytes.
39 | //
40 | size_t byte_length;
41 |
42 | // Constructor.
43 | //
44 | vm_operand( vm_operand_type type, size_t size, size_t byte_length )
45 | : type( type ), size( size ), byte_length( byte_length )
46 | {}
47 | };
48 |
49 | // This struct describes the virtual instruction's instace information.
50 | // It describes properties such as the operands and sizes.
51 | // It does not hold any VIP-derived information.
52 | //
53 | struct vm_instruction_info
54 | {
55 | // A map of operand information with their corresponding arithmetic expression used for
56 | // obfuscation.
57 | //
58 | std::vector>> operands;
59 |
60 | // A vector of arbitrary sizes, determined during matching phase and
61 | // used during generation phase.
62 | //
63 | std::vector sizes;
64 |
65 | // Instruction-specific data.
66 | //
67 | vtil::variant custom_data;
68 |
69 | // If the instruction updates the state, the updated state after instruction execution is
70 | // stored here.
71 | //
72 | std::optional updated_state;
73 |
74 | // Empty constructor.
75 | //
76 | vm_instruction_info()
77 | : operands{}, sizes{}
78 | {}
79 |
80 | // Construct via initial operand list.
81 | //
82 | vm_instruction_info( std::vector>> operands )
83 | : operands( std::move( operands ) ), sizes{}
84 | {}
85 | };
86 | }
--------------------------------------------------------------------------------
/VMPAttack/vm_state.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 |
5 | namespace vmpattack
6 | {
7 | // Specifies the direction of the Fetch->Decode->Execute loop.
8 | //
9 | enum vm_direction : uint8_t
10 | {
11 | // Specified that the vip is decremented after instruction.
12 | // execution (ie. via SUB)
13 | //
14 | vm_direction_up,
15 |
16 | // Specified that the vip is incremented after instruction
17 | // execution (ie. via ADD).
18 | //
19 | vm_direction_down,
20 | };
21 |
22 | // This struct describes the current translation state of the virtual machine
23 | // ie. the assignation of registers, the vip direction, and the handler offset base.
24 | //
25 | struct vm_state
26 | {
27 | // The virtual stack register.
28 | //
29 | x86_reg stack_reg;
30 |
31 | // The virtual instruction pointer.
32 | //
33 | x86_reg vip_reg;
34 |
35 | // The virtual context register.
36 | //
37 | x86_reg context_reg;
38 |
39 | // The rolling decryption key register.
40 | //
41 | x86_reg rolling_key_reg;
42 |
43 | // The absolute EIP / RIP that the handlers are offseted from.
44 | //
45 | x86_reg flow_reg;
46 |
47 | // The current fetch direction.
48 | //
49 | vm_direction direction;
50 |
51 | // The absolute EIP / RIP of the block's base, by which the handlers are
52 | // offseted by.
53 | //
54 | uint64_t flow;
55 |
56 | // Full constructor.
57 | //
58 | vm_state( x86_reg stack_reg, x86_reg vip_reg, x86_reg context_reg, x86_reg rolling_key_reg, x86_reg flow_reg, vm_direction direction, uint64_t flow )
59 | : stack_reg( stack_reg ), vip_reg( vip_reg ), context_reg( context_reg ), rolling_key_reg( rolling_key_reg ), flow_reg( flow_reg ), direction( direction ), flow( flow )
60 | {}
61 | };
62 | }
--------------------------------------------------------------------------------
/VMPAttack/vmentry.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "instruction.hpp"
3 | #include
4 |
5 | namespace vmpattack
6 | {
7 | // This struct represents a single routine to be lifted.
8 | //
9 | struct lifting_job
10 | {
11 | // An encrypted pointer to the vip instruction stream.
12 | //
13 | uint64_t entry_stub;
14 |
15 | // The RVA of the function's vmentry.
16 | //
17 | uint64_t vmentry_rva;
18 |
19 | // Constructor.
20 | //
21 | lifting_job( uint64_t entry_stub, uint64_t vmentry_rva )
22 | : entry_stub( entry_stub ), vmentry_rva( vmentry_rva )
23 | {}
24 | };
25 |
26 | // Describes data retrieved from a code scan.
27 | //
28 | struct scan_result
29 | {
30 | // The code RVA followed to create the job.
31 | //
32 | uint64_t rva;
33 |
34 | // The retrieved lifting job.
35 | //
36 | lifting_job job;
37 |
38 | // Constructor.
39 | //
40 | scan_result( uint64_t rva, lifting_job job )
41 | : rva( rva ), job( job )
42 | {}
43 | };
44 |
45 | // This struct represents the information returned by vmentry stub analysis.
46 | //
47 | struct vmentry_analysis_result
48 | {
49 | // Optional instruction that caused the vm-exit.
50 | //
51 | std::optional> exit_instruction;
52 |
53 | // The lifting job described by the vmentry stub.
54 | //
55 | lifting_job job;
56 |
57 | vmentry_analysis_result( std::shared_ptr exit_instruction, lifting_job job )
58 | : exit_instruction( exit_instruction ), job( job )
59 | {}
60 |
61 | vmentry_analysis_result( lifting_job job )
62 | : exit_instruction{}, job( job )
63 | {}
64 | };
65 | }
--------------------------------------------------------------------------------
/VMPAttack/vmpattack.cpp:
--------------------------------------------------------------------------------
1 | #include "vmpattack.hpp"
2 | #include "disassembler.hpp"
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
10 | //#define VMPATTACK_VERBOSE_1
11 |
12 | namespace vmpattack
13 | {
14 | // Attempts to find a vm_instance for the specified rva. If succeeded, returns
15 | // said instance. Otherwise returns nullptr.
16 | //
17 | vm_instance* vmpattack::lookup_instance( uint64_t rva )
18 | {
19 | // Lock the mutex.
20 | //
21 | const std::lock_guard lock( instances_mutex );
22 |
23 | // Enumerate instances without acquiring them.
24 | //
25 | for ( auto& instance : instances )
26 | {
27 | // If rva is equal, return a non-owning ptr.
28 | //
29 | if ( instance->rva == rva )
30 | return instance.get();
31 | }
32 |
33 | // None found - return nullptr.
34 | //
35 | return nullptr;
36 | }
37 |
38 | // Adds the specified vm_instance to the cached list, exersizing thread-safe behaviour
39 | // in doing so.
40 | //
41 | void vmpattack::add_instance( std::unique_ptr instance )
42 | {
43 | // Lock the mutex.
44 | //
45 | const std::lock_guard lock( instances_mutex );
46 |
47 | // Add the instance.
48 | //
49 | instances.push_back( std::move( instance ) );
50 | }
51 |
52 | // Performs the specified lifting job, returning a raw, unoptimized vtil routine.
53 | // Optionally takes in a previous block to fork. If null, creates a new block via a new routine.
54 | // If the passed previous block is not completed, it is completed with a jmp to the newly created block.
55 | //
56 | std::optional vmpattack::lift_internal( uint64_t rva, uint64_t stub, vtil::basic_block* prev_block )
57 | {
58 | // First we must either lookup or create the vm_instance.
59 | //
60 | vm_instance* instance = lookup_instance( rva );
61 |
62 | instruction_stream stream = disassembler::get().disassemble( image_base, rva );
63 |
64 | if ( !instance )
65 | {
66 | // Try to construct from instruction_stream.
67 | //
68 | auto new_instance = vm_instance::from_instruction_stream( &stream );
69 |
70 | // If creation failed, return empty {}.
71 | //
72 | if ( !new_instance )
73 | return {};
74 |
75 | // Otherwise, append vm_instance to cached list and fetch a non-owning ptr.
76 | //
77 | instance = new_instance->get();
78 | add_instance( std::move( *new_instance ) );
79 | }
80 |
81 | // Construct the initial vm_context from the vip stub.
82 | //
83 | std::unique_ptr initial_context = instance->initialize_context( stub, image_base - preferred_image_base );
84 |
85 | vtil::basic_block* block = nullptr;
86 | if ( prev_block )
87 | {
88 | vtil::vip_t block_vip = initial_context->vip - image_base + preferred_image_base;
89 |
90 | // Complete the prev block if not yet completed.
91 | //
92 | if ( !prev_block->is_complete() )
93 | prev_block->jmp( block_vip );
94 |
95 | block = prev_block->fork( block_vip );
96 |
97 | if ( !block )
98 | return {};
99 | }
100 | else
101 | block = vtil::basic_block::begin( initial_context->vip - image_base + preferred_image_base );
102 |
103 | // Push 2 arbitrary values to represent the VM stub and retaddr pushed by VMP.
104 | //
105 | block
106 | ->push( 0xDEADC0DEDEADC0DE )
107 | ->push( 0xBABEBABEBABEBABE );
108 |
109 | // Push all registers on VMENTRY.
110 | //
111 | for ( const vtil::register_desc& reg : instance->entry_frame )
112 | block->push( reg );
113 |
114 | // Offset image base by the preferred image base.
115 | // This is because, currently, the IMGBASE reg is assigned to the offset. This is incorrect.
116 | // It must be assigned to the actual image base.
117 | auto t0 = block->tmp( 64 );
118 | block
119 | ->mov( t0, vtil::REG_IMGBASE )
120 | // ->sub( t0, preferred_image_base )
121 | ->push( t0 );
122 |
123 | if ( !lift_block( instance, block, initial_context.get(), instance->bridge->advance( initial_context.get() ), {} ) )
124 | return {};
125 |
126 | return block->owner;
127 | }
128 |
129 | std::vector map_image( const vtil::pe_image& image )
130 | {
131 | // Kinda amazing that there's no SizeOfImage in a PE wrapper.......
132 | //
133 | uint8_t* mapped_buffer = new uint8_t[ 0x10000000 ]();
134 |
135 | // Copy PE headers.
136 | // TODO: Fix this hardcoded trash.
137 | //
138 | memcpy( mapped_buffer, image.cdata(), 0x1000 );
139 |
140 | // Copy each section.
141 | //
142 | for ( const vtil::section_descriptor& section : image )
143 | {
144 | // Sanity check for potentially broken PEs.
145 | //
146 | if ( image.raw_bytes.size() >= section.physical_address + section.physical_size )
147 | memcpy( &mapped_buffer[ section.virtual_address ], &image.raw_bytes[ section.physical_address ], section.physical_size );
148 | }
149 |
150 | // Copy into a vector.
151 | //
152 | std::vector mapped_image = { mapped_buffer, mapped_buffer + 0x10000000 };
153 |
154 | // Delete the raw buffer.
155 | //
156 | delete[] mapped_buffer;
157 |
158 | return mapped_image;
159 | }
160 |
161 | // Construct from raw image bytes vector.
162 | //
163 | vmpattack::vmpattack( const std::vector& raw_bytes ) :
164 | image( raw_bytes ), mapped_image( map_image( image ) ), image_base( ( uint64_t )mapped_image.data() ), preferred_image_base( 0x0000000140000000 )
165 | {}
166 |
167 | // Lifts a single basic block, given the appropriate information.
168 | //
169 | bool vmpattack::lift_block( vm_instance* instance, vtil::basic_block* block, vm_context* context, uint64_t first_handler_rva, std::vector explored_blocks )
170 | {
171 | #ifdef VMPATTACK_VERBOSE_0
172 | vtil::logger::log( "==> Lifting Basic Block @ VIP RVA 0x%llx and Handler RVA 0x%llx\r\n", context->vip - image_base, first_handler_rva );
173 | #endif
174 |
175 | // Add current block to explored list.
176 | //
177 | explored_blocks.push_back( block->entry_vip );
178 |
179 | uint64_t current_handler_rva = first_handler_rva;
180 | vm_handler* current_handler = nullptr;
181 |
182 | // Main loop responsible for lifting all instructions in this block.
183 | //
184 | while ( true )
185 | {
186 | // Try to lookup a cached handler.
187 | //
188 | auto handler_lookup = instance->find_handler( current_handler_rva );
189 | if ( !handler_lookup )
190 | {
191 | // No cached handler found; construct it ourselves.
192 | //
193 | instruction_stream stream = disassembler::get().disassemble( image_base, current_handler_rva );
194 | auto handler = vm_handler::from_instruction_stream( context->state.get(), &stream );
195 |
196 | // Assert that we matched a handler.
197 | //
198 | fassert( handler && "Failed to match handler. Please report this error with the target." );
199 |
200 | #ifdef _DEBUG
201 | if ( !handler )
202 | __debugbreak();
203 | #endif
204 | // Store the non-owning ptr to the handler, and give ownership of the handler
205 | // to vm_instance by adding it to its list.
206 | //
207 | current_handler = handler->get();
208 | instance->add_handler( std::move( *handler ) );
209 | }
210 | else
211 | {
212 | // Fetch the cached handler.
213 | //
214 | current_handler = *handler_lookup;
215 |
216 | // We much update the VM state manually if nessecary, as we are fetching a cached handler.
217 | //
218 | if ( current_handler->descriptor->flags & vm_instruction_updates_state && current_handler->instruction_info->updated_state )
219 | *context->state = *current_handler->instruction_info->updated_state;
220 | }
221 |
222 | // Save the rolling key before instruction decoding.
223 | //
224 | uint64_t prev_rolling_key = context->rolling_key;
225 |
226 | // Decode the current handler using the context, advancing it.
227 | //
228 | vm_instruction decoded_instruction = current_handler->decode( context );
229 |
230 | std::string vmp_il_text = vtil::format::str( "0x%016x | 0x%016x | 0x%016x | %s", context->vip - preferred_image_base, current_handler_rva, prev_rolling_key, decoded_instruction.to_string().c_str() );
231 |
232 | // Print the instruction.
233 | //
234 | #ifdef VMPATTACK_VERBOSE_1
235 | vtil::logger::log( "%s\n", vmp_il_text );
236 | #endif
237 |
238 | // if ( block->size() != 0 )
239 | // block->label( vmp_il_text );
240 |
241 | // Emit VTIL.
242 | //
243 | current_handler->descriptor->generate( block, &decoded_instruction );
244 |
245 | // Handle VMEXITs.
246 | //
247 | if ( current_handler->descriptor->flags & vm_instruction_vmexit )
248 | {
249 | // Fetch the address the vmexit returns to.
250 | //
251 | auto t0 = block->tmp( 64 );
252 | block
253 | ->pop( t0 );
254 |
255 | // Helper lambda to remove the REG_IMGBASE register from expressions.
256 | //
257 | auto remove_imgbase = [&]( vtil::symbolic::expression::reference src ) -> vtil::symbolic::expression::reference
258 | {
259 | return src.transform( []( vtil::symbolic::expression::delegate& ex )
260 | {
261 | if ( ex->is_variable() )
262 | {
263 | auto& var = ex->uid.get();
264 | if ( var.is_register() && var.reg() == vtil::REG_IMGBASE )
265 | *+ex = { 0, ex->size() };
266 | }
267 | }, true, false ).simplify();
268 | };
269 |
270 | // We might be able to continue lifting if we can determine the VMEXIT return address.
271 | //
272 | vtil::cached_tracer tracer;
273 | vtil::symbolic::expression::reference vmexit_dest = remove_imgbase( tracer.rtrace( { block->end(), t0 } ) );
274 |
275 | #ifdef VMPATTACK_VERBOSE_0
276 | vtil::logger::log( "VMEXIT Destination: %s\r\n", vmexit_dest.simplify( true ) );
277 | #endif
278 |
279 | // First check if the VMEXIT is due to an unsupported instruction that must be manually emitted.
280 | //
281 |
282 | // Is the VMEXIT destination address a constant?
283 | //
284 | if ( vmexit_dest->is_constant() )
285 | {
286 | if ( uint64_t vmexit_ea = *vmexit_dest->get() )
287 | {
288 | uint64_t vmexit_rva = vmexit_ea - preferred_image_base;
289 |
290 | // Is this VMEXIT just caused by an unsupported instruction that we need to manually emit?
291 | // Attempt to analyze the potential entry stub the vmexit exits to.
292 | //
293 | if ( std::optional analysis = analyze_entry_stub( vmexit_rva ) )
294 | {
295 | // If there is an instruction that caused the VMEXIT, emit it.
296 | //
297 | if ( analysis->exit_instruction )
298 | {
299 | // Get registers read / written to from exit instruction.
300 | //
301 | auto [regs_read, regs_write] = ( *analysis->exit_instruction )->get_regs_accessed();
302 |
303 | // Pin any registers read.
304 | //
305 | for ( x86_reg reg_read : regs_read )
306 | block->vpinr( reg_read );
307 |
308 | // Emit the instruction.
309 | //
310 | std::shared_ptr& exit_instruction = *analysis->exit_instruction;
311 | for ( int i = 0; i < exit_instruction->ins.size; i++ )
312 | block->vemit( exit_instruction->ins.bytes[ i ] );
313 |
314 | // Pin any registers written.
315 | //
316 | for ( x86_reg reg_write : regs_write )
317 | block->vpinw( reg_write );
318 | }
319 |
320 | // Continue lifting via the current basic block.
321 | //
322 | lift_internal( analysis->job.vmentry_rva, analysis->job.entry_stub, block );
323 | return true;
324 | }
325 | }
326 | }
327 |
328 | // Next, check if the VMEXIT is due to VXCALL.
329 | //
330 |
331 | // If it is a VXCALL, the next 64 bit value pushed on the stack will be a constant pointer
332 | // to the VMENTRY stub that control will be returned to after non-virtual function execution.
333 | //
334 | auto t1 = block->tmp( 64 );
335 | block->pop( t1 );
336 |
337 | // Flush the cache as we modified the instruction stream.
338 | //
339 | tracer.flush();
340 |
341 | // Tracer will only need to search the current block, as multiblock tracing is not needed for VMEXITs.
342 | //
343 | vtil::symbolic::expression::reference potential_retaddr = remove_imgbase( tracer.rtrace( { block->end(), t1 } ) );
344 |
345 | #ifdef VMPATTACK_VERBOSE_0
346 | vtil::logger::log( "VMEXIT Potential retaddr: %s\r\n", potential_retaddr.to_string() );
347 | #endif
348 |
349 | // Is the potential retaddr a constant?
350 | //
351 | if ( potential_retaddr->is_constant() )
352 | {
353 | // Get the actual RVA without the preferred imagebase injected by VMP.
354 | //
355 | uint64_t potential_retaddr_rva = *potential_retaddr->get() - preferred_image_base;
356 |
357 | // Try to perform VMENTRY stub analysis on the constant retaddr.
358 | //
359 | if ( std::optional analysis = analyze_entry_stub( potential_retaddr_rva ) )
360 | {
361 | // Said retaddr is a VMENTRY stub! We can now conclude that the VMEXIT is caused by a VXCALL.
362 | // So we emit a VXCALL, and continue lifting via the current basic block.
363 | //
364 | block->vxcall( t0 );
365 | lift_internal( analysis->job.vmentry_rva, analysis->job.entry_stub, block );
366 |
367 | return true;
368 | }
369 | }
370 |
371 | // Fall back to simple vexit.
372 | //
373 | block->vexit( t0 );
374 |
375 | // Finish recursion, breaking out of the loop.
376 | //
377 | break;
378 | }
379 |
380 | // If it is a branching instruction, we must follow its behaviour by
381 | // changing our lifting vip.
382 | //
383 | if ( current_handler->descriptor->flags & vm_instruction_branch )
384 | {
385 | // Use the VTIL tracer to trace the branch at the end of the block,
386 | // that was just emitted.
387 | // Cross-block is set to true, as the image base offset is used from
388 | // previous blocks in VMP.
389 | //
390 | vtil::cached_tracer tracer;
391 | vtil::optimizer::aux::branch_info branches_info = vtil::optimizer::aux::analyze_branch( block, &tracer, { .cross_block = true, .pack = true, .resolve_opaque = true } );
392 |
393 | #ifdef VMPATTACK_VERBOSE_0
394 | vtil::logger::log( "Potential Branch Destinations: %s\r\n", branches_info.destinations );
395 | #endif
396 | // Loop through any destinations resolved by the analyzer.
397 | //
398 | for ( auto branch : branches_info.destinations )
399 | {
400 | // Only attempt to resolve branches to constant VIPs.
401 | //
402 | if ( branch->is_constant() )
403 | {
404 | vtil::vip_t branch_ea = *branch->get();
405 | uint64_t branch_rva = branch_ea - preferred_image_base;
406 |
407 | if ( auto next_block = block->fork( branch_ea ) )
408 | {
409 | // If block has already been explored, we can skip it.
410 | //
411 | if ( std::find( explored_blocks.begin(), explored_blocks.end(), branch_ea ) != explored_blocks.end() )
412 | {
413 | #ifdef VMPATTACK_VERBOSE_0
414 | vtil::logger::log( "Skipping already explored block 0x%p\r\n", branch_ea );
415 | #endif
416 | continue;
417 | }
418 |
419 | // If the direction is up, add 1 to the block destination to get the actual ea.
420 | // This is because we offseted it -1 in the ret instruction. So the branch dest
421 | // will be off by -1.
422 | // Thanks to Can for this bugfix!
423 | //
424 | branch_rva += context->state->direction == vm_direction_up ? 1 : 0;
425 |
426 | // Copy context for the branch.
427 | // This is done as we will be walking each possible branch location, and
428 | // each needs its own context, as we cannot taint the current context
429 | // because it needs to be "fresh" for each branch walked.
430 | //
431 | // Since state is a unique_ptr (ie. it cannot by copied), we must manually copy it
432 | // by creating a new vm_state.
433 | // The new branch's initial rolling key is its initial non-relocated vip.
434 | //
435 | vm_context branch_context = { std::make_unique( *context->state ), branch_rva + preferred_image_base, branch_rva + image_base };
436 |
437 | // Update the newly-created context with the handler's bridge, to resolve the first
438 | // handler's rva.
439 | //
440 | uint64_t branch_first_handler_rva = current_handler->bridge->advance( &branch_context );
441 |
442 | // Recursively lift the next block.
443 | // TODO: Multi-thread this part!
444 | //
445 | lift_block( instance, next_block, &branch_context, branch_first_handler_rva, explored_blocks );
446 | }
447 | }
448 | }
449 |
450 | // Branch has been encountered - we cannot continue lifting this block as it has finished.
451 | //
452 | break;
453 | }
454 |
455 | // We need to fork and create a new block if specified so by the instruction
456 | // flags.
457 | //
458 | if ( current_handler->descriptor->flags & vm_instruction_creates_basic_block )
459 | {
460 | vtil::vip_t new_block_ea = context->vip - image_base + preferred_image_base;
461 |
462 | // Offset by -1 if direction is upwards so downwards/upwards streams to the
463 | // same EA don't collide.
464 | //
465 | if ( context->state->direction == vm_direction_up )
466 | new_block_ea -= 1;
467 |
468 | // Jump to the newly-created block.
469 | //
470 | block->jmp( new_block_ea );
471 |
472 | // Fork the current block to create the new block.
473 | //
474 | if ( vtil::basic_block* new_block = block->fork( new_block_ea ) )
475 | {
476 | // Continue lifting via the newly created block.
477 | // Use the current context as we are not changing control flow.
478 | //
479 | return lift_block( instance, new_block, context, current_handler->bridge->advance( context ), explored_blocks );
480 | }
481 | break;
482 | }
483 |
484 | current_handler_rva = current_handler->bridge->advance( context );
485 | }
486 |
487 | return true;
488 | }
489 |
490 | // Performs the specified lifting job, returning a raw, unoptimized vtil routine.
491 | //
492 | std::optional vmpattack::lift( const lifting_job& job )
493 | {
494 | #ifdef VMPATTACK_VERBOSE_0
495 | vtil::logger::log( "=> Began Lifting Job for RVA 0x%llx with stub 0x%llx\r\n", job.vmentry_rva, job.entry_stub );
496 | #endif
497 |
498 | return lift_internal( job.vmentry_rva, job.entry_stub, nullptr );
499 | }
500 |
501 | // Performs an analysis on the specified vmentry stub rva, returning relevant information.
502 | //
503 | std::optional vmpattack::analyze_entry_stub( uint64_t rva ) const
504 | {
505 | // Disassemble at the specified rva, stopping at any branch.
506 | //
507 | instruction_stream stream = disassembler::get().disassemble( image_base, rva, disassembler_none );
508 |
509 | // TODO: Verify this is correct.
510 | // In VMProtect 3, only one instruction can cause a vm exit at any single time.
511 | // So we have two possibilities:
512 | // - [Some instruction that caused a VMExit]
513 | // - PUSH %stub
514 | // - CALL %vmentry_handler
515 | // or
516 | // - PUSH %stub
517 | // - CALL %vmentry_handler
518 |
519 | // Check size validity.
520 | //
521 | if ( stream.instructions.size() > 3 || stream.instructions.size() < 2 )
522 | return {};
523 |
524 | std::shared_ptr call_ins = stream.instructions[ stream.instructions.size() - 1 ];
525 | std::shared_ptr push_ins = stream.instructions[ stream.instructions.size() - 2 ];
526 |
527 | // Check if call is valid.
528 | //
529 | if ( call_ins->ins.id != X86_INS_CALL || call_ins->operand_type( 0 ) != X86_OP_IMM )
530 | return {};
531 |
532 | // Check if stub push is valid.
533 | //
534 | if ( push_ins->ins.id != X86_INS_PUSH || push_ins->operand_type( 0 ) != X86_OP_IMM )
535 | return {};
536 |
537 | uint64_t entry_stub = push_ins->operand( 0 ).imm;
538 | uint64_t vmentry_rva = call_ins->operand( 0 ).imm;
539 |
540 | // If there's an instruction that caused the VMExit, include it in the analysis data.
541 | //
542 | if ( stream.instructions.size() == 3 )
543 | return vmentry_analysis_result { stream.instructions[ 0 ], { entry_stub, vmentry_rva } };
544 |
545 | return vmentry_analysis_result{ { entry_stub, vmentry_rva } };
546 | }
547 |
548 | // Sanitize section strings to make them eligible for comparison with constants.
549 | //
550 | std::string sanitize_section_name( std::string name )
551 | {
552 | return std::string( name.c_str() );
553 | };
554 |
555 | // Scans the given instruction vector for VM entries.
556 | // Returns a list of results, of [root rva, lifting_job]
557 | //
558 | std::vector vmpattack::scan_for_vmentry( const std::vector>& instructions ) const
559 | {
560 | std::vector results = {};
561 |
562 | std::vector potential_vmp_sections = {};
563 |
564 | // Lambda to determine whether the given section name is potentially a VMP section.
565 | //
566 | auto is_vmp_section = []( const std::string& section_name ) -> bool
567 | {
568 | return section_name.ends_with( "0" ) || section_name.ends_with( "1" );
569 | };
570 |
571 | // Lambda to determine whether the given rva is within any of the potential VMP sections.
572 | //
573 | auto within_potential_vmp_sections = [&]( uint64_t rva ) -> bool
574 | {
575 | auto [rva_section, rva_section_size] = image.rva_to_section( rva );
576 |
577 | for ( const vtil::section_descriptor& section : potential_vmp_sections )
578 | if ( rva_section.name == section.name )
579 | return true;
580 |
581 | return false;
582 | };
583 |
584 | // Enumerate all sections.
585 | //
586 | for ( const vtil::section_descriptor& section : image )
587 | if ( is_vmp_section( sanitize_section_name( section.name ) ) )
588 | potential_vmp_sections.push_back( section );
589 |
590 | // Iterate through each instruction.
591 | //
592 | for ( const std::unique_ptr& instruction : instructions )
593 | {
594 | // If instruction is JMP IMM, follow it.
595 | //
596 | if ( instruction->is_uncond_jmp() && instruction->operand( 0 ).type == X86_OP_IMM )
597 | {
598 | // Is the potential stub within a VMP section?
599 | //
600 | uint64_t potential_vmentry_rva = instruction->operand( 0 ).imm;
601 | if ( within_potential_vmp_sections( potential_vmentry_rva ) )
602 | {
603 | // Try to analyze the address to verify that it is indeed a VMENTRY stub.
604 | //
605 | if ( std::optional analysis_result = analyze_entry_stub( potential_vmentry_rva ) )
606 | {
607 | // Only accept stubs with no exit instructions.
608 | // Even though this should never really happen, just use this sanity check here for good measure.
609 | //
610 | if ( !analysis_result->exit_instruction )
611 | results.push_back( { instruction->ins.address, analysis_result->job } );
612 | }
613 | }
614 | }
615 | }
616 |
617 | // Return the accumulated scan results.
618 | //
619 | return results;
620 | }
621 |
622 | // Scans the given code section for VM entries.
623 | // Returns a list of results, of [root rva, lifting_job]
624 | //
625 | std::vector vmpattack::scan_for_vmentry( const std::string& section_name ) const
626 | {
627 | std::optional target_section = {};
628 |
629 | std::string sanitized_section_name = sanitize_section_name( section_name );
630 |
631 | // Find target section.
632 | //
633 | for ( const vtil::section_descriptor& section : image )
634 | {
635 | if ( sanitize_section_name( section.name ) == sanitized_section_name )
636 | {
637 | target_section = section;
638 | break;
639 | }
640 | }
641 |
642 | // Is the desired section was not found, return empty {}.
643 | //
644 | if ( !target_section )
645 | return {};
646 |
647 | // Get a vector of instructions in the .text section, starting from the very beginning.
648 | //
649 | std::vector> text_instructions = disassembler::get().disassembly_simple( image_base, target_section->virtual_address, target_section->virtual_address + target_section->virtual_size );
650 |
651 | // Scan the retrieved instructions.
652 | //
653 | return scan_for_vmentry( text_instructions );
654 | }
655 |
656 | // Scans all executable sections for VM entries.
657 | // Returns a list of results, of [root rva, lifting_job]
658 | //
659 | std::vector vmpattack::scan_for_vmentry() const
660 | {
661 | std::vector results = {};
662 |
663 | // Enumerate all sections.
664 | //
665 | for ( const vtil::section_descriptor& section : image )
666 | {
667 | if ( section.execute )
668 | {
669 | // Get a vector of instructions in the .text section, starting from the very beginning.
670 | //
671 | std::vector> text_instructions = disassembler::get().disassembly_simple( image_base, section.virtual_address, section.virtual_address + section.virtual_size );
672 |
673 | // Scan the retrieved instructions and concat the result.
674 | //
675 | std::vector section_results = scan_for_vmentry( text_instructions );
676 | results.insert( results.end(), section_results.begin(), section_results.end() );
677 | }
678 | }
679 |
680 | return results;
681 | }
682 | }
--------------------------------------------------------------------------------
/VMPAttack/vmpattack.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "vm_instance.hpp"
3 | #include "vmentry.hpp"
4 | #include
5 | #include
6 | #include
7 |
8 | namespace vmpattack
9 | {
10 | // This class is the root object, controlling all other interfaces.
11 | //
12 | class vmpattack
13 | {
14 | private:
15 | // The PE image descriptor.
16 | //
17 | const vtil::pe_image image;
18 |
19 | // The mapped PE image buffer.
20 | //
21 | const std::vector mapped_image;
22 |
23 | // The image's preferred image base.
24 | //
25 | const uint64_t preferred_image_base;
26 |
27 | // A pointer to the loaded image in memory's base.
28 | //
29 | const uint64_t image_base;
30 |
31 | // A mutex to handle shared writes to the cached instances vector.
32 | //
33 | std::mutex instances_mutex;
34 |
35 | // A vector of all cached vm_instances.
36 | //
37 | std::vector> instances;
38 |
39 | // Attempts to find a vm_instance for the specified rva. If succeeded, returns
40 | // said instance. Otherwise returns nullptr.
41 | //
42 | vm_instance* lookup_instance( uint64_t rva );
43 |
44 | // Adds the specified vm_instance to the cached list, exersizing thread-safe behaviour
45 | // in doing so.
46 | //
47 | void add_instance( std::unique_ptr instance );
48 |
49 | // Lifts a single basic block, given the appropriate information.
50 | //
51 | bool lift_block( vm_instance* instance, vtil::basic_block* block, vm_context* context, uint64_t first_handler_rva, std::vector explored_blocks );
52 |
53 | // Performs the specified lifting job, returning a raw, unoptimized vtil routine.
54 | // Optionally takes in a previous block to fork. If null, creates a new block via a new routine.
55 | // If the passed previous block is not completed, it is completed with a jmp to the newly created block.
56 | //
57 | std::optional lift_internal( uint64_t rva, uint64_t stub, vtil::basic_block* block );
58 |
59 | // Scans the given instruction vector for VM entries.
60 | // Returns a list of results, of [root rva, lifting_job]
61 | //
62 | std::vector scan_for_vmentry( const std::vector>& instructions ) const;
63 |
64 | public:
65 | // Constructor.
66 | //
67 | vmpattack( uint64_t preferred_image_base, uint64_t image_base )
68 | : preferred_image_base( preferred_image_base ), image_base( image_base )
69 | {}
70 |
71 | // Construct from raw image bytes vector.
72 | //
73 | vmpattack( const std::vector& raw_image_bytes );
74 |
75 | // Performs the specified lifting job, returning a raw, unoptimized vtil routine.
76 | //
77 | std::optional lift( const lifting_job& job );
78 |
79 | // Performs an analysis on the specified vmentry stub rva, returning relevant information.
80 | //
81 | std::optional analyze_entry_stub( uint64_t rva ) const;
82 |
83 | // Scans the given code section for VM entries.
84 | // Returns a list of results, of [root rva, lifting_job]
85 | //
86 | std::vector scan_for_vmentry( const std::string& section_name ) const;
87 |
88 | // Scans all executable sections for VM entries.
89 | // Returns a list of results, of [root rva, lifting_job]
90 | //
91 | std::vector scan_for_vmentry() const;
92 | };
93 | }
--------------------------------------------------------------------------------
/VMPAttack_Tester/Assembly.asm:
--------------------------------------------------------------------------------
1 | PUBLIC __vmpfnc
2 |
3 | INCLUDE VMProtectSDKa.inc
4 |
5 | .code
6 |
7 | __vmpfnc PROC
8 | call VMProtectIsVirtualMachinePresent
9 | ret
10 | __vmpfnc ENDP
11 |
12 | END
13 |
14 |
--------------------------------------------------------------------------------
/VMPAttack_Tester/VMPAttack_Tester.cpp:
--------------------------------------------------------------------------------
1 | // VMPAttack_Tester.cpp : This file contains the 'main' function. Program execution begins and ends there.
2 | //
3 |
4 | #include
5 | #include
6 |
7 | extern "C" void __vmpfnc();
8 |
9 | #pragma pack(push, 1)
10 | struct much_complex_object
11 | {
12 | uint32_t a;
13 | uint8_t b;
14 | uint8_t c;
15 | uint16_t d;
16 |
17 | __declspec( noinline ) int32_t wow()
18 | {
19 | return c * b + a;
20 | }
21 | };
22 | #pragma pack(pop)
23 |
24 | int main( int argc, const char* args[] )
25 | {
26 |
27 |
28 | while ( r >= 0x50 )
29 | {
30 | r *= 0x43;
31 | r /= getchar();
32 | r <<= 7;
33 | r = _rotl( r, getchar() );
34 | r ^= __rdtsc();
35 | }
36 |
37 | return r;
38 | }
39 |
40 | // Run program: Ctrl + F5 or Debug > Start Without Debugging menu
41 | // Debug program: F5 or Debug > Start Debugging menu
42 |
43 | // Tips for Getting Started:
44 | // 1. Use the Solution Explorer window to add/manage files
45 | // 2. Use the Team Explorer window to connect to source control
46 | // 3. Use the Output window to see build output and other messages
47 | // 4. Use the Error List window to view errors
48 | // 5. Go to Project > Add New Item to create new code files, or Project > Add Existing Item to add existing code files to the project
49 | // 6. In the future, to open this project again, go to File > Open > Project and select the .sln file
50 |
--------------------------------------------------------------------------------
/VMPAttack_Tester/VMPAttack_Tester.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | Win32
7 |
8 |
9 | Release
10 | Win32
11 |
12 |
13 | Debug
14 | x64
15 |
16 |
17 | Release
18 | x64
19 |
20 |
21 |
22 | 16.0
23 | Win32Proj
24 | {a8b46b1a-78c8-4003-9848-5f02549d0ed1}
25 | VMPAttackTester
26 | 10.0
27 |
28 |
29 |
30 | Application
31 | true
32 | v142
33 | Unicode
34 |
35 |
36 | Application
37 | false
38 | v142
39 | true
40 | Unicode
41 |
42 |
43 | Application
44 | true
45 | v142
46 | Unicode
47 |
48 |
49 | Application
50 | false
51 | v142
52 | true
53 | Unicode
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 | true
76 |
77 |
78 | false
79 |
80 |
81 | true
82 | C:\Program Files\VMProtect Ultimate\Include\C;$(IncludePath)
83 | C:\Program Files\VMProtect Ultimate\Lib\Windows;$(LibraryPath)
84 |
85 |
86 | false
87 | C:\Program Files\VMProtect Ultimate\Include\C;$(IncludePath)
88 | C:\Program Files\VMProtect Ultimate\Lib\Windows;$(LibraryPath)
89 |
90 |
91 |
92 | Level3
93 | true
94 | WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
95 | true
96 |
97 |
98 | Console
99 | true
100 |
101 |
102 |
103 |
104 | Level3
105 | true
106 | true
107 | true
108 | WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
109 | true
110 |
111 |
112 | Console
113 | true
114 | true
115 | true
116 |
117 |
118 |
119 |
120 | Level3
121 | true
122 | _DEBUG;_CONSOLE;%(PreprocessorDefinitions)
123 | true
124 |
125 |
126 | Console
127 | true
128 | VMProtectSDK64.lib;%(AdditionalDependencies)
129 |
130 |
131 | C:\Program Files\VMProtect Ultimate\Include\ASM;%(IncludePaths)
132 |
133 |
134 |
135 |
136 | Level3
137 | true
138 | true
139 | true
140 | NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
141 | true
142 |
143 |
144 | Console
145 | true
146 | true
147 | true
148 | VMProtectSDK64.lib;%(AdditionalDependencies)
149 | true
150 |
151 |
152 |
153 |
154 | C:\Program Files\VMProtect Ultimate\Include\ASM;%(IncludePaths)
155 |
156 |
157 |
158 |
159 |
160 |
161 |
162 | Document
163 |
164 |
165 |
166 |
167 |
168 |
169 |
--------------------------------------------------------------------------------
/VMPAttack_Tester/VMPAttack_Tester.vcxproj.filters:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
6 | cpp;c;cc;cxx;c++;def;odl;idl;hpj;bat;asm;asmx
7 |
8 |
9 | {93995380-89BD-4b04-88EB-625FBE52EBFB}
10 | h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd
11 |
12 |
13 | {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
14 | rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
15 |
16 |
17 |
18 |
19 | Source Files
20 |
21 |
22 |
23 |
24 | Source Files
25 |
26 |
27 |
--------------------------------------------------------------------------------
/entry_stub.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xnobody/vmpattack/74945ac3e05cbcfec549abde72a7667fd92a76cc/entry_stub.png
--------------------------------------------------------------------------------
/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xnobody/vmpattack/74945ac3e05cbcfec549abde72a7667fd92a76cc/screenshot.png
--------------------------------------------------------------------------------