├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── install.sh ├── kvm-rvmi-kmod ├── COPYING ├── Kbuild ├── Makefile ├── README ├── configure ├── external-module-compat-comm.h ├── external-module-compat.c ├── include-compat │ ├── asm-x86 │ │ ├── asm.h │ │ ├── clocksource.h │ │ ├── cmpxchg.h │ │ ├── fpu-internal.h │ │ ├── fpu │ │ │ └── api.h │ │ ├── mce.h │ │ ├── msidef.h │ │ ├── msr-index.h │ │ ├── perf_event.h │ │ ├── pvclock-abi.h │ │ ├── pvclock.h │ │ ├── xcr.h │ │ └── xsave.h │ ├── linux │ │ ├── atomic.h │ │ ├── bsearch.h │ │ ├── context_tracking.h │ │ ├── eventfd.h │ │ ├── export.h │ │ ├── ftrace_event.h │ │ ├── intel-iommu.h │ │ ├── iova.h │ │ ├── irq_work.h │ │ ├── irqbypass.h │ │ ├── jump_label.h │ │ ├── magic.h │ │ ├── marker.h │ │ ├── math64.h │ │ ├── mmu_context.h │ │ ├── mmu_notifier.h │ │ ├── msi.h │ │ ├── mutex.h │ │ ├── perf_event.h │ │ ├── pvclock_gtod.h │ │ ├── ratelimit.h │ │ ├── refcount.h │ │ ├── sched │ │ │ ├── mm.h │ │ │ ├── signal.h │ │ │ └── stat.h │ │ ├── swait.h │ │ ├── syscore_ops.h │ │ ├── tboot.h │ │ ├── time64.h │ │ ├── timekeeper_internal.h │ │ ├── trace_events.h │ │ ├── tracepoint.h │ │ └── user-return-notifier.h │ └── trace │ │ └── define_trace.h ├── kvm-kmod.spec ├── powerpc │ └── Makefile.pre ├── refcount.c ├── scripts │ ├── 65-kvm.rules │ └── make-release ├── srcu.c ├── swait.c ├── sync ├── unifdef.h └── x86 │ ├── Kbuild │ ├── Makefile.pre │ ├── compat-x86.c │ ├── external-module-compat.h │ ├── pmu-stubs.c │ └── preempt.c ├── python └── qmp │ ├── qmp │ ├── __init__.py │ └── qmp.py │ └── setup.py └── resources └── rvmi.png /.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | *.d 3 | *~ 4 | *.flat 5 | *.a 6 | .*.cmd 7 | *.ko 8 | *.mod.c 9 | kvm-rvmi-kmod/config.mak 10 | kvm-rvmi-kmod/kvm-kmod-config.h 11 | kvm-rvmi-kmod/modules.order 12 | kvm-rvmi-kmod/Module.symvers 13 | kvm-rvmi-kmod/Modules.symvers 14 | kvm-rvmi-kmod/Module.markers 15 | kvm-rvmi-kmod/.tmp_versions 16 | kvm-rvmi-kmod/.tmp.kvm-kmod.* 17 | kvm-rvmi-kmod/include-compat/asm 18 | kvm-rvmi-kmod/include 19 | kvm-rvmi-kmod/x86/modules.order 20 | kvm-rvmi-kmod/x86/i825[49].[ch] 21 | kvm-rvmi-kmod/x86/kvm_main.c 22 | kvm-rvmi-kmod/x86/kvm_svm.h 23 | kvm-rvmi-kmod/x86/vmx.[ch] 24 | kvm-rvmi-kmod/x86/svm.[ch] 25 | kvm-rvmi-kmod/x86/mmu.[ch] 26 | kvm-rvmi-kmod/x86/mmu_audit.c 27 | kvm-rvmi-kmod/x86/paging_tmpl.h 28 | kvm-rvmi-kmod/x86/ioapic.[ch] 29 | kvm-rvmi-kmod/x86/iodev.h 30 | kvm-rvmi-kmod/x86/irq.[ch] 31 | kvm-rvmi-kmod/x86/lapic.[ch] 32 | kvm-rvmi-kmod/x86/tss.h 33 | kvm-rvmi-kmod/x86/x86.[ch] 34 | kvm-rvmi-kmod/x86/coalesced_mmio.[ch] 35 | kvm-rvmi-kmod/x86/kvm_cache_regs.h 36 | kvm-rvmi-kmod/x86/irq_comm.c 37 | kvm-rvmi-kmod/x86/timer.c 38 | kvm-rvmi-kmod/x86/kvm_timer.h 39 | kvm-rvmi-kmod/x86/iommu.c 40 | kvm-rvmi-kmod/x86/svm-trace.h 41 | kvm-rvmi-kmod/x86/trace-arch.h 42 | kvm-rvmi-kmod/x86/trace.h 43 | kvm-rvmi-kmod/x86/vmx-trace.h 44 | kvm-rvmi-kmod/x86/assigned-dev.c 45 | kvm-rvmi-kmod/x86/emulate.c 46 | kvm-rvmi-kmod/x86/eventfd.c 47 | kvm-rvmi-kmod/x86/mmutrace.h 48 | kvm-rvmi-kmod/x86/async_pf.[ch] 49 | kvm-rvmi-kmod/x86/pmu.c 50 | kvm-rvmi-kmod/x86/cpuid.[ch] 51 | kvm-rvmi-kmod/x86/debugfs.c 52 | kvm-rvmi-kmod/x86/hyperv.[ch] 53 | kvm-rvmi-kmod/x86/irqchip.c 54 | kvm-rvmi-kmod/x86/mtrr.c 55 | kvm-rvmi-kmod/x86/page_track.c 56 | kvm-rvmi-kmod/x86/pmu.h 57 | kvm-rvmi-kmod/x86/pmu_amd.c 58 | kvm-rvmi-kmod/x86/pmu_intel.c 59 | kvm-rvmi-kmod/x86/vfio.[ch] 60 | kvm-rvmi-kmod/x86/vmi.[ch] 61 | kvm-rvmi-kmod/.stgit-* 62 | 
-------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "rvmi-qemu"] 2 | path = rvmi-qemu 3 | url = https://github.com/fireeye/rvmi-qemu.git 4 | [submodule "rvmi-rekall"] 5 | path = rvmi-rekall 6 | url = https://github.com/fireeye/rvmi-rekall.git 7 | [submodule "kvm-rvmi-kmod/linux"] 8 | path = kvm-rvmi-kmod/linux 9 | url = https://github.com/fireeye/rvmi-kvm.git 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. 
To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. 
If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. 
Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. 
If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![rVMI Logo](/resources/rvmi.png) 2 | 3 | # rVMI 4 | 5 | rVMI is a debugger on steroids. It leverages Virtual Machine Introspection (VMI) 6 | and memory forensics to provide full system analysis. This means that an analyst 7 | can inspect userspace processes, kernel drivers, and pre-boot environments in a 8 | single tool. 9 | 10 | It was specifically designed for interactive dynamic malware analysis. rVMI isolates 11 | itself from the malware by placing its interactive debugging environment outside of the 12 | virtual machine (VM), at the hypervisor level. Through the use of VMI the analyst 13 | still has full control of the VM, which allows her to pause the VM at any point in 14 | time and to use typical debugging features such as breakpoints and watchpoints. In 15 | addition, rVMI provides access to the entire Rekall feature set, which enables an 16 | analyst to inspect the kernel and its data structures with ease. 17 | 18 | NOTE: rVMI will only run on Intel CPUs with virtualization extensions. Because rVMI 19 | depends on hardware virtualization, it will not work inside an already virtualized 20 | environment, so do not try to run it within a VM. 21 | 22 | ## Installation 23 | 24 | rVMI consists of three components: KVM kernel modules, QEMU, and Rekall. This 25 | repository will pull in all required components and install them with one 26 | simple install script. 27 | 28 | For those who are interested, the repositories for these components can be 29 | found here: 30 | https://github.com/fireeye/rvmi-kvm 31 | https://github.com/fireeye/rvmi-qemu 32 | https://github.com/fireeye/rvmi-rekall 33 | 34 | ### Getting Started 35 | 36 | Begin by cloning the rVMI repository: 37 | 38 | ``` 39 | $ git clone --recursive https://github.com/fireeye/rvmi.git 40 | $ cd rvmi 41 | ``` 42 | 43 | ### Build 44 | 45 | Building all components is handled by the install script. Simply perform the 46 | following steps: 47 | 48 | ``` 49 | $ ./install.sh build 50 | ``` 51 | 52 | ### Install 53 | 54 | The install script can also handle the installation of all components. This 55 | will install the following components: 56 | * qmp python module 57 | * rVMI QEMU 58 | * rVMI Rekall 59 | * rVMI KVM modules 60 | 61 | Installing these components can be achieved with the following command: 62 | 63 | ``` 64 | $ ./install.sh install 65 | ``` 66 | 67 | #### Kernel Module Persistence 68 | This will not install the kernel modules in a persistent manner (they will not 69 | survive a reboot). In order to make these changes persistent, you must replace 70 | your KVM modules on disk. Once built, the kernel modules can be found here: 71 | kvm-rvmi-kmod/x86/*.ko 72 | 73 | These modules must be copied to the proper location on your machine. This 74 | location can be found by running: 75 | ``` 76 | $ modinfo kvm 77 | ``` 78 | 79 | Copy the kernel modules to the location specified by the "filename" output of 80 | the above command. 81 |
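On a typical system the whole sequence looks roughly like the following (a sketch only: the module directory shown is illustrative example output, so use whatever path `modinfo` actually reports on your machine):

```
$ modinfo -F filename kvm
/lib/modules/4.4.0-66-generic/kernel/arch/x86/kvm/kvm.ko
$ sudo cp kvm-rvmi-kmod/x86/*.ko /lib/modules/$(uname -r)/kernel/arch/x86/kvm/
$ sudo depmod -a
```

Running `depmod -a` afterwards refreshes the module dependency information so the replaced modules are picked up cleanly on the next boot.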
82 | ## Using rVMI 83 | 84 | ### Start the VM 85 | The first step in starting rVMI is to start a VM. We will not cover creating a VM, 86 | as the steps are the same as for any QEMU VM and these instructions are 87 | readily available online. We do recommend that you use a qcow2 image, as this 88 | format supports snapshots within the image. 89 | 90 | You may start QEMU in the standard way, making sure that you enable KVM and QMP: 91 | 92 | ``` 93 | $ qemu-system-x86_64 -enable-kvm -qmp unix:[QMP SOCK PATH],server,nowait [...] 94 | ``` 95 | 96 | We have also included a Python wrapper script that automatically incorporates these 97 | options. You can access the help for this script using the -h flag. 98 | 99 | ``` 100 | $ qemu.py -h 101 | ``` 102 | 103 | Make a note of the QMP socket path, as you will need to pass it to Rekall in the next step. 104 | 105 | ### Start Rekall 106 | 107 | Use the QMP socket path to start Rekall. 108 | 109 | ``` 110 | $ rekall -f [QMP SOCK PATH] 111 | ``` 112 |
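If Rekall cannot connect, you can verify that the QMP socket is responsive by speaking QMP to it directly, for example with socat (a sketch: the greeting and return values below are illustrative, and the socket path is the one you chose above):

```
$ socat - UNIX-CONNECT:[QMP SOCK PATH]
{"QMP": {"version": {...}, "capabilities": []}}
{"execute": "qmp_capabilities"}
{"return": {}}
{"execute": "query-status"}
{"return": {"status": "running", "running": true, ...}}
```

The server sends a greeting on connect; after negotiating capabilities with `qmp_capabilities`, any QMP command such as `query-status` should answer, which confirms the socket is ready for Rekall.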
113 | ## Licensing and Copyright 114 | 115 | Copyright 2017 FireEye, Inc. All Rights Reserved. 116 | 117 | 118 | 119 | This program is free software; you can redistribute it and/or 120 | modify it under the terms of the GNU General Public License 121 | as published by the Free Software Foundation, Version 2 122 | of the License. 123 | 124 | This program is distributed in the hope that it will be useful, 125 | but WITHOUT ANY WARRANTY; without even the implied warranty of 126 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 127 | GNU General Public License for more details. 128 | 129 | You should have received a copy of the GNU General Public License 130 | along with this program; if not, write to the Free Software 131 | Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 132 | 02111-1307, USA. 133 | 134 | ## Bugs and Support 135 | 136 | There is no support provided. There is NO 137 | warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR 138 | PURPOSE. 139 | 140 | If you think you've found a bug and you are not sure to which subproject 141 | (rVMI-KVM, rVMI-QEMU, rVMI-Rekall) it belongs, or if you want to file a 142 | general bug, please report it here: 143 | 144 | https://github.com/fireeye/rvmi/issues 145 | 146 | Otherwise, please report the bug in the repository of the subproject 147 | where it is located: 148 | 149 | https://github.com/fireeye/rvmi-qemu/issues 150 | https://github.com/fireeye/rvmi-kvm/issues 151 | https://github.com/fireeye/rvmi-rekall/issues 152 | 153 | For more details on the bug submission process, take a look at the 154 | README file of the subproject. 155 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "${1}" = "build" ]; then 4 | echo "build rVMI" 5 | 6 | cd rvmi-qemu 7 | ./configure --target-list=x86_64-softmmu 8 | make 9 | cd ../ 10 | 11 | cd kvm-rvmi-kmod 12 | ./configure 13 | make sync 14 | make 15 | cd ../ 16 | 17 | elif [ "${1}" = "install" ]; then 18 | echo "install rVMI" 19 | 20 | cd python/qmp/ 21 | python ./setup.py install 22 | cd ../../ 23 | 24 | cd rvmi-qemu 25 | make install 26 | cd ../ 27 | 28 | cd rvmi-rekall/rekall-core 29 | python ./setup.py install 30 | cd ../rekall-agent 31 | python ./setup.py install 32 | cd ../ 33 | python ./setup.py install 34 | cd ../ 35 | 36 | rmmod kvm-intel 37 | rmmod kvm 38 | insmod kvm-rvmi-kmod/x86/kvm.ko 39 | insmod kvm-rvmi-kmod/x86/kvm-intel.ko 40 | else 41 | echo "Usage: ${0} [build | install]" 42 | fi 43 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/COPYING: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed.
8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 
71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 
128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. 
Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. 
If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | <one line to give the program's name and a brief idea of what it does.> 294 | Copyright (C) <year> <name of author> 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details.
305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 331 | 332 | <signature of Ty Coon>, 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License.
340 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/Kbuild: -------------------------------------------------------------------------------- 1 | obj-$(CONFIG_X86) += x86/ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/Makefile: -------------------------------------------------------------------------------- 1 | $(if $(wildcard config.mak),,$(error Please run configure first)) 2 | include config.mak 3 | 4 | ARCH_DIR = $(if $(filter $(ARCH),x86_64 i386),x86,$(ARCH)) 5 | ARCH_CONFIG := $(shell echo $(ARCH_DIR) | tr '[:lower:]' '[:upper:]') 6 | # NONARCH_CONFIG used for unifdef, and only cover X86 now 7 | NONARCH_CONFIG = $(filter-out $(ARCH_CONFIG),X86) 8 | 9 | DESTDIR = / 10 | 11 | MAKEFILE_PRE = $(ARCH_DIR)/Makefile.pre 12 | 13 | export INSTALL_MOD_DIR=updates 14 | 15 | rpmrelease = devel 16 | 17 | LINUX = ./linux 18 | 19 | all:: prerequisite 20 | # include header priority 1) $LINUX 2) $KERNELDIR 3) include-compat 21 | $(MAKE) -C $(KERNELDIR) M=`pwd` \ 22 | LINUXINCLUDE="-I`pwd`/include -I`pwd`/include/uapi -Iinclude \ 23 | $(if $(KERNELSOURCEDIR),\ 24 | -Iinclude2 -I$(KERNELSOURCEDIR)/include \ 25 | -I$(KERNELSOURCEDIR)/include/uapi \ 26 | -I$(KERNELSOURCEDIR)/arch/${ARCH_DIR}/include \ 27 | -I$(KERNELSOURCEDIR)/arch/${ARCH_DIR}/include/uapi, \ 28 | -Iinclude/uapi -Iarch/${ARCH_DIR}/include \ 29 | -Iarch/${ARCH_DIR}/include/uapi) \ 30 | -Iinclude/generated/uapi -Iarch/${ARCH_DIR}/include/generated \ 31 | -Iarch/${ARCH_DIR}/include/generated/uapi \ 32 | -I`pwd`/include-compat -I`pwd`/${ARCH_DIR} \ 33 | -include $(if $(wildcard $(KERNELDIR)/include/generated), \ 34 | include/generated/autoconf.h, \ 35 | include/linux/autoconf.h) \ 36 | -include `pwd`/$(ARCH_DIR)/external-module-compat.h" \ 37 | "$$@" 38 | 39 | include $(MAKEFILE_PRE) 40 | 41 | KVM_VERSION_GIT = $(if $(and $(filter kvm-devel,$(KVM_VERSION)), \ 42 | $(wildcard $(LINUX)/.git)), \ 43 | $(shell git --git-dir=$(LINUX)/.git describe), \ 44 | $(KVM_VERSION)) 45 | 46 | sync: 47 | ./sync -v $(KVM_VERSION_GIT) -l $(LINUX) 48 | 49 | KVM_KMOD_VERSION = $(strip $(if $(wildcard KVM_VERSION), \ 50 | $(shell cat KVM_VERSION), \ 51 | $(if $(wildcard .git), \ 52 | $(shell git describe), \ 53 | kvm-devel))) 54 | 55 | modules_install: 56 | $(MAKE) -C $(KERNELDIR) M=`pwd` INSTALL_MOD_PATH=$(DESTDIR)/$(INSTALL_MOD_PATH) $@ 57 | 58 | install: modules_install 59 | install -m 644 -D scripts/65-kvm.rules $(DESTDIR)/etc/udev/rules.d/65-kvm.rules 60 | 61 | tmpspec = .tmp.kvm-kmod.spec 62 | 63 | rpm-topdir := $$(pwd)/rpmtop 64 | 65 | RPMDIR = $(rpm-topdir)/RPMS 66 | 67 | rpm: all 68 | mkdir -p $(rpm-topdir)/BUILD $(RPMDIR)/$$(uname -i) 69 | sed 's/^Release:.*/Release: $(rpmrelease)/; s/^%define kverrel.*/%define kverrel $(KERNELVERSION)/' \ 70 | kvm-kmod.spec > $(tmpspec) 71 | rpmbuild --define="kverrel $(KERNELVERSION)" \ 72 | --define="objdir $$(pwd)/$(ARCH_DIR)" \ 73 | --define="_rpmdir $(RPMDIR)" \ 74 | --define="_topdir $(rpm-topdir)" \ 75 | -bb $(tmpspec) 76 | 77 | clean: 78 | $(MAKE) -C $(KERNELDIR) M=`pwd` $@ 79 | 80 | distclean: clean 81 | rm -f config.mak kvm-kmod-config.h include/asm include-compat/asm $(tmpspec) 82 | 83 | .PHONY: all sync install rpm clean distclean 84 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/README: -------------------------------------------------------------------------------- 1 | kvm-kmod Quickstart 2 | ------------------- 3 | 4 | ./configure 5 | make 6 | make 
install 7 | 8 | 9 | Build Details 10 | ------------- 11 | 12 | Building the KVM kernel module is performed differently depending on whether 13 | you are working from a clone of the git repository or from a source release. 14 | Notice that two kernels are involved: One from which the KVM sources 15 | are taken (kernel A), and one for which the module is built (kernel B). 16 | In almost all cases, kernel A is more recent than kernel B. 17 | 18 | - To build from a release (this is the default case), simply 19 | use ./configure (possibly with any arguments that are required for 20 | your setup, see ./configure --help) and make. The kernel specified 21 | with --kerneldir refers to kernel B, that is, the kernel for which 22 | the module is built. All sources required from kernel A are already 23 | included in the release. 24 | 25 | - Building from a cloned git repository (most likely useful for developers 26 | only) requires a kernel tree with the main kvm sources (kernel A) that 27 | is included as a submodule in the linux/ directory. By default, the KVM 28 | development tree on git.kernel.org is used, but this can be changed in 29 | the git configuration after the 'submodule init' step. 30 | 31 | Before the kvm module can be built, the linux submodule must be initialised 32 | and populated. The required sequence of commands is 33 | 34 | git submodule init 35 | git submodule update 36 | ./configure 37 | make sync 38 | make 39 | 40 | Notice that you can also specify an existing Linux tree for the 41 | synchronisation stage by using 42 | 43 | make sync LINUX=/path/to/kernel/A 44 | 45 | LINUX specifies the path to kernel A from which the KVM sources are taken. 46 | The directory must point to a local git tree, not to a plain directory 47 | containing the kernel sources. If LINUX is unset, the default value is 48 | linux/, i.e., the git submodule. 49 | 50 | Note that configure may refuse to build against your target kernel if it 51 | considers it too new or too old. If you know what you are doing, you can 52 | override this check by passing --force. 53 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/configure: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # minimum supported kernel version is 3.7 4 | min_kernel_version=7 5 | 6 | force_build= 7 | kernelsourcedir= 8 | kerneldir=/lib/modules/$(uname -r)/build 9 | cc=gcc 10 | ld=ld 11 | objcopy=objcopy 12 | ar=ar 13 | want_module=1 14 | cross_prefix= 15 | arch=`uname -m` 16 | if [ -z "$TMPDIR" ] ; then 17 | TMPDIR=.
18 | fi 19 | 20 | usage() { 21 | cat <<-EOF 22 | Usage: $0 [options] 23 | 24 | Options include: 25 | --arch=ARCH architecture to compile for ($arch) 26 | --cross-prefix=PREFIX prefix for cross compile 27 | --kerneldir=DIR kernel build directory ($kerneldir) 28 | --force continue even if kernel is not supported 29 | --help this helpful text 30 | EOF 31 | exit 1 32 | } 33 | 34 | while [[ "$1" = -* ]]; do 35 | opt="$1"; shift 36 | arg= 37 | hasarg= 38 | if [[ "$opt" = *=* ]]; then 39 | arg="${opt#*=}" 40 | opt="${opt%%=*}" 41 | hasarg=1 42 | fi 43 | case "$opt" in 44 | --kerneldir) 45 | kerneldir="$arg" 46 | ;; 47 | --force) 48 | force_build=1 49 | ;; 50 | --with-patched-kernel) 51 | want_module= 52 | ;; 53 | --arch) 54 | arch="$arg" 55 | ;; 56 | --cross-prefix) 57 | cross_prefix="$arg" 58 | ;; 59 | --help) 60 | usage 61 | ;; 62 | *) 63 | usage 64 | ;; 65 | esac 66 | done 67 | 68 | karch="$arch" 69 | 70 | case $arch in 71 | i?86*|x86_64*) 72 | arch=${arch/#i?86/i386} 73 | karch="x86" 74 | ;; 75 | esac 76 | 77 | kvm_version() { 78 | local fname="$(dirname "$0")/KVM_VERSION" 79 | 80 | if test -f "$fname"; then 81 | cat "$fname" 82 | else 83 | echo "kvm-devel" 84 | fi 85 | } 86 | 87 | arch=${arch%%-*} 88 | 89 | kerneldir=$(cd "$kerneldir"; pwd) 90 | 91 | # see if we have split build and source directories 92 | if [ ! -e "$kerneldir/Kbuild" ]; then 93 | kernelsourcedir=$kerneldir/source 94 | if [ ! -L "$kernelsourcedir" ]; then 95 | kernelsourcedir=${kerneldir%/build*}/source 96 | fi 97 | fi 98 | 99 | kernel_version_str= 100 | if [ -e "$kerneldir/.kernelrelease" ]; then 101 | kernel_version_str=`cat "$kerneldir/.kernelrelease"` 102 | elif [ -e "$kerneldir/include/config/kernel.release" ]; then 103 | kernel_version_str=`cat "$kerneldir/include/config/kernel.release"` 104 | elif [ -e "$kerneldir/.config" ]; then 105 | kernel_version_str=$(awk '/Linux kernel version:/ { print $NF }' \ 106 | "$kerneldir/.config") 107 | fi 108 | if [ ! -n "$kernel_version_str" ]; then 109 | echo 110 | echo "Error: kernel version not found." 111 | echo "Please make sure your kernel is configured." 112 | echo 113 | exit 1 114 | fi 115 | 116 | kernel_version=`echo $kernel_version_str | sed 's/\([0-9]*\)\.[0-9]*\.[0-9]*.*/\1/'` 117 | kernel_patchlevel=`echo $kernel_version_str | sed 's/[0-9]*\.\([0-9]*\)\.[0-9]*.*/\1/'`
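# Example (hypothetical version strings): "4.4.0-66-generic" parses to
# kernel_version=4 and kernel_patchlevel=4, while "3.13.11.2-custom" parses to
# kernel_version=3 and kernel_patchlevel=13 (and, further below,
# kernel_extraversion=2).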
118 | if [ ! -n "$force_build" ]; then 119 | if [ $kernel_version -eq 2 ] || 120 | [ $kernel_version -eq 3 -a $kernel_patchlevel -lt $min_kernel_version ]; then 121 | echo 122 | echo "Error: kernel is too old for this kvm-kmod release." 123 | echo 124 | exit 1 125 | fi 126 | fi 127 | 128 | kernel_extraversion=`echo $kernel_version_str | sed 's/[0-9]*\.[0-9]*\.[0-9]*\.\([0-9]*\)*.*/\1/'` 129 | if [ ! -n "$kernel_extraversion" ] || [ "$kernel_extraversion" = "$kernel_version_str" ]; then 130 | kernel_extraversion=0 131 | fi 132 | 133 | . "$kerneldir/.config" 134 | 135 | if [ -n "$no_uname" -a "$want_module" ]; then 136 | depmod_version=$kernel_version_str 137 | fi 138 | 139 | # Check if it is a Fedora kernel, e.g. 2.6.35.13-91.fc14.x86_64 140 | if echo "$kernel_version_str" | grep -qE '[0-9.-]+\.fc[0-9]+\..+'; then 141 | config_fedora_kernel="#define CONFIG_FEDORA_KERNEL 1" 142 | else 143 | config_fedora_kernel="#undef CONFIG_FEDORA_KERNEL" 144 | fi 145 | 146 | rm -f include/asm include/uapi/asm include-compat/asm 147 | mkdir -p include/uapi 148 | ln -sf asm-"$karch" include/asm 149 | ln -sf asm-"$karch" include/uapi/asm 150 | ln -sf asm-"$karch" include-compat/asm 151 | 152 | cat <<EOF > config.mak 153 | export ARCH=$arch 154 | PREFIX=$prefix 155 | KERNELDIR=$kerneldir 156 | KERNELSOURCEDIR=$kernelsourcedir 157 | KERNELVERSION=$kernel_version_str 158 | export CROSS_COMPILE=$cross_prefix 159 | export CC=$cross_prefix$cc 160 | export LD=$cross_prefix$ld 161 | export OBJCOPY=$cross_prefix$objcopy 162 | export AR=$cross_prefix$ar 163 | KVM_VERSION=$(kvm_version) 164 | EOF 165 | 166 | cat <<EOF > kvm-kmod-config.h 167 | #define KERNEL_EXTRAVERSION $kernel_extraversion 168 | $config_fedora_kernel 169 | EOF 170 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/external-module-compat.c: -------------------------------------------------------------------------------- 1 | 2 | /* 3 | * smp_call_function_single() is not exported below 2.6.20. 4 | */ 5 | 6 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) 7 | 8 | /* The 'nonatomic' argument was removed in 2.6.27. */ 9 | 10 | #undef smp_call_function_single 11 | 12 | #include <linux/smp.h> 13 | 14 | #ifdef CONFIG_SMP 15 | int kvm_smp_call_function_single(int cpu, void (*func)(void *info), 16 | void *info, int wait) 17 | { 18 | return smp_call_function_single(cpu, func, info, 0, wait); 19 | } 20 | #else /* !CONFIG_SMP */ 21 | int kvm_smp_call_function_single(int cpu, void (*func)(void *info), 22 | void *info, int wait) 23 | { 24 | WARN_ON(cpu != 0); 25 | local_irq_disable(); 26 | func(info); 27 | local_irq_enable(); 28 | return 0; 29 | 30 | } 31 | #endif /* !CONFIG_SMP */ 32 | EXPORT_SYMBOL_GPL(kvm_smp_call_function_single); 33 | 34 | #define smp_call_function_single kvm_smp_call_function_single 35 | 36 | #endif 37 | 38 | /* div64_u64 is fairly new */ 39 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) 40 | 41 | #ifndef CONFIG_64BIT 42 | 43 | /* 64bit divisor, dividend and result.
dynamic precision */ 44 | uint64_t div64_u64(uint64_t dividend, uint64_t divisor) 45 | { 46 | uint32_t high, d; 47 | 48 | high = divisor >> 32; 49 | if (high) { 50 | unsigned int shift = fls(high); 51 | 52 | d = divisor >> shift; 53 | dividend >>= shift; 54 | } else 55 | d = divisor; 56 | 57 | do_div(dividend, d); 58 | 59 | return dividend; 60 | } 61 | 62 | #endif 63 | 64 | #endif 65 | 66 | /* 67 | * smp_call_function_mask() is not defined/exported below 2.6.24 on all 68 | * targets and below 2.6.26 on x86-64 69 | */ 70 | 71 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) || \ 72 | (defined CONFIG_X86_64 && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) 73 | 74 | #include 75 | 76 | struct kvm_call_data_struct { 77 | void (*func) (void *info); 78 | void *info; 79 | atomic_t started; 80 | atomic_t finished; 81 | int wait; 82 | }; 83 | 84 | static void kvm_ack_smp_call(void *_data) 85 | { 86 | struct kvm_call_data_struct *data = _data; 87 | /* if wait == 0, data can be out of scope 88 | * after atomic_inc(info->started) 89 | */ 90 | void (*func) (void *info) = data->func; 91 | void *info = data->info; 92 | int wait = data->wait; 93 | 94 | smp_mb(); 95 | atomic_inc(&data->started); 96 | (*func)(info); 97 | if (wait) { 98 | smp_mb(); 99 | atomic_inc(&data->finished); 100 | } 101 | } 102 | 103 | int kvm_smp_call_function_mask(cpumask_t mask, 104 | void (*func) (void *info), void *info, int wait) 105 | { 106 | #ifdef CONFIG_SMP 107 | struct kvm_call_data_struct data; 108 | cpumask_t allbutself; 109 | int cpus; 110 | int cpu; 111 | int me; 112 | 113 | me = get_cpu(); 114 | WARN_ON(irqs_disabled()); 115 | allbutself = cpu_online_map; 116 | cpu_clear(me, allbutself); 117 | 118 | cpus_and(mask, mask, allbutself); 119 | cpus = cpus_weight(mask); 120 | 121 | if (!cpus) 122 | goto out; 123 | 124 | data.func = func; 125 | data.info = info; 126 | atomic_set(&data.started, 0); 127 | data.wait = wait; 128 | if (wait) 129 | atomic_set(&data.finished, 0); 130 | 131 | for (cpu = first_cpu(mask); cpu != NR_CPUS; cpu = next_cpu(cpu, mask)) 132 | smp_call_function_single(cpu, kvm_ack_smp_call, &data, 0); 133 | 134 | while (atomic_read(&data.started) != cpus) { 135 | cpu_relax(); 136 | barrier(); 137 | } 138 | 139 | if (!wait) 140 | goto out; 141 | 142 | while (atomic_read(&data.finished) != cpus) { 143 | cpu_relax(); 144 | barrier(); 145 | } 146 | out: 147 | put_cpu(); 148 | #endif /* CONFIG_SMP */ 149 | return 0; 150 | } 151 | 152 | #include 153 | 154 | static void vcpu_kick_intr(void *info) 155 | { 156 | } 157 | 158 | struct kvm_kick { 159 | int cpu; 160 | struct work_struct work; 161 | }; 162 | 163 | static void kvm_do_smp_call_function(struct work_struct *work) 164 | { 165 | int me = get_cpu(); 166 | struct kvm_kick *kvm_kick = container_of(work, struct kvm_kick, work); 167 | 168 | if (kvm_kick->cpu != me) 169 | smp_call_function_single(kvm_kick->cpu, vcpu_kick_intr, 170 | NULL, 0); 171 | kfree(kvm_kick); 172 | put_cpu(); 173 | } 174 | 175 | void kvm_queue_smp_call_function(int cpu) 176 | { 177 | struct kvm_kick *kvm_kick = kmalloc(sizeof(struct kvm_kick), GFP_ATOMIC); 178 | 179 | INIT_WORK(&kvm_kick->work, kvm_do_smp_call_function); 180 | 181 | schedule_work(&kvm_kick->work); 182 | } 183 | 184 | void kvm_smp_send_reschedule(int cpu) 185 | { 186 | if (irqs_disabled()) { 187 | kvm_queue_smp_call_function(cpu); 188 | return; 189 | } 190 | smp_call_function_single(cpu, vcpu_kick_intr, NULL, 0); 191 | } 192 | #endif 193 | 194 | #include 195 | 196 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) 197 | 198 | int 
intel_iommu_found() 199 | { 200 | return 0; 201 | } 202 | 203 | #endif 204 | 205 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) 206 | 207 | static enum hrtimer_restart kvm_hrtimer_wakeup(struct hrtimer *timer) 208 | { 209 | struct hrtimer_sleeper *t = 210 | container_of(timer, struct hrtimer_sleeper, timer); 211 | struct task_struct *task = t->task; 212 | 213 | t->task = NULL; 214 | if (task) 215 | wake_up_process(task); 216 | 217 | return HRTIMER_NORESTART; 218 | } 219 | 220 | int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode) 221 | { 222 | struct hrtimer_sleeper t; 223 | 224 | /* 225 | * Optimize when a zero timeout value is given. It does not 226 | * matter whether this is an absolute or a relative time. 227 | */ 228 | if (expires && !expires->tv64) { 229 | __set_current_state(TASK_RUNNING); 230 | return 0; 231 | } 232 | 233 | /* 234 | * A NULL parameter means "infinite" 235 | */ 236 | if (!expires) { 237 | schedule(); 238 | __set_current_state(TASK_RUNNING); 239 | return -EINTR; 240 | } 241 | 242 | hrtimer_init(&t.timer, CLOCK_MONOTONIC, mode); 243 | t.timer.expires = *expires; 244 | 245 | t.timer.function = kvm_hrtimer_wakeup; 246 | t.task = current; 247 | 248 | hrtimer_start(&t.timer, t.timer.expires, mode); 249 | if (!hrtimer_active(&t.timer)) 250 | t.task = NULL; 251 | 252 | if (likely(t.task)) 253 | schedule(); 254 | 255 | hrtimer_cancel(&t.timer); 256 | 257 | __set_current_state(TASK_RUNNING); 258 | 259 | return !t.task ? 0 : -EINTR; 260 | } 261 | 262 | #endif 263 | 264 | #ifndef CONFIG_USER_RETURN_NOTIFIER 265 | 266 | DEFINE_PER_CPU(struct kvm_user_return_notifier *, kvm_urn) = NULL; 267 | 268 | #endif /* CONFIG_USER_RETURN_NOTIFIER */ 269 | 270 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) 271 | #include 272 | #include 273 | 274 | int kvm_suspend(void); 275 | void kvm_resume(void); 276 | 277 | static int kvm_compat_suspend(struct sys_device *dev, pm_message_t state) 278 | { 279 | kvm_suspend(); 280 | return 0; 281 | } 282 | 283 | static struct sysdev_class kvm_sysdev_class = { 284 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25) 285 | .name = "kvm", 286 | #else 287 | set_kset_name("kvm"), 288 | #endif 289 | .suspend = kvm_compat_suspend, 290 | .resume = (int (*)(struct sys_device *))kvm_resume, 291 | }; 292 | 293 | static struct sys_device kvm_sysdev = { 294 | .id = 0, 295 | .cls = &kvm_sysdev_class, 296 | }; 297 | 298 | void register_syscore_ops(struct syscore_ops *ops) 299 | { 300 | int r; 301 | 302 | r = sysdev_class_register(&kvm_sysdev_class); 303 | BUG_ON(r); 304 | 305 | r = sysdev_register(&kvm_sysdev); 306 | BUG_ON(r); 307 | } 308 | EXPORT_SYMBOL_GPL(register_syscore_ops); 309 | 310 | void unregister_syscore_ops(struct syscore_ops *ops) 311 | { 312 | sysdev_unregister(&kvm_sysdev); 313 | sysdev_class_unregister(&kvm_sysdev_class); 314 | } 315 | EXPORT_SYMBOL_GPL(unregister_syscore_ops); 316 | 317 | #endif /* < 2.6.39 */ 318 | 319 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) 320 | void *bsearch(const void *key, const void *base, size_t num, size_t size, 321 | int (*cmp)(const void *key, const void *elt)) 322 | { 323 | size_t start = 0, end = num; 324 | int result; 325 | 326 | while (start < end) { 327 | size_t mid = start + (end - start) / 2; 328 | 329 | result = cmp(key, base + mid * size); 330 | if (result < 0) 331 | end = mid; 332 | else if (result > 0) 333 | start = mid + 1; 334 | else 335 | return (void *)base + mid * size; 336 | } 337 | 338 | return NULL; 339 | } 340 | EXPORT_SYMBOL(bsearch); 341 | #endif /* < 3.0 */ 342 | 343 | #if
LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) 344 | u64 ktime_get_boot_ns(void) 345 | { 346 | struct timespec ts; 347 | 348 | ktime_get_ts(&ts); 349 | kvm_monotonic_to_bootbased(&ts); 350 | return timespec_to_ns(&ts); 351 | } 352 | 353 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0) 354 | #include 355 | 356 | u64 kvm_get_boot_base_ns(struct timekeeper *tk) 357 | { 358 | struct timespec ts = tk->wall_to_monotonic; 359 | 360 | kvm_monotonic_to_bootbased(&ts); 361 | return timespec_to_ns(&ts) + tk->xtime_sec * (u64)NSEC_PER_SEC; 362 | } 363 | #endif 364 | #endif 365 | 366 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,3) 367 | void *get_xsave_addr(struct xsave_struct *xsave, int feature) 368 | { 369 | int index = fls64(feature) - 1; 370 | u32 size, offset, ecx, edx; 371 | 372 | cpuid_count(0xd, index, &size, &offset, &ecx, &edx); 373 | return (u8 *)xsave + offset; 374 | } 375 | #endif 376 | 377 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) 378 | bool single_task_running(void) 379 | { 380 | /* Not exactly the same... */ 381 | return !need_resched(); 382 | } 383 | #endif 384 | 385 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) 386 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) 387 | /* Instead of backporting everything, just include the code from 3.19's 388 | * kvm_get_user_page_io, which was generalized into __get_user_pages_unlocked. 389 | */ 390 | static long kvm_get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, 391 | unsigned long addr, int nr_pages, struct page **pagep, 392 | struct vm_area_struct **vmas, int *locked, 393 | bool notify_drop, unsigned int flags) 394 | { 395 | int npages; 396 | flags |= (pagep ? FOLL_GET : 0); 397 | 398 | BUG_ON(nr_pages != 1); 399 | 400 | /* 401 | * If retrying the fault, we get here *not* having allowed the filemap 402 | * to wait on the page lock. We should now allow waiting on the IO with 403 | * the mmap semaphore released. 404 | */ 405 | npages = __get_user_pages(tsk, mm, addr, nr_pages, flags, pagep, vmas, 406 | locked); 407 | if (!*locked) { 408 | VM_BUG_ON(npages); 409 | 410 | if (!pagep) 411 | return 0; 412 | 413 | /* 414 | * The previous call has now waited on the IO. Now we can 415 | * retry and complete. Pass TRIED to ensure we do not re 416 | * schedule async IO (see e.g. filemap_fault). 417 | */ 418 | down_read(&mm->mmap_sem); 419 | *locked = 1; 420 | npages = __get_user_pages(tsk, mm, addr, nr_pages, flags | FOLL_TRIED, 421 | pagep, vmas, NULL); 422 | if (notify_drop) { 423 | /* 424 | * We must let the caller know we temporarily dropped the lock 425 | * and so the critical section protected by it was lost. 
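 * Once *locked is cleared below, every VMA pointer the caller obtained
 * under the old critical section is stale; the caller must take
 * mmap_sem again itself before touching the mm.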
426 | */ 427 | up_read(&mm->mmap_sem); 428 | *locked = 0; 429 | } 430 | } 431 | return npages; 432 | } 433 | #else 434 | #define kvm_get_user_pages_locked __get_user_pages_unlocked 435 | #endif 436 | 437 | #if LINUX_VERSION_CODE == KERNEL_VERSION(4,9,0) 438 | long kvm_get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, 439 | unsigned long start, unsigned long nr_pages, 440 | unsigned int gup_flags, struct page **pages, 441 | struct vm_area_struct **vmas, int *locked) 442 | { 443 | if (*locked) { 444 | up_read(&mm->mmap_sem); 445 | *locked = 0; 446 | } 447 | 448 | return kvm_get_user_pages_locked(tsk, mm, start, nr_pages, pages, 449 | gup_flags | FOLL_TOUCH | FOLL_REMOTE); 450 | } 451 | 452 | long kvm_get_user_pages_unlocked(unsigned long addr, unsigned long nr_pages, 453 | struct page **pagep, unsigned int flags) 454 | { 455 | long ret; 456 | 457 | ret = kvm_get_user_pages_locked(current, current->mm, addr, nr_pages, 458 | pagep, 459 | flags | FOLL_TOUCH); 460 | return ret; 461 | } 462 | #elif LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0) 463 | long kvm_get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, 464 | unsigned long start, unsigned long nr_pages, 465 | unsigned int gup_flags, struct page **pages, 466 | struct vm_area_struct **vmas, int *locked) 467 | { 468 | if (*locked) { 469 | up_read(&mm->mmap_sem); 470 | *locked = 0; 471 | } 472 | 473 | return kvm_get_user_pages_locked(tsk, mm, start, nr_pages, 1, 0, 474 | pages, 475 | gup_flags | FOLL_TOUCH | FOLL_REMOTE); 476 | } 477 | 478 | long kvm_get_user_pages_unlocked(unsigned long addr, unsigned long nr_pages, 479 | struct page **pagep, unsigned int flags) 480 | { 481 | long ret; 482 | 483 | ret = kvm_get_user_pages_locked(current, current->mm, addr, nr_pages, 484 | 1, 0, pagep, 485 | flags | FOLL_TOUCH); 486 | return ret; 487 | } 488 | #endif 489 | 490 | 491 | #endif 492 | 493 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0) 494 | #include 495 | 496 | int irq_bypass_register_consumer(struct irq_bypass_consumer *c) 497 | { 498 | return 0; 499 | } 500 | 501 | void irq_bypass_unregister_consumer(struct irq_bypass_consumer *c) 502 | { 503 | } 504 | 505 | void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) 506 | { 507 | *ut = p->utime; 508 | *st = p->stime; 509 | } 510 | #endif 511 | 512 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) 513 | #ifndef VM_FAULT_SIGSEGV 514 | #define VM_FAULT_SIGSEGV 0 515 | #endif 516 | 517 | static inline 518 | bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags) 519 | { 520 | bool write = !!(fault_flags & FAULT_FLAG_WRITE); 521 | vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; 522 | 523 | if (!(vm_flags & vma->vm_flags)) 524 | return false; 525 | 526 | /* arch_vma_access_permitted check removed---assuming that 527 | * pkeys are not in use. 
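 * That assumption is reasonable for the kernels this shim targets:
 * protection-key support only started landing around v4.6, and this
 * whole fallback is compiled only when LINUX_VERSION_CODE < 4.8.0.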
528 | */ 529 | return true; 530 | } 531 | 532 | int kvm_fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, 533 | unsigned long address, unsigned int flags, 534 | bool *unlocked) 535 | { 536 | struct vm_area_struct *vma; 537 | int ret, major = 0; 538 | unsigned int fault_flags = 0; 539 | 540 | VM_WARN_ON_ONCE(flags & ~(FOLL_WRITE|FOLL_NOWAIT| 541 | FOLL_TRIED|FOLL_HWPOISON)); 542 | 543 | if (flags & FOLL_WRITE) 544 | fault_flags |= FAULT_FLAG_WRITE; 545 | if (unlocked) 546 | fault_flags |= FAULT_FLAG_ALLOW_RETRY; 547 | if (flags & FOLL_NOWAIT) { 548 | VM_WARN_ON_ONCE(unlocked); 549 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; 550 | } 551 | if (flags & FOLL_TRIED) { 552 | VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY); 553 | fault_flags |= FAULT_FLAG_TRIED; 554 | } 555 | 556 | retry: 557 | vma = find_extend_vma(mm, address); 558 | 559 | if (!vma || address < vma->vm_start) 560 | return -EFAULT; 561 | 562 | if (!vma_permits_fault(vma, fault_flags)) 563 | return -EFAULT; 564 | 565 | ret = handle_mm_fault(mm, vma, address, fault_flags); 566 | major |= ret & VM_FAULT_MAJOR; 567 | if (ret & VM_FAULT_ERROR) { 568 | if (ret & VM_FAULT_OOM) 569 | return -ENOMEM; 570 | if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 571 | return flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; 572 | if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) 573 | return -EFAULT; 574 | BUG(); 575 | } 576 | 577 | if (ret & VM_FAULT_RETRY) { 578 | if ((fault_flags & FAULT_FLAG_RETRY_NOWAIT)) 579 | return -EBUSY; 580 | 581 | down_read(&mm->mmap_sem); 582 | if (!(fault_flags & FAULT_FLAG_TRIED)) { 583 | *unlocked = true; 584 | fault_flags &= ~FAULT_FLAG_ALLOW_RETRY; 585 | fault_flags |= FAULT_FLAG_TRIED; 586 | goto retry; 587 | } 588 | } 589 | 590 | if (tsk) { 591 | if (major) 592 | tsk->maj_flt++; 593 | else 594 | tsk->min_flt++; 595 | } 596 | return 0; 597 | } 598 | #endif 599 | 600 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) 601 | static int (*kvm_cpu_notifier_startup[2])(unsigned int cpu); 602 | static int (*kvm_cpu_notifier_teardown[2])(unsigned int cpu); 603 | 604 | static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 605 | void *v) 606 | { 607 | unsigned int cpu = raw_smp_processor_id(); 608 | val &= ~CPU_TASKS_FROZEN; 609 | switch (val) { 610 | case CPU_STARTING: 611 | kvm_cpu_notifier_startup[CPUHP_AP_KVM_STARTING](cpu); 612 | break; 613 | case CPU_DYING: 614 | kvm_cpu_notifier_teardown[CPUHP_AP_KVM_STARTING](cpu); 615 | break; 616 | } 617 | return NOTIFY_OK; 618 | } 619 | 620 | static int kvmclock_cpu_notifier(struct notifier_block *nfb, 621 | unsigned long action, void *hcpu) 622 | { 623 | unsigned int cpu = raw_smp_processor_id(); 624 | switch (action) { 625 | case CPU_ONLINE: 626 | case CPU_DOWN_FAILED: 627 | kvm_cpu_notifier_startup[CPUHP_AP_X86_KVM_CLK_ONLINE](cpu); 628 | break; 629 | case CPU_DOWN_PREPARE: 630 | kvm_cpu_notifier_teardown[CPUHP_AP_X86_KVM_CLK_ONLINE](cpu); 631 | break; 632 | } 633 | return NOTIFY_OK; 634 | } 635 | 636 | 637 | static struct notifier_block kvm_cpu_notifier[] = { 638 | [CPUHP_AP_KVM_STARTING] = { 639 | .notifier_call = kvm_cpu_hotplug, 640 | }, 641 | [CPUHP_AP_X86_KVM_CLK_ONLINE] = { 642 | .notifier_call = kvmclock_cpu_notifier, 643 | .priority = -INT_MAX 644 | } 645 | }; 646 | 647 | static void call_fn(void *info) 648 | { 649 | unsigned int cpu = raw_smp_processor_id(); 650 | kvm_cpu_notifier_startup[CPUHP_AP_X86_KVM_CLK_ONLINE](cpu); 651 | } 652 | 653 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) 654 | 
int cpuhp_setup_state(enum kvm_cpuhp_state state, 655 | const char *name, 656 | int (*startup)(unsigned int cpu), 657 | int (*teardown)(unsigned int cpu)) 658 | { 659 | int cpu; 660 | BUG_ON(state != CPUHP_AP_X86_KVM_CLK_ONLINE); 661 | kvm_cpu_notifier_startup[state] = startup; 662 | kvm_cpu_notifier_teardown[state] = teardown; 663 | 664 | cpu_notifier_register_begin(); 665 | for_each_online_cpu(cpu) 666 | smp_call_function_single(cpu, (void *)call_fn, NULL, 1); 667 | 668 | __register_hotcpu_notifier(&kvm_cpu_notifier[state]); 669 | cpu_notifier_register_done(); 670 | return 0; 671 | } 672 | 673 | int cpuhp_setup_state_nocalls(enum kvm_cpuhp_state state, 674 | const char *name, 675 | int (*startup)(unsigned int cpu), 676 | int (*teardown)(unsigned int cpu)) 677 | { 678 | BUG_ON(state == CPUHP_AP_X86_KVM_CLK_ONLINE); 679 | kvm_cpu_notifier_startup[state] = startup; 680 | kvm_cpu_notifier_teardown[state] = teardown; 681 | return register_cpu_notifier(&kvm_cpu_notifier[state]); 682 | } 683 | 684 | void cpuhp_remove_state_nocalls(enum kvm_cpuhp_state state) 685 | { 686 | if (state == CPUHP_AP_X86_KVM_CLK_ONLINE) 687 | unregister_hotcpu_notifier(&kvm_cpu_notifier[state]); 688 | else 689 | unregister_cpu_notifier(&kvm_cpu_notifier[state]); 690 | } 691 | #endif 692 | 693 | #endif 694 | 695 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0) 696 | 697 | #include 698 | 699 | struct kthread_worker * 700 | kthread_create_worker(unsigned int flags, const char namefmt[], ...) 701 | { 702 | struct kthread_worker *worker; 703 | va_list args; 704 | struct task_struct *task; 705 | char comm[sizeof(task->comm) + 1]; 706 | 707 | WARN_ON(flags); 708 | va_start(args, namefmt); 709 | vsnprintf(comm, sizeof(task->comm), namefmt, args); 710 | va_end(args); 711 | 712 | worker = kzalloc(sizeof(*worker), GFP_KERNEL); 713 | if (!worker) 714 | return ERR_PTR(-ENOMEM); 715 | 716 | kthread_init_worker(worker); 717 | task = kthread_run(kthread_worker_fn, worker, "%s", comm); 718 | if (IS_ERR(task)) 719 | goto fail_task; 720 | 721 | flush_kthread_worker(worker); 722 | WARN_ON(worker->task != task); 723 | return worker; 724 | 725 | fail_task: 726 | kfree(worker); 727 | return ERR_CAST(task); 728 | } 729 | EXPORT_SYMBOL(kthread_create_worker); 730 | 731 | void kthread_destroy_worker(struct kthread_worker *worker) 732 | { 733 | struct task_struct *task; 734 | 735 | task = worker->task; 736 | if (WARN_ON(!task)) 737 | return; 738 | 739 | kthread_flush_worker(worker); 740 | kthread_stop(task); 741 | WARN_ON(!list_empty(&worker->work_list)); 742 | kfree(worker); 743 | } 744 | EXPORT_SYMBOL(kthread_destroy_worker); 745 | 746 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0) && LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) 747 | long kvm_get_user_pages(unsigned long start, unsigned long nr_pages, 748 | unsigned int gup_flags, struct page **pages, 749 | struct vm_area_struct **vmas) 750 | { 751 | return get_user_pages(current, current->mm, start, nr_pages, 752 | !!(gup_flags & FOLL_WRITE), 753 | !!(gup_flags & FOLL_FORCE), 754 | pages, vmas); 755 | } 756 | #elif LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) 757 | long kvm_get_user_pages(unsigned long start, unsigned long nr_pages, 758 | unsigned int gup_flags, struct page **pages, 759 | struct vm_area_struct **vmas) 760 | { 761 | return get_user_pages(current, current->mm, start, nr_pages, 762 | !!(gup_flags & FOLL_WRITE), 763 | !!(gup_flags & FOLL_FORCE), 764 | pages, vmas); 765 | } 766 | #else 767 | long kvm_get_user_pages(unsigned long start, unsigned long nr_pages, 768 | unsigned int 
gup_flags, struct page **pages, 769 | struct vm_area_struct **vmas) 770 | { 771 | return get_user_pages(start, nr_pages, 772 | !!(gup_flags & FOLL_WRITE), 773 | !!(gup_flags & FOLL_FORCE), 774 | pages, vmas); 775 | } 776 | #endif 777 | 778 | #endif 779 | 780 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0) 781 | #include 782 | 783 | /** 784 | * kvmalloc_node - attempt to allocate physically contiguous memory, but upon 785 | * failure, fall back to non-contiguous (vmalloc) allocation. 786 | * @size: size of the request. 787 | * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL. 788 | * @node: numa node to allocate from 789 | * 790 | * Uses kmalloc to get the memory but if the allocation fails then falls back 791 | * to the vmalloc allocator. Use kvfree for freeing the memory. 792 | * 793 | * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported 794 | * 795 | * Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people. 796 | */ 797 | void *kvmalloc_node(size_t size, gfp_t flags, int node) 798 | { 799 | gfp_t kmalloc_flags = flags; 800 | void *ret; 801 | 802 | /* 803 | * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables) 804 | * so the given set of flags has to be compatible. 805 | */ 806 | WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL); 807 | 808 | /* 809 | * Make sure that larger requests are not too disruptive - no OOM 810 | * killer and no allocation failure warnings as we have a fallback 811 | */ 812 | if (size > PAGE_SIZE) 813 | kmalloc_flags |= __GFP_NORETRY | __GFP_NOWARN; 814 | 815 | ret = kmalloc_node(size, kmalloc_flags, node); 816 | 817 | /* 818 | * It doesn't really make sense to fallback to vmalloc for sub page 819 | * requests 820 | */ 821 | if (ret || size <= PAGE_SIZE) 822 | return ret; 823 | 824 | if (flags & __GFP_ZERO) 825 | return vzalloc_node(size, node); 826 | else 827 | return vmalloc_node(size, node); 828 | } 829 | EXPORT_SYMBOL(kvmalloc_node); 830 | #endif 831 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/asm.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Empty file to satisfy #include for older kernels. 3 | */ 4 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/clocksource.h: -------------------------------------------------------------------------------- 1 | /* x86-specific clocksource additions */ 2 | 3 | #ifndef _ASM_X86_CLOCKSOURCE_H 4 | #define _ASM_X86_CLOCKSOURCE_H 5 | 6 | #ifdef CONFIG_X86_64 7 | 8 | #define VCLOCK_NONE 0 /* No vDSO clock available. */ 9 | #define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */ 10 | #define VCLOCK_HPET 2 /* vDSO should use vread_hpet. */ 11 | #define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */ 12 | 13 | struct arch_clocksource_data { 14 | int vclock_mode; 15 | }; 16 | 17 | #endif /* CONFIG_X86_64 */ 18 | 19 | #endif /* _ASM_X86_CLOCKSOURCE_H */ 20 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/cmpxchg.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Empty file to satisfy #include for older kernels. 
3 | */ 4 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/fpu-internal.h: -------------------------------------------------------------------------------- 1 | /* empty file to keep #include happy */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/fpu/api.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/mce.h: -------------------------------------------------------------------------------- 1 | /* empty file to keep #include happy */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/msidef.h: -------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_MSIDEF_H 2 | #define _ASM_X86_MSIDEF_H 3 | 4 | /* 5 | * Constants for Intel APIC based MSI messages. 6 | */ 7 | 8 | /* 9 | * Shifts for MSI data 10 | */ 11 | 12 | #define MSI_DATA_VECTOR_SHIFT 0 13 | #define MSI_DATA_VECTOR_MASK 0x000000ff 14 | #define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \ 15 | MSI_DATA_VECTOR_MASK) 16 | 17 | #define MSI_DATA_DELIVERY_MODE_SHIFT 8 18 | #define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) 19 | #define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT) 20 | 21 | #define MSI_DATA_LEVEL_SHIFT 14 22 | #define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) 23 | #define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) 24 | 25 | #define MSI_DATA_TRIGGER_SHIFT 15 26 | #define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) 27 | #define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) 28 | 29 | /* 30 | * Shift/mask fields for msi address 31 | */ 32 | 33 | #define MSI_ADDR_BASE_HI 0 34 | #define MSI_ADDR_BASE_LO 0xfee00000 35 | 36 | #define MSI_ADDR_DEST_MODE_SHIFT 2 37 | #define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT) 38 | #define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) 39 | 40 | #define MSI_ADDR_REDIRECTION_SHIFT 3 41 | #define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) 42 | /* dedicated cpu */ 43 | #define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) 44 | /* lowest priority */ 45 | 46 | #define MSI_ADDR_DEST_ID_SHIFT 12 47 | #define MSI_ADDR_DEST_ID_MASK 0x00ffff0 48 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ 49 | MSI_ADDR_DEST_ID_MASK) 50 | 51 | #define MSI_ADDR_IR_EXT_INT (1 << 4) 52 | #define MSI_ADDR_IR_SHV (1 << 3) 53 | #define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13) 54 | #define MSI_ADDR_IR_INDEX2(index) ((index & 0x7fff) << 5) 55 | #endif /* _ASM_X86_MSIDEF_H */ 56 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/msr-index.h: -------------------------------------------------------------------------------- 1 | /* empty file to keep #include happy */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/perf_event.h: -------------------------------------------------------------------------------- 1 | /* empty file to keep #include happy */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/pvclock-abi.h: 
-------------------------------------------------------------------------------- 1 | #ifndef _ASM_X86_PVCLOCK_ABI_H 2 | #define _ASM_X86_PVCLOCK_ABI_H 3 | #ifndef __ASSEMBLY__ 4 | 5 | /* 6 | * These structs MUST NOT be changed. 7 | * They are the ABI between hypervisor and guest OS. 8 | * Both Xen and KVM are using this. 9 | * 10 | * pvclock_vcpu_time_info holds the system time and the tsc timestamp 11 | * of the last update. So the guest can use the tsc delta to get a 12 | * more precise system time. There is one per virtual cpu. 13 | * 14 | * pvclock_wall_clock references the point in time when the system 15 | * time was zero (usually boot time), thus the guest calculates the 16 | * current wall clock by adding the system time. 17 | * 18 | * Protocol for the "version" fields is: hypervisor raises it (making 19 | * it uneven) before it starts updating the fields and raises it again 20 | * (making it even) when it is done. Thus the guest can make sure the 21 | * time values it got are consistent by checking the version before 22 | * and after reading them. 23 | */ 24 | 25 | struct pvclock_vcpu_time_info { 26 | u32 version; 27 | u32 pad0; 28 | u64 tsc_timestamp; 29 | u64 system_time; 30 | u32 tsc_to_system_mul; 31 | s8 tsc_shift; 32 | u8 flags; 33 | u8 pad[2]; 34 | } __attribute__((__packed__)); /* 32 bytes */ 35 | 36 | struct pvclock_wall_clock { 37 | u32 version; 38 | u32 sec; 39 | u32 nsec; 40 | } __attribute__((__packed__)); 41 | 42 | #define PVCLOCK_TSC_STABLE_BIT (1 << 0) 43 | #define PVCLOCK_GUEST_STOPPED (1 << 1) 44 | #endif /* __ASSEMBLY__ */ 45 | #endif /* _ASM_X86_PVCLOCK_ABI_H */ 46 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/pvclock.h: -------------------------------------------------------------------------------- 1 | /* empty file to keep #include happy */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/xcr.h: -------------------------------------------------------------------------------- 1 | /* -*- linux-c -*- ------------------------------------------------------- * 2 | * 3 | * Copyright 2008 rPath, Inc. - All Rights Reserved 4 | * 5 | * This file is part of the Linux kernel, and is made available under 6 | * the terms of the GNU General Public License version 2 or (at your 7 | * option) any later version; incorporated herein by reference. 
8 | * 9 | * ----------------------------------------------------------------------- */ 10 | 11 | /* 12 | * asm-x86/xcr.h 13 | * 14 | * Definitions for the eXtended Control Register instructions 15 | */ 16 | 17 | #ifndef _ASM_X86_XCR_H 18 | #define _ASM_X86_XCR_H 19 | 20 | #define XCR_XFEATURE_ENABLED_MASK 0x00000000 21 | 22 | #ifdef __KERNEL__ 23 | # ifndef __ASSEMBLY__ 24 | 25 | #include 26 | 27 | static inline u64 xgetbv(u32 index) 28 | { 29 | u32 eax, edx; 30 | 31 | asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */ 32 | : "=a" (eax), "=d" (edx) 33 | : "c" (index)); 34 | return eax + ((u64)edx << 32); 35 | } 36 | 37 | static inline void xsetbv(u32 index, u64 value) 38 | { 39 | u32 eax = value; 40 | u32 edx = value >> 32; 41 | 42 | asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */ 43 | : : "a" (eax), "d" (edx), "c" (index)); 44 | } 45 | 46 | # endif /* __ASSEMBLY__ */ 47 | #endif /* __KERNEL__ */ 48 | 49 | #endif /* _ASM_X86_XCR_H */ 50 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/asm-x86/xsave.h: -------------------------------------------------------------------------------- 1 | /* empty file to keep #include happy */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/atomic.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/bsearch.h: -------------------------------------------------------------------------------- 1 | #ifndef _LINUX_BSEARCH_H 2 | #define _LINUX_BSEARCH_H 3 | 4 | #include 5 | 6 | void *bsearch(const void *key, const void *base, size_t num, size_t size, 7 | int (*cmp)(const void *key, const void *elt)); 8 | 9 | #endif /* _LINUX_BSEARCH_H */ 10 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/context_tracking.h: -------------------------------------------------------------------------------- 1 | /* Dummy file to satisfy #include */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/eventfd.h: -------------------------------------------------------------------------------- 1 | /* Dummy file */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/export.h: -------------------------------------------------------------------------------- 1 | /* Dummy file */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/ftrace_event.h: -------------------------------------------------------------------------------- 1 | /* dummy file for #include compatibility */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/intel-iommu.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2006, Intel Corporation. 3 | * 4 | * This program is free software; you can redistribute it and/or modify it 5 | * under the terms and conditions of the GNU General Public License, 6 | * version 2, as published by the Free Software Foundation. 
7 | * 8 | * This program is distributed in the hope it will be useful, but WITHOUT 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 | * more details. 12 | * 13 | * You should have received a copy of the GNU General Public License along with 14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 15 | * Place - Suite 330, Boston, MA 02111-1307 USA. 16 | * 17 | * Copyright (C) 2006-2008 Intel Corporation 18 | * Author: Ashok Raj 19 | * Author: Anil S Keshavamurthy 20 | */ 21 | 22 | #ifndef _INTEL_IOMMU_H_ 23 | #define _INTEL_IOMMU_H_ 24 | 25 | #include 26 | #include 27 | #include 28 | #include "iova.h" 29 | #include 30 | 31 | /* 32 | * We need a fixed PAGE_SIZE of 4K irrespective of 33 | * arch PAGE_SIZE for IOMMU page tables. 34 | */ 35 | #define PAGE_SHIFT_4K (12) 36 | #define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K) 37 | #define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K) 38 | #define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K) 39 | 40 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K) 41 | #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) 42 | #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) 43 | 44 | /* 45 | * Intel IOMMU register specification per version 1.0 public spec. 46 | */ 47 | 48 | #define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */ 49 | #define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */ 50 | #define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */ 51 | #define DMAR_GCMD_REG 0x18 /* Global command register */ 52 | #define DMAR_GSTS_REG 0x1c /* Global status register */ 53 | #define DMAR_RTADDR_REG 0x20 /* Root entry table */ 54 | #define DMAR_CCMD_REG 0x28 /* Context command reg */ 55 | #define DMAR_FSTS_REG 0x34 /* Fault Status register */ 56 | #define DMAR_FECTL_REG 0x38 /* Fault control register */ 57 | #define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */ 58 | #define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */ 59 | #define DMAR_FEUADDR_REG 0x44 /* Upper address register */ 60 | #define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */ 61 | #define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */ 62 | #define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */ 63 | #define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */ 64 | #define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */ 65 | #define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */ 66 | 67 | #define OFFSET_STRIDE (9) 68 | /* 69 | #define dmar_readl(dmar, reg) readl(dmar + reg) 70 | #define dmar_readq(dmar, reg) ({ \ 71 | u32 lo, hi; \ 72 | lo = readl(dmar + reg); \ 73 | hi = readl(dmar + reg + 4); \ 74 | (((u64) hi) << 32) + lo; }) 75 | */ 76 | static inline u64 dmar_readq(void __iomem *addr) 77 | { 78 | u32 lo, hi; 79 | lo = readl(addr); 80 | hi = readl(addr + 4); 81 | return (((u64) hi) << 32) + lo; 82 | } 83 | 84 | static inline void dmar_writeq(void __iomem *addr, u64 val) 85 | { 86 | writel((u32)val, addr); 87 | writel((u32)(val >> 32), addr + 4); 88 | } 89 | 90 | #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) 91 | #define DMAR_VER_MINOR(v) ((v) & 0x0f) 92 | 93 | /* 94 | * Decoding Capability Register 95 | */ 96 | #define cap_read_drain(c) (((c) >> 55) & 1) 97 | #define cap_write_drain(c) (((c) >> 54) & 1) 98 | #define cap_max_amask_val(c) (((c) >> 48) & 0x3f) 99 | #define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1) 100 | #define cap_pgsel_inv(c) (((c) >> 39) & 1) 101 | 102 | #define 
cap_super_page_val(c) (((c) >> 34) & 0xf) 103 | #define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \ 104 | * OFFSET_STRIDE) + 21) 105 | 106 | #define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16) 107 | #define cap_max_fault_reg_offset(c) \ 108 | (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16) 109 | 110 | #define cap_zlr(c) (((c) >> 22) & 1) 111 | #define cap_isoch(c) (((c) >> 23) & 1) 112 | #define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1) 113 | #define cap_sagaw(c) (((c) >> 8) & 0x1f) 114 | #define cap_caching_mode(c) (((c) >> 7) & 1) 115 | #define cap_phmr(c) (((c) >> 6) & 1) 116 | #define cap_plmr(c) (((c) >> 5) & 1) 117 | #define cap_rwbf(c) (((c) >> 4) & 1) 118 | #define cap_afl(c) (((c) >> 3) & 1) 119 | #define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7))) 120 | /* 121 | * Extended Capability Register 122 | */ 123 | 124 | #define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1) 125 | #define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16) 126 | #define ecap_max_iotlb_offset(e) \ 127 | (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16) 128 | #define ecap_coherent(e) ((e) & 0x1) 129 | 130 | 131 | /* IOTLB_REG */ 132 | #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) 133 | #define DMA_TLB_DSI_FLUSH (((u64)2) << 60) 134 | #define DMA_TLB_PSI_FLUSH (((u64)3) << 60) 135 | #define DMA_TLB_IIRG(type) ((type >> 60) & 7) 136 | #define DMA_TLB_IAIG(val) (((val) >> 57) & 7) 137 | #define DMA_TLB_READ_DRAIN (((u64)1) << 49) 138 | #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) 139 | #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) 140 | #define DMA_TLB_IVT (((u64)1) << 63) 141 | #define DMA_TLB_IH_NONLEAF (((u64)1) << 6) 142 | #define DMA_TLB_MAX_SIZE (0x3f) 143 | 144 | /* PMEN_REG */ 145 | #define DMA_PMEN_EPM (((u32)1)<<31) 146 | #define DMA_PMEN_PRS (((u32)1)<<0) 147 | 148 | /* GCMD_REG */ 149 | #define DMA_GCMD_TE (((u32)1) << 31) 150 | #define DMA_GCMD_SRTP (((u32)1) << 30) 151 | #define DMA_GCMD_SFL (((u32)1) << 29) 152 | #define DMA_GCMD_EAFL (((u32)1) << 28) 153 | #define DMA_GCMD_WBF (((u32)1) << 27) 154 | 155 | /* GSTS_REG */ 156 | #define DMA_GSTS_TES (((u32)1) << 31) 157 | #define DMA_GSTS_RTPS (((u32)1) << 30) 158 | #define DMA_GSTS_FLS (((u32)1) << 29) 159 | #define DMA_GSTS_AFLS (((u32)1) << 28) 160 | #define DMA_GSTS_WBFS (((u32)1) << 27) 161 | 162 | /* CCMD_REG */ 163 | #define DMA_CCMD_ICC (((u64)1) << 63) 164 | #define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61) 165 | #define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61) 166 | #define DMA_CCMD_DEVICE_INVL (((u64)3) << 61) 167 | #define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32) 168 | #define DMA_CCMD_MASK_NOBIT 0 169 | #define DMA_CCMD_MASK_1BIT 1 170 | #define DMA_CCMD_MASK_2BIT 2 171 | #define DMA_CCMD_MASK_3BIT 3 172 | #define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16) 173 | #define DMA_CCMD_DID(d) ((u64)((d) & 0xffff)) 174 | 175 | /* FECTL_REG */ 176 | #define DMA_FECTL_IM (((u32)1) << 31) 177 | 178 | /* FSTS_REG */ 179 | #define DMA_FSTS_PPF ((u32)2) 180 | #define DMA_FSTS_PFO ((u32)1) 181 | #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) 182 | 183 | /* FRCD_REG, 32 bits access */ 184 | #define DMA_FRCD_F (((u32)1) << 31) 185 | #define dma_frcd_type(d) ((d >> 30) & 1) 186 | #define dma_frcd_fault_reason(c) (c & 0xff) 187 | #define dma_frcd_source_id(c) (c & 0xffff) 188 | #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */ 189 | 190 | /* 191 | * 0: Present 192 | * 1-11: Reserved 193 | * 12-63: Context Ptr (12 - (haw-1)) 194 | * 64-127: Reserved 195 | */ 196 
| struct root_entry { 197 | u64 val; 198 | u64 rsvd1; 199 | }; 200 | #define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry)) 201 | static inline bool root_present(struct root_entry *root) 202 | { 203 | return (root->val & 1); 204 | } 205 | static inline void set_root_present(struct root_entry *root) 206 | { 207 | root->val |= 1; 208 | } 209 | static inline void set_root_value(struct root_entry *root, unsigned long value) 210 | { 211 | root->val |= value & PAGE_MASK_4K; 212 | } 213 | 214 | struct context_entry; 215 | static inline struct context_entry * 216 | get_context_addr_from_root(struct root_entry *root) 217 | { 218 | return (struct context_entry *) 219 | (root_present(root)?phys_to_virt( 220 | root->val & PAGE_MASK_4K): 221 | NULL); 222 | } 223 | 224 | /* 225 | * low 64 bits: 226 | * 0: present 227 | * 1: fault processing disable 228 | * 2-3: translation type 229 | * 12-63: address space root 230 | * high 64 bits: 231 | * 0-2: address width 232 | * 3-6: aval 233 | * 8-23: domain id 234 | */ 235 | struct context_entry { 236 | u64 lo; 237 | u64 hi; 238 | }; 239 | #define context_present(c) ((c).lo & 1) 240 | #define context_fault_disable(c) (((c).lo >> 1) & 1) 241 | #define context_translation_type(c) (((c).lo >> 2) & 3) 242 | #define context_address_root(c) ((c).lo & PAGE_MASK_4K) 243 | #define context_address_width(c) ((c).hi & 7) 244 | #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) 245 | 246 | #define context_set_present(c) do {(c).lo |= 1;} while (0) 247 | #define context_set_fault_enable(c) \ 248 | do {(c).lo &= (((u64)-1) << 2) | 1;} while (0) 249 | #define context_set_translation_type(c, val) \ 250 | do { \ 251 | (c).lo &= (((u64)-1) << 4) | 3; \ 252 | (c).lo |= ((val) & 3) << 2; \ 253 | } while (0) 254 | #define CONTEXT_TT_MULTI_LEVEL 0 255 | #define context_set_address_root(c, val) \ 256 | do {(c).lo |= (val) & PAGE_MASK_4K;} while (0) 257 | #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) 258 | #define context_set_domain_id(c, val) \ 259 | do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) 260 | #define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0) 261 | 262 | /* 263 | * 0: readable 264 | * 1: writable 265 | * 2-6: reserved 266 | * 7: super page 267 | * 8-11: available 268 | * 12-63: Host physical address 269 | */ 270 | struct dma_pte { 271 | u64 val; 272 | }; 273 | #define dma_clear_pte(p) do {(p).val = 0;} while (0) 274 | 275 | #define DMA_PTE_READ (1) 276 | #define DMA_PTE_WRITE (2) 277 | 278 | #define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0) 279 | #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) 280 | #define dma_set_pte_prot(p, prot) \ 281 | do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) 282 | #define dma_pte_addr(p) ((p).val & PAGE_MASK_4K) 283 | #define dma_set_pte_addr(p, addr) do {\ 284 | (p).val |= ((addr) & PAGE_MASK_4K); } while (0) 285 | #define dma_pte_present(p) (((p).val & 3) != 0) 286 | 287 | struct intel_iommu; 288 | 289 | struct dmar_domain { 290 | int id; /* domain id */ 291 | struct intel_iommu *iommu; /* back pointer to owning iommu */ 292 | 293 | struct list_head devices; /* all devices' list */ 294 | struct iova_domain iovad; /* iova's that belong to this domain */ 295 | 296 | struct dma_pte *pgd; /* virtual address */ 297 | spinlock_t mapping_lock; /* page table lock */ 298 | int gaw; /* max guest address width */ 299 | 300 | /* adjusted guest address width, 0 is level 2 30-bit */ 301 | int agaw; 302 | 303 | #define
DOMAIN_FLAG_MULTIPLE_DEVICES 1 304 | int flags; 305 | }; 306 | 307 | /* PCI domain-device relationship */ 308 | struct device_domain_info { 309 | struct list_head link; /* link to domain siblings */ 310 | struct list_head global; /* link to global list */ 311 | u8 bus; /* PCI bus number */ 312 | u8 devfn; /* PCI devfn number */ 313 | struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ 314 | struct dmar_domain *domain; /* pointer to domain */ 315 | }; 316 | 317 | extern int init_dmars(void); 318 | 319 | struct intel_iommu { 320 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ 321 | u64 cap; 322 | u64 ecap; 323 | unsigned long *domain_ids; /* bitmap of domains */ 324 | struct dmar_domain **domains; /* ptr to domains */ 325 | int seg; 326 | u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ 327 | spinlock_t lock; /* protect context, domain ids */ 328 | spinlock_t register_lock; /* protect register handling */ 329 | struct root_entry *root_entry; /* virtual address */ 330 | 331 | unsigned int irq; 332 | unsigned char name[7]; /* Device Name */ 333 | struct msi_msg saved_msg; 334 | struct sys_device sysdev; 335 | }; 336 | 337 | #ifndef CONFIG_DMAR_GFX_WA 338 | static inline void iommu_prepare_gfx_mapping(void) 339 | { 340 | return; 341 | } 342 | #endif /* !CONFIG_DMAR_GFX_WA */ 343 | 344 | void intel_iommu_domain_exit(struct dmar_domain *domain); 345 | struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev); 346 | int intel_iommu_context_mapping(struct dmar_domain *domain, 347 | struct pci_dev *pdev); 348 | int intel_iommu_page_mapping(struct dmar_domain *domain, dma_addr_t iova, 349 | u64 hpa, size_t size, int prot); 350 | void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn); 351 | struct dmar_domain *intel_iommu_find_domain(struct pci_dev *pdev); 352 | int intel_iommu_found(void); 353 | u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova); 354 | 355 | #endif 356 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/iova.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2006, Intel Corporation. 3 | * 4 | * This file is released under the GPLv2.
5 | * 6 | * Copyright (C) 2006-2008 Intel Corporation 7 | * Author: Anil S Keshavamurthy 8 | * 9 | */ 10 | 11 | #ifndef _IOVA_H_ 12 | #define _IOVA_H_ 13 | 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | /* IO virtual address start page frame number */ 20 | #define IOVA_START_PFN (1) 21 | 22 | /* iova structure */ 23 | struct iova { 24 | struct rb_node node; 25 | unsigned long pfn_hi; /* IOMMU dish out addr hi */ 26 | unsigned long pfn_lo; /* IOMMU dish out addr lo */ 27 | }; 28 | 29 | /* holds all the iova translations for a domain */ 30 | struct iova_domain { 31 | spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */ 32 | spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ 33 | struct rb_root rbroot; /* iova domain rbtree root */ 34 | struct rb_node *cached32_node; /* Save last alloced node */ 35 | unsigned long dma_32bit_pfn; 36 | }; 37 | 38 | struct iova *alloc_iova_mem(void); 39 | void free_iova_mem(struct iova *iova); 40 | void free_iova(struct iova_domain *iovad, unsigned long pfn); 41 | void __free_iova(struct iova_domain *iovad, struct iova *iova); 42 | struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, 43 | unsigned long limit_pfn, 44 | bool size_aligned); 45 | struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, 46 | unsigned long pfn_hi); 47 | void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); 48 | void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); 49 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); 50 | void put_iova_domain(struct iova_domain *iovad); 51 | 52 | #endif 53 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/irq_work.h: -------------------------------------------------------------------------------- 1 | struct irq_work { 2 | }; 3 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/irqbypass.h: -------------------------------------------------------------------------------- 1 | /* 2 | * IRQ offload/bypass manager 3 | * 4 | * Copyright (C) 2015 Red Hat, Inc. 5 | * Copyright (c) 2015 Linaro Ltd. 6 | * 7 | * This program is free software; you can redistribute it and/or modify 8 | * it under the terms of the GNU General Public License version 2 as 9 | * published by the Free Software Foundation. 10 | */ 11 | #ifndef IRQBYPASS_H 12 | #define IRQBYPASS_H 13 | 14 | #include 15 | 16 | struct irq_bypass_consumer; 17 | 18 | /* 19 | * Theory of operation 20 | * 21 | * The IRQ bypass manager is a simple set of lists and callbacks that allows 22 | * IRQ producers (ex. physical interrupt sources) to be matched to IRQ 23 | * consumers (ex. virtualization hardware that allows IRQ bypass or offload) 24 | * via a shared token (ex. eventfd_ctx). Producers and consumers register 25 | * independently. When a token match is found, the optional @stop callback 26 | * will be called for each participant. The pair will then be connected via 27 | * the @add_* callbacks, and finally the optional @start callback will allow 28 | * any final coordination. When either participant is unregistered, the 29 | * process is repeated using the @del_* callbacks in place of the @add_* 30 | * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings 31 | * are not supported. 
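 *
 * A rough usage sketch (the my_* and host_irq names are invented for
 * illustration; only the irq_bypass_* symbols come from this header).
 * A consumer and a producer pair up by registering the same
 * eventfd_ctx pointer as their match token:
 *
 *	cons->token = (void *)eventfd;
 *	cons->add_producer = my_add_producer;
 *	cons->del_producer = my_del_producer;
 *	irq_bypass_register_consumer(cons);
 *
 *	prod->token = (void *)eventfd;
 *	prod->irq = host_irq;
 *	irq_bypass_register_producer(prod);
 *
 * Whichever registration comes second sees the token match, so the
 * manager runs @stop on both sides (if set), connects the pair via
 * cons->add_producer(cons, prod), and finally runs @start.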
32 | */ 33 | 34 | /** 35 | * struct irq_bypass_producer - IRQ bypass producer definition 36 | * @node: IRQ bypass manager private list management 37 | * @token: opaque token to match between producer and consumer 38 | * @irq: Linux IRQ number for the producer device 39 | * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) 40 | * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) 41 | * @stop: Perform any quiesce operations necessary prior to add/del (optional) 42 | * @start: Perform any startup operations necessary after add/del (optional) 43 | * 44 | * The IRQ bypass producer structure represents an interrupt source for 45 | * participation in possible host bypass, for instance an interrupt vector 46 | * for a physical device assigned to a VM. 47 | */ 48 | struct irq_bypass_producer { 49 | struct list_head node; 50 | void *token; 51 | int irq; 52 | int (*add_consumer)(struct irq_bypass_producer *, 53 | struct irq_bypass_consumer *); 54 | void (*del_consumer)(struct irq_bypass_producer *, 55 | struct irq_bypass_consumer *); 56 | void (*stop)(struct irq_bypass_producer *); 57 | void (*start)(struct irq_bypass_producer *); 58 | }; 59 | 60 | /** 61 | * struct irq_bypass_consumer - IRQ bypass consumer definition 62 | * @node: IRQ bypass manager private list management 63 | * @token: opaque token to match between producer and consumer 64 | * @add_producer: Connect the IRQ consumer to an IRQ producer 65 | * @del_producer: Disconnect the IRQ consumer from an IRQ producer 66 | * @stop: Perform any quiesce operations necessary prior to add/del (optional) 67 | * @start: Perform any startup operations necessary after add/del (optional) 68 | * 69 | * The IRQ bypass consumer structure represents an interrupt sink for 70 | * participation in possible host bypass, for instance a hypervisor may 71 | * support offloads to allow bypassing the host entirely or offload 72 | * portions of the interrupt handling to the VM. 
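 *
 * Unlike the producer's callbacks, @add_producer and @del_producer are
 * not marked optional above; a consumer is expected to supply both
 * before registering. A hedged sketch (callback names invented):
 *
 *	cons.add_producer = my_pi_attach;	arm posted interrupts
 *	cons.del_producer = my_pi_detach;	revert to normal injection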
73 | */ 74 | struct irq_bypass_consumer { 75 | struct list_head node; 76 | void *token; 77 | int (*add_producer)(struct irq_bypass_consumer *, 78 | struct irq_bypass_producer *); 79 | void (*del_producer)(struct irq_bypass_consumer *, 80 | struct irq_bypass_producer *); 81 | void (*stop)(struct irq_bypass_consumer *); 82 | void (*start)(struct irq_bypass_consumer *); 83 | }; 84 | 85 | int irq_bypass_register_producer(struct irq_bypass_producer *); 86 | void irq_bypass_unregister_producer(struct irq_bypass_producer *); 87 | int irq_bypass_register_consumer(struct irq_bypass_consumer *); 88 | void irq_bypass_unregister_consumer(struct irq_bypass_consumer *); 89 | 90 | #endif /* IRQBYPASS_H */ 91 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/jump_label.h: -------------------------------------------------------------------------------- 1 | /* to satisfy #include */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/magic.h: -------------------------------------------------------------------------------- 1 | #ifndef __LINUX_MAGIC_H__ 2 | #define __LINUX_MAGIC_H__ 3 | 4 | #define ADFS_SUPER_MAGIC 0xadf5 5 | #define AFFS_SUPER_MAGIC 0xadff 6 | #define AFS_SUPER_MAGIC 0x5346414F 7 | #define AUTOFS_SUPER_MAGIC 0x0187 8 | #define CODA_SUPER_MAGIC 0x73757245 9 | #define EFS_SUPER_MAGIC 0x414A53 10 | #define EXT2_SUPER_MAGIC 0xEF53 11 | #define EXT3_SUPER_MAGIC 0xEF53 12 | #define EXT4_SUPER_MAGIC 0xEF53 13 | #define HPFS_SUPER_MAGIC 0xf995e849 14 | #define ISOFS_SUPER_MAGIC 0x9660 15 | #define JFFS2_SUPER_MAGIC 0x72b6 16 | #define KVMFS_SUPER_MAGIC 0x19700426 17 | 18 | #define MINIX_SUPER_MAGIC 0x137F /* original minix fs */ 19 | #define MINIX_SUPER_MAGIC2 0x138F /* minix fs, 30 char names */ 20 | #define MINIX2_SUPER_MAGIC 0x2468 /* minix V2 fs */ 21 | #define MINIX2_SUPER_MAGIC2 0x2478 /* minix V2 fs, 30 char names */ 22 | #define MINIX3_SUPER_MAGIC 0x4d5a /* minix V3 fs */ 23 | 24 | #define MSDOS_SUPER_MAGIC 0x4d44 /* MD */ 25 | #define NCP_SUPER_MAGIC 0x564c /* Guess, what 0x564c is :-) */ 26 | #define NFS_SUPER_MAGIC 0x6969 27 | #define OPENPROM_SUPER_MAGIC 0x9fa1 28 | #define PROC_SUPER_MAGIC 0x9fa0 29 | #define QNX4_SUPER_MAGIC 0x002f /* qnx4 fs detection */ 30 | 31 | #define REISERFS_SUPER_MAGIC 0x52654973 /* used by gcc */ 32 | /* used by file system utilities that 33 | look at the superblock, etc. */ 34 | #define REISERFS_SUPER_MAGIC_STRING "ReIsErFs" 35 | #define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs" 36 | #define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs" 37 | 38 | #define SMB_SUPER_MAGIC 0x517B 39 | #define USBDEVICE_SUPER_MAGIC 0x9fa2 40 | 41 | #endif /* __LINUX_MAGIC_H__ */ 42 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/marker.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Alternative file to satisfy #include for older kernels. 3 | */ 4 | #ifndef _LINUX_MARKER_H 5 | #define _LINUX_MARKER_H 6 | 7 | /* 8 | * Code markup for dynamic and static tracing. 9 | * 10 | * See Documentation/marker.txt. 11 | * 12 | * (C) Copyright 2006 Mathieu Desnoyers 13 | * 14 | * This file is released under the GPLv2. 15 | * See the file COPYING for more details.
16 | */ 17 | 18 | #include 19 | 20 | struct module; 21 | struct marker; 22 | 23 | /** 24 | * marker_probe_func - Type of a marker probe function 25 | * @probe_private: probe private data 26 | * @call_private: call site private data 27 | * @fmt: format string 28 | * @args: variable argument list pointer. Use a pointer to overcome C's 29 | * inability to pass this around as a pointer in a portable manner in 30 | * the callee otherwise. 31 | * 32 | * Type of marker probe functions. They receive the mdata and need to parse the 33 | * format string to recover the variable argument list. 34 | */ 35 | typedef void marker_probe_func(void *probe_private, void *call_private, 36 | const char *fmt, va_list *args); 37 | 38 | struct marker_probe_closure { 39 | marker_probe_func *func; /* Callback */ 40 | void *probe_private; /* Private probe data */ 41 | }; 42 | 43 | struct marker { 44 | const char *name; /* Marker name */ 45 | const char *format; /* Marker format string, describing the 46 | * variable argument list. 47 | */ 48 | char state; /* Marker state. */ 49 | char ptype; /* probe type : 0 : single, 1 : multi */ 50 | void (*call)(const struct marker *mdata, /* Probe wrapper */ 51 | void *call_private, const char *fmt, ...); 52 | struct marker_probe_closure single; 53 | struct marker_probe_closure *multi; 54 | } __attribute__((aligned(8))); 55 | 56 | #define __trace_mark(name, call_private, format, args...) \ 57 | __mark_check_format(format, ## args) 58 | static inline void marker_update_probe_range(struct marker *begin, 59 | struct marker *end) 60 | { } 61 | 62 | /** 63 | * trace_mark - Marker 64 | * @name: marker name, not quoted. 65 | * @format: format string 66 | * @args...: variable argument list 67 | * 68 | * Places a marker. 69 | */ 70 | #define trace_mark(name, format, args...) \ 71 | __trace_mark(name, NULL, format, ## args) 72 | 73 | /** 74 | * MARK_NOARGS - Format string for a marker with no argument. 75 | */ 76 | #define MARK_NOARGS " " 77 | 78 | /* To be used for string format validity checking with gcc */ 79 | static inline void __attribute__((format(printf,1,2))) 80 | ___mark_check_format(const char *fmt, ...) 81 | { 82 | } 83 | 84 | #define __mark_check_format(format, args...) \ 85 | do { \ 86 | if (0) \ 87 | ___mark_check_format(format, ## args); \ 88 | } while (0) 89 | 90 | extern marker_probe_func __mark_empty_function; 91 | 92 | extern void marker_probe_cb(const struct marker *mdata, 93 | void *call_private, const char *fmt, ...); 94 | extern void marker_probe_cb_noarg(const struct marker *mdata, 95 | void *call_private, const char *fmt, ...); 96 | 97 | /* 98 | * Connect a probe to a marker. 99 | * private data pointer must be a valid allocated memory address, or NULL. 100 | */ 101 | extern int marker_probe_register(const char *name, const char *format, 102 | marker_probe_func *probe, void *probe_private); 103 | 104 | /* 105 | * Returns the private data given to marker_probe_register. 106 | */ 107 | extern int marker_probe_unregister(const char *name, 108 | marker_probe_func *probe, void *probe_private); 109 | /* 110 | * Unregister a marker by providing the registered private data. 
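 *
 * For example (a sketch only; the probe and its private data are
 * invented), the two unregister flavors pair with a registration like:
 *
 *	marker_probe_register("my_marker", "value %d", my_probe, my_data);
 *	...
 *	marker_probe_unregister("my_marker", my_probe, my_data);
 * or
 *	marker_probe_unregister_private_data(my_probe, my_data);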
111 | */ 112 | extern int marker_probe_unregister_private_data(marker_probe_func *probe, 113 | void *probe_private); 114 | 115 | extern void *marker_get_private_data(const char *name, marker_probe_func *probe, 116 | int num); 117 | 118 | #endif 119 | 120 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/math64.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Empty file to satisfy #include for older kernels. 3 | */ 4 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/mmu_context.h: -------------------------------------------------------------------------------- 1 | /* to satisfy #include */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/mmu_notifier.h: -------------------------------------------------------------------------------- 1 | #ifndef _LINUX_MMU_NOTIFIER_H 2 | #define _LINUX_MMU_NOTIFIER_H 3 | 4 | struct mmu_notifier {}; 5 | 6 | #endif 7 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/msi.h: -------------------------------------------------------------------------------- 1 | #ifndef LINUX_MSI_H 2 | #define LINUX_MSI_H 3 | 4 | #include 5 | 6 | struct msi_msg { 7 | u32 address_lo; /* low 32 bits of msi message address */ 8 | u32 address_hi; /* high 32 bits of msi message address */ 9 | u32 data; /* 16 bits of msi message data */ 10 | }; 11 | 12 | /* Helper functions */ 13 | extern void mask_msi_irq(unsigned int irq); 14 | extern void unmask_msi_irq(unsigned int irq); 15 | extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); 16 | extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); 17 | 18 | struct msi_desc { 19 | struct { 20 | __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */ 21 | __u8 maskbit : 1; /* mask-pending bit supported ? */ 22 | __u8 masked : 1; 23 | __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 24 | __u8 pos; /* Location of the msi capability */ 25 | __u32 maskbits_mask; /* mask bits mask */ 26 | __u16 entry_nr; /* specific enabled entry */ 27 | unsigned default_irq; /* default pre-assigned irq */ 28 | } msi_attrib; 29 | 30 | unsigned int irq; 31 | struct list_head list; 32 | 33 | void __iomem *mask_base; 34 | struct pci_dev *dev; 35 | 36 | /* Last set MSI message */ 37 | struct msi_msg msg; 38 | }; 39 | 40 | /* 41 | * The arch hooks for setting up msi irqs 42 | */ 43 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); 44 | void arch_teardown_msi_irq(unsigned int irq); 45 | extern int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 46 | extern void arch_teardown_msi_irqs(struct pci_dev *dev); 47 | extern int arch_msi_check_device(struct pci_dev *dev, int nvec, int type); 48 | 49 | 50 | #endif /* LINUX_MSI_H */ 51 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/mutex.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Empty file to satisfy #include for older kernels.
3 | */ 4 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/perf_event.h: -------------------------------------------------------------------------------- 1 | /* Dummy file to satisfy #include */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/pvclock_gtod.h: -------------------------------------------------------------------------------- 1 | /* Dummy file */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/ratelimit.h: -------------------------------------------------------------------------------- 1 | #define DEFINE_RATELIMIT_STATE(__name, a, b) int (__name) 2 | 3 | #define __ratelimit(__dummy) \ 4 | ({ *__dummy = *__dummy; printk_ratelimit(); }) 5 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/refcount.h: -------------------------------------------------------------------------------- 1 | #ifndef _LINUX_REFCOUNT_H 2 | #define _LINUX_REFCOUNT_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | typedef struct refcount_struct { 10 | atomic_t refs; 11 | } refcount_t; 12 | 13 | #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), } 14 | 15 | static inline void refcount_set(refcount_t *r, unsigned int n) 16 | { 17 | atomic_set(&r->refs, n); 18 | } 19 | 20 | static inline unsigned int refcount_read(const refcount_t *r) 21 | { 22 | return atomic_read(&r->refs); 23 | } 24 | 25 | extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r); 26 | extern void refcount_add(unsigned int i, refcount_t *r); 27 | 28 | extern __must_check bool refcount_inc_not_zero(refcount_t *r); 29 | extern void refcount_inc(refcount_t *r); 30 | 31 | extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r); 32 | extern void refcount_sub(unsigned int i, refcount_t *r); 33 | 34 | extern __must_check bool refcount_dec_and_test(refcount_t *r); 35 | extern void refcount_dec(refcount_t *r); 36 | 37 | extern __must_check bool refcount_dec_if_one(refcount_t *r); 38 | extern __must_check bool refcount_dec_not_one(refcount_t *r); 39 | extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); 40 | extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); 41 | 42 | #endif /* _LINUX_REFCOUNT_H */ 43 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/sched/mm.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/sched/signal.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/sched/stat.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/swait.h: -------------------------------------------------------------------------------- 1 | #ifndef _LINUX_SWAIT_H 2 | #define _LINUX_SWAIT_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | /* 10 | * Simple wait queues 11 | * 12 | * While these 
are very similar to the other/complex wait queues (wait.h) the 13 | * most important difference is that the simple waitqueue allows for 14 | * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold 15 | * times. 16 | * 17 | * In order to make this so, we had to drop a fair number of features of the 18 | * other waitqueue code; notably: 19 | * 20 | * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue; 21 | * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right 22 | * sleeper state. 23 | * 24 | * - the exclusive mode; because this requires preserving the list order 25 | * and this is hard. 26 | * 27 | * - custom wake functions; because you cannot give any guarantees about 28 | * random code. 29 | * 30 | * As a side effect of this; the data structures are slimmer. 31 | * 32 | * One would recommend using this wait queue where possible. 33 | */ 34 | 35 | struct task_struct; 36 | 37 | struct swait_queue_head { 38 | raw_spinlock_t lock; 39 | struct list_head task_list; 40 | }; 41 | 42 | struct swait_queue { 43 | struct task_struct *task; 44 | struct list_head task_list; 45 | }; 46 | 47 | #define __SWAITQUEUE_INITIALIZER(name) { \ 48 | .task = current, \ 49 | .task_list = LIST_HEAD_INIT((name).task_list), \ 50 | } 51 | 52 | #define DECLARE_SWAITQUEUE(name) \ 53 | struct swait_queue name = __SWAITQUEUE_INITIALIZER(name) 54 | 55 | #define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \ 56 | .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ 57 | .task_list = LIST_HEAD_INIT((name).task_list), \ 58 | } 59 | 60 | #define DECLARE_SWAIT_QUEUE_HEAD(name) \ 61 | struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name) 62 | 63 | extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name, 64 | struct lock_class_key *key); 65 | 66 | #define init_swait_queue_head(q) \ 67 | do { \ 68 | static struct lock_class_key __key; \ 69 | __init_swait_queue_head((q), #q, &__key); \ 70 | } while (0) 71 | 72 | #ifdef CONFIG_LOCKDEP 73 | # define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ 74 | ({ init_swait_queue_head(&name); name; }) 75 | # define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ 76 | struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) 77 | #else 78 | # define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ 79 | DECLARE_SWAIT_QUEUE_HEAD(name) 80 | #endif 81 | 82 | static inline int swait_active(struct swait_queue_head *q) 83 | { 84 | return !list_empty(&q->task_list); 85 | } 86 | 87 | extern void swake_up(struct swait_queue_head *q); 88 | extern void swake_up_all(struct swait_queue_head *q); 89 | extern void swake_up_locked(struct swait_queue_head *q); 90 | 91 | extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 92 | extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); 93 | extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); 94 | 95 | extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); 96 | extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); 97 | 98 | /* as per ___wait_event() but for swait, therefore "exclusive == 0" */ 99 | #define ___swait_event(wq, condition, state, ret, cmd) \ 100 | ({ \ 101 | struct swait_queue __wait; \ 102 | long __ret = ret; \ 103 | \ 104 | INIT_LIST_HEAD(&__wait.task_list); \ 105 | for (;;) { \ 106 | long __int = prepare_to_swait_event(&wq, &__wait, state);\ 107 | \ 108 | if (condition) \ 109 | break; \ 110 | \ 111 | if 
(___wait_is_interruptible(state) && __int) { \ 112 | __ret = __int; \ 113 | break; \ 114 | } \ 115 | \ 116 | cmd; \ 117 | } \ 118 | finish_swait(&wq, &__wait); \ 119 | __ret; \ 120 | }) 121 | 122 | #define __swait_event(wq, condition) \ 123 | (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ 124 | schedule()) 125 | 126 | #define swait_event(wq, condition) \ 127 | do { \ 128 | if (condition) \ 129 | break; \ 130 | __swait_event(wq, condition); \ 131 | } while (0) 132 | 133 | #define __swait_event_timeout(wq, condition, timeout) \ 134 | ___swait_event(wq, ___wait_cond_timeout(condition), \ 135 | TASK_UNINTERRUPTIBLE, timeout, \ 136 | __ret = schedule_timeout(__ret)) 137 | 138 | #define swait_event_timeout(wq, condition, timeout) \ 139 | ({ \ 140 | long __ret = timeout; \ 141 | if (!___wait_cond_timeout(condition)) \ 142 | __ret = __swait_event_timeout(wq, condition, timeout); \ 143 | __ret; \ 144 | }) 145 | 146 | #define __swait_event_interruptible(wq, condition) \ 147 | ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ 148 | schedule()) 149 | 150 | #define swait_event_interruptible(wq, condition) \ 151 | ({ \ 152 | int __ret = 0; \ 153 | if (!(condition)) \ 154 | __ret = __swait_event_interruptible(wq, condition); \ 155 | __ret; \ 156 | }) 157 | 158 | #define __swait_event_interruptible_timeout(wq, condition, timeout) \ 159 | ___swait_event(wq, ___wait_cond_timeout(condition), \ 160 | TASK_INTERRUPTIBLE, timeout, \ 161 | __ret = schedule_timeout(__ret)) 162 | 163 | #define swait_event_interruptible_timeout(wq, condition, timeout) \ 164 | ({ \ 165 | long __ret = timeout; \ 166 | if (!___wait_cond_timeout(condition)) \ 167 | __ret = __swait_event_interruptible_timeout(wq, \ 168 | condition, timeout); \ 169 | __ret; \ 170 | }) 171 | 172 | #endif /* _LINUX_SWAIT_H */ 173 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/syscore_ops.h: -------------------------------------------------------------------------------- 1 | /* 2 | * syscore_ops.h - System core operations. 3 | * 4 | * Copyright (C) 2011 Rafael J. Wysocki , Novell Inc. 5 | * 6 | * This file is released under the GPLv2. 7 | */ 8 | 9 | #ifndef _LINUX_SYSCORE_OPS_H 10 | #define _LINUX_SYSCORE_OPS_H 11 | 12 | #include 13 | 14 | struct syscore_ops { 15 | struct list_head node; 16 | int (*suspend)(void); 17 | void (*resume)(void); 18 | void (*shutdown)(void); 19 | }; 20 | 21 | extern void register_syscore_ops(struct syscore_ops *ops); 22 | extern void unregister_syscore_ops(struct syscore_ops *ops); 23 | #ifdef CONFIG_PM_SLEEP 24 | extern int syscore_suspend(void); 25 | extern void syscore_resume(void); 26 | #endif 27 | extern void syscore_shutdown(void); 28 | 29 | #endif 30 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/tboot.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Empty file to satisfy #include for older kernels. 3 | */ 4 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/time64.h: -------------------------------------------------------------------------------- 1 | #ifndef _LINUX_TIME64_H 2 | #define _LINUX_TIME64_H 3 | 4 | #include 5 | 6 | typedef __s64 time64_t; 7 | 8 | /* 9 | * This wants to go into uapi/linux/time.h once we agreed about the 10 | * userspace interfaces. 
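 *
 * On 32-bit kernels the struct below widens tv_sec to 64 bits so that
 * times past year 2038 stay representable; on 64-bit kernels timespec64
 * simply aliases struct timespec.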
11 | */ 12 | #if __BITS_PER_LONG == 64 13 | # define timespec64 timespec 14 | #else 15 | struct timespec64 { 16 | time64_t tv_sec; /* seconds */ 17 | long tv_nsec; /* nanoseconds */ 18 | }; 19 | #endif 20 | 21 | #endif /* _LINUX_TIME64_H */ 22 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/timekeeper_internal.h: -------------------------------------------------------------------------------- 1 | /* Dummy file */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/trace_events.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/tracepoint.h: -------------------------------------------------------------------------------- 1 | /* Dummy file to satisfy #include */ 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/linux/user-return-notifier.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Empty file to satisfy #include for older kernels. 3 | */ 4 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/include-compat/trace/define_trace.h: -------------------------------------------------------------------------------- 1 | /* Empty file to satisfy include */ 2 | 3 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/kvm-kmod.spec: -------------------------------------------------------------------------------- 1 | %define kmod_name kvm 2 | 3 | Name: kvm-kmod 4 | Version: 0.0 5 | Release: 0 6 | Summary: %{kmod_name} kernel module 7 | 8 | Group: System Environment/Kernel 9 | License: GPL 10 | URL: http://www.qumranet.com 11 | BuildRoot: %{_tmppath}/%{name}-%{version}-%{release} 12 | 13 | ExclusiveArch: i386 x86_64 14 | 15 | %description 16 | This kernel module provides support for virtual machines using hardware support 17 | (Intel VT-x&VT-i or AMD SVM). 18 | 19 | %prep 20 | 21 | %build 22 | 23 | rm -rf %{buildroot} 24 | 25 | %install 26 | 27 | %define kverrel unknown 28 | %define moddir /lib/modules/%{kverrel}/extra 29 | mkdir -p %{buildroot}/%{moddir} 30 | cp %{objdir}/%{kmod_name}.ko %{objdir}/%{kmod_name}-*.ko %{buildroot}/%{moddir} 31 | chmod u+x %{buildroot}/%{moddir}/%{kmod_name}*.ko 32 | 33 | %post 34 | 35 | depmod %{kverrel} 36 | 37 | %postun 38 | 39 | depmod %{kverrel} 40 | 41 | %clean 42 | %{__rm} -rf %{buildroot} 43 | 44 | %files 45 | %{moddir}/%{kmod_name}.ko 46 | %ifarch i386 x86_64 47 | %{moddir}/%{kmod_name}-amd.ko 48 | %endif 49 | %{moddir}/%{kmod_name}-intel.ko 50 | 51 | 52 | %changelog 53 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/powerpc/Makefile.pre: -------------------------------------------------------------------------------- 1 | prerequisite: 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/refcount.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Variant of atomic_t specialized for reference counts. 3 | * 4 | * The interface matches the atomic_t interface (to aid in porting) but only 5 | * provides the few functions one should use for reference counting. 
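 *
 * A minimal usage sketch (obj and its ref field are hypothetical):
 * refcount_set(&obj->ref, 1) when the object is created,
 * refcount_inc(&obj->ref) for every additional holder, and free the
 * object only where refcount_dec_and_test(&obj->ref) returns true.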
6 | * 7 | * It differs in that the counter saturates at UINT_MAX and will not move once 8 | * there. This avoids wrapping the counter and causing 'spurious' 9 | * use-after-free issues. 10 | * 11 | * Memory ordering rules are slightly relaxed wrt regular atomic_t functions 12 | * and provide only what is strictly required for refcounts. 13 | * 14 | * The increments are fully relaxed; these will not provide ordering. The 15 | * rationale is that whatever is used to obtain the object we're increasing the 16 | * reference count on will provide the ordering. For locked data structures, 17 | * it's the lock acquire; for RCU/lockless data structures it's the dependent 18 | * load. 19 | * 20 | * Do note that inc_not_zero() provides a control dependency which will order 21 | * future stores against the inc; this ensures we'll never modify the object 22 | * if we did not in fact acquire a reference. 23 | * 24 | * The decrements will provide release order, such that all the prior loads and 25 | * stores will be issued before; it also provides a control dependency, which 26 | * will order us against the subsequent free(). 27 | * 28 | * The control dependency is against the load of the cmpxchg (ll/sc) that 29 | * succeeded. This means the stores aren't fully ordered, but this is fine 30 | * because the 1->0 transition indicates no concurrency. 31 | * 32 | * Note that the allocator is responsible for ordering things between free() 33 | * and alloc(). 34 | * 35 | */ 36 | 37 | #include 38 | #include 39 | 40 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0) 41 | bool refcount_add_not_zero(unsigned int i, refcount_t *r) 42 | { 43 | unsigned int old, new, val = atomic_read(&r->refs); 44 | 45 | for (;;) { 46 | if (!val) 47 | return false; 48 | 49 | if (unlikely(val == UINT_MAX)) 50 | return true; 51 | 52 | new = val + i; 53 | if (new < val) 54 | new = UINT_MAX; 55 | old = atomic_cmpxchg(&r->refs, val, new); 56 | if (old == val) 57 | break; 58 | 59 | val = old; 60 | } 61 | 62 | WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); 63 | 64 | return true; 65 | } 66 | EXPORT_SYMBOL_GPL(refcount_add_not_zero); 67 | 68 | void refcount_add(unsigned int i, refcount_t *r) 69 | { 70 | WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); 71 | } 72 | EXPORT_SYMBOL_GPL(refcount_add); 73 | 74 | /* 75 | * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN. 76 | * 77 | * Provides no memory ordering; it is assumed the caller has guaranteed the 78 | * object memory to be stable (RCU, etc.). It does provide a control dependency 79 | * and thereby orders future stores. See the comment on top. 80 | */ 81 | bool refcount_inc_not_zero(refcount_t *r) 82 | { 83 | unsigned int old, new, val = atomic_read(&r->refs); 84 | 85 | for (;;) { 86 | new = val + 1; 87 | 88 | if (!val) 89 | return false; 90 | 91 | if (unlikely(!new)) 92 | return true; 93 | 94 | old = atomic_cmpxchg(&r->refs, val, new); 95 | if (old == val) 96 | break; 97 | 98 | val = old; 99 | } 100 | 101 | WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); 102 | 103 | return true; 104 | } 105 | EXPORT_SYMBOL_GPL(refcount_inc_not_zero); 106 | 107 | /* 108 | * Similar to atomic_inc(), will saturate at UINT_MAX and WARN. 109 | * 110 | * Provides no memory ordering; it is assumed the caller already has a 111 | * reference on the object, and will WARN when this is not so.
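 *
 * Sketch of the intended split: a thread that already holds a
 * reference may take another with refcount_inc(&obj->ref); a thread
 * holding only an RCU-protected pointer must instead use
 * refcount_inc_not_zero(&obj->ref) and check the result.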
112 | */ 113 | void refcount_inc(refcount_t *r) 114 | { 115 | WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); 116 | } 117 | EXPORT_SYMBOL_GPL(refcount_inc); 118 | 119 | bool refcount_sub_and_test(unsigned int i, refcount_t *r) 120 | { 121 | unsigned int old, new, val = atomic_read(&r->refs); 122 | 123 | for (;;) { 124 | if (unlikely(val == UINT_MAX)) 125 | return false; 126 | 127 | new = val - i; 128 | if (new > val) { 129 | WARN(new > val, "refcount_t: underflow; use-after-free.\n"); 130 | return false; 131 | } 132 | 133 | old = atomic_cmpxchg(&r->refs, val, new); 134 | if (old == val) 135 | break; 136 | 137 | val = old; 138 | } 139 | 140 | return !new; 141 | } 142 | EXPORT_SYMBOL_GPL(refcount_sub_and_test); 143 | 144 | /* 145 | * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to 146 | * decrement when saturated at UINT_MAX. 147 | * 148 | * Provides release memory ordering, such that prior loads and stores are done 149 | * before, and provides a control dependency such that free() must come after. 150 | * See the comment on top. 151 | */ 152 | bool refcount_dec_and_test(refcount_t *r) 153 | { 154 | return refcount_sub_and_test(1, r); 155 | } 156 | EXPORT_SYMBOL_GPL(refcount_dec_and_test); 157 | 158 | /* 159 | * Similar to atomic_dec(), it will WARN on underflow and fail to decrement 160 | * when saturated at UINT_MAX. 161 | * 162 | * Provides release memory ordering, such that prior loads and stores are done 163 | * before. 164 | */ 165 | 166 | void refcount_dec(refcount_t *r) 167 | { 168 | WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); 169 | } 170 | EXPORT_SYMBOL_GPL(refcount_dec); 171 | 172 | /* 173 | * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the 174 | * success thereof. 175 | * 176 | * Like all decrement operations, it provides release memory order and provides 177 | * a control dependency. 178 | * 179 | * It can be used like a try-delete operator; this explicit case is provided 180 | * and not cmpxchg in generic, because that would allow implementing unsafe 181 | * operations. 182 | */ 183 | bool refcount_dec_if_one(refcount_t *r) 184 | { 185 | return atomic_cmpxchg(&r->refs, 1, 0) == 1; 186 | } 187 | EXPORT_SYMBOL_GPL(refcount_dec_if_one); 188 | 189 | /* 190 | * No atomic_t counterpart, it decrements unless the value is 1, in which case 191 | * it will return false. 192 | * 193 | * Was often done like: atomic_add_unless(&var, -1, 1) 194 | */ 195 | bool refcount_dec_not_one(refcount_t *r) 196 | { 197 | unsigned int old, new, val = atomic_read(&r->refs); 198 | 199 | for (;;) { 200 | if (unlikely(val == UINT_MAX)) 201 | return true; 202 | 203 | if (val == 1) 204 | return false; 205 | 206 | new = val - 1; 207 | if (new > val) { 208 | WARN(new > val, "refcount_t: underflow; use-after-free.\n"); 209 | return true; 210 | } 211 | 212 | old = atomic_cmpxchg(&r->refs, val, new); 213 | if (old == val) 214 | break; 215 | 216 | val = old; 217 | } 218 | 219 | return true; 220 | } 221 | EXPORT_SYMBOL_GPL(refcount_dec_not_one); 222 | 223 | /* 224 | * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail 225 | * to decrement when saturated at UINT_MAX. 226 | * 227 | * Provides release memory ordering, such that prior loads and stores are done 228 | * before, and provides a control dependency such that free() must come after. 229 | * See the comment on top. 
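 *
 * A minimal sketch, assuming an object kept on a mutex-protected list:
 *
 *	if (refcount_dec_and_mutex_lock(&obj->ref, &list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&list_lock);
 *		kfree(obj);
 *	}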
230 | */ 231 | bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) 232 | { 233 | if (refcount_dec_not_one(r)) 234 | return false; 235 | 236 | mutex_lock(lock); 237 | if (!refcount_dec_and_test(r)) { 238 | mutex_unlock(lock); 239 | return false; 240 | } 241 | 242 | return true; 243 | } 244 | EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock); 245 | 246 | /* 247 | * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to 248 | * decrement when saturated at UINT_MAX. 249 | * 250 | * Provides release memory ordering, such that prior loads and stores are done 251 | * before, and provides a control dependency such that free() must come after. 252 | * See the comment on top. 253 | */ 254 | bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) 255 | { 256 | if (refcount_dec_not_one(r)) 257 | return false; 258 | 259 | spin_lock(lock); 260 | if (!refcount_dec_and_test(r)) { 261 | spin_unlock(lock); 262 | return false; 263 | } 264 | 265 | return true; 266 | } 267 | EXPORT_SYMBOL_GPL(refcount_dec_and_lock); 268 | #endif 269 | 270 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/scripts/65-kvm.rules: -------------------------------------------------------------------------------- 1 | KERNEL=="kvm", MODE="0660", GROUP="kvm" 2 | ACTION=="add|change", SUBSYSTEM=="dmi", KERNEL=="id", RUN+="/bin/sh -c 'grep -q vmx /proc/cpuinfo && /sbin/modprobe kvm-intel; grep -q svm /proc/cpuinfo && /sbin/modprobe kvm-amd'" 3 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/scripts/make-release: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | usage() { 4 | echo "usage: $0 [--upload] [--formal] commit [name]" 5 | exit 1 6 | } 7 | 8 | [[ -f ~/.kvmreleaserc ]] && . ~/.kvmreleaserc 9 | 10 | upload= 11 | formal= 12 | 13 | [[ -z "$TMP" ]] && TMP="/tmp" 14 | tmpdir="$TMP/kvm-kmod-make-release.$$" 15 | while [[ "$1" = -* ]]; do 16 | opt="$1" 17 | shift 18 | case "$opt" in 19 | --upload) 20 | upload="yes" 21 | formal="yes" 22 | ;; 23 | --formal) 24 | formal="yes" 25 | ;; 26 | *) 27 | usage 28 | ;; 29 | esac 30 | done 31 | 32 | commit="$1" 33 | name="$2" 34 | 35 | if [[ -z "$commit" ]]; then 36 | usage 37 | fi 38 | 39 | if [[ -z "$name" ]]; then 40 | name="$commit" 41 | fi 42 | 43 | if [[ -n "$formal" ]] && ! git tag -v $name 2>/dev/null >/dev/null; then 44 | git tag -as -m "" $name $commit 45 | fi 46 | 47 | releasedir=~/kvm-kmod-release 48 | rm -rf "$releasedir" 49 | mkdir "$releasedir" 50 | tarball="$releasedir/$name.tar.bz2" 51 | 52 | cd "$(dirname "$0")"/.. 53 | srcdir=`pwd` 54 | LINUX="$(readlink -f "linux")" 55 | 56 | kvm_git="$(readlink -f .git)" 57 | linux_git="$(readlink -f "$LINUX/.git")" 58 | 59 | mkdir -p "$tmpdir/$name" 60 | mkdir -p "$tmpdir/$name/linux" 61 | 62 | files=("virt/kvm" "arch/x86" "include" "scripts" "Makefile" 63 | ) 64 | 65 | index="$tmpdir/index" 66 | 67 | rm -f "$index" 68 | GIT_INDEX_FILE="$index" git --git-dir="$kvm_git" read-tree "$commit" 69 | GIT_INDEX_FILE="$index" git --git-dir="$kvm_git" --work-tree="$tmpdir/$name" checkout "$commit" . 
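# The "linux" entry of the superproject tree is a gitlink; "git ls-tree"
# prints "<mode> <type> <hash>\tlinux", so element 2 of the array below is
# the submodule commit whose files get exported.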
70 | lcommit=($(git --git-dir="$kvm_git" ls-tree "$commit" linux)) 71 | lcommit="${lcommit[2]}" 72 | rm -f "$index" 73 | GIT_INDEX_FILE="$index" git --git-dir="$linux_git" read-tree "$lcommit" 74 | GIT_INDEX_FILE="$index" git --git-dir="$linux_git" \ 75 | --work-tree="$tmpdir/$name/linux" \ 76 | checkout "$lcommit" "${files[@]}" 77 | 78 | cd "$tmpdir/$name" 79 | 80 | if [[ -z "$formal" ]]; then 81 | version="kvm-devel" 82 | else 83 | version="$name" 84 | fi 85 | 86 | ./configure --force 87 | make sync KVM_VERSION="$version" 88 | make distclean 89 | 90 | rm -rf "$tmpdir/$name/linux" 91 | 92 | if [[ -n "$formal" ]]; then 93 | echo "$name" > "$tmpdir/$name/KVM_VERSION" 94 | fi 95 | 96 | tar cjf "$tarball" -C "$tmpdir" "$name" 97 | 98 | rm -rf "$tmpdir" 99 | 100 | if [[ -n "$upload" ]]; then 101 | cp "$srcdir/readme-changelog.txt" "$releasedir" 102 | scp -r "$releasedir" kiszka,kvm@frs.sourceforge.net:/home/frs/project/kvm/kvm-kmod/${name/kvm-kmod-/} 103 | fi 104 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/srcu.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Sleepable Read-Copy Update mechanism for mutual exclusion. 3 | * 4 | * This program is free software; you can redistribute it and/or modify 5 | * it under the terms of the GNU General Public License as published by 6 | * the Free Software Foundation; either version 2 of the License, or 7 | * (at your option) any later version. 8 | * 9 | * This program is distributed in the hope that it will be useful, 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | * GNU General Public License for more details. 13 | * 14 | * You should have received a copy of the GNU General Public License 15 | * along with this program; if not, write to the Free Software 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 | * 18 | * Copyright (C) IBM Corporation, 2006 19 | * 20 | * Author: Paul McKenney 21 | * 22 | * For detailed explanation of Read-Copy Update mechanism see - 23 | * Documentation/RCU/ *.txt 24 | * 25 | */ 26 | 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | 38 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) && defined(CONFIG_SMP) 39 | 40 | /* 41 | * srcu_readers_active_idx -- returns approximate number of readers 42 | * active on the specified rank of per-CPU counters. 43 | */ 44 | 45 | static int srcu_readers_active_idx(struct srcu_struct *sp, int idx) 46 | { 47 | int cpu; 48 | int sum; 49 | 50 | sum = 0; 51 | for_each_possible_cpu(cpu) 52 | sum += per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]; 53 | return sum; 54 | } 55 | 56 | /* 57 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). 58 | */ 59 | static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void)) 60 | { 61 | int idx; 62 | 63 | idx = sp->completed; 64 | mutex_lock(&sp->mutex); 65 | 66 | /* 67 | * Check to see if someone else did the work for us while we were 68 | * waiting to acquire the lock. We need -two- advances of 69 | * the counter, not just one. If there was but one, we might have 70 | * shown up -after- our helper's first synchronize_sched(), thus 71 | * having failed to prevent CPU-reordering races with concurrent 72 | * srcu_read_unlock()s on other CPUs (see comment below). 
So we 73 | * either (1) wait for two or (2) supply the second ourselves. 74 | */ 75 | 76 | if ((sp->completed - idx) >= 2) { 77 | mutex_unlock(&sp->mutex); 78 | return; 79 | } 80 | 81 | sync_func(); /* Force memory barrier on all CPUs. */ 82 | 83 | /* 84 | * The preceding synchronize_sched() ensures that any CPU that 85 | * sees the new value of sp->completed will also see any preceding 86 | * changes to data structures made by this CPU. This prevents 87 | * some other CPU from reordering the accesses in its SRCU 88 | * read-side critical section to precede the corresponding 89 | * srcu_read_lock() -- ensuring that such references will in 90 | * fact be protected. 91 | * 92 | * So it is now safe to do the flip. 93 | */ 94 | 95 | idx = sp->completed & 0x1; 96 | sp->completed++; 97 | 98 | sync_func(); /* Force memory barrier on all CPUs. */ 99 | 100 | /* 101 | * At this point, because of the preceding synchronize_sched(), 102 | * all srcu_read_lock() calls using the old counters have completed. 103 | * Their corresponding critical sections might well be still 104 | * executing, but the srcu_read_lock() primitives themselves 105 | * will have finished executing. 106 | */ 107 | 108 | while (srcu_readers_active_idx(sp, idx)) 109 | schedule_timeout_interruptible(1); 110 | 111 | sync_func(); /* Force memory barrier on all CPUs. */ 112 | 113 | /* 114 | * The preceding synchronize_sched() forces all srcu_read_unlock() 115 | * primitives that were executing concurrently with the preceding 116 | * for_each_possible_cpu() loop to have completed by this point. 117 | * More importantly, it also forces the corresponding SRCU read-side 118 | * critical sections to have also completed, and the corresponding 119 | * references to SRCU-protected data items to be dropped. 120 | * 121 | * Note: 122 | * 123 | * Despite what you might think at first glance, the 124 | * preceding synchronize_sched() -must- be within the 125 | * critical section ended by the following mutex_unlock(). 126 | * Otherwise, a task taking the early exit can race 127 | * with a srcu_read_unlock(), which might have executed 128 | * just before the preceding srcu_readers_active() check, 129 | * and whose CPU might have reordered the srcu_read_unlock() 130 | * with the preceding critical section. In this case, there 131 | * is nothing preventing the synchronize_sched() task that is 132 | * taking the early exit from freeing a data structure that 133 | * is still being referenced (out of order) by the task 134 | * doing the srcu_read_unlock(). 135 | * 136 | * Alternatively, the comparison with "2" on the early exit 137 | * could be changed to "3", but this increases synchronize_srcu() 138 | * latency for bulk loads. So the current code is preferred. 
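 *
 * In short, the sequence below is: barrier, flip sp->completed,
 * barrier, busy-wait until the readers on the old index drain,
 * barrier, unlock.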
139 | */ 140 | 141 | mutex_unlock(&sp->mutex); 142 | } 143 | 144 | struct sync_req { 145 | struct list_head list; 146 | bool pending; 147 | bool success; 148 | struct completion done; 149 | }; 150 | 151 | static DEFINE_PER_CPU(struct sync_req, sync_req); 152 | static DEFINE_PER_CPU(struct task_struct *, sync_thread); 153 | static DEFINE_MUTEX(rcu_sched_expedited_mutex); 154 | 155 | static long synchronize_sched_expedited_count; 156 | 157 | static int kvm_rcu_sync_thread(void *data) 158 | { 159 | int badcpu; 160 | int cpu = (long)data; 161 | struct sync_req *req = &per_cpu(sync_req, cpu); 162 | 163 | set_current_state(TASK_INTERRUPTIBLE); 164 | while (!kthread_should_stop()) { 165 | if (!req->pending) { 166 | schedule(); 167 | set_current_state(TASK_INTERRUPTIBLE); 168 | continue; 169 | } 170 | req->pending = false; 171 | 172 | preempt_disable(); 173 | badcpu = smp_processor_id(); 174 | if (likely(cpu == badcpu)) { 175 | req->success = true; 176 | } else { 177 | req->success = false; 178 | WARN_ONCE(1, "kvm_rcu_sync_thread() on CPU %d, " 179 | "expected %d\n", badcpu, cpu); 180 | } 181 | preempt_enable(); 182 | 183 | complete(&req->done); 184 | } 185 | __set_current_state(TASK_RUNNING); 186 | 187 | return 0; 188 | } 189 | 190 | static void kvm_synchronize_sched_expedited(void) 191 | { 192 | int cpu; 193 | bool need_full_sync = 0; 194 | struct sync_req *req; 195 | long snap; 196 | int trycount = 0; 197 | 198 | smp_mb(); /* ensure prior mod happens before capturing snap. */ 199 | snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1; 200 | get_online_cpus(); 201 | while (!mutex_trylock(&rcu_sched_expedited_mutex)) { 202 | put_online_cpus(); 203 | if (trycount++ < 10) 204 | udelay(trycount * num_online_cpus()); 205 | else { 206 | synchronize_sched(); 207 | return; 208 | } 209 | if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) { 210 | smp_mb(); /* ensure test happens before caller kfree */ 211 | return; 212 | } 213 | get_online_cpus(); 214 | } 215 | for_each_online_cpu(cpu) { 216 | req = &per_cpu(sync_req, cpu); 217 | init_completion(&req->done); 218 | smp_wmb(); 219 | req->pending = true; 220 | wake_up_process(per_cpu(sync_thread, cpu)); 221 | } 222 | for_each_online_cpu(cpu) { 223 | req = &per_cpu(sync_req, cpu); 224 | wait_for_completion(&req->done); 225 | if (unlikely(!req->success)) 226 | need_full_sync = 1; 227 | } 228 | synchronize_sched_expedited_count++; 229 | mutex_unlock(&rcu_sched_expedited_mutex); 230 | put_online_cpus(); 231 | if (need_full_sync) 232 | synchronize_sched(); 233 | } 234 | 235 | /** 236 | * synchronize_srcu_expedited - like synchronize_srcu, but less patient 237 | * @sp: srcu_struct with which to synchronize. 238 | * 239 | * Flip the completed counter, and wait for the old count to drain to zero. 240 | * As with classic RCU, the updater must use some separate means of 241 | * synchronizing concurrent updates. Can block; must be called from 242 | * process context. 243 | * 244 | * Note that it is illegal to call synchronize_srcu_expedited() 245 | * from the corresponding SRCU read-side critical section; doing so 246 | * will result in deadlock. However, it is perfectly legal to call 247 | * synchronize_srcu_expedited() on one srcu_struct from some other 248 | * srcu_struct's read-side critical section. 
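 *
 * A sketch of the usual updater pattern this backs: publish the new
 * copy with rcu_assign_pointer(), call
 * kvm_synchronize_srcu_expedited() on the protecting srcu_struct, and
 * only then free the old copy.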
249 | */ 250 | void kvm_synchronize_srcu_expedited(struct srcu_struct *sp) 251 | { 252 | __synchronize_srcu(sp, kvm_synchronize_sched_expedited); 253 | } 254 | EXPORT_SYMBOL_GPL(kvm_synchronize_srcu_expedited); 255 | 256 | static struct sched_param sync_thread_param = { 257 | .sched_priority = MAX_RT_PRIO-1 258 | }; 259 | 260 | #ifdef CONFIG_HOTPLUG_CPU 261 | #include 262 | 263 | static int cpu_callback(struct notifier_block *nfb, unsigned long action, 264 | void *hcpu) 265 | { 266 | int hotcpu = (unsigned long)hcpu; 267 | struct task_struct *p; 268 | 269 | switch (action) { 270 | case CPU_UP_PREPARE: 271 | case CPU_UP_PREPARE_FROZEN: 272 | p = kthread_create(kvm_rcu_sync_thread, hcpu, 273 | "kvmsrcusync/%d", hotcpu); 274 | if (IS_ERR(p)) { 275 | printk(KERN_ERR "kvm: kvmsrcusync for %d failed\n", 276 | hotcpu); 277 | return NOTIFY_BAD; 278 | } 279 | kthread_bind(p, hotcpu); 280 | sched_setscheduler(p, SCHED_FIFO, &sync_thread_param); 281 | per_cpu(sync_thread, hotcpu) = p; 282 | break; 283 | case CPU_ONLINE: 284 | case CPU_ONLINE_FROZEN: 285 | wake_up_process(per_cpu(sync_thread, hotcpu)); 286 | break; 287 | case CPU_UP_CANCELED: 288 | case CPU_UP_CANCELED_FROZEN: 289 | if (!per_cpu(sync_thread, hotcpu)) 290 | break; 291 | /* Unbind so it can run. Fall thru. */ 292 | kthread_bind(per_cpu(sync_thread, hotcpu), 293 | cpumask_any(cpu_online_mask)); 294 | case CPU_DEAD: 295 | case CPU_DEAD_FROZEN: 296 | p = per_cpu(sync_thread, hotcpu); 297 | per_cpu(sync_thread, hotcpu) = NULL; 298 | kthread_stop(p); 299 | break; 300 | } 301 | return NOTIFY_OK; 302 | } 303 | 304 | static struct notifier_block cpu_nfb = { 305 | .notifier_call = cpu_callback 306 | }; 307 | #endif /* CONFIG_HOTPLUG_CPU */ 308 | 309 | int kvm_init_srcu(void) 310 | { 311 | struct task_struct *p; 312 | int cpu; 313 | int err; 314 | 315 | get_online_cpus(); 316 | for_each_online_cpu(cpu) { 317 | p = kthread_create(kvm_rcu_sync_thread, (void *)(long)cpu, 318 | "kvmsrcusync/%d", cpu); 319 | if (IS_ERR(p)) 320 | goto error_out; 321 | 322 | kthread_bind(p, cpu); 323 | sched_setscheduler(p, SCHED_FIFO, &sync_thread_param); 324 | per_cpu(sync_thread, cpu) = p; 325 | wake_up_process(p); 326 | } 327 | #ifdef CONFIG_HOTPLUG_CPU 328 | register_cpu_notifier(&cpu_nfb); 329 | #endif /* CONFIG_HOTPLUG_CPU */ 330 | put_online_cpus(); 331 | 332 | return 0; 333 | 334 | error_out: 335 | put_online_cpus(); 336 | printk(KERN_ERR "kvm: kvmsrcusync for %d failed\n", cpu); 337 | err = PTR_ERR(p); 338 | kvm_exit_srcu(); 339 | return err; 340 | } 341 | 342 | void kvm_exit_srcu(void) 343 | { 344 | int cpu; 345 | 346 | #ifdef CONFIG_HOTPLUG_CPU 347 | unregister_cpu_notifier(&cpu_nfb); 348 | #endif /* CONFIG_HOTPLUG_CPU */ 349 | for_each_online_cpu(cpu) 350 | if (per_cpu(sync_thread, cpu)) 351 | kthread_stop(per_cpu(sync_thread, cpu)); 352 | } 353 | 354 | #else 355 | 356 | int kvm_init_srcu(void) 357 | { 358 | return 0; 359 | } 360 | 361 | void kvm_exit_srcu(void) 362 | { 363 | } 364 | 365 | #endif 366 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/swait.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) 5 | void __init_swait_queue_head(struct swait_queue_head *q, const char *name, 6 | struct lock_class_key *key) 7 | { 8 | raw_spin_lock_init(&q->lock); 9 | lockdep_set_class_and_name(&q->lock, key, name); 10 | INIT_LIST_HEAD(&q->task_list); 11 | } 12 | EXPORT_SYMBOL(__init_swait_queue_head); 13 | 14
| /* 15 | * The thing about the wake_up_state() return value; I think we can ignore it. 16 | * 17 | * If for some reason it would return 0, that means the previously waiting 18 | * task is already running, so it will observe condition true (or has already). 19 | */ 20 | void swake_up_locked(struct swait_queue_head *q) 21 | { 22 | struct swait_queue *curr; 23 | 24 | if (list_empty(&q->task_list)) 25 | return; 26 | 27 | curr = list_first_entry(&q->task_list, typeof(*curr), task_list); 28 | wake_up_process(curr->task); 29 | list_del_init(&curr->task_list); 30 | } 31 | EXPORT_SYMBOL(swake_up_locked); 32 | 33 | void swake_up(struct swait_queue_head *q) 34 | { 35 | unsigned long flags; 36 | 37 | if (!swait_active(q)) 38 | return; 39 | 40 | raw_spin_lock_irqsave(&q->lock, flags); 41 | swake_up_locked(q); 42 | raw_spin_unlock_irqrestore(&q->lock, flags); 43 | } 44 | EXPORT_SYMBOL(swake_up); 45 | 46 | /* 47 | * Does not allow usage from IRQ disabled, since we must be able to 48 | * release IRQs to guarantee bounded hold time. 49 | */ 50 | void swake_up_all(struct swait_queue_head *q) 51 | { 52 | struct swait_queue *curr; 53 | LIST_HEAD(tmp); 54 | 55 | if (!swait_active(q)) 56 | return; 57 | 58 | raw_spin_lock_irq(&q->lock); 59 | list_splice_init(&q->task_list, &tmp); 60 | while (!list_empty(&tmp)) { 61 | curr = list_first_entry(&tmp, typeof(*curr), task_list); 62 | 63 | wake_up_process(curr->task); 64 | list_del_init(&curr->task_list); 65 | 66 | if (list_empty(&tmp)) 67 | break; 68 | 69 | raw_spin_unlock_irq(&q->lock); 70 | raw_spin_lock_irq(&q->lock); 71 | } 72 | raw_spin_unlock_irq(&q->lock); 73 | } 74 | EXPORT_SYMBOL(swake_up_all); 75 | 76 | void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) 77 | { 78 | wait->task = current; 79 | if (list_empty(&wait->task_list)) 80 | list_add(&wait->task_list, &q->task_list); 81 | } 82 | 83 | void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state) 84 | { 85 | unsigned long flags; 86 | 87 | raw_spin_lock_irqsave(&q->lock, flags); 88 | __prepare_to_swait(q, wait); 89 | set_current_state(state); 90 | raw_spin_unlock_irqrestore(&q->lock, flags); 91 | } 92 | EXPORT_SYMBOL(prepare_to_swait); 93 | 94 | long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state) 95 | { 96 | if (signal_pending_state(state, current)) 97 | return -ERESTARTSYS; 98 | 99 | prepare_to_swait(q, wait, state); 100 | 101 | return 0; 102 | } 103 | EXPORT_SYMBOL(prepare_to_swait_event); 104 | 105 | void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait) 106 | { 107 | __set_current_state(TASK_RUNNING); 108 | if (!list_empty(&wait->task_list)) 109 | list_del_init(&wait->task_list); 110 | } 111 | 112 | void finish_swait(struct swait_queue_head *q, struct swait_queue *wait) 113 | { 114 | unsigned long flags; 115 | 116 | __set_current_state(TASK_RUNNING); 117 | 118 | if (!list_empty_careful(&wait->task_list)) { 119 | raw_spin_lock_irqsave(&q->lock, flags); 120 | list_del_init(&wait->task_list); 121 | raw_spin_unlock_irqrestore(&q->lock, flags); 122 | } 123 | } 124 | EXPORT_SYMBOL(finish_swait); 125 | #endif 126 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/sync: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import sys, os, glob, os.path, shutil, re 4 | from optparse import OptionParser 5 | 6 | glob = glob.glob 7 | 8 | def cmd(c): 9 | if os.system(c) != 0: 10 | raise 
Exception('command execution failed: ' + c) 11 | 12 | parser = OptionParser(usage = 'usage: %prog [-v VERSION][-l LINUX]') 13 | parser.add_option('-v', action = 'store', type = 'string', dest = 'version', \ 14 | help = 'kvm-kmod release version', default = 'kvm-devel') 15 | parser.add_option('-l', action = 'store', type = 'string', dest = 'linux', \ 16 | help = 'Linux kernel tree to sync from', \ 17 | default = 'linux') 18 | parser.set_defaults() 19 | (options, args) = parser.parse_args() 20 | version = options.version 21 | linux = options.linux 22 | 23 | _re_cache = {} 24 | 25 | def re_cache(regexp): 26 | global _re_cache 27 | if regexp not in _re_cache: 28 | _re_cache[regexp] = re.compile(regexp) 29 | return _re_cache[regexp] 30 | 31 | def hack_content(fname, data): 32 | compat_apis = str.split( 33 | 'desc_struct ldttss_desc64 desc_ptr ' 34 | 'hrtimer_add_expires_ns hrtimer_get_expires ' 35 | 'hrtimer_get_expires_ns hrtimer_start_expires ' 36 | 'hrtimer_expires_remaining smp_send_reschedule ' 37 | 'on_each_cpu relay_open anon_inode_getfd ' 38 | 'do_machine_check get_desc_base get_desc_limit ' 39 | 'vma_kernel_pagesize native_read_tsc user_return_notifier ' 40 | 'user_return_notifier_register user_return_notifier_unregister ' 41 | 'synchronize_srcu synchronize_srcu_expedited ' 42 | 'monotonic_to_bootbased ' 43 | 'check_tsc_unstable native_store_idt invalidate_tss_limit ' 44 | 'set_desc_base set_desc_limit pvclock_vcpu_time_info tboot_enabled ' 45 | 'native_write_msr_safe mmget mmgrab ' 46 | 'fpregs_state xregs_state fxregs_state fpregs_active ' 47 | 'xstate_size cpu_has_xsave cpu_has_xsaves ' 48 | '__get_user_pages_fast set_64bit siginfo_t use_mm get_user_pages ' 49 | 'unuse_mm request_threaded_irq init_fpu __this_cpu_read ' 50 | '__this_cpu_write sigset_from_compat get_user_pages_remote ' 51 | 'get_user_pages_unlocked ' 52 | 'sched_info_on x86_pmu_capability perf_get_x86_pmu_capability ' 53 | 'cpuid10_eax cpuid10_edx kern_path inode_permission path_put ' 54 | 'kmap_atomic kunmap_atomic timespec_sub ' 55 | 'static_key static_key_deferred static_key_false ' 56 | 'static_key_slow_inc static_key_slow_dec static_key_slow_dec_deferred ' 57 | 'jump_label_rate_limit fixup_user_fault __pvclock_read_cycles' 58 | ) 59 | modparam_vars = str.split( 60 | 'dbg enable_vpid flexpriority_enabled enable_ept ' 61 | 'enable_unrestricted_guest emulate_invalid_guest_state ' 62 | 'vmm_exclusive fasteoi nested ignore_msrs enable_ept_ad_bits ' 63 | 'enable_apicv_reg_vid enable_apicv enable_shadow_vmcs ' 64 | ) 65 | eventfd_file = fname == 'eventfd.c' 66 | result = [] 67 | pr_fmt = '' 68 | inside_block_state = {} 69 | finish_endif = False 70 | 71 | def sub(regexp, repl, str): 72 | return re_cache(regexp).sub(repl, str) 73 | 74 | for line in data.splitlines(): 75 | def match(regexp): 76 | return re_cache(regexp).search(line) 77 | 78 | def get_block_key(start_regexp, end_regexp): 79 | key = start_regexp + '\n' + end_regexp 80 | if not inside_block_state.has_key(key): 81 | inside_block_state[key] = False 82 | return key 83 | 84 | def inside_block(start_regexp, end_regexp): 85 | key = get_block_key(start_regexp, end_regexp) 86 | if inside_block_state[key]: 87 | if match(end_regexp): 88 | inside_block_state[key] = False 89 | elif match(start_regexp): 90 | inside_block_state[key] = True 91 | return False 92 | return inside_block_state[key] 93 | 94 | def match_block_end(start_regexp, end_regexp): 95 | key = get_block_key(start_regexp, end_regexp) 96 | if inside_block_state[key]: 97 | if match(end_regexp): 98 | 
inside_block_state[key] = False 99 | return True 100 | elif match(start_regexp): 101 | inside_block_state[key] = True 102 | return False 103 | 104 | def w(line, result = result): 105 | result.append(line) 106 | 107 | orig = line 108 | f = line.split() 109 | if match(r'^#define pr_fmt'): 110 | pr_fmt = sub(r'#define pr_fmt\([^)]*\) ("[^"]*").*', r'\1', line) + ' ' 111 | line = '' 112 | line = sub(r'pr_debug\(([^),]*)', r'pr_debug(' + pr_fmt + r'\1', line) 113 | if fname == 'kvm_main.c' and inside_block(r'^int kvm_init\(', r'^}'): 114 | if match(r'r = kvm_arch_init\(opaque\);'): 115 | w('\tr = kvm_init_srcu();') 116 | w('\tif (r)') 117 | w('\t\treturn r;\n') 118 | w('\tpreempt_notifier_sys_init();\n') 119 | elif match(r'return 0;'): 120 | w('\tprintk("loaded kvm module (%s)\\n");\n' % (version,)) 121 | w('\tkvm_clock_warn_suspend_bug();\n') 122 | elif match(r'return r;'): 123 | w('\tpreempt_notifier_sys_exit();') 124 | w('\tkvm_exit_srcu();') 125 | if match_block_end(r'^void kvm_exit\(void\)$', r'^}'): 126 | w('\tpreempt_notifier_sys_exit();') 127 | w('\tkvm_exit_srcu();') 128 | if match(r'MODULE_AUTHOR'): 129 | w('MODULE_INFO(version, "%s");' % (version,)) 130 | if match(r'_stat_get|lost_records_get'): 131 | if match(r'DEFINE_SIMPLE_ATTRIBUTE'): 132 | name = sub(r',', '', f[1]) 133 | w('MAKE_SIMPLE_ATTRIBUTE_GETTER(' + name + ')') 134 | else: 135 | line = sub(r'\b(\w+_stat_get|lost_records_get)', r'__\1', line) 136 | line = sub(r'linux/mm_types\.h', 'linux/mm.h', line) 137 | line = sub(r'\b__user\b', ' ', line) 138 | if match(r'#include '): 139 | line = '' 140 | if match(r'#include '): 141 | line = '' 142 | if match(r'#include '): 143 | line = '#include ' 144 | if match(r'#define TRACE_INCLUDE_PATH arch\/x86\/kvm'): 145 | line = '#define TRACE_INCLUDE_PATH .' 
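        # Compat cast: older kernels declared task_cputime_adjusted() with
        # cputime_t pointers (the type was dropped around v4.11), so the u64
        # locals are cast to keep the old prototype compiling.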
146 | if match(r'task_cputime_adjusted\(current, &utime, &stime\);'): 147 | line = 'task_cputime_adjusted(current, (cputime_t*)&utime, (cputime_t*)&stime);' 148 | if match(r'\t\.change_pte.*kvm_mmu_notifier_change_pte,'): 149 | line = '#ifdef MMU_NOTIFIER_HAS_CHANGE_PTE\n' + line + '\n#endif' 150 | if match(r'static void kvm_mmu_notifier_change_pte'): 151 | line = sub(r'static ', '', line) 152 | line = '#ifdef MMU_NOTIFIER_HAS_CHANGE_PTE\n' + 'static\n' + '#endif\n' + line 153 | if match(r'case KVM_CAP_SYNC_MMU'): 154 | line = '#ifdef CONFIG_MMU_NOTIFIER\n' + line + '\n#endif' 155 | for ident in compat_apis: 156 | line = sub(r'\b' + ident + r'\b', 'kvm_' + ident, line) 157 | if fname == 'svm.c': 158 | line = sub(r'\bboot_cpu_has\b', 'kvm_boot_cpu_has', line) 159 | if match(r'kvm_.*_fops\.owner = module;'): 160 | line = 'IF_ANON_INODES_DOES_REFCOUNTS(' + line + ')' 161 | if not match(r'#include'): 162 | line = sub(r'\blapic\n', 'l_apic', line) 163 | if inside_block(r'struct pt_regs regs = {', r'};'): 164 | if match(r'\.cs'): 165 | line = sub(r'cs', r'kvm_pt_regs_cs', line) 166 | if match(r'\.flags'): 167 | line = sub(r'flags', r'kvm_pt_regs_flags', line) 168 | line = sub(r'boot_cpu_data.x86_phys_bits', 'kvm_x86_phys_bits', line) 169 | if match(r'^static const struct vm_operations_struct kvm_'): 170 | line = sub(r' const ', ' ', line) 171 | if eventfd_file and line == '#include ': 172 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)') 173 | if line == 'int kvm_irqfd_init(void);': 174 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)') 175 | if line == 'void kvm_irqfd_exit(void);': 176 | w('#else') 177 | w('static inline int kvm_irqfd_init(void) { return 0; }') 178 | w('static inline void kvm_irqfd_exit(void) { }') 179 | w('#endif') 180 | if match(r'^\tcase KVM_IOEVENTFD: {'): 181 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)') 182 | if match(r'#include '): 183 | line = '' 184 | if match(r'struct fpu '): 185 | line = sub(r'struct fpu ', 'struct kvm_compat_fpu ', line) 186 | if match(r'^static int mmu_shrink\('): 187 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)') 188 | w(line) 189 | w('#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)') 190 | w('static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)') 191 | w('#else') 192 | w('static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)') 193 | line = '#endif' 194 | if line == '\tint nr_to_scan = sc->nr_to_scan;': 195 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)') 196 | w(line) 197 | line = '#endif' 198 | if line == '\tkvm_x86_ops = ops;': 199 | w('\tkvm_xstate_size_init();\n') 200 | if match_block_end(r'case CPU_STARTING:', r'hardware_enable'): 201 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)') 202 | w('\t\thardware_enable();') 203 | w('#else') 204 | w('\t\tsmp_call_function_single(cpu, (void (*)(void *))hardware_enable, NULL, 1);') 205 | line = '#endif' 206 | if match(r'case CPU_STARTING:'): 207 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)') 208 | w('\tcase CPU_STARTING:') 209 | w('#else') 210 | w('\tcase CPU_ONLINE:') 211 | line = '#endif' 212 | if line == '\tsend_sig_info(SIGBUS, &info, tsk);': 213 | line = '\tsend_sig_info(SIGBUS, (siginfo_t *)&info, tsk);' 214 | if line == '\tcase KVM_CAP_ASYNC_PF:': 215 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)') 216 | w(line) 217 | line = '#endif' 218 | if match(r'\treturn hva_to_pfn\(kvm, addr, atomic, async'): 219 | w('#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)') 220 | w('\tasync = NULL;') 221 | w('#endif') 222 | if 
line == '\t.llseek\t\t= noop_llseek,': 223 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)') 224 | w(line) 225 | line = '#endif' 226 | if line == '\t.test_young\t\t= kvm_mmu_notifier_test_young,': 227 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)') 228 | w(line) 229 | line = '#endif' 230 | if match(r'static int kvm_mmu_notifier_test_young'): 231 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)') 232 | if line == '\t.clear_young\t\t= kvm_mmu_notifier_clear_young,': 233 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)') 234 | w(line) 235 | line = '#endif' 236 | if match(r'static int kvm_mmu_notifier_clear_young'): 237 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)') 238 | if line == 'static int kvm_suspend(void)': 239 | line = 'int kvm_suspend(void)' 240 | if line == 'static void kvm_resume(void)': 241 | line = 'void kvm_resume(void)' 242 | if line == '\t\t\tcpuid_mask(&entry->ebx, 9);': 243 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)') 244 | w(line) 245 | w('#else') 246 | w('\t\t\tentry->ebx = 0;'); 247 | w('#endif') 248 | if match(r'#ifdef CONFIG_KVM_MMU_AUDIT'): 249 | w('#undef CONFIG_KVM_MMU_AUDIT') 250 | if line == 'static struct kvm_arch_event_perf_mapping {': 251 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)') 252 | for modparam_var in modparam_vars: 253 | if match(r'^static bool.* ' + modparam_var + '[ ;]'): 254 | w('#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)') 255 | w(sub(r' bool ', r' int ', line)) 256 | w('#else') 257 | w(line) 258 | line = '#endif' 259 | if line == '\tinit_kthread_worker(&pit->worker);': 260 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)') 261 | if match(r'kthread_stop\(.*->worker_task\)'): 262 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)') 263 | w(line) 264 | w('#else') 265 | line = sub(r'kthread_stop', 'destroy_workqueue', line) 266 | w(sub(r'worker_task', 'worker', line)) 267 | line = '#endif' 268 | if inside_block(r'^void kvm_kvfree\(', r'^}') and line == '\t\tvfree(addr);': 269 | line = '\t\tvfree((void *)addr);' 270 | if fname == 'include/linux/kvm_host.h' and line == '#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)': 271 | line ='#if defined(KVM_ARCH_WANT_MMU_NOTIFIER)' 272 | if line == '\t\tkfree_rcu(old, rcu);': 273 | w('#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)') 274 | w('\t\tcall_rcu(&old->rcu, kvm_apic_map_kfree_callback);') 275 | w('#else') 276 | w(line) 277 | line = '#endif' 278 | if line == '\tif (!kvm_fpregs_active() && !vmx->vcpu.guest_fpu_loaded)': 279 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)') 280 | if line == '\t\tstts();': 281 | w(line) 282 | w('#else') 283 | w('\tif (kvm_fpregs_active())') 284 | w('\t\tclts();') 285 | line = '#endif' 286 | if line == '\tvmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */': 287 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)') 288 | w(line) 289 | w('#else') 290 | w('\tvmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */') 291 | line = '#endif' 292 | if (inside_block('#ifdef CONFIG_X86_64', '#endif') and \ 293 | (line == 'struct pvclock_gtod_data {' or \ 294 | line == 'static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);' or \ 295 | line == 'static u64 read_tsc(void)' or \ 296 | line == 'static void pvclock_gtod_update_fn(struct work_struct *work)' or \ 297 | line == '\tbool vcpus_matched;' or \ 298 | match(r'^\tpvclock_gtod_(un)*register_notifier'))) or \ 299 | ((inside_block('static void kvm_gen_update_masterclock', '}') or \ 300 | inside_block('static void 
pvclock_update_vm_gtod_copy', '}')) and \ 301 | line == '#ifdef CONFIG_X86_64'): 302 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)') 303 | finish_endif = True 304 | if line == '#endif' and finish_endif: 305 | w('#endif') 306 | finish_endif = False 307 | if match(r'tkr_mono\.'): 308 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)') 309 | w(line) 310 | line = sub(r'tkr_mono\.base', 'tkr.base_mono', line) 311 | line = sub(r'tkr_mono', 'tkr', line) 312 | w('#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)') 313 | w(line) 314 | w('#else') 315 | if match(r'tkr\.cycle_last') or match(r'tkr\.mask'): 316 | w(sub(r'tkr\.', 'clock->', line)) 317 | elif match(r'tkr\.base_mono'): 318 | w('\tboot_ns = kvm_get_boot_base_ns(tk);') 319 | else: 320 | w(sub(r'tkr\.', '', line)) 321 | line = '#endif' 322 | if match_block_end('^static int kvm_mmu_notifier_clear_flush_young', '^}'): 323 | w(line) 324 | w('#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)') 325 | w('static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,') 326 | w(' struct mm_struct *mm,') 327 | w(' unsigned long hva)') 328 | w('{') 329 | w('\treturn __kvm_mmu_notifier_clear_flush_young(mn, mm, hva, hva+1);') 330 | w('}') 331 | line = '#endif' 332 | if match(r'^static int kvm_mmu_notifier_clear_flush_young'): 333 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0)') 334 | w(line) 335 | w('#else') 336 | w(sub('kvm_', '__kvm_', line)) 337 | line = '#endif' 338 | if match('POSTED_INTR_NESTED_VECTOR'): 339 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)') 340 | w(line) 341 | w('#else') 342 | w(sub('POSTED_INTR_NESTED_VECTOR', 'POSTED_INTR_VECTOR', line)) 343 | line = '#endif' 344 | if line == '#ifdef CONFIG_KEXEC': 345 | line = '#if defined(CONFIG_KEXEC) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)' 346 | if line == '\tif (!cpu_has_vmx_apicv())': 347 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)') 348 | w(line) 349 | w('#else') 350 | w('if (1)') 351 | line = '#endif' 352 | if line == '#if IS_ENABLED(CONFIG_KVM)': 353 | line = '#if 1' 354 | if match(r'^\t+apic->send_IPI_mask\(get_cpu_mask\(vcpu->cpu\),$'): 355 | w('#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)') 356 | w('\t\t;') 357 | w('#else') 358 | if match(r'^\t+POSTED_INTR_VECTOR\);$'): 359 | w(line) 360 | line = '#endif' 361 | if match(r'^\tret = kvm_x86_ops->hardware_enable\(\);$'): 362 | w('\tkvm_do_store_gdt();'); 363 | line = sub(r'this_cpu_ptr\(&cpu_tss\)', 'kvm_read_tr_base()', line) 364 | if line == '\tif (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))': 365 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)') 366 | w(line) 367 | w('#else') 368 | w('\tif (!kvm_cpu_has_amd_erratum(kvm_amd_erratum_383))') 369 | line = '#endif' 370 | 371 | if line == 'static struct shrinker mmu_shrinker = {': 372 | w('#if LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)') 373 | w('static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)') 374 | w('{') 375 | w('\tif (sc->nr_to_scan > 0)') 376 | w('\t\tmmu_shrink_scan(shrink, sc);') 377 | w('\treturn mmu_shrink_count(shrink, sc);') 378 | w('}') 379 | w('#endif') 380 | w('') 381 | w('static struct shrinker mmu_shrinker = {') 382 | w('#if LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)') 383 | w('\t.shrink = mmu_shrink,') 384 | line = '#else' 385 | if eventfd_file and line == '#include ': 386 | w('#include ') 387 | 388 | # Large FPU changes in 4.2 389 | if match(r'\b(__copy_kernel_to_fpregs|fpstate_init)\b'): 390 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)') 391 | w(line) 392 | w('#else') 393 | # 
.state change from union pointer to union. The older APIs 394 | # took a struct fpu. 395 | line = sub(r'\b__copy_kernel_to_fpregs\b', 'kvm_fpu_restore_checking', line) 396 | line = sub(r'\bfpstate_init\b', 'kvm_fpstate_init', line) 397 | line = sub(r'\b.state\b', '', line) 398 | w(line) 399 | line = '#endif' 400 | if match(r'\bguest_fpu\.state\.|\bxsave\b.*\bheader\b'): 401 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)') 402 | w(line) 403 | line = sub(r'\bguest_fpu\.state\.', 'guest_fpu.state->', line) 404 | line = sub(r'\bheader\.xfeatures\b', 'xsave_hdr.xstate_bv', line) 405 | if match(r'\bheader\.xcomp_bv\b'): 406 | w('#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)') 407 | line = sub(r'\bheader\.xcomp_bv\b', 'xsave_hdr.xcomp_bv', line) 408 | w(line) 409 | # Will be under if (cpu_has_xsaves), which is always 0. Just 410 | # replace with something that compiles, the C statement might 411 | # span more than one line. 412 | line = 'WARN(1, "this should never happen"),\n' + \ 413 | sub(r'xcomp_bv', 'xstate_bv', line) 414 | w('#else') 415 | w(line) 416 | line = '#endif' 417 | if line == '#include ': 418 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)') 419 | w(line) 420 | w('#else') 421 | w('#include ') 422 | w('#include ') 423 | w('#include ') 424 | line = '#endif' 425 | if match(r'^#include '): 426 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)') 427 | w(line) 428 | w('#else') 429 | w('#include ') 430 | w('#include ') 431 | w('#include ') 432 | line = '#endif' 433 | if match(r'^#include '): 434 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)') 435 | w(line) 436 | w('#else') 437 | w('#include ') 438 | line = '#endif' 439 | if match(r'(remaining|target_expiration) = 0;$'): 440 | line = sub('0', 'ktime_set(0, 0)', line) 441 | if match(r'\(struct vm_fault \*vmf\)'): 442 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)') 443 | w(line) 444 | w('#else') 445 | w(sub(r'\(.*\)', '(struct vm_area_struct *vma, struct vm_fault *vmf)', line)) 446 | line = '#endif' 447 | if match(r'vmf->vma'): 448 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)') 449 | w('\tstruct vm_area_struct *vma = vmf->vma;') 450 | w('#endif') 451 | line = sub('vmf->vma', 'vma', line); 452 | 453 | w(line) 454 | 455 | if line == '\t.scan_objects = mmu_shrink_scan,': 456 | w('#endif') 457 | if line == '\tkvm_arch_vcpu_put(vcpu);': 458 | w('\tkvm_fire_urn();') 459 | if match_block_end(r'(\tcase KVM_IOEVENTFD: {)', r'^\t}'): 460 | w('#endif') 461 | if line == '\t\tprintk(KERN_ERR "kvm: disabled by bios\\n");': 462 | w('#ifndef KVM_TBOOT_ENABLED_WORKS\n') 463 | w('\t\tprintk(KERN_ERR "kvm: if TXT is enabled in the BIOS, disable it\\n");') 464 | w('#endif') 465 | if match_block_end(r'^static int kvm_mmu_notifier_test_young', r'^}'): 466 | w('#endif') 467 | if match_block_end(r'^static int kvm_mmu_notifier_clear_young', r'^}'): 468 | w('#endif') 469 | if fname == 'cpuid.c' and line == '#include ': 470 | w('#include ') 471 | w('#include ') 472 | if match_block_end(r'void kvm_handle_pmu_event\(struct kvm_vcpu \*vcpu\)', r'^}'): 473 | w('#else') 474 | w('#include "pmu-stubs.c"') 475 | w('#endif') 476 | if match_block_end(r'init_kthread_worker\(&pit->worker\);', r'if \(IS_ERR\(pit->worker_task\)\)'): 477 | w('#else') 478 | w('\t(void)pid_nr;') 479 | w('\tpit->worker = create_singlethread_workqueue("kvm-pit");') 480 | w('\tif (!pit->worker)') 481 | w('#endif') 482 | if line == 'module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);': 483 | w('\n#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)') 484 | 
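# kfree_rcu() was only introduced in Linux 3.0, so for older kernels the
# emitted lines below open-code it as an explicit RCU callback that
# recovers the enclosing kvm_apic_map from its rcu_head and kfree()s it
# (the hand-rolled equivalent of kfree_rcu(map, rcu)):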
w('static void kvm_apic_map_kfree_callback(struct rcu_head *p)') 485 | w('{') 486 | w('\tkfree(container_of(p, struct kvm_apic_map, rcu));') 487 | w('}') 488 | w('#endif') 489 | if line == '#define _ASM_X86_KVM_HOST_H': 490 | w('#include ') 491 | 492 | if match(r'kvm_x86_ops->vcpu_free'): 493 | w('#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)') 494 | w('\tkvm_fpu_free(&vcpu->arch.guest_fpu);') 495 | w('#endif') 496 | 497 | if eventfd_file: 498 | result.append('#else\n' 499 | 'void kvm_eventfd_init(struct kvm *kvm) { }\n' 500 | 'void kvm_irqfd_release(struct kvm *kvm) { }\n' 501 | 'void kvm_irq_routing_update(struct kvm *kvm) { }\n' 502 | '#endif') 503 | data = str.join('', [line + '\n' for line in result]) 504 | return data 505 | 506 | def hack_file(T, fname): 507 | fullname = T + '/' + fname 508 | data = file(fullname).read() 509 | data = hack_content(fname, data) 510 | file(fullname, 'w').write(data) 511 | 512 | def unifdef(fname): 513 | data = file('unifdef.h').read() + file(fname).read() 514 | file(fname, 'w').write(data) 515 | 516 | hack_files = { 517 | 'x86': str.split('kvm_main.c mmu.c vmx.c svm.c x86.c x86.h irq.h lapic.c' 518 | ' lapic.h i8254.c eventfd.c emulate.c async_pf.c' 519 | ' cpuid.c pmu.c paging_tmpl.h hyperv.c trace.h'), 520 | } 521 | 522 | def mkdir(dir): 523 | if not os.path.exists(dir): 524 | os.makedirs(dir) 525 | 526 | def cp(src, dst): 527 | mkdir(os.path.dirname(dst)) 528 | file(dst, 'w').write(file(src).read()) 529 | 530 | def copy_if_changed(src, dst): 531 | for dir, subdirs, files in os.walk(src): 532 | ndir = dst + '/' + dir[len(src)+1:] 533 | mkdir(ndir) 534 | for fname in files: 535 | old = ndir + '/' + fname 536 | new = dir + '/' + fname 537 | try: 538 | if file(old).read() != file(new).read(): 539 | raise Exception('different.') 540 | except: 541 | cp(new, old) 542 | 543 | def rmtree(path): 544 | if os.path.exists(path): 545 | shutil.rmtree(path) 546 | 547 | def header_sync(arch): 548 | T = 'header' 549 | rmtree(T) 550 | for file in (glob('%(linux)s/include/linux/kvm*.h' % { 'linux': linux }) + 551 | glob('%(linux)s/include/linux/vfio.h' % { 'linux': linux }) + 552 | glob('%(linux)s/include/linux/frame.h' % { 'linux': linux }) + 553 | glob('%(linux)s/include/uapi/linux/kvm*.h' % { 'linux': linux })): 554 | out = ('%(T)s/include/linux/%(name)s' 555 | % { 'T': T, 'name': os.path.basename(file) }) 556 | cp(file, out) 557 | unifdef(out) 558 | for file in (glob('%(linux)s/include/kvm/*.h' % { 'linux': linux })): 559 | out = ('%(T)s/include/kvm/%(name)s' 560 | % { 'T': T, 'name': os.path.basename(file) }) 561 | cp(file, out) 562 | unifdef(out) 563 | for file in glob(('%(linux)s/include/trace/events/kvm*.h' 564 | % { 'linux': linux })): 565 | out = ('%(T)s/include/trace/events/%(name)s' 566 | % { 'T': T, 'name': os.path.basename(file) }) 567 | cp(file, out) 568 | unifdef(out) 569 | arch_headers = ( 570 | [x 571 | for dir in ['%(linux)s/arch/%(arch)s/include/asm/kvm*.h', 572 | '%(linux)s/arch/%(arch)s/include/asm/vmx.h', 573 | '%(linux)s/arch/%(arch)s/include/asm/svm.h', 574 | '%(linux)s/arch/%(arch)s/include/asm/virtext*.h'] 575 | for x in glob(dir % { 'arch': arch, 'linux': linux }) 576 | ]) 577 | for file in arch_headers: 578 | out = ('%(T)s/include/asm-%(arch)s/%(name)s' 579 | % { 'T': T, 'name': os.path.basename(file), 'arch': arch }) 580 | cp(file, out) 581 | unifdef(out) 582 | arch_uapi_headers = ( 583 | [x 584 | for dir in ['%(linux)s/arch/%(arch)s/include/uapi/asm/kvm*.h', 585 | '%(linux)s/arch/%(arch)s/include/uapi/asm/vmx.h', 586 | 
'%(linux)s/arch/%(arch)s/include/uapi/asm/svm.h', 587 | '%(linux)s/arch/%(arch)s/include/uapi/asm/msr-index.h', 588 | '%(linux)s/arch/%(arch)s/include/uapi/asm/hyperv.h'] 589 | for x in glob(dir % { 'arch': arch, 'linux': linux }) 590 | ]) 591 | for file in arch_uapi_headers: 592 | out = ('%(T)s/include/uapi/asm-%(arch)s/%(name)s' 593 | % { 'T': T, 'name': os.path.basename(file), 'arch': arch }) 594 | cp(file, out) 595 | unifdef(out) 596 | hack_file(T, 'include/linux/kvm_host.h') 597 | hack_file(T, 'include/asm-%(arch)s/kvm_host.h' % { 'arch': arch }) 598 | if arch == 'x86': 599 | hack_file(T, 'include/asm-x86/kvm_emulate.h') 600 | copy_if_changed(T, '.') 601 | rmtree(T) 602 | 603 | def source_sync(arch): 604 | T = 'source' 605 | rmtree(T) 606 | sources = [file 607 | for pattern in ['%(linux)s/arch/%(arch)s/kvm/*.[cSh]', 608 | '%(linux)s/virt/kvm/*.[cSh]'] 609 | for file in glob(pattern % { 'linux': linux, 'arch': arch }) 610 | if not file.endswith('.mod.c') 611 | ] 612 | for file in sources: 613 | out = ('%(T)s/%(name)s' 614 | % { 'T': T, 'name': os.path.basename(file) }) 615 | cp(file, out) 616 | 617 | for i in glob(T + '/*.c'): 618 | unifdef(i) 619 | 620 | for i in hack_files[arch]: 621 | hack_file(T, i) 622 | 623 | copy_if_changed(T, arch) 624 | rmtree(T) 625 | 626 | for arch in ['x86']: 627 | header_sync(arch) 628 | source_sync(arch) 629 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/unifdef.h: -------------------------------------------------------------------------------- 1 | #ifndef KVM_UNIFDEF_H 2 | #define KVM_UNIFDEF_H 3 | 4 | #ifdef __i386__ 5 | #ifndef CONFIG_X86_32 6 | #define CONFIG_X86_32 1 7 | #endif 8 | #endif 9 | 10 | #ifdef __x86_64__ 11 | #ifndef CONFIG_X86_64 12 | #define CONFIG_X86_64 1 13 | #endif 14 | #endif 15 | 16 | #if defined(__i386__) || defined (__x86_64__) 17 | #ifndef CONFIG_X86 18 | #define CONFIG_X86 1 19 | #endif 20 | #endif 21 | 22 | #ifdef __PPC__ 23 | #ifndef CONFIG_PPC 24 | #define CONFIG_PPC 1 25 | #endif 26 | #endif 27 | 28 | #ifdef __s390__ 29 | #ifndef CONFIG_S390 30 | #define CONFIG_S390 1 31 | #endif 32 | #endif 33 | 34 | #endif 35 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/x86/Kbuild: -------------------------------------------------------------------------------- 1 | obj-m := kvm.o kvm-intel.o 2 | kvm-objs := kvm_main.o x86.o mmu.o emulate.o irq.o i8259.o pmu.o \ 3 | lapic.o ioapic.o preempt.o i8254.o coalesced_mmio.o irq_comm.o \ 4 | eventfd.o compat-x86.o async_pf.o cpuid.o irqchip.o mtrr.o \ 5 | ../external-module-compat.o hyperv.o page_track.o debugfs.o 6 | ifeq ($(CONFIG_KVM_VFIO),y) 7 | kvm-objs += vfio.o 8 | endif 9 | kvm-intel-objs := vmx.o pmu_intel.o vmi.o 10 | 11 | kvm-objs += ../refcount.o ../srcu.o ../swait.o 12 | 13 | CFLAGS_kvm_main.o = -DKVM_MAIN 14 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/x86/Makefile.pre: -------------------------------------------------------------------------------- 1 | prerequisite: 2 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/x86/compat-x86.c: -------------------------------------------------------------------------------- 1 | 2 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) 3 | 4 | unsigned int kvm_xstate_size; 5 | 6 | void kvm_xstate_size_init(void) 7 | { 8 | unsigned int eax, ebx, ecx, edx; 9 | 10 | /* kvm only uses xstate_size if xsave is supported */ 11 | if (cpu_has_xsave) { 12 | 
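		/*
		 * CPUID leaf 0xD, sub-leaf 0: EBX reports the size in bytes of
		 * the XSAVE area covering the state components currently
		 * enabled in XCR0, which is what gets cached in
		 * kvm_xstate_size below.
		 */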
cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx); 13 | kvm_xstate_size = ebx; 14 | BUG_ON(kvm_xstate_size > sizeof(union kvm_thread_xstate)); 15 | } 16 | } 17 | 18 | #endif /* < 2.6.36 */ 19 | 20 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) 21 | 22 | const int kvm_amd_erratum_383[] = 23 | AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); 24 | 25 | EXPORT_SYMBOL_GPL(kvm_amd_erratum_383); 26 | 27 | #endif /* < 2.6.36 */ 28 | 29 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && defined(CONFIG_KVM_GUEST) 30 | void kvm_async_pf_task_wait(u32 token) 31 | { 32 | BUG(); 33 | } 34 | EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait); 35 | 36 | void kvm_async_pf_task_wake(u32 token) 37 | { 38 | BUG(); 39 | } 40 | EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake); 41 | 42 | u32 kvm_read_and_reset_pf_reason(void) 43 | { 44 | return 0; 45 | } 46 | EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason); 47 | #endif /* < 2.6.38 && CONFIG_KVM_GUEST */ 48 | 49 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) 50 | 51 | #ifndef SVM_CPUID_FUNC 52 | #define SVM_CPUID_FUNC 0x8000000a 53 | #endif 54 | 55 | #define SVM_FEATURE_NPT (1 << 0) 56 | #define SVM_FEATURE_LBRV (1 << 1) 57 | #define SVM_FEATURE_NRIP (1 << 3) 58 | #define SVM_FEATURE_FLUSH_ASID (1 << 6) 59 | #define SVM_FEATURE_DECODE_ASSIST (1 << 7) 60 | #define SVM_FEATURE_PAUSE_FILTER (1 << 10) 61 | 62 | bool kvm_boot_cpu_has(unsigned int bit) 63 | { 64 | static u32 svm_features; 65 | static bool initialized; 66 | 67 | if (!initialized) { 68 | svm_features = cpuid_edx(SVM_CPUID_FUNC); 69 | initialized = true; 70 | } 71 | switch (bit) { 72 | case X86_FEATURE_NPT: 73 | return svm_features & SVM_FEATURE_NPT; 74 | case X86_FEATURE_LBRV: 75 | return svm_features & SVM_FEATURE_LBRV; 76 | case X86_FEATURE_NRIPS: 77 | return svm_features & SVM_FEATURE_NRIP; 78 | case X86_FEATURE_FLUSHBYASID: 79 | return svm_features & SVM_FEATURE_FLUSH_ASID; 80 | case X86_FEATURE_DECODEASSISTS: 81 | return svm_features & SVM_FEATURE_DECODE_ASSIST; 82 | case X86_FEATURE_PAUSEFILTER: 83 | return svm_features & SVM_FEATURE_PAUSE_FILTER; 84 | default: 85 | return boot_cpu_has(bit); 86 | } 87 | } 88 | EXPORT_SYMBOL_GPL(kvm_boot_cpu_has); 89 | #endif /* < 2.6.37 */ 90 | 91 | #include 92 | 93 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0) 94 | DEFINE_PER_CPU(struct kvm_desc_ptr, kvm_host_gdt); 95 | EXPORT_SYMBOL_GPL(kvm_host_gdt); 96 | 97 | static inline void kvm_native_load_gdt(const struct kvm_desc_ptr *dtr) 98 | { 99 | asm volatile("lgdt %0"::"m" (*dtr)); 100 | } 101 | 102 | static inline void kvm_native_store_gdt(struct kvm_desc_ptr *dtr) 103 | { 104 | asm volatile("sgdt %0":"=m" (*dtr)); 105 | } 106 | 107 | void load_fixmap_gdt(int processor_id) 108 | { 109 | kvm_native_load_gdt(this_cpu_ptr(&kvm_host_gdt)); 110 | } 111 | EXPORT_SYMBOL_GPL(load_fixmap_gdt); 112 | 113 | void kvm_do_store_gdt(void) 114 | { 115 | kvm_native_store_gdt(this_cpu_ptr(&kvm_host_gdt)); 116 | } 117 | 118 | unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; 119 | EXPORT_SYMBOL_GPL(mxcsr_feature_mask); 120 | #endif 121 | 122 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/x86/pmu-stubs.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Kernel-based Virtual Machine -- Performance Monitoring Unit support 3 | * 4 | * Compatibility stubs 5 | * 6 | * Copyright 2012 Siemens AG. 7 | * 8 | * Authors: 9 | * Jan Kiszka 10 | * 11 | * This work is licensed under the terms of the GNU GPL, version 2. 
See 12 | * the COPYING file in the top-level directory. 13 | * 14 | */ 15 | 16 | bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr) 17 | { 18 | return false; 19 | } 20 | 21 | int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data) 22 | { 23 | return 1; 24 | } 25 | 26 | int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) 27 | { 28 | BUG(); 29 | return -1; 30 | } 31 | 32 | int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 33 | { 34 | BUG(); 35 | return -1; 36 | } 37 | 38 | void kvm_deliver_pmi(struct kvm_vcpu *vcpu) 39 | { 40 | BUG(); 41 | } 42 | 43 | void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu) 44 | { 45 | } 46 | 47 | void kvm_pmu_init(struct kvm_vcpu *vcpu) 48 | { 49 | struct kvm_pmu *pmu = &vcpu->arch.pmu; 50 | 51 | memset(pmu, 0, sizeof(*pmu)); 52 | } 53 | 54 | void kvm_pmu_reset(struct kvm_vcpu *vcpu) 55 | { 56 | } 57 | 58 | void kvm_pmu_destroy(struct kvm_vcpu *vcpu) 59 | { 60 | } 61 | 62 | void kvm_handle_pmu_event(struct kvm_vcpu *vcpu) 63 | { 64 | BUG(); 65 | } 66 | -------------------------------------------------------------------------------- /kvm-rvmi-kmod/x86/preempt.c: -------------------------------------------------------------------------------- 1 | 2 | #ifdef CONFIG_PREEMPT_NOTIFIERS_COMPAT 3 | 4 | #include 5 | #include 6 | 7 | static DEFINE_SPINLOCK(pn_lock); 8 | static LIST_HEAD(pn_list); 9 | 10 | #define dprintk(fmt) do { \ 11 | if (0) \ 12 | printk("%s (%d/%d): " fmt, __FUNCTION__, \ 13 | current->pid, raw_smp_processor_id()); \ 14 | } while (0) 15 | 16 | static void preempt_enable_sched_out_notifiers(void) 17 | { 18 | asm volatile ("mov %0, %%db0" : : "r"(schedule)); 19 | asm volatile ("mov %0, %%db7" : : "r"(0x701ul)); 20 | current->thread.kvm_compat_debugreg(7) = 0ul; 21 | #ifdef TIF_DEBUG 22 | clear_tsk_thread_flag(current, TIF_DEBUG); 23 | #endif 24 | } 25 | 26 | static void preempt_enable_sched_in_notifiers(void * addr) 27 | { 28 | asm volatile ("mov %0, %%db0" : : "r"(addr)); 29 | asm volatile ("mov %0, %%db7" : : "r"(0x701ul)); 30 | current->thread.kvm_compat_debugreg(0) = (unsigned long) addr; 31 | current->thread.kvm_compat_debugreg(7) = 0x701ul; 32 | #ifdef TIF_DEBUG 33 | set_tsk_thread_flag(current, TIF_DEBUG); 34 | #endif 35 | } 36 | 37 | static void __preempt_disable_notifiers(void) 38 | { 39 | asm volatile ("mov %0, %%db7" : : "r"(0ul)); 40 | } 41 | 42 | static void preempt_disable_notifiers(void) 43 | { 44 | __preempt_disable_notifiers(); 45 | current->thread.kvm_compat_debugreg(7) = 0ul; 46 | #ifdef TIF_DEBUG 47 | clear_tsk_thread_flag(current, TIF_DEBUG); 48 | #endif 49 | } 50 | 51 | static void fastcall __attribute__((used)) preempt_notifier_trigger(void *** ip) 52 | { 53 | struct preempt_notifier *pn; 54 | int cpu = raw_smp_processor_id(); 55 | int found = 0; 56 | 57 | dprintk(" - in\n"); 58 | //dump_stack(); 59 | spin_lock(&pn_lock); 60 | list_for_each_entry(pn, &pn_list, link) 61 | if (pn->tsk == current) { 62 | found = 1; 63 | break; 64 | } 65 | spin_unlock(&pn_lock); 66 | 67 | if (found) { 68 | if ((void *) *ip != schedule) { 69 | dprintk("sched_in\n"); 70 | preempt_enable_sched_out_notifiers(); 71 | 72 | preempt_disable(); 73 | local_irq_enable(); 74 | pn->ops->sched_in(pn, cpu); 75 | local_irq_disable(); 76 | preempt_enable_no_resched(); 77 | } else { 78 | void * sched_in_addr; 79 | dprintk("sched_out\n"); 80 | #ifdef CONFIG_X86_64 81 | sched_in_addr = **(ip+3); 82 | #else 83 | /* no special debug stack switch on x86 */ 84 | sched_in_addr = (void *) *(ip+3); 85 | #endif 86 | 
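			/*
			 * sched_in_addr is the address the switched-out task will
			 * resume at, pulled off the interrupted stack frame above;
			 * arming the db0/db7 breakpoint on it makes the hooked #DB
			 * handler fire again when this task is scheduled back in.
			 */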
preempt_enable_sched_in_notifiers(sched_in_addr); 87 | 88 | preempt_disable(); 89 | local_irq_enable(); 90 | pn->ops->sched_out(pn, NULL); 91 | local_irq_disable(); 92 | preempt_enable_no_resched(); 93 | } 94 | } else 95 | __preempt_disable_notifiers(); 96 | dprintk(" - out\n"); 97 | } 98 | 99 | unsigned long orig_int1_handler; 100 | 101 | #ifdef CONFIG_X86_64 102 | 103 | #define SAVE_REGS \ 104 | "push %rax; push %rbx; push %rcx; push %rdx; " \ 105 | "push %rsi; push %rdi; push %rbp; " \ 106 | "push %r8; push %r9; push %r10; push %r11; " \ 107 | "push %r12; push %r13; push %r14; push %r15" 108 | 109 | #define RESTORE_REGS \ 110 | "pop %r15; pop %r14; pop %r13; pop %r12; " \ 111 | "pop %r11; pop %r10; pop %r9; pop %r8; " \ 112 | "pop %rbp; pop %rdi; pop %rsi; " \ 113 | "pop %rdx; pop %rcx; pop %rbx; pop %rax " 114 | 115 | #define TMP "%rax" 116 | 117 | #else 118 | 119 | #define SAVE_REGS "pusha" 120 | #define RESTORE_REGS "popa" 121 | #define TMP "%eax" 122 | 123 | #endif 124 | 125 | asm ("pn_int1_handler: \n\t" 126 | "push " TMP " \n\t" 127 | "mov %db7, " TMP " \n\t" 128 | "cmp $0x701, " TMP " \n\t" 129 | "pop " TMP " \n\t" 130 | "jnz .Lnotme \n\t" 131 | "push " TMP " \n\t" 132 | "mov %db6, " TMP " \n\t" 133 | "test $0x1, " TMP " \n\t" 134 | "pop " TMP " \n\t" 135 | "jz .Lnotme \n\t" 136 | SAVE_REGS "\n\t" 137 | #ifdef CONFIG_X86_64 138 | "leaq 120(%rsp),%rdi\n\t" 139 | #else 140 | "leal 32(%esp),%eax\n\t" 141 | #endif 142 | "call preempt_notifier_trigger \n\t" 143 | RESTORE_REGS "\n\t" 144 | #ifdef CONFIG_X86_64 145 | "orq $0x10000, 16(%rsp) \n\t" 146 | "iretq \n\t" 147 | #else 148 | "orl $0x10000, 8(%esp) \n\t" 149 | "iret \n\t" 150 | #endif 151 | ".Lnotme: \n\t" 152 | #ifdef CONFIG_X86_64 153 | "jmpq *orig_int1_handler\n\t" 154 | #else 155 | "jmpl *orig_int1_handler\n\t" 156 | #endif 157 | ); 158 | 159 | void preempt_notifier_register(struct preempt_notifier *notifier) 160 | { 161 | unsigned long flags; 162 | 163 | dprintk(" - in\n"); 164 | spin_lock_irqsave(&pn_lock, flags); 165 | preempt_enable_sched_out_notifiers(); 166 | notifier->tsk = current; 167 | list_add(¬ifier->link, &pn_list); 168 | spin_unlock_irqrestore(&pn_lock, flags); 169 | dprintk(" - out\n"); 170 | } 171 | 172 | void preempt_notifier_unregister(struct preempt_notifier *notifier) 173 | { 174 | unsigned long flags; 175 | 176 | dprintk(" - in\n"); 177 | spin_lock_irqsave(&pn_lock, flags); 178 | list_del(¬ifier->link); 179 | spin_unlock_irqrestore(&pn_lock, flags); 180 | preempt_disable_notifiers(); 181 | dprintk(" - out\n"); 182 | } 183 | 184 | struct intr_gate { 185 | u16 offset0; 186 | u16 segment; 187 | u16 junk; 188 | u16 offset1; 189 | #ifdef CONFIG_X86_64 190 | u32 offset2; 191 | u32 blah; 192 | #endif 193 | } __attribute__((packed)); 194 | 195 | struct idt_desc { 196 | u16 limit; 197 | struct intr_gate *gates; 198 | } __attribute__((packed)); 199 | 200 | static struct intr_gate orig_int1_gate; 201 | 202 | void pn_int1_handler(void); 203 | 204 | void preempt_notifier_sys_init(void) 205 | { 206 | struct idt_desc idt_desc; 207 | struct intr_gate *int1_gate; 208 | 209 | printk("kvm: emulating preempt notifiers;" 210 | " do not benchmark on this machine\n"); 211 | dprintk("\n"); 212 | asm ("sidt %0" : "=m"(idt_desc)); 213 | int1_gate = &idt_desc.gates[1]; 214 | orig_int1_gate = *int1_gate; 215 | orig_int1_handler = int1_gate->offset0 216 | | ((u32)int1_gate->offset1 << 16); 217 | #ifdef CONFIG_X86_64 218 | orig_int1_handler |= (u64)int1_gate->offset2 << 32; 219 | #endif 220 | int1_gate->offset0 = (unsigned 
long)pn_int1_handler; 221 | int1_gate->offset1 = (unsigned long)pn_int1_handler >> 16; 222 | #ifdef CONFIG_X86_64 223 | int1_gate->offset2 = (unsigned long)pn_int1_handler >> 32; 224 | #endif 225 | } 226 | 227 | static void do_disable(void *blah) 228 | { 229 | #ifdef TIF_DEBUG 230 | if (!test_tsk_thread_flag(current, TIF_DEBUG)) 231 | #else 232 | if (!current->thread.kvm_compat_debugreg(7)) 233 | #endif 234 | __preempt_disable_notifiers(); 235 | } 236 | 237 | void preempt_notifier_sys_exit(void) 238 | { 239 | struct idt_desc idt_desc; 240 | 241 | dprintk("\n"); 242 | kvm_on_each_cpu(do_disable, NULL, 1); 243 | asm ("sidt %0" : "=m"(idt_desc)); 244 | idt_desc.gates[1] = orig_int1_gate; 245 | } 246 | 247 | #endif 248 | -------------------------------------------------------------------------------- /python/qmp/qmp/__init__.py: -------------------------------------------------------------------------------- 1 | from qmp import * 2 | -------------------------------------------------------------------------------- /python/qmp/qmp/qmp.py: -------------------------------------------------------------------------------- 1 | # QEMU Monitor Protocol Python class 2 | # 3 | # Copyright (C) 2009, 2010 Red Hat Inc. 4 | # 5 | # Authors: 6 | # Luiz Capitulino <lcapitulino@redhat.com> 7 | # 8 | # This work is licensed under the terms of the GNU GPL, version 2. See 9 | # the COPYING file in the top-level directory. 10 | 11 | import json 12 | import errno 13 | import socket 14 | import sys 15 | 16 | class QMPError(Exception): 17 | pass 18 | 19 | class QMPConnectError(QMPError): 20 | pass 21 | 22 | class QMPCapabilitiesError(QMPError): 23 | pass 24 | 25 | class QMPTimeoutError(QMPError): 26 | pass 27 | 28 | class QEMUMonitorProtocol: 29 | def __init__(self, address, server=False, debug=False): 30 | """ 31 | Create a QEMUMonitorProtocol class. 32 | 33 | @param address: QEMU address, can be either a unix socket path (string) 34 | or a tuple in the form ( address, port ) for a TCP 35 | connection 36 | @param server: server mode listens on the socket (bool) 37 | @raise socket.error on socket connection errors 38 | @note No connection is established, this is done by the connect() or 39 | accept() methods 40 | """ 41 | self.__events = [] 42 | self.__address = address 43 | self._debug = debug 44 | self.__sock = self.__get_sock() 45 | if server: 46 | self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 47 | self.__sock.bind(self.__address) 48 | self.__sock.listen(1) 49 | 50 | def __get_sock(self): 51 | if isinstance(self.__address, tuple): 52 | family = socket.AF_INET 53 | else: 54 | family = socket.AF_UNIX 55 | return socket.socket(family, socket.SOCK_STREAM) 56 | 57 | def __negotiate_capabilities(self): 58 | greeting = self.__json_read() 59 | if greeting is None or not greeting.has_key('QMP'): 60 | raise QMPConnectError 61 | # Greeting seems ok, negotiate capabilities 62 | resp = self.cmd('qmp_capabilities') 63 | if "return" in resp: 64 | return greeting 65 | raise QMPCapabilitiesError 66 | 67 | def __json_read(self, only_event=False): 68 | while True: 69 | data = self.__sockfile.readline() 70 | if not data: 71 | return 72 | resp = json.loads(data) 73 | if 'event' in resp: 74 | if self._debug: 75 | print >>sys.stderr, "QMP:<<< %s" % resp 76 | self.__events.append(resp) 77 | if not only_event: 78 | continue 79 | return resp 80 | 81 | error = socket.error 82 | 83 | def __get_events(self, wait=False): 84 | """ 85 | Check for new events in the stream and cache them in __events. 
86 | 87 | @param wait (bool): block until an event is available. 88 | @param wait (float): If wait is a float, treat it as a timeout value. 89 | 90 | @raise QMPTimeoutError: If a timeout float is provided and the timeout 91 | period elapses. 92 | @raise QMPConnectError: If wait is True but no events could be retrieved 93 | or if some other error occurred. 94 | """ 95 | 96 | # Check for new events regardless and pull them into the cache: 97 | self.__sock.setblocking(0) 98 | try: 99 | self.__json_read() 100 | except socket.error as err: 101 | if err[0] == errno.EAGAIN: 102 | # No data available 103 | pass 104 | self.__sock.setblocking(1) 105 | 106 | # Wait for new events, if needed. 107 | # if wait is 0.0, this means "no wait" and is also implicitly false. 108 | if not self.__events and wait: 109 | if isinstance(wait, float): 110 | self.__sock.settimeout(wait) 111 | try: 112 | ret = self.__json_read(only_event=True) 113 | except socket.timeout: 114 | raise QMPTimeoutError("Timeout waiting for event") 115 | except: 116 | raise QMPConnectError("Error while reading from socket") 117 | if ret is None: 118 | raise QMPConnectError("Error while reading from socket") 119 | self.__sock.settimeout(None) 120 | 121 | def connect(self, negotiate=True): 122 | """ 123 | Connect to the QMP Monitor and perform capabilities negotiation. 124 | 125 | @return QMP greeting dict 126 | @raise socket.error on socket connection errors 127 | @raise QMPConnectError if the greeting is not received 128 | @raise QMPCapabilitiesError if fails to negotiate capabilities 129 | """ 130 | self.__sock.connect(self.__address) 131 | self.__sockfile = self.__sock.makefile() 132 | if negotiate: 133 | return self.__negotiate_capabilities() 134 | 135 | def accept(self): 136 | """ 137 | Await connection from QMP Monitor and perform capabilities negotiation. 138 | 139 | @return QMP greeting dict 140 | @raise socket.error on socket connection errors 141 | @raise QMPConnectError if the greeting is not received 142 | @raise QMPCapabilitiesError if fails to negotiate capabilities 143 | """ 144 | self.__sock.settimeout(15) 145 | self.__sock, _ = self.__sock.accept() 146 | self.__sockfile = self.__sock.makefile() 147 | return self.__negotiate_capabilities() 148 | 149 | def cmd_obj(self, qmp_cmd): 150 | """ 151 | Send a QMP command to the QMP Monitor. 152 | 153 | @param qmp_cmd: QMP command to be sent as a Python dict 154 | @return QMP response as a Python dict or None if the connection has 155 | been closed 156 | """ 157 | if self._debug: 158 | print >>sys.stderr, "QMP:>>> %s" % qmp_cmd 159 | try: 160 | self.__sock.sendall(json.dumps(qmp_cmd)) 161 | except socket.error as err: 162 | if err[0] == errno.EPIPE: 163 | return 164 | raise socket.error(err) 165 | resp = self.__json_read() 166 | if self._debug: 167 | print >>sys.stderr, "QMP:<<< %s" % resp 168 | return resp 169 | 170 | def cmd(self, name, args=None, id=None): 171 | """ 172 | Build a QMP command and send it to the QMP Monitor. 
173 | 174 | @param name: command name (string) 175 | @param args: command arguments (dict) 176 | @param id: command id (dict, list, string or int) 177 | """ 178 | qmp_cmd = { 'execute': name } 179 | if args: 180 | qmp_cmd['arguments'] = args 181 | if id: 182 | qmp_cmd['id'] = id 183 | return self.cmd_obj(qmp_cmd) 184 | 185 | def command(self, cmd, **kwds): 186 | ret = self.cmd(cmd, kwds) 187 | if ret.has_key('error'): 188 | raise Exception(ret['error']['desc']) 189 | return ret['return'] 190 | 191 | def pull_event(self, wait=False): 192 | """ 193 | Get and delete the first available QMP event. 194 | 195 | @param wait (bool): block until an event is available. 196 | @param wait (float): If wait is a float, treat it as a timeout value. 197 | 198 | @raise QMPTimeoutError: If a timeout float is provided and the timeout 199 | period elapses. 200 | @raise QMPConnectError: If wait is True but no events could be retrieved 201 | or if some other error occurred. 202 | 203 | @return The first available QMP event, or None. 204 | """ 205 | self.__get_events(wait) 206 | 207 | if self.__events: 208 | return self.__events.pop(0) 209 | return None 210 | 211 | def get_events(self, wait=False): 212 | """ 213 | Get a list of available QMP events. 214 | 215 | @param wait (bool): block until an event is available. 216 | @param wait (float): If wait is a float, treat it as a timeout value. 217 | 218 | @raise QMPTimeoutError: If a timeout float is provided and the timeout 219 | period elapses. 220 | @raise QMPConnectError: If wait is True but no events could be retrieved 221 | or if some other error occurred. 222 | 223 | @return The list of available QMP events. 224 | """ 225 | self.__get_events(wait) 226 | return self.__events 227 | 228 | def clear_events(self): 229 | """ 230 | Clear current list of pending events. 231 | """ 232 | self.__events = [] 233 | 234 | def close(self): 235 | self.__sock.close() 236 | self.__sockfile.close() 237 | 238 | timeout = socket.timeout 239 | 240 | def settimeout(self, timeout): 241 | self.__sock.settimeout(timeout) 242 | 243 | def get_sock_fd(self): 244 | return self.__sock.fileno() 245 | 246 | def is_scm_available(self): 247 | return self.__sock.family == socket.AF_UNIX 248 | -------------------------------------------------------------------------------- /python/qmp/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup( 4 | name = "qmp", 5 | version = "0.1", 6 | author = "Luiz Capitulino", 7 | author_email = "lcapitulino@redhat.com", 8 | description = ("QEMU Monitor Protocol Python class"), 9 | license = "GNU GPL, version 2", 10 | url = "http://www.qemu-project.org/", 11 | packages=['qmp'] 12 | ) 13 | -------------------------------------------------------------------------------- /resources/rvmi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mandiant/rvmi/86bc00338f00e3cddb507ae230726768f24e2e7f/resources/rvmi.png --------------------------------------------------------------------------------
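A minimal usage sketch for the bundled qmp package (illustrative only: the socket path /tmp/rvmi-qmp is a placeholder for whatever -qmp unix:...,server,nowait socket your rvmi-enabled QEMU exposes, and query-status is just one example command):

    from qmp import QEMUMonitorProtocol

    mon = QEMUMonitorProtocol('/tmp/rvmi-qmp')  # placeholder socket path
    mon.connect()                               # reads greeting, negotiates qmp_capabilities
    print mon.command('query-status')           # e.g. {u'status': u'running', ...}
    mon.close()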