├── LICENSE ├── README.md ├── debug ├── kvmexitreason │ ├── Makefile │ ├── README │ ├── exit-reason.h │ └── kvmexitreason.c └── kvmwrmsr │ ├── Makefile │ ├── README │ ├── apicdef.h │ ├── kvm_para.h │ ├── kvmwrmsr.c │ ├── local-msr-index.h │ ├── msr-index.h │ └── msr.h └── microbenchmark ├── apic-ipi ├── Makefile ├── README └── apic_ipi.c ├── common ├── getns.h └── rdtsc.h ├── ipi-bench ├── Makefile ├── README └── ipi_bench.c ├── msr-bench ├── Makefile ├── msr.h └── msr_bench.c ├── pio-mmio-bench ├── Makefile └── pio_mmio_bench.c └── tlb-shootdown-bench ├── Makefile └── tlb-shootdown-bench.c /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. 
To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. 
If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. 
Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. 
If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 
287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | <one line to give the program's name and a brief idea of what it does.> 294 | Copyright (C) <year> <name of author> 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 331 | 332 | <signature of Ty Coon>, 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # kvm-utils 2 | This project has two parts: debug & microbenchmark. 3 | 4 | ## debug 5 | Based on kprobes, these debug modules can be installed and removed easily. 6 | 7 | ### kvmexitreason 8 | Report detailed KVM exit reason statistics every second. 9 | 10 | ### kvmwrmsr 11 | Report detailed statistics of WRMSR-triggered KVM exits every second. 12 | 13 | ## microbenchmark 14 | Benchmark key performance metrics on virtual machines & bare metal. 15 | 16 | ### ipi-bench 17 | Benchmark single/broadcast IPI within/across NUMA node(s). 18 | 19 | ### msr-bench 20 | Benchmark wrmsr of MSR_IA32_POWER_CTL/MSR_IA32_TSCDEADLINE.
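As a rough illustration of what such a measurement looks like (this is not the repository's msr_bench.c, which may use a different mechanism), the sketch below times repeated `wrmsr` operations from user space through the msr driver. The device path `/dev/cpu/0/msr`, the loop count, and the choice of MSR_IA32_POWER_CTL (0x1fc) are assumptions made only for this example; it needs root and `modprobe msr`, and inside a KVM guest every write forces an EXIT_REASON_MSR_WRITE exit.

```c
/*
 * Hedged sketch: time wrmsr from user space via the msr driver.
 * The msr character device uses the file offset as the MSR index,
 * so pwrite(fd, &val, 8, msr) performs a wrmsr of that register.
 * We read the current MSR_IA32_POWER_CTL value and write the same
 * value back in a loop, so the write is harmless.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define MSR_IA32_POWER_CTL 0x1fc   /* assumed target MSR for illustration */
#define LOOPS 100000

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	uint64_t val, start, end;
	int fd, i;

	fd = open("/dev/cpu/0/msr", O_RDWR);
	if (fd < 0) {
		perror("open /dev/cpu/0/msr (need root and 'modprobe msr')");
		return 1;
	}

	/* fetch the current value so the benchmark writes back the same bits */
	if (pread(fd, &val, sizeof(val), MSR_IA32_POWER_CTL) != sizeof(val)) {
		perror("pread MSR_IA32_POWER_CTL");
		return 1;
	}

	start = now_ns();
	for (i = 0; i < LOOPS; i++) {
		if (pwrite(fd, &val, sizeof(val), MSR_IA32_POWER_CTL) != sizeof(val)) {
			perror("pwrite MSR_IA32_POWER_CTL");
			return 1;
		}
	}
	end = now_ns();

	printf("wrmsr IA32_POWER_CTL: %.1f ns/write\n",
	       (double)(end - start) / LOOPS);
	close(fd);
	return 0;
}
```

Running the same loop on bare metal and inside a guest exposes the per-wrmsr VM-exit overhead directly, which is the point of this benchmark.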
21 | 22 | ### pio-mmio-bench 23 | Benchmark PIO pic0(handled by kernel), keyboard(handled by QEMU), 24 | empty PIO(handled by QEMU). 25 | Benchmark MMIO vram, virtio-pci-modern. 26 | 27 | ### tlb-shootdown-bench 28 | Benchmark TLB shootdown by madvise(*addr, length, MADV_DONTNEED). 29 | -------------------------------------------------------------------------------- /debug/kvmexitreason/Makefile: -------------------------------------------------------------------------------- 1 | obj-m := kvmexitreason.o 2 | KERNELDIR := /lib/modules/$(shell uname -r)/build 3 | PWD := $(shell pwd) 4 | 5 | all: 6 | make -C $(KERNELDIR) M=$(PWD) clean 7 | make -C $(KERNELDIR) M=$(PWD) modules 8 | 9 | clean: 10 | make -C $(KERNELDIR) M=$(PWD) clean 11 | -------------------------------------------------------------------------------- /debug/kvmexitreason/README: -------------------------------------------------------------------------------- 1 | HOWTO 2 | ===== 3 | make 4 | insmod kvmexitreason.ko 5 | 6 | watch -n 1 "dmesg -c -T" 7 | -------------------------------------------------------------------------------- /debug/kvmexitreason/exit-reason.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2019 zhenwei pi pizhenwei@bytedance.com. 3 | */ 4 | #ifndef __EXIT_REASON_H__ 5 | #define __EXIT_REASON_H__ 6 | 7 | #define REASON_NUM 66 8 | 9 | static int inited = 0; 10 | static char *reasons[REASON_NUM] = {0}; 11 | 12 | void init_reasons(void) 13 | { 14 | if (inited == 1) 15 | return; 16 | else 17 | inited = 1; 18 | 19 | reasons[0]="EXIT_REASON_EXCEPTION_NMI"; 20 | reasons[1]="EXIT_REASON_EXTERNAL_INTERRUPT"; 21 | reasons[2]="EXIT_REASON_TRIPLE_FAULT"; 22 | reasons[7]="EXIT_REASON_PENDING_INTERRUPT"; 23 | reasons[8]="EXIT_REASON_NMI_WINDOW"; 24 | reasons[9]="EXIT_REASON_TASK_SWITCH"; 25 | reasons[10]="EXIT_REASON_CPUID"; 26 | reasons[12]="EXIT_REASON_HLT"; 27 | reasons[13]="EXIT_REASON_INVD"; 28 | reasons[14]="EXIT_REASON_INVLPG"; 29 | reasons[15]="EXIT_REASON_RDPMC"; 30 | reasons[16]="EXIT_REASON_RDTSC"; 31 | reasons[18]="EXIT_REASON_VMCALL"; 32 | reasons[19]="EXIT_REASON_VMCLEAR"; 33 | reasons[20]="EXIT_REASON_VMLAUNCH"; 34 | reasons[21]="EXIT_REASON_VMPTRLD"; 35 | reasons[22]="EXIT_REASON_VMPTRST"; 36 | reasons[23]="EXIT_REASON_VMREAD"; 37 | reasons[24]="EXIT_REASON_VMRESUME"; 38 | reasons[25]="EXIT_REASON_VMWRITE"; 39 | reasons[26]="EXIT_REASON_VMOFF"; 40 | reasons[27]="EXIT_REASON_VMON"; 41 | reasons[28]="EXIT_REASON_CR_ACCESS"; 42 | reasons[29]="EXIT_REASON_DR_ACCESS"; 43 | reasons[30]="EXIT_REASON_IO_INSTRUCTION"; 44 | reasons[31]="EXIT_REASON_MSR_READ"; 45 | reasons[32]="EXIT_REASON_MSR_WRITE"; 46 | reasons[33]="EXIT_REASON_INVALID_STATE"; 47 | reasons[34]="EXIT_REASON_MSR_LOAD_FAIL"; 48 | reasons[36]="EXIT_REASON_MWAIT_INSTRUCTION"; 49 | reasons[37]="EXIT_REASON_MONITOR_TRAP_FLAG"; 50 | reasons[39]="EXIT_REASON_MONITOR_INSTRUCTION"; 51 | reasons[40]="EXIT_REASON_PAUSE_INSTRUCTION"; 52 | reasons[41]="EXIT_REASON_MCE_DURING_VMENTRY"; 53 | reasons[43]="EXIT_REASON_TPR_BELOW_THRESHOLD"; 54 | reasons[44]="EXIT_REASON_APIC_ACCESS"; 55 | reasons[45]="EXIT_REASON_EOI_INDUCED"; 56 | reasons[48]="EXIT_REASON_EPT_VIOLATION"; 57 | reasons[49]="EXIT_REASON_EPT_MISCONFIG"; 58 | reasons[50]="EXIT_REASON_INVEPT"; 59 | reasons[51]="EXIT_REASON_RDTSCP"; 60 | reasons[52]="EXIT_REASON_PREEMPTION_TIMER"; 61 | reasons[53]="EXIT_REASON_INVVPID"; 62 | reasons[54]="EXIT_REASON_WBINVD"; 63 | reasons[55]="EXIT_REASON_XSETBV"; 64 | reasons[56]="EXIT_REASON_APIC_WRITE"; 65 | 
reasons[58]="EXIT_REASON_INVPCID"; 66 | reasons[62]="EXIT_REASON_PML_FULL"; 67 | reasons[63]="EXIT_REASON_XSAVES"; 68 | reasons[64]="EXIT_REASON_XRSTORS"; 69 | reasons[65]="EXIT_REASON_PCOMMIT"; 70 | } 71 | 72 | static unsigned long reasons_num[REASON_NUM] = {0}; 73 | 74 | void record_reason(int r) 75 | { 76 | if (r >= REASON_NUM) 77 | return; 78 | 79 | reasons_num[r]++; 80 | } 81 | 82 | unsigned long report_reason(int r) 83 | { 84 | if (r >= REASON_NUM) 85 | return 0; 86 | 87 | return reasons_num[r]; 88 | } 89 | 90 | void reset_reason(void) 91 | { 92 | memset(reasons_num, 0x00, sizeof(reasons_num)); 93 | } 94 | 95 | char *reason2str(int r) 96 | { 97 | if (r >= REASON_NUM) 98 | return "EXIT_REASON_OVERFLOW"; 99 | 100 | init_reasons(); 101 | if (reasons[r]) 102 | return reasons[r]; 103 | else 104 | return "EXIT_REASON_UNKNOWN"; 105 | } 106 | 107 | #endif 108 | -------------------------------------------------------------------------------- /debug/kvmexitreason/kvmexitreason.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2019 zhenwei pi pizhenwei@bytedance.com. 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "exit-reason.h" 11 | 12 | static ktime_t __ktime; 13 | static spinlock_t showing_lock; 14 | static atomic_long_t total_exit; 15 | 16 | void show_exitreason(void) 17 | { 18 | int idx; 19 | ktime_t now = ktime_get(); 20 | 21 | if (ktime_to_ns(now) - ktime_to_ns(__ktime) < 1000*1000*1000) 22 | return; 23 | 24 | if (!spin_trylock(&showing_lock)) 25 | return; 26 | 27 | pr_info("VM EXIT REASON STATISTIC\n"); 28 | pr_info("\tTOTAL EXITS : %ld\n", atomic_long_read(&total_exit)); 29 | for (idx = 0; idx < REASON_NUM; idx++) { 30 | if (report_reason(idx)) 31 | pr_info("\t%40s : %ld\n", reason2str(idx), report_reason(idx)); 32 | } 33 | 34 | atomic_long_set(&total_exit, 0); 35 | reset_reason(); 36 | __ktime = now; 37 | spin_unlock(&showing_lock); 38 | } 39 | 40 | /* 41 | * prefer to use jprobe, but 4.19 removes jprobe. 42 | * need to remove all jprobe code in the future. 
43 | */ 44 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) 45 | #define DECLEAR_PROBE_FUNC(REASON,SYMBOL) \ 46 | static int kpre_##SYMBOL (struct kprobe *p, struct pt_regs *regs) \ 47 | { record_reason(REASON);\ 48 | atomic_long_inc(&total_exit); \ 49 | show_exitreason(); \ 50 | return 0;} 51 | #else 52 | #define DECLEAR_PROBE_FUNC(REASON,SYMBOL) \ 53 | static int jp_##SYMBOL (struct kvm_vcpu *vcpu) \ 54 | { record_reason(REASON);\ 55 | atomic_long_inc(&total_exit); \ 56 | show_exitreason(); \ 57 | jprobe_return();\ 58 | return 0;} 59 | #endif 60 | 61 | DECLEAR_PROBE_FUNC(EXIT_REASON_EXCEPTION_NMI, handle_exception) 62 | DECLEAR_PROBE_FUNC(EXIT_REASON_EXTERNAL_INTERRUPT, handle_external_interrupt) 63 | DECLEAR_PROBE_FUNC(EXIT_REASON_TRIPLE_FAULT, handle_triple_fault) 64 | DECLEAR_PROBE_FUNC(EXIT_REASON_NMI_WINDOW, handle_nmi_window) 65 | DECLEAR_PROBE_FUNC(EXIT_REASON_IO_INSTRUCTION, handle_io) 66 | DECLEAR_PROBE_FUNC(EXIT_REASON_CR_ACCESS, handle_cr) 67 | DECLEAR_PROBE_FUNC(EXIT_REASON_DR_ACCESS, handle_dr) 68 | DECLEAR_PROBE_FUNC(EXIT_REASON_CPUID, handle_cpuid) 69 | DECLEAR_PROBE_FUNC(EXIT_REASON_MSR_READ, handle_rdmsr) 70 | DECLEAR_PROBE_FUNC(EXIT_REASON_MSR_WRITE, handle_wrmsr) 71 | DECLEAR_PROBE_FUNC(EXIT_REASON_PENDING_INTERRUPT, handle_interrupt_window) 72 | DECLEAR_PROBE_FUNC(EXIT_REASON_HLT, handle_halt) 73 | DECLEAR_PROBE_FUNC(EXIT_REASON_INVD, handle_invd) 74 | DECLEAR_PROBE_FUNC(EXIT_REASON_INVLPG, handle_invlpg) 75 | DECLEAR_PROBE_FUNC(EXIT_REASON_RDPMC, handle_rdpmc) 76 | DECLEAR_PROBE_FUNC(EXIT_REASON_VMCALL, handle_vmcall) 77 | DECLEAR_PROBE_FUNC(EXIT_REASON_VMCLEAR, handle_vmclear) 78 | DECLEAR_PROBE_FUNC(EXIT_REASON_VMLAUNCH, handle_vmlaunch) 79 | DECLEAR_PROBE_FUNC(EXIT_REASON_VMPTRLD, handle_vmptrld) 80 | DECLEAR_PROBE_FUNC(EXIT_REASON_VMPTRST, handle_vmptrst) 81 | DECLEAR_PROBE_FUNC(EXIT_REASON_VMREAD, handle_vmread) 82 | DECLEAR_PROBE_FUNC(EXIT_REASON_VMRESUME, handle_vmresume) 83 | DECLEAR_PROBE_FUNC(EXIT_REASON_VMWRITE, handle_vmwrite) 84 | DECLEAR_PROBE_FUNC(EXIT_REASON_VMOFF, handle_vmoff) 85 | DECLEAR_PROBE_FUNC(EXIT_REASON_VMON, handle_vmon) 86 | DECLEAR_PROBE_FUNC(EXIT_REASON_TPR_BELOW_THRESHOLD, handle_tpr_below_threshold) 87 | DECLEAR_PROBE_FUNC(EXIT_REASON_APIC_ACCESS, handle_apic_access) 88 | DECLEAR_PROBE_FUNC(EXIT_REASON_APIC_WRITE, handle_apic_write) 89 | DECLEAR_PROBE_FUNC(EXIT_REASON_EOI_INDUCED, handle_apic_eoi_induced) 90 | DECLEAR_PROBE_FUNC(EXIT_REASON_WBINVD, handle_wbinvd) 91 | DECLEAR_PROBE_FUNC(EXIT_REASON_XSETBV, handle_xsetbv) 92 | DECLEAR_PROBE_FUNC(EXIT_REASON_TASK_SWITCH, handle_task_switch) 93 | DECLEAR_PROBE_FUNC(EXIT_REASON_MCE_DURING_VMENTRY, handle_machine_check) 94 | DECLEAR_PROBE_FUNC(EXIT_REASON_EPT_VIOLATION, handle_ept_violation) 95 | DECLEAR_PROBE_FUNC(EXIT_REASON_EPT_MISCONFIG, handle_ept_misconfig) 96 | DECLEAR_PROBE_FUNC(EXIT_REASON_PAUSE_INSTRUCTION, handle_pause) 97 | DECLEAR_PROBE_FUNC(EXIT_REASON_MWAIT_INSTRUCTION, handle_mwait) 98 | DECLEAR_PROBE_FUNC(EXIT_REASON_MONITOR_TRAP_FLAG, handle_monitor_trap) 99 | DECLEAR_PROBE_FUNC(EXIT_REASON_MONITOR_INSTRUCTION, handle_monitor) 100 | DECLEAR_PROBE_FUNC(EXIT_REASON_INVEPT, handle_invept) 101 | DECLEAR_PROBE_FUNC(EXIT_REASON_INVVPID, handle_invvpid) 102 | /* 103 | EXIT_REASON_RDRAND 104 | EXIT_REASON_RDSEED 105 | */ 106 | DECLEAR_PROBE_FUNC(EXIT_REASON_XSAVES, handle_xsaves) 107 | DECLEAR_PROBE_FUNC(EXIT_REASON_XRSTORS, handle_xrstors) 108 | DECLEAR_PROBE_FUNC(EXIT_REASON_PML_FULL, handle_pml_full) 109 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) 110 | 
DECLEAR_PROBE_FUNC(EXIT_REASON_VMFUNC, handle_vmfunc) 111 | #endif 112 | DECLEAR_PROBE_FUNC(EXIT_REASON_PREEMPTION_TIMER, handle_preemption_timer) 113 | 114 | 115 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) 116 | #define DECLEAR_PROBE(REASON,SYMBOL) \ 117 | [REASON] = { \ 118 | .pre_handler = kpre_##SYMBOL, \ 119 | .symbol_name = #SYMBOL, \ 120 | } 121 | 122 | static struct kprobe vmx_handle_exit_probe[REASON_NUM] = { 123 | #else 124 | #define DECLEAR_PROBE(REASON,SYMBOL) \ 125 | [REASON] = { \ 126 | .entry = jp_##SYMBOL, \ 127 | .kp = { \ 128 | .symbol_name = #SYMBOL, \ 129 | }, \ 130 | } 131 | 132 | static struct jprobe vmx_handle_exit_probe[REASON_NUM] = { 133 | #endif 134 | 135 | DECLEAR_PROBE(EXIT_REASON_EXCEPTION_NMI, handle_exception), 136 | DECLEAR_PROBE(EXIT_REASON_EXCEPTION_NMI, handle_exception), 137 | DECLEAR_PROBE(EXIT_REASON_EXTERNAL_INTERRUPT, handle_external_interrupt), 138 | DECLEAR_PROBE(EXIT_REASON_TRIPLE_FAULT, handle_triple_fault), 139 | DECLEAR_PROBE(EXIT_REASON_NMI_WINDOW, handle_nmi_window), 140 | DECLEAR_PROBE(EXIT_REASON_IO_INSTRUCTION, handle_io), 141 | DECLEAR_PROBE(EXIT_REASON_CR_ACCESS, handle_cr), 142 | DECLEAR_PROBE(EXIT_REASON_DR_ACCESS, handle_dr), 143 | DECLEAR_PROBE(EXIT_REASON_CPUID, handle_cpuid), 144 | DECLEAR_PROBE(EXIT_REASON_MSR_READ, handle_rdmsr), 145 | DECLEAR_PROBE(EXIT_REASON_MSR_WRITE, handle_wrmsr), 146 | DECLEAR_PROBE(EXIT_REASON_PENDING_INTERRUPT, handle_interrupt_window), 147 | DECLEAR_PROBE(EXIT_REASON_HLT, handle_halt), 148 | DECLEAR_PROBE(EXIT_REASON_INVD, handle_invd), 149 | DECLEAR_PROBE(EXIT_REASON_INVLPG, handle_invlpg), 150 | DECLEAR_PROBE(EXIT_REASON_RDPMC, handle_rdpmc), 151 | DECLEAR_PROBE(EXIT_REASON_VMCALL, handle_vmcall), 152 | DECLEAR_PROBE(EXIT_REASON_VMCLEAR, handle_vmclear), 153 | DECLEAR_PROBE(EXIT_REASON_VMLAUNCH, handle_vmlaunch), 154 | DECLEAR_PROBE(EXIT_REASON_VMPTRLD, handle_vmptrld), 155 | DECLEAR_PROBE(EXIT_REASON_VMPTRST, handle_vmptrst), 156 | DECLEAR_PROBE(EXIT_REASON_VMREAD, handle_vmread), 157 | DECLEAR_PROBE(EXIT_REASON_VMRESUME, handle_vmresume), 158 | DECLEAR_PROBE(EXIT_REASON_VMWRITE, handle_vmwrite), 159 | DECLEAR_PROBE(EXIT_REASON_VMOFF, handle_vmoff), 160 | DECLEAR_PROBE(EXIT_REASON_VMON, handle_vmon), 161 | DECLEAR_PROBE(EXIT_REASON_TPR_BELOW_THRESHOLD, handle_tpr_below_threshold), 162 | DECLEAR_PROBE(EXIT_REASON_APIC_ACCESS, handle_apic_access), 163 | DECLEAR_PROBE(EXIT_REASON_APIC_WRITE, handle_apic_write), 164 | DECLEAR_PROBE(EXIT_REASON_EOI_INDUCED, handle_apic_eoi_induced), 165 | DECLEAR_PROBE(EXIT_REASON_WBINVD, handle_wbinvd), 166 | DECLEAR_PROBE(EXIT_REASON_XSETBV, handle_xsetbv), 167 | DECLEAR_PROBE(EXIT_REASON_TASK_SWITCH, handle_task_switch), 168 | DECLEAR_PROBE(EXIT_REASON_MCE_DURING_VMENTRY, handle_machine_check), 169 | DECLEAR_PROBE(EXIT_REASON_EPT_VIOLATION, handle_ept_violation), 170 | DECLEAR_PROBE(EXIT_REASON_EPT_MISCONFIG, handle_ept_misconfig), 171 | DECLEAR_PROBE(EXIT_REASON_PAUSE_INSTRUCTION, handle_pause), 172 | DECLEAR_PROBE(EXIT_REASON_MWAIT_INSTRUCTION, handle_mwait), 173 | DECLEAR_PROBE(EXIT_REASON_MONITOR_TRAP_FLAG, handle_monitor_trap), 174 | DECLEAR_PROBE(EXIT_REASON_MONITOR_INSTRUCTION, handle_monitor), 175 | DECLEAR_PROBE(EXIT_REASON_INVEPT, handle_invept), 176 | DECLEAR_PROBE(EXIT_REASON_INVVPID, handle_invvpid), 177 | /* 178 | EXIT_REASON_RDRAND 179 | EXIT_REASON_RDSEED 180 | */ 181 | DECLEAR_PROBE(EXIT_REASON_XSAVES, handle_xsaves), 182 | DECLEAR_PROBE(EXIT_REASON_XRSTORS, handle_xrstors), 183 | DECLEAR_PROBE(EXIT_REASON_PML_FULL, handle_pml_full), 184 | #if 
LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) 185 | DECLEAR_PROBE(EXIT_REASON_VMFUNC, handle_vmfunc), 186 | #endif 187 | DECLEAR_PROBE(EXIT_REASON_PREEMPTION_TIMER, handle_preemption_timer), 188 | }; 189 | 190 | #ifndef ARRAY_SIZE 191 | #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 192 | #endif 193 | 194 | void unregister_all_probes(void) 195 | { 196 | int idx; 197 | 198 | for (idx = 0; idx < ARRAY_SIZE(vmx_handle_exit_probe); idx++) { 199 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) 200 | if (!vmx_handle_exit_probe[idx].symbol_name) { 201 | pr_info("kvmexitreason : unregister_probe skip reason %d\n", idx); 202 | continue; 203 | } 204 | 205 | unregister_kprobe(&vmx_handle_exit_probe[idx]); 206 | #else 207 | if (!vmx_handle_exit_probe[idx].entry) { 208 | pr_info("kvmexitreason : unregister_jprobe skip reason %d\n", idx); 209 | continue; 210 | } 211 | 212 | unregister_jprobe(&vmx_handle_exit_probe[idx]); 213 | #endif 214 | } 215 | } 216 | 217 | static int __init probe_init(void) 218 | { 219 | int idx; 220 | int ret; 221 | 222 | for (idx = 0; idx < ARRAY_SIZE(vmx_handle_exit_probe); idx++) { 223 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) 224 | if (!vmx_handle_exit_probe[idx].symbol_name) { 225 | pr_info("kvmexitreason : register_probe skip reason %d\n", idx); 226 | continue; 227 | } 228 | 229 | ret = register_kprobe(&vmx_handle_exit_probe[idx]); 230 | if (ret < 0) { 231 | pr_err("kvmexitreason : register_kprobe %d jprobe failed : %d\n", 232 | idx, ret); 233 | unregister_all_probes(); 234 | return -1; 235 | } 236 | #else 237 | if (!vmx_handle_exit_probe[idx].entry) { 238 | pr_info("kvmexitreason : register_jprobe skip reason %d\n", idx); 239 | continue; 240 | } 241 | 242 | ret = register_jprobe(&vmx_handle_exit_probe[idx]); 243 | if (ret < 0) { 244 | pr_err("kvmexitreason : register_jprobe %d jprobe failed : %d\n", 245 | idx, ret); 246 | unregister_all_probes(); 247 | return -1; 248 | } 249 | #endif 250 | } 251 | 252 | init_reasons(); 253 | spin_lock_init(&showing_lock); 254 | atomic_long_set(&total_exit, 0); 255 | __ktime = ktime_get(); 256 | 257 | return 0; 258 | } 259 | 260 | static void __exit probe_exit(void) 261 | { 262 | unregister_all_probes(); 263 | } 264 | 265 | module_init(probe_init) 266 | module_exit(probe_exit) 267 | MODULE_LICENSE("GPL"); 268 | MODULE_AUTHOR("zhenwei pi pizhewnei@bytedance.com"); 269 | -------------------------------------------------------------------------------- /debug/kvmwrmsr/Makefile: -------------------------------------------------------------------------------- 1 | obj-m := kvmwrmsr.o 2 | KERNELDIR := /lib/modules/$(shell uname -r)/build 3 | PWD := $(shell pwd) 4 | 5 | all: 6 | make -C $(KERNELDIR) M=$(PWD) clean 7 | make -C $(KERNELDIR) M=$(PWD) modules 8 | 9 | clean: 10 | make -C $(KERNELDIR) M=$(PWD) clean 11 | -------------------------------------------------------------------------------- /debug/kvmwrmsr/README: -------------------------------------------------------------------------------- 1 | HOWTO 2 | ===== 3 | make 4 | insmod kvmwrmsr.ko 5 | 6 | watch -n 1 "dmesg -c -T" 7 | -------------------------------------------------------------------------------- /debug/kvmwrmsr/apicdef.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | #ifndef _ASM_X86_APICDEF_H 3 | #define _ASM_X86_APICDEF_H 4 | 5 | /* 6 | * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) 7 | * 8 | * Alan Cox , 1995. 
9 | * Ingo Molnar , 1999, 2000 10 | */ 11 | 12 | #define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000 13 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 14 | 15 | /* 16 | * This is the IO-APIC register space as specified 17 | * by Intel docs: 18 | */ 19 | #define IO_APIC_SLOT_SIZE 1024 20 | 21 | #define APIC_ID 0x20 22 | 23 | #define APIC_LVR 0x30 24 | #define APIC_LVR_MASK 0xFF00FF 25 | #define APIC_LVR_DIRECTED_EOI (1 << 24) 26 | #define GET_APIC_VERSION(x) ((x) & 0xFFu) 27 | #define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu) 28 | #ifdef CONFIG_X86_32 29 | # define APIC_INTEGRATED(x) ((x) & 0xF0u) 30 | #else 31 | # define APIC_INTEGRATED(x) (1) 32 | #endif 33 | #define APIC_XAPIC(x) ((x) >= 0x14) 34 | #define APIC_EXT_SPACE(x) ((x) & 0x80000000) 35 | #define APIC_TASKPRI 0x80 36 | #define APIC_TPRI_MASK 0xFFu 37 | #define APIC_ARBPRI 0x90 38 | #define APIC_ARBPRI_MASK 0xFFu 39 | #define APIC_PROCPRI 0xA0 40 | #define APIC_EOI 0xB0 41 | #define APIC_EOI_ACK 0x0 /* Docs say 0 for future compat. */ 42 | #define APIC_RRR 0xC0 43 | #define APIC_LDR 0xD0 44 | #define APIC_LDR_MASK (0xFFu << 24) 45 | #define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu) 46 | #define SET_APIC_LOGICAL_ID(x) (((x) << 24)) 47 | #define APIC_ALL_CPUS 0xFFu 48 | #define APIC_DFR 0xE0 49 | #define APIC_DFR_CLUSTER 0x0FFFFFFFul 50 | #define APIC_DFR_FLAT 0xFFFFFFFFul 51 | #define APIC_SPIV 0xF0 52 | #define APIC_SPIV_DIRECTED_EOI (1 << 12) 53 | #define APIC_SPIV_FOCUS_DISABLED (1 << 9) 54 | #define APIC_SPIV_APIC_ENABLED (1 << 8) 55 | #define APIC_ISR 0x100 56 | #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ 57 | #define APIC_TMR 0x180 58 | #define APIC_IRR 0x200 59 | #define APIC_ESR 0x280 60 | #define APIC_ESR_SEND_CS 0x00001 61 | #define APIC_ESR_RECV_CS 0x00002 62 | #define APIC_ESR_SEND_ACC 0x00004 63 | #define APIC_ESR_RECV_ACC 0x00008 64 | #define APIC_ESR_SENDILL 0x00020 65 | #define APIC_ESR_RECVILL 0x00040 66 | #define APIC_ESR_ILLREGA 0x00080 67 | #define APIC_LVTCMCI 0x2f0 68 | #define APIC_ICR 0x300 69 | #define APIC_DEST_SELF 0x40000 70 | #define APIC_DEST_ALLINC 0x80000 71 | #define APIC_DEST_ALLBUT 0xC0000 72 | #define APIC_ICR_RR_MASK 0x30000 73 | #define APIC_ICR_RR_INVALID 0x00000 74 | #define APIC_ICR_RR_INPROG 0x10000 75 | #define APIC_ICR_RR_VALID 0x20000 76 | #define APIC_INT_LEVELTRIG 0x08000 77 | #define APIC_INT_ASSERT 0x04000 78 | #define APIC_ICR_BUSY 0x01000 79 | #define APIC_DEST_LOGICAL 0x00800 80 | #define APIC_DEST_PHYSICAL 0x00000 81 | #define APIC_DM_FIXED 0x00000 82 | #define APIC_DM_FIXED_MASK 0x00700 83 | #define APIC_DM_LOWEST 0x00100 84 | #define APIC_DM_SMI 0x00200 85 | #define APIC_DM_REMRD 0x00300 86 | #define APIC_DM_NMI 0x00400 87 | #define APIC_DM_INIT 0x00500 88 | #define APIC_DM_STARTUP 0x00600 89 | #define APIC_DM_EXTINT 0x00700 90 | #define APIC_VECTOR_MASK 0x000FF 91 | #define APIC_ICR2 0x310 92 | #define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF) 93 | #define SET_APIC_DEST_FIELD(x) ((x) << 24) 94 | #define APIC_LVTT 0x320 95 | #define APIC_LVTTHMR 0x330 96 | #define APIC_LVTPC 0x340 97 | #define APIC_LVT0 0x350 98 | #define APIC_LVT_TIMER_BASE_MASK (0x3 << 18) 99 | #define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3) 100 | #define SET_APIC_TIMER_BASE(x) (((x) << 18)) 101 | #define APIC_TIMER_BASE_CLKIN 0x0 102 | #define APIC_TIMER_BASE_TMBASE 0x1 103 | #define APIC_TIMER_BASE_DIV 0x2 104 | #define APIC_LVT_TIMER_ONESHOT (0 << 17) 105 | #define APIC_LVT_TIMER_PERIODIC (1 << 17) 106 | #define APIC_LVT_TIMER_TSCDEADLINE (2 << 17) 107 | #define APIC_LVT_MASKED (1 << 16) 
108 | #define APIC_LVT_LEVEL_TRIGGER (1 << 15) 109 | #define APIC_LVT_REMOTE_IRR (1 << 14) 110 | #define APIC_INPUT_POLARITY (1 << 13) 111 | #define APIC_SEND_PENDING (1 << 12) 112 | #define APIC_MODE_MASK 0x700 113 | #define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7) 114 | #define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8)) 115 | #define APIC_MODE_FIXED 0x0 116 | #define APIC_MODE_NMI 0x4 117 | #define APIC_MODE_EXTINT 0x7 118 | #define APIC_LVT1 0x360 119 | #define APIC_LVTERR 0x370 120 | #define APIC_TMICT 0x380 121 | #define APIC_TMCCT 0x390 122 | #define APIC_TDCR 0x3E0 123 | #define APIC_SELF_IPI 0x3F0 124 | #define APIC_TDR_DIV_TMBASE (1 << 2) 125 | #define APIC_TDR_DIV_1 0xB 126 | #define APIC_TDR_DIV_2 0x0 127 | #define APIC_TDR_DIV_4 0x1 128 | #define APIC_TDR_DIV_8 0x2 129 | #define APIC_TDR_DIV_16 0x3 130 | #define APIC_TDR_DIV_32 0x8 131 | #define APIC_TDR_DIV_64 0x9 132 | #define APIC_TDR_DIV_128 0xA 133 | #define APIC_EFEAT 0x400 134 | #define APIC_ECTRL 0x410 135 | #define APIC_EILVTn(n) (0x500 + 0x10 * n) 136 | #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ 137 | #define APIC_EILVT_NR_AMD_10H 4 138 | #define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H 139 | #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) 140 | #define APIC_EILVT_MSG_FIX 0x0 141 | #define APIC_EILVT_MSG_SMI 0x2 142 | #define APIC_EILVT_MSG_NMI 0x4 143 | #define APIC_EILVT_MSG_EXT 0x7 144 | #define APIC_EILVT_MASKED (1 << 16) 145 | 146 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) 147 | #define APIC_BASE_MSR 0x800 148 | #define XAPIC_ENABLE (1UL << 11) 149 | #define X2APIC_ENABLE (1UL << 10) 150 | 151 | #ifdef CONFIG_X86_32 152 | # define MAX_IO_APICS 64 153 | # define MAX_LOCAL_APIC 256 154 | #else 155 | # define MAX_IO_APICS 128 156 | # define MAX_LOCAL_APIC 32768 157 | #endif 158 | 159 | /* 160 | * All x86-64 systems are xAPIC compatible. 161 | * In the following, "apicid" is a physical APIC ID. 162 | */ 163 | #define XAPIC_DEST_CPUS_SHIFT 4 164 | #define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) 165 | #define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) 166 | #define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) 167 | #define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT) 168 | #define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK) 169 | #define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT) 170 | 171 | /* 172 | * the local APIC register structure, memory mapped. Not terribly well 173 | * tested, but we might eventually use this one in the future - the 174 | * problem why we cannot use it right now is the P5 APIC, it has an 175 | * errata which cannot take 8-bit reads and writes, only 32-bit ones ... 
176 | */ 177 | #define u32 unsigned int 178 | 179 | struct local_apic { 180 | 181 | /*000*/ struct { u32 __reserved[4]; } __reserved_01; 182 | 183 | /*010*/ struct { u32 __reserved[4]; } __reserved_02; 184 | 185 | /*020*/ struct { /* APIC ID Register */ 186 | u32 __reserved_1 : 24, 187 | phys_apic_id : 4, 188 | __reserved_2 : 4; 189 | u32 __reserved[3]; 190 | } id; 191 | 192 | /*030*/ const 193 | struct { /* APIC Version Register */ 194 | u32 version : 8, 195 | __reserved_1 : 8, 196 | max_lvt : 8, 197 | __reserved_2 : 8; 198 | u32 __reserved[3]; 199 | } version; 200 | 201 | /*040*/ struct { u32 __reserved[4]; } __reserved_03; 202 | 203 | /*050*/ struct { u32 __reserved[4]; } __reserved_04; 204 | 205 | /*060*/ struct { u32 __reserved[4]; } __reserved_05; 206 | 207 | /*070*/ struct { u32 __reserved[4]; } __reserved_06; 208 | 209 | /*080*/ struct { /* Task Priority Register */ 210 | u32 priority : 8, 211 | __reserved_1 : 24; 212 | u32 __reserved_2[3]; 213 | } tpr; 214 | 215 | /*090*/ const 216 | struct { /* Arbitration Priority Register */ 217 | u32 priority : 8, 218 | __reserved_1 : 24; 219 | u32 __reserved_2[3]; 220 | } apr; 221 | 222 | /*0A0*/ const 223 | struct { /* Processor Priority Register */ 224 | u32 priority : 8, 225 | __reserved_1 : 24; 226 | u32 __reserved_2[3]; 227 | } ppr; 228 | 229 | /*0B0*/ struct { /* End Of Interrupt Register */ 230 | u32 eoi; 231 | u32 __reserved[3]; 232 | } eoi; 233 | 234 | /*0C0*/ struct { u32 __reserved[4]; } __reserved_07; 235 | 236 | /*0D0*/ struct { /* Logical Destination Register */ 237 | u32 __reserved_1 : 24, 238 | logical_dest : 8; 239 | u32 __reserved_2[3]; 240 | } ldr; 241 | 242 | /*0E0*/ struct { /* Destination Format Register */ 243 | u32 __reserved_1 : 28, 244 | model : 4; 245 | u32 __reserved_2[3]; 246 | } dfr; 247 | 248 | /*0F0*/ struct { /* Spurious Interrupt Vector Register */ 249 | u32 spurious_vector : 8, 250 | apic_enabled : 1, 251 | focus_cpu : 1, 252 | __reserved_2 : 22; 253 | u32 __reserved_3[3]; 254 | } svr; 255 | 256 | /*100*/ struct { /* In Service Register */ 257 | /*170*/ u32 bitfield; 258 | u32 __reserved[3]; 259 | } isr [8]; 260 | 261 | /*180*/ struct { /* Trigger Mode Register */ 262 | /*1F0*/ u32 bitfield; 263 | u32 __reserved[3]; 264 | } tmr [8]; 265 | 266 | /*200*/ struct { /* Interrupt Request Register */ 267 | /*270*/ u32 bitfield; 268 | u32 __reserved[3]; 269 | } irr [8]; 270 | 271 | /*280*/ union { /* Error Status Register */ 272 | struct { 273 | u32 send_cs_error : 1, 274 | receive_cs_error : 1, 275 | send_accept_error : 1, 276 | receive_accept_error : 1, 277 | __reserved_1 : 1, 278 | send_illegal_vector : 1, 279 | receive_illegal_vector : 1, 280 | illegal_register_address : 1, 281 | __reserved_2 : 24; 282 | u32 __reserved_3[3]; 283 | } error_bits; 284 | struct { 285 | u32 errors; 286 | u32 __reserved_3[3]; 287 | } all_errors; 288 | } esr; 289 | 290 | /*290*/ struct { u32 __reserved[4]; } __reserved_08; 291 | 292 | /*2A0*/ struct { u32 __reserved[4]; } __reserved_09; 293 | 294 | /*2B0*/ struct { u32 __reserved[4]; } __reserved_10; 295 | 296 | /*2C0*/ struct { u32 __reserved[4]; } __reserved_11; 297 | 298 | /*2D0*/ struct { u32 __reserved[4]; } __reserved_12; 299 | 300 | /*2E0*/ struct { u32 __reserved[4]; } __reserved_13; 301 | 302 | /*2F0*/ struct { u32 __reserved[4]; } __reserved_14; 303 | 304 | /*300*/ struct { /* Interrupt Command Register 1 */ 305 | u32 vector : 8, 306 | delivery_mode : 3, 307 | destination_mode : 1, 308 | delivery_status : 1, 309 | __reserved_1 : 1, 310 | level : 1, 311 | trigger : 1, 312 | 
__reserved_2 : 2, 313 | shorthand : 2, 314 | __reserved_3 : 12; 315 | u32 __reserved_4[3]; 316 | } icr1; 317 | 318 | /*310*/ struct { /* Interrupt Command Register 2 */ 319 | union { 320 | u32 __reserved_1 : 24, 321 | phys_dest : 4, 322 | __reserved_2 : 4; 323 | u32 __reserved_3 : 24, 324 | logical_dest : 8; 325 | } dest; 326 | u32 __reserved_4[3]; 327 | } icr2; 328 | 329 | /*320*/ struct { /* LVT - Timer */ 330 | u32 vector : 8, 331 | __reserved_1 : 4, 332 | delivery_status : 1, 333 | __reserved_2 : 3, 334 | mask : 1, 335 | timer_mode : 1, 336 | __reserved_3 : 14; 337 | u32 __reserved_4[3]; 338 | } lvt_timer; 339 | 340 | /*330*/ struct { /* LVT - Thermal Sensor */ 341 | u32 vector : 8, 342 | delivery_mode : 3, 343 | __reserved_1 : 1, 344 | delivery_status : 1, 345 | __reserved_2 : 3, 346 | mask : 1, 347 | __reserved_3 : 15; 348 | u32 __reserved_4[3]; 349 | } lvt_thermal; 350 | 351 | /*340*/ struct { /* LVT - Performance Counter */ 352 | u32 vector : 8, 353 | delivery_mode : 3, 354 | __reserved_1 : 1, 355 | delivery_status : 1, 356 | __reserved_2 : 3, 357 | mask : 1, 358 | __reserved_3 : 15; 359 | u32 __reserved_4[3]; 360 | } lvt_pc; 361 | 362 | /*350*/ struct { /* LVT - LINT0 */ 363 | u32 vector : 8, 364 | delivery_mode : 3, 365 | __reserved_1 : 1, 366 | delivery_status : 1, 367 | polarity : 1, 368 | remote_irr : 1, 369 | trigger : 1, 370 | mask : 1, 371 | __reserved_2 : 15; 372 | u32 __reserved_3[3]; 373 | } lvt_lint0; 374 | 375 | /*360*/ struct { /* LVT - LINT1 */ 376 | u32 vector : 8, 377 | delivery_mode : 3, 378 | __reserved_1 : 1, 379 | delivery_status : 1, 380 | polarity : 1, 381 | remote_irr : 1, 382 | trigger : 1, 383 | mask : 1, 384 | __reserved_2 : 15; 385 | u32 __reserved_3[3]; 386 | } lvt_lint1; 387 | 388 | /*370*/ struct { /* LVT - Error */ 389 | u32 vector : 8, 390 | __reserved_1 : 4, 391 | delivery_status : 1, 392 | __reserved_2 : 3, 393 | mask : 1, 394 | __reserved_3 : 15; 395 | u32 __reserved_4[3]; 396 | } lvt_error; 397 | 398 | /*380*/ struct { /* Timer Initial Count Register */ 399 | u32 initial_count; 400 | u32 __reserved_2[3]; 401 | } timer_icr; 402 | 403 | /*390*/ const 404 | struct { /* Timer Current Count Register */ 405 | u32 curr_count; 406 | u32 __reserved_2[3]; 407 | } timer_ccr; 408 | 409 | /*3A0*/ struct { u32 __reserved[4]; } __reserved_16; 410 | 411 | /*3B0*/ struct { u32 __reserved[4]; } __reserved_17; 412 | 413 | /*3C0*/ struct { u32 __reserved[4]; } __reserved_18; 414 | 415 | /*3D0*/ struct { u32 __reserved[4]; } __reserved_19; 416 | 417 | /*3E0*/ struct { /* Timer Divide Configuration Register */ 418 | u32 divisor : 4, 419 | __reserved_1 : 28; 420 | u32 __reserved_2[3]; 421 | } timer_dcr; 422 | 423 | /*3F0*/ struct { u32 __reserved[4]; } __reserved_20; 424 | 425 | } __attribute__ ((packed)); 426 | 427 | #undef u32 428 | 429 | #ifdef CONFIG_X86_32 430 | #define BAD_APICID 0xFFu 431 | #else 432 | #define BAD_APICID 0xFFFFu 433 | #endif 434 | 435 | enum ioapic_irq_destination_types { 436 | dest_Fixed = 0, 437 | dest_LowestPrio = 1, 438 | dest_SMI = 2, 439 | dest__reserved_1 = 3, 440 | dest_NMI = 4, 441 | dest_INIT = 5, 442 | dest__reserved_2 = 6, 443 | dest_ExtINT = 7 444 | }; 445 | 446 | #endif /* _ASM_X86_APICDEF_H */ 447 | -------------------------------------------------------------------------------- /debug/kvmwrmsr/kvm_para.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 | #ifndef _UAPI_ASM_X86_KVM_PARA_H 3 | #define _UAPI_ASM_X86_KVM_PARA_H 4 | 5 
| /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It 6 | * should be used to determine that a VM is running under KVM. 7 | */ 8 | #define KVM_CPUID_SIGNATURE 0x40000000 9 | 10 | /* This CPUID returns a feature bitmap in eax. Before enabling a particular 11 | * paravirtualization, the appropriate feature bit should be checked. 12 | */ 13 | #define KVM_CPUID_FEATURES 0x40000001 14 | #define KVM_FEATURE_CLOCKSOURCE 0 15 | #define KVM_FEATURE_NOP_IO_DELAY 1 16 | #define KVM_FEATURE_MMU_OP 2 17 | /* This indicates that the new set of kvmclock msrs 18 | * are available. The use of 0x11 and 0x12 is deprecated 19 | */ 20 | #define KVM_FEATURE_CLOCKSOURCE2 3 21 | #define KVM_FEATURE_ASYNC_PF 4 22 | #define KVM_FEATURE_STEAL_TIME 5 23 | #define KVM_FEATURE_PV_EOI 6 24 | #define KVM_FEATURE_PV_UNHALT 7 25 | #define KVM_FEATURE_ASYNC_PF_VMEXIT 10 26 | #define KVM_FEATURE_PV_SEND_IPI 11 27 | 28 | /* The last 8 bits are used to indicate how to interpret the flags field 29 | * in pvclock structure. If no bits are set, all flags are ignored. 30 | */ 31 | #define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24 32 | 33 | #define MSR_KVM_WALL_CLOCK 0x11 34 | #define MSR_KVM_SYSTEM_TIME 0x12 35 | 36 | #define KVM_MSR_ENABLED 1 37 | /* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */ 38 | #define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00 39 | #define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01 40 | #define MSR_KVM_ASYNC_PF_EN 0x4b564d02 41 | #define MSR_KVM_STEAL_TIME 0x4b564d03 42 | #define MSR_KVM_PV_EOI_EN 0x4b564d04 43 | 44 | #define KVM_STEAL_ALIGNMENT_BITS 5 45 | #define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1))) 46 | #define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1) 47 | 48 | #define KVM_MAX_MMU_OP_BATCH 32 49 | 50 | #define KVM_ASYNC_PF_ENABLED (1 << 0) 51 | #define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1) 52 | #define KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT (1 << 2) 53 | 54 | /* Operations for KVM_HC_MMU_OP */ 55 | #define KVM_MMU_OP_WRITE_PTE 1 56 | #define KVM_MMU_OP_FLUSH_TLB 2 57 | #define KVM_MMU_OP_RELEASE_PT 3 58 | #define KVM_PV_REASON_PAGE_NOT_PRESENT 1 59 | #define KVM_PV_REASON_PAGE_READY 2 60 | 61 | #define KVM_PV_EOI_BIT 0 62 | #define KVM_PV_EOI_MASK (0x1 << KVM_PV_EOI_BIT) 63 | #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK 64 | #define KVM_PV_EOI_DISABLED 0x0 65 | 66 | 67 | #endif /* _UAPI_ASM_X86_KVM_PARA_H */ 68 | -------------------------------------------------------------------------------- /debug/kvmwrmsr/kvmwrmsr.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2019 zhenwei pi pizhenwei@bytedance.com. 
3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "msr.h" 10 | 11 | static ktime_t __ktime; 12 | static spinlock_t showing_lock; 13 | static atomic_long_t total_wrmsr; 14 | 15 | void show_wrmsr(void) 16 | { 17 | unsigned int idx; 18 | unsigned int msr, count; 19 | 20 | pr_info("WRMSR STATISTIC\n"); 21 | idx = MSR_IA32_APICBASE; 22 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 23 | idx = MSR_IA32_TSC_ADJUST; 24 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 25 | idx = MSR_IA32_TSCDEADLINE; 26 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 27 | idx = MSR_IA32_MISC_ENABLE; 28 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 29 | idx = MSR_IA32_MCG_STATUS; 30 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 31 | idx = MSR_IA32_MCG_CTL; 32 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 33 | idx = MSR_IA32_MCG_EXT_CTL; 34 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 35 | idx = MSR_IA32_SMBASE; 36 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 37 | idx = MSR_PLATFORM_INFO; 38 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 39 | idx = MSR_MISC_FEATURES_ENABLES; 40 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 41 | idx = MSR_KVM_WALL_CLOCK; 42 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 43 | idx = MSR_KVM_SYSTEM_TIME; 44 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 45 | idx = MSR_CORE_PERF_FIXED_CTR0; 46 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 47 | idx = MSR_CORE_PERF_FIXED_CTR1; 48 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 49 | idx = MSR_CORE_PERF_FIXED_CTR2; 50 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 51 | idx = MSR_CORE_PERF_FIXED_CTR_CTRL; 52 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 53 | idx = MSR_CORE_PERF_GLOBAL_STATUS; 54 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 55 | idx = MSR_CORE_PERF_GLOBAL_CTRL; 56 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 57 | idx = MSR_CORE_PERF_GLOBAL_OVF_CTRL; 58 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 59 | idx = 0; 60 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 61 | 62 | /* show lapic */ 63 | pr_info("APIC STATISTIC\n"); 64 | idx = APIC_BASE_MSR + (APIC_TASKPRI>>4); 65 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 66 | idx = APIC_BASE_MSR + (APIC_EOI>>4); 67 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 68 | idx = APIC_BASE_MSR + (APIC_LDR>>4); 69 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 70 | idx = APIC_BASE_MSR + (APIC_DFR>>4); 71 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 72 | idx = APIC_BASE_MSR + (APIC_SPIV>>4); 73 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 74 | idx = APIC_BASE_MSR + (APIC_ICR>>4); 75 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 76 | idx = APIC_BASE_MSR + (APIC_ICR2>>4); 77 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 78 | idx = APIC_BASE_MSR + (APIC_LVT0>>4); 79 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 80 | idx = APIC_BASE_MSR + (APIC_LVTT>>4); 81 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 82 | idx = APIC_BASE_MSR + (APIC_SELF_IPI>>4); 83 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 84 | idx = APIC_BASE_MSR + (0>>4); 85 | pr_info("\t[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 86 | 87 | /* other msrs*/ 88 | if (report_other_wrmsr(0, &msr, &count) 
== 0) { 89 | pr_info("OTHER MSRS STATISTIC\n"); 90 | for (idx = 0; idx < OTHER_MSRS; idx++) { 91 | if (report_other_wrmsr(idx, &msr, &count)) 92 | break; 93 | 94 | pr_info("\t[OTHER MSR 0x%x] %d\n", msr, count); 95 | } 96 | } 97 | } 98 | 99 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) 100 | static int kp_vmx_set_msr(struct kprobe *p, struct pt_regs *regs) 101 | { 102 | struct msr_data *msr_info = (struct msr_data *)regs->si; 103 | unsigned int idx = msr_info->index; 104 | #else 105 | static int kp_vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 106 | { 107 | unsigned int idx = msr_info->index; 108 | #endif 109 | ktime_t now = ktime_get(); 110 | //pr_info("kprobe : msr_index = %d", msr_info->index); 111 | 112 | atomic_long_inc(&total_wrmsr); 113 | record_wrmsr(idx); 114 | //pr_info("WRMSR[%s] %ld\n", msr2str(idx), report_wrmsr(idx)); 115 | 116 | if (ktime_to_ns(now) - ktime_to_ns(__ktime) > 1000*1000*1000) { 117 | if (!spin_trylock(&showing_lock)) 118 | goto out; 119 | 120 | pr_info("total_wrmsr = %ld\n", atomic_long_read(&total_wrmsr)); 121 | atomic_long_set(&total_wrmsr, 0); 122 | show_wrmsr(); 123 | reset_wrmsr(); 124 | 125 | __ktime = now; 126 | spin_unlock(&showing_lock); 127 | } 128 | 129 | out : 130 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0) 131 | /* Always end with a call to jprobe_return(). */ 132 | jprobe_return(); 133 | #endif 134 | return 0; 135 | } 136 | 137 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) 138 | static struct kprobe vmx_set_msr_probe = { 139 | .pre_handler = kp_vmx_set_msr, 140 | .symbol_name = "vmx_set_msr", 141 | }; 142 | #else 143 | static struct jprobe vmx_set_msr_probe = { 144 | .entry = kp_vmx_set_msr, 145 | .kp = { 146 | .symbol_name = "vmx_set_msr", 147 | }, 148 | }; 149 | #endif 150 | 151 | static int __init probe_init(void) 152 | { 153 | int ret; 154 | void *addr; 155 | 156 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) 157 | ret = register_kprobe(&vmx_set_msr_probe); 158 | addr = vmx_set_msr_probe.addr; 159 | #else 160 | ret = register_jprobe(&vmx_set_msr_probe); 161 | addr = vmx_set_msr_probe.kp.addr; 162 | #endif 163 | if (ret < 0) { 164 | pr_err("kvmwrmsr : register_probe failed, returned %d\n", ret); 165 | return -1; 166 | } 167 | 168 | spin_lock_init(&showing_lock); 169 | atomic_long_set(&total_wrmsr, 0); 170 | init_wrmsr(); 171 | __ktime = ktime_get(); 172 | pr_info("kvmwrmsr : planted probe at %p\n", addr); 173 | return 0; 174 | } 175 | 176 | static void __exit probe_exit(void) 177 | { 178 | void *addr; 179 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) 180 | addr = vmx_set_msr_probe.addr; 181 | unregister_kprobe(&vmx_set_msr_probe); 182 | #else 183 | addr = vmx_set_msr_probe.kp.addr; 184 | unregister_jprobe(&vmx_set_msr_probe); 185 | #endif 186 | pr_info("kvmwrmsr : probe at %p unregistered\n", addr); 187 | } 188 | 189 | module_init(probe_init) 190 | module_exit(probe_exit) 191 | MODULE_LICENSE("GPL"); 192 | MODULE_AUTHOR("zhenwei pi pizhewnei@bytedance.com"); 193 | -------------------------------------------------------------------------------- /debug/kvmwrmsr/local-msr-index.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2019 zhenwei pi pizhenwei@bytedance.com. 
3 | */ 4 | #ifndef __LOCAL_MSR_H__ 5 | #define __LOCAL_MSR_H__ 6 | 7 | #ifndef MSR_MISC_FEATURES_ENABLES 8 | #define MSR_MISC_FEATURES_ENABLES 0x00000140 9 | #endif 10 | 11 | #endif 12 | -------------------------------------------------------------------------------- /debug/kvmwrmsr/msr-index.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | #ifndef _ASM_X86_MSR_INDEX_H 3 | #define _ASM_X86_MSR_INDEX_H 4 | 5 | /* 6 | * CPU model specific register (MSR) numbers. 7 | * 8 | * Do not add new entries to this file unless the definitions are shared 9 | * between multiple compilation units. 10 | */ 11 | 12 | /* x86-64 specific MSRs */ 13 | #define MSR_EFER 0xc0000080 /* extended feature register */ 14 | #define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */ 15 | #define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ 16 | #define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */ 17 | #define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ 18 | #define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ 19 | #define MSR_GS_BASE 0xc0000101 /* 64bit GS base */ 20 | #define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */ 21 | #define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */ 22 | 23 | /* EFER bits: */ 24 | #define _EFER_SCE 0 /* SYSCALL/SYSRET */ 25 | #define _EFER_LME 8 /* Long mode enable */ 26 | #define _EFER_LMA 10 /* Long mode active (read-only) */ 27 | #define _EFER_NX 11 /* No execute enable */ 28 | #define _EFER_SVME 12 /* Enable virtualization */ 29 | #define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */ 30 | #define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */ 31 | 32 | #define EFER_SCE (1<<_EFER_SCE) 33 | #define EFER_LME (1<<_EFER_LME) 34 | #define EFER_LMA (1<<_EFER_LMA) 35 | #define EFER_NX (1<<_EFER_NX) 36 | #define EFER_SVME (1<<_EFER_SVME) 37 | #define EFER_LMSLE (1<<_EFER_LMSLE) 38 | #define EFER_FFXSR (1<<_EFER_FFXSR) 39 | 40 | /* Intel MSRs. 
Some also available on other CPUs */ 41 | 42 | #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ 43 | #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ 44 | #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ 45 | #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ 46 | #define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ 47 | 48 | #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ 49 | #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ 50 | 51 | #define MSR_PPIN_CTL 0x0000004e 52 | #define MSR_PPIN 0x0000004f 53 | 54 | #define MSR_IA32_PERFCTR0 0x000000c1 55 | #define MSR_IA32_PERFCTR1 0x000000c2 56 | #define MSR_FSB_FREQ 0x000000cd 57 | #define MSR_PLATFORM_INFO 0x000000ce 58 | #define MSR_PLATFORM_INFO_CPUID_FAULT_BIT 31 59 | #define MSR_PLATFORM_INFO_CPUID_FAULT BIT_ULL(MSR_PLATFORM_INFO_CPUID_FAULT_BIT) 60 | 61 | #define MSR_PKG_CST_CONFIG_CONTROL 0x000000e2 62 | #define NHM_C3_AUTO_DEMOTE (1UL << 25) 63 | #define NHM_C1_AUTO_DEMOTE (1UL << 26) 64 | #define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) 65 | #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 66 | #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 67 | 68 | #define MSR_MTRRcap 0x000000fe 69 | 70 | #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a 71 | #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ 72 | #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ 73 | #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */ 74 | #define ARCH_CAP_SSB_NO (1 << 4) /* 75 | * Not susceptible to Speculative Store Bypass 76 | * attack, so no Speculative Store Bypass 77 | * control required. 78 | */ 79 | 80 | #define MSR_IA32_FLUSH_CMD 0x0000010b 81 | #define L1D_FLUSH (1 << 0) /* 82 | * Writeback and invalidate the 83 | * L1 data cache. 84 | */ 85 | 86 | #define MSR_IA32_BBL_CR_CTL 0x00000119 87 | #define MSR_IA32_BBL_CR_CTL3 0x0000011e 88 | 89 | #define MSR_IA32_SYSENTER_CS 0x00000174 90 | #define MSR_IA32_SYSENTER_ESP 0x00000175 91 | #define MSR_IA32_SYSENTER_EIP 0x00000176 92 | 93 | #define MSR_IA32_MCG_CAP 0x00000179 94 | #define MSR_IA32_MCG_STATUS 0x0000017a 95 | #define MSR_IA32_MCG_CTL 0x0000017b 96 | #define MSR_IA32_MCG_EXT_CTL 0x000004d0 97 | 98 | #define MSR_OFFCORE_RSP_0 0x000001a6 99 | #define MSR_OFFCORE_RSP_1 0x000001a7 100 | #define MSR_TURBO_RATIO_LIMIT 0x000001ad 101 | #define MSR_TURBO_RATIO_LIMIT1 0x000001ae 102 | #define MSR_TURBO_RATIO_LIMIT2 0x000001af 103 | 104 | #define MSR_LBR_SELECT 0x000001c8 105 | #define MSR_LBR_TOS 0x000001c9 106 | #define MSR_LBR_NHM_FROM 0x00000680 107 | #define MSR_LBR_NHM_TO 0x000006c0 108 | #define MSR_LBR_CORE_FROM 0x00000040 109 | #define MSR_LBR_CORE_TO 0x00000060 110 | 111 | #define MSR_LBR_INFO_0 0x00000dc0 /* ... 
0xddf for _31 */ 112 | #define LBR_INFO_MISPRED BIT_ULL(63) 113 | #define LBR_INFO_IN_TX BIT_ULL(62) 114 | #define LBR_INFO_ABORT BIT_ULL(61) 115 | #define LBR_INFO_CYCLES 0xffff 116 | 117 | #define MSR_IA32_PEBS_ENABLE 0x000003f1 118 | #define MSR_IA32_DS_AREA 0x00000600 119 | #define MSR_IA32_PERF_CAPABILITIES 0x00000345 120 | #define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 121 | 122 | #define MSR_IA32_RTIT_CTL 0x00000570 123 | #define MSR_IA32_RTIT_STATUS 0x00000571 124 | #define MSR_IA32_RTIT_ADDR0_A 0x00000580 125 | #define MSR_IA32_RTIT_ADDR0_B 0x00000581 126 | #define MSR_IA32_RTIT_ADDR1_A 0x00000582 127 | #define MSR_IA32_RTIT_ADDR1_B 0x00000583 128 | #define MSR_IA32_RTIT_ADDR2_A 0x00000584 129 | #define MSR_IA32_RTIT_ADDR2_B 0x00000585 130 | #define MSR_IA32_RTIT_ADDR3_A 0x00000586 131 | #define MSR_IA32_RTIT_ADDR3_B 0x00000587 132 | #define MSR_IA32_RTIT_CR3_MATCH 0x00000572 133 | #define MSR_IA32_RTIT_OUTPUT_BASE 0x00000560 134 | #define MSR_IA32_RTIT_OUTPUT_MASK 0x00000561 135 | 136 | #define MSR_MTRRfix64K_00000 0x00000250 137 | #define MSR_MTRRfix16K_80000 0x00000258 138 | #define MSR_MTRRfix16K_A0000 0x00000259 139 | #define MSR_MTRRfix4K_C0000 0x00000268 140 | #define MSR_MTRRfix4K_C8000 0x00000269 141 | #define MSR_MTRRfix4K_D0000 0x0000026a 142 | #define MSR_MTRRfix4K_D8000 0x0000026b 143 | #define MSR_MTRRfix4K_E0000 0x0000026c 144 | #define MSR_MTRRfix4K_E8000 0x0000026d 145 | #define MSR_MTRRfix4K_F0000 0x0000026e 146 | #define MSR_MTRRfix4K_F8000 0x0000026f 147 | #define MSR_MTRRdefType 0x000002ff 148 | 149 | #define MSR_IA32_CR_PAT 0x00000277 150 | 151 | #define MSR_IA32_DEBUGCTLMSR 0x000001d9 152 | #define MSR_IA32_LASTBRANCHFROMIP 0x000001db 153 | #define MSR_IA32_LASTBRANCHTOIP 0x000001dc 154 | #define MSR_IA32_LASTINTFROMIP 0x000001dd 155 | #define MSR_IA32_LASTINTTOIP 0x000001de 156 | 157 | /* DEBUGCTLMSR bits (others vary by model): */ 158 | #define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ 159 | #define DEBUGCTLMSR_BTF_SHIFT 1 160 | #define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ 161 | #define DEBUGCTLMSR_TR (1UL << 6) 162 | #define DEBUGCTLMSR_BTS (1UL << 7) 163 | #define DEBUGCTLMSR_BTINT (1UL << 8) 164 | #define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9) 165 | #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) 166 | #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) 167 | #define DEBUGCTLMSR_FREEZE_IN_SMM_BIT 14 168 | #define DEBUGCTLMSR_FREEZE_IN_SMM (1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT) 169 | 170 | #define MSR_PEBS_FRONTEND 0x000003f7 171 | 172 | #define MSR_IA32_POWER_CTL 0x000001fc 173 | 174 | #define MSR_IA32_MC0_CTL 0x00000400 175 | #define MSR_IA32_MC0_STATUS 0x00000401 176 | #define MSR_IA32_MC0_ADDR 0x00000402 177 | #define MSR_IA32_MC0_MISC 0x00000403 178 | 179 | /* C-state Residency Counters */ 180 | #define MSR_PKG_C3_RESIDENCY 0x000003f8 181 | #define MSR_PKG_C6_RESIDENCY 0x000003f9 182 | #define MSR_ATOM_PKG_C6_RESIDENCY 0x000003fa 183 | #define MSR_PKG_C7_RESIDENCY 0x000003fa 184 | #define MSR_CORE_C3_RESIDENCY 0x000003fc 185 | #define MSR_CORE_C6_RESIDENCY 0x000003fd 186 | #define MSR_CORE_C7_RESIDENCY 0x000003fe 187 | #define MSR_KNL_CORE_C6_RESIDENCY 0x000003ff 188 | #define MSR_PKG_C2_RESIDENCY 0x0000060d 189 | #define MSR_PKG_C8_RESIDENCY 0x00000630 190 | #define MSR_PKG_C9_RESIDENCY 0x00000631 191 | #define MSR_PKG_C10_RESIDENCY 0x00000632 192 | 193 | /* Interrupt Response Limit */ 194 | #define MSR_PKGC3_IRTL 0x0000060a 195 | #define MSR_PKGC6_IRTL 0x0000060b 196 | #define MSR_PKGC7_IRTL 0x0000060c 197 | #define 
MSR_PKGC8_IRTL 0x00000633 198 | #define MSR_PKGC9_IRTL 0x00000634 199 | #define MSR_PKGC10_IRTL 0x00000635 200 | 201 | /* Run Time Average Power Limiting (RAPL) Interface */ 202 | 203 | #define MSR_RAPL_POWER_UNIT 0x00000606 204 | 205 | #define MSR_PKG_POWER_LIMIT 0x00000610 206 | #define MSR_PKG_ENERGY_STATUS 0x00000611 207 | #define MSR_PKG_PERF_STATUS 0x00000613 208 | #define MSR_PKG_POWER_INFO 0x00000614 209 | 210 | #define MSR_DRAM_POWER_LIMIT 0x00000618 211 | #define MSR_DRAM_ENERGY_STATUS 0x00000619 212 | #define MSR_DRAM_PERF_STATUS 0x0000061b 213 | #define MSR_DRAM_POWER_INFO 0x0000061c 214 | 215 | #define MSR_PP0_POWER_LIMIT 0x00000638 216 | #define MSR_PP0_ENERGY_STATUS 0x00000639 217 | #define MSR_PP0_POLICY 0x0000063a 218 | #define MSR_PP0_PERF_STATUS 0x0000063b 219 | 220 | #define MSR_PP1_POWER_LIMIT 0x00000640 221 | #define MSR_PP1_ENERGY_STATUS 0x00000641 222 | #define MSR_PP1_POLICY 0x00000642 223 | 224 | /* Config TDP MSRs */ 225 | #define MSR_CONFIG_TDP_NOMINAL 0x00000648 226 | #define MSR_CONFIG_TDP_LEVEL_1 0x00000649 227 | #define MSR_CONFIG_TDP_LEVEL_2 0x0000064A 228 | #define MSR_CONFIG_TDP_CONTROL 0x0000064B 229 | #define MSR_TURBO_ACTIVATION_RATIO 0x0000064C 230 | 231 | #define MSR_PLATFORM_ENERGY_STATUS 0x0000064D 232 | 233 | #define MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658 234 | #define MSR_PKG_ANY_CORE_C0_RES 0x00000659 235 | #define MSR_PKG_ANY_GFXE_C0_RES 0x0000065A 236 | #define MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B 237 | 238 | #define MSR_CORE_C1_RES 0x00000660 239 | #define MSR_MODULE_C6_RES_MS 0x00000664 240 | 241 | #define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 242 | #define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 243 | 244 | #define MSR_ATOM_CORE_RATIOS 0x0000066a 245 | #define MSR_ATOM_CORE_VIDS 0x0000066b 246 | #define MSR_ATOM_CORE_TURBO_RATIOS 0x0000066c 247 | #define MSR_ATOM_CORE_TURBO_VIDS 0x0000066d 248 | 249 | 250 | #define MSR_CORE_PERF_LIMIT_REASONS 0x00000690 251 | #define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0 252 | #define MSR_RING_PERF_LIMIT_REASONS 0x000006B1 253 | 254 | /* Hardware P state interface */ 255 | #define MSR_PPERF 0x0000064e 256 | #define MSR_PERF_LIMIT_REASONS 0x0000064f 257 | #define MSR_PM_ENABLE 0x00000770 258 | #define MSR_HWP_CAPABILITIES 0x00000771 259 | #define MSR_HWP_REQUEST_PKG 0x00000772 260 | #define MSR_HWP_INTERRUPT 0x00000773 261 | #define MSR_HWP_REQUEST 0x00000774 262 | #define MSR_HWP_STATUS 0x00000777 263 | 264 | /* CPUID.6.EAX */ 265 | #define HWP_BASE_BIT (1<<7) 266 | #define HWP_NOTIFICATIONS_BIT (1<<8) 267 | #define HWP_ACTIVITY_WINDOW_BIT (1<<9) 268 | #define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10) 269 | #define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11) 270 | 271 | /* IA32_HWP_CAPABILITIES */ 272 | #define HWP_HIGHEST_PERF(x) (((x) >> 0) & 0xff) 273 | #define HWP_GUARANTEED_PERF(x) (((x) >> 8) & 0xff) 274 | #define HWP_MOSTEFFICIENT_PERF(x) (((x) >> 16) & 0xff) 275 | #define HWP_LOWEST_PERF(x) (((x) >> 24) & 0xff) 276 | 277 | /* IA32_HWP_REQUEST */ 278 | #define HWP_MIN_PERF(x) (x & 0xff) 279 | #define HWP_MAX_PERF(x) ((x & 0xff) << 8) 280 | #define HWP_DESIRED_PERF(x) ((x & 0xff) << 16) 281 | #define HWP_ENERGY_PERF_PREFERENCE(x) (((unsigned long long) x & 0xff) << 24) 282 | #define HWP_EPP_PERFORMANCE 0x00 283 | #define HWP_EPP_BALANCE_PERFORMANCE 0x80 284 | #define HWP_EPP_BALANCE_POWERSAVE 0xC0 285 | #define HWP_EPP_POWERSAVE 0xFF 286 | #define HWP_ACTIVITY_WINDOW(x) ((unsigned long long)(x & 0xff3) << 32) 287 | #define HWP_PACKAGE_CONTROL(x) ((unsigned long long)(x & 0x1) << 42) 288 | 289 | /* 
IA32_HWP_STATUS */ 290 | #define HWP_GUARANTEED_CHANGE(x) (x & 0x1) 291 | #define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4) 292 | 293 | /* IA32_HWP_INTERRUPT */ 294 | #define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1) 295 | #define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2) 296 | 297 | #define MSR_AMD64_MC0_MASK 0xc0010044 298 | 299 | #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) 300 | #define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) 301 | #define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) 302 | #define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) 303 | 304 | #define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) 305 | 306 | /* These are consecutive and not in the normal 4er MCE bank block */ 307 | #define MSR_IA32_MC0_CTL2 0x00000280 308 | #define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) 309 | 310 | #define MSR_P6_PERFCTR0 0x000000c1 311 | #define MSR_P6_PERFCTR1 0x000000c2 312 | #define MSR_P6_EVNTSEL0 0x00000186 313 | #define MSR_P6_EVNTSEL1 0x00000187 314 | 315 | #define MSR_KNC_PERFCTR0 0x00000020 316 | #define MSR_KNC_PERFCTR1 0x00000021 317 | #define MSR_KNC_EVNTSEL0 0x00000028 318 | #define MSR_KNC_EVNTSEL1 0x00000029 319 | 320 | /* Alternative perfctr range with full access. */ 321 | #define MSR_IA32_PMC0 0x000004c1 322 | 323 | /* AMD64 MSRs. Not complete. See the architecture manual for a more 324 | complete list. */ 325 | 326 | #define MSR_AMD64_PATCH_LEVEL 0x0000008b 327 | #define MSR_AMD64_TSC_RATIO 0xc0000104 328 | #define MSR_AMD64_NB_CFG 0xc001001f 329 | #define MSR_AMD64_PATCH_LOADER 0xc0010020 330 | #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 331 | #define MSR_AMD64_OSVW_STATUS 0xc0010141 332 | #define MSR_AMD64_LS_CFG 0xc0011020 333 | #define MSR_AMD64_DC_CFG 0xc0011022 334 | #define MSR_AMD64_BU_CFG2 0xc001102a 335 | #define MSR_AMD64_IBSFETCHCTL 0xc0011030 336 | #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 337 | #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 338 | #define MSR_AMD64_IBSFETCH_REG_COUNT 3 339 | #define MSR_AMD64_IBSFETCH_REG_MASK ((1UL< 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "../common/rdtsc.h" 11 | 12 | static int options; 13 | module_param(options, int, 0444); 14 | 15 | #define LOOP 100000 16 | #define MAXNUMA 4 17 | 18 | static char *ipi_funcs[] = { 19 | "kvm_send_ipi_mask", 20 | "default_send_IPI_mask_sequence_phys", 21 | "flat_send_IPI_mask", 22 | "x2apic_send_IPI_mask", 23 | }; 24 | 25 | static void bench(char *func, int nodes) 26 | { 27 | unsigned long start, end; 28 | int loop, node; 29 | unsigned int currentcpu, targetcpu; 30 | unsigned char numa[MAXNUMA] = {0}; 31 | void (*send_IPI_mask)(const struct cpumask *mask, int vector); 32 | 33 | send_IPI_mask = (void (*)(const struct cpumask *mask, int vector))kallsyms_lookup_name(func); 34 | 35 | currentcpu = get_cpu(); 36 | 37 | /* from current cpu to each NUMA node IPI */ 38 | for (node = 0; node < nodes && node < MAXNUMA; node++) { 39 | for_each_online_cpu(targetcpu) { 40 | if (targetcpu == currentcpu) 41 | continue; 42 | 43 | if (numa[cpu_to_node(targetcpu)]) 44 | continue; 45 | 46 | break; 47 | } 48 | 49 | 50 | printk(KERN_INFO "apic_ipi: IPI[%s] from CPU[%d] to CPU[%d]\n", func, currentcpu, targetcpu); 51 | 52 | start = ins_rdtsc(); 53 | for (loop = 0; loop < LOOP; loop++) { 54 | send_IPI_mask(cpumask_of(targetcpu), CALL_FUNCTION_SINGLE_VECTOR); 55 | } 56 | end = ins_rdtsc(); 57 | 58 | printk(KERN_INFO "apic_ipi: total cycles %ld, avg %ld\n", end - start, (end - start) / LOOP); 59 | 60 | numa[cpu_to_node(targetcpu)] = 1; 61 | } 
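/* added note: every requested NUMA node has now been benchmarked; put_cpu() below re-enables the preemption that get_cpu() disabled while the IPIs were being sent */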
62 | 63 | put_cpu(); 64 | } 65 | 66 | static int apic_ipi_init(void) 67 | { 68 | int nodes = num_online_nodes(); 69 | char name[KSYM_NAME_LEN]; 70 | int i; 71 | 72 | if (!options) { 73 | printk(KERN_INFO "apic_ipi: you should run insmod apic_ipi.ko options=XX, bit flags:\n"); 74 | for (i = 0; i < sizeof(ipi_funcs) / sizeof(ipi_funcs[0]); i++) { 75 | printk(KERN_INFO "apic_ipi: bit[%d] %s\n", i, ipi_funcs[i]); 76 | } 77 | 78 | return -1; 79 | } 80 | 81 | printk(KERN_INFO "apic_ipi: %d NUMA node(s)\n", nodes); 82 | printk(KERN_INFO "apic_ipi: apic [%s]\n", apic->name); 83 | 84 | if (sprint_symbol(name, (unsigned long)apic->send_IPI) > 0) { 85 | printk(KERN_INFO "apic_ipi: apic->send_IPI[%s]\n", name); 86 | } else { 87 | printk(KERN_INFO "apic_ipi: apic->send_IPI[%lx]\n", (unsigned long)apic->send_IPI); 88 | } 89 | 90 | if (sprint_symbol(name, (unsigned long)apic->send_IPI_mask) > 0) { 91 | printk(KERN_INFO "apic_ipi: apic->send_IPI_mask[%s]\n", name); 92 | } else { 93 | printk(KERN_INFO "apic_ipi: apic->send_IPI_mask[%lx]\n", (unsigned long)apic->send_IPI_mask); 94 | } 95 | 96 | for (i = 0; i < sizeof(ipi_funcs) / sizeof(ipi_funcs[0]); i++) { 97 | if (options & (1 << i)) { 98 | bench(ipi_funcs[i], nodes); 99 | } 100 | } 101 | 102 | return -1; 103 | } 104 | 105 | static void apic_ipi_exit(void) 106 | { 107 | /* it should never run */ 108 | printk(KERN_INFO "apic_ipi: %s\n", __func__); 109 | } 110 | 111 | module_init(apic_ipi_init); 112 | module_exit(apic_ipi_exit); 113 | MODULE_LICENSE("GPL"); 114 | MODULE_AUTHOR("zhenwei pi pizhewnei@bytedance.com"); 115 | -------------------------------------------------------------------------------- /microbenchmark/common/getns.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2019 zhenwei pi pizhenwei@bytedance.com. 3 | */ 4 | #ifndef __GETNS_H__ 5 | #define __GETNS_H__ 6 | 7 | static inline unsigned long getns(void) 8 | { 9 | return ktime_to_ns(ktime_get()); 10 | } 11 | 12 | #endif 13 | -------------------------------------------------------------------------------- /microbenchmark/common/rdtsc.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2019 zhenwei pi pizhenwei@bytedance.com. 
3 | */ 4 | #ifndef __RDTSC_H__ 5 | #define __RDTSC_H__ 6 | 7 | static inline unsigned long ins_rdtsc(void) 8 | { 9 | unsigned long low, high; 10 | 11 | asm volatile("rdtsc" : "=a" (low), "=d" (high) ); 12 | 13 | return ((low) | (high) << 32); 14 | } 15 | 16 | #endif 17 | -------------------------------------------------------------------------------- /microbenchmark/ipi-bench/Makefile: -------------------------------------------------------------------------------- 1 | obj-m := ipi_bench.o 2 | KERNELDIR := /lib/modules/$(shell uname -r)/build 3 | #KERNELDIR := /root/source/linux-image-bm/ 4 | PWD := $(shell pwd) 5 | 6 | all: 7 | make -C $(KERNELDIR) M=$(PWD) clean 8 | make -C $(KERNELDIR) M=$(PWD) modules 9 | 10 | clean: 11 | make -C $(KERNELDIR) M=$(PWD) clean 12 | -------------------------------------------------------------------------------- /microbenchmark/ipi-bench/README: -------------------------------------------------------------------------------- 1 | HOWTO 2 | ===== 3 | ~# make 4 | 5 | To get benchmark options 6 | ------------------------ 7 | ~# insmod ipi_bench.ko ; dmesg -c 8 | ipi_bench: you should run insmod ipi_bench.ko options=XX, bit flags: 9 | ipi_bench: bit[0] self-ipi: send ipi to self, you can specify CPU by param srccpu=XX(default current CPU). 10 | ipi_bench: bit[1] single-ipi: send ipi from srccpu=XX to dstcpu=YY(default different random XX and YY), wait=0/1 to specify wait or not. 11 | ipi_bench: bit[2] mesh-ipi: send single ipi from one CPU to another CPU for all the CPUs, use pairs=XX to set number of benchmark pairs(default num_cpus / 2). 12 | ipi_bench: bit[3] all-ipi: send ipi from srccpu=XX to all the CPUs, use lock=0/1 to specify spin lock option in callback function, wait=0/1 to specify wait or not. 13 | 14 | To run single-ipi from CPU3 to CPU8 15 | ----------------------------------- 16 | ~# insmod ipi_bench.ko options=2 srccpu=3 dstcpu=8 ; dmesg -c 17 | -------------------------------------------------------------------------------- /microbenchmark/ipi-bench/ipi_bench.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2019-2021 zhenwei pi pizhenwei@bytedance.com. 
3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "../common/rdtsc.h" 11 | #include "../common/getns.h" 12 | 13 | static inline long unsigned gettime(void) 14 | { 15 | return getns(); 16 | } 17 | 18 | static int loops = 1000000; 19 | module_param(loops, int, 0444); 20 | 21 | static int srccpu = -1; 22 | module_param(srccpu, int, 0444); 23 | 24 | static int dstcpu = -1; 25 | module_param(dstcpu, int, 0444); 26 | 27 | static int pairs = -1; 28 | module_param(pairs, int, 0444); 29 | 30 | static int acrossnuma = 1; 31 | module_param(acrossnuma, int, 0444); 32 | 33 | static int broadcasts = -1; 34 | module_param(broadcasts, int, 0444); 35 | 36 | static int __read_mostly wait = 1; 37 | module_param(wait, int, 0444); 38 | 39 | static int __read_mostly lock = 1; 40 | module_param(lock, int, 0444); 41 | 42 | static int options; 43 | module_param(options, int, 0444); 44 | 45 | static unsigned long timeoutms = 10000; 46 | 47 | static atomic64_t ready_to_run; 48 | static atomic64_t should_run; 49 | static atomic64_t complete_run; 50 | 51 | static DECLARE_WAIT_QUEUE_HEAD(wait_complete); 52 | 53 | #define SELF_IPI (1<<0) 54 | #define SINGLE_IPI (1<<1) 55 | #define MESH_IPI (1<<2) 56 | #define ALL_IPI (1<<3) 57 | 58 | static char *benchcases[] = { 59 | "self-ipi: send ipi to self, you can specify CPU by param srccpu=XX(default current CPU).", 60 | "single-ipi: send ipi from srccpu=XX to dstcpu=YY(default different random XX and YY), wait=0/1 to specify wait or not.", 61 | "mesh-ipi: send single ipi from one CPU to another CPU for all the CPUs, use pairs=XX to set number of benchmark pairs(default num_cpus / 2); use acrossnuma=0/1 to set IPI across NUMA(default = 1).", 62 | "all-ipi: send ipi from one CPU to all the CPUs, use lock=0/1 to specify spin lock option in callback function; wait=0/1 to specify wait or not; use broadcasts=XX to set workers(default 1).", 63 | }; 64 | 65 | struct bench_args { 66 | int src; /* src CPU */ 67 | int dst; /* dst CPU */ 68 | atomic64_t forked; /* forked timestamp */ 69 | atomic64_t start; /* start to run timestamp */ 70 | atomic64_t finish; /* finish bench timestamp */ 71 | atomic64_t ipitime; /* ipi time */ 72 | #ifdef CONFIG_SCHED_INFO 73 | atomic64_t run_delay; /* sched run delay */ 74 | #endif 75 | char name[64]; 76 | }; 77 | 78 | static inline unsigned long __random(void) 79 | { 80 | return get_random_long(); 81 | } 82 | 83 | static void ipi_bench_gettime(void *info) 84 | { 85 | unsigned long starttime = atomic64_read(info); 86 | unsigned long now = gettime(); 87 | unsigned long diff = 0; 88 | 89 | if(now > starttime) 90 | diff = now - starttime; 91 | 92 | atomic64_set(info, diff); 93 | } 94 | 95 | static int ipi_bench_one(struct bench_args *ba) 96 | { 97 | unsigned long ipitime = 0, now; 98 | int loop, ret, dst = ba->dst; 99 | 100 | for (loop = loops; loop > 0; loop--) { 101 | atomic64_set((atomic64_t *)&now, gettime()); 102 | ret = smp_call_function_single(dst, ipi_bench_gettime, &now, wait); 103 | if (ret < 0) 104 | return ret; 105 | 106 | if (wait) 107 | ipitime += atomic64_read((const atomic64_t *)&now); 108 | } 109 | 110 | atomic64_set(&ba->ipitime, ipitime); 111 | 112 | return 0; 113 | } 114 | 115 | static void ipi_bench_record_run_delay(struct bench_args *ba) 116 | { 117 | #ifdef CONFIG_SCHED_INFO 118 | atomic64_set(&ba->run_delay, current->sched_info.run_delay); 119 | #endif 120 | } 121 | 122 | static int ipi_bench_single_task(void *data) 123 | { 124 | struct bench_args *ba = (struct bench_args*)data; 125 | 
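/* added note: 'forked' records when this kthread actually starts running; the ready_to_run/should_run counters below act as a simple spin barrier so all worker threads start the measured loop together */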
126 | atomic64_set(&ba->forked, gettime()); 127 | 128 | /* let all threads run at the same time. to avoid wakeup delay */ 129 | atomic64_add(1, (atomic64_t *)&ready_to_run); 130 | while (atomic64_read(&ready_to_run) < atomic64_read(&should_run)); 131 | if (atomic64_read(&ready_to_run) != atomic64_read(&should_run)) { 132 | printk(KERN_INFO "ipi_bench: BUG, exit benchmark\n"); 133 | return -1; 134 | } 135 | 136 | atomic64_set(&ba->start, gettime()); 137 | ipi_bench_one(ba); 138 | atomic64_set(&ba->finish, gettime()); 139 | ipi_bench_record_run_delay(ba); 140 | atomic64_add(1, (atomic64_t *)&complete_run); 141 | 142 | wake_up_interruptible(&wait_complete); 143 | 144 | return 0; 145 | } 146 | 147 | static int ipi_bench_one_task(struct bench_args *ba) 148 | { 149 | struct task_struct *tsk; 150 | int src = ba->src; 151 | int dst = ba->dst; 152 | 153 | printk(KERN_INFO "ipi_bench: prepare single IPI from CPU[%3d] to CPU[%3d]\n", src, dst); 154 | snprintf(ba->name, sizeof(ba->name), "ipi_bench_%d_%d", src, dst); 155 | 156 | tsk = kthread_create_on_node(ipi_bench_single_task, ba, cpu_to_node(src), ba->name); 157 | if (IS_ERR(tsk)) { 158 | printk(KERN_INFO "ipi_bench: create kthread failed\n"); 159 | return -1; 160 | } 161 | 162 | kthread_bind(tsk, src); 163 | wake_up_process(tsk); 164 | 165 | return 0; 166 | } 167 | 168 | static void ipi_bench_empty(void *info) 169 | { 170 | } 171 | 172 | static void ipi_bench_spinlock(void *info) 173 | { 174 | spinlock_t *lock = (spinlock_t *)info; 175 | 176 | spin_lock(lock); 177 | spin_unlock(lock); 178 | } 179 | 180 | static int ipi_bench_many(void) 181 | { 182 | int loop; 183 | DEFINE_SPINLOCK(spinlock); 184 | 185 | for (loop = loops; loop > 0; loop--) { 186 | if (lock) { 187 | smp_call_function_many(cpu_online_mask, ipi_bench_spinlock, &spinlock, wait); 188 | } else { 189 | smp_call_function_many(cpu_online_mask, ipi_bench_empty, NULL, wait); 190 | } 191 | } 192 | 193 | return 0; 194 | } 195 | 196 | static int ipi_bench_many_task(void *data) 197 | { 198 | struct bench_args *ba = (struct bench_args*)data; 199 | 200 | atomic64_set(&ba->forked, gettime()); 201 | 202 | /* let all threads run at the same time. 
to avoid wakeup delay */ 203 | atomic64_add(1, (atomic64_t *)&ready_to_run); 204 | while (atomic64_read(&ready_to_run) < atomic64_read(&should_run)); 205 | if (atomic64_read(&ready_to_run) != atomic64_read(&should_run)) { 206 | printk(KERN_INFO "ipi_bench: BUG, exit benchmark\n"); 207 | return -1; 208 | } 209 | 210 | atomic64_set(&ba->start, gettime()); 211 | ipi_bench_many(); 212 | atomic64_set(&ba->finish, gettime()); 213 | ipi_bench_record_run_delay(ba); 214 | atomic64_add(1, (atomic64_t *)&complete_run); 215 | 216 | wake_up_interruptible(&wait_complete); 217 | 218 | return 0; 219 | } 220 | 221 | static int ipi_bench_all_task(struct bench_args *ba) 222 | { 223 | struct task_struct *tsk; 224 | int src = ba->src; 225 | 226 | printk(KERN_INFO "ipi_bench: prepare broadcast IPI from CPU[%3d] to all CPUs\n", src); 227 | snprintf(ba->name, sizeof(ba->name), "ipi_bench_all_%d", src); 228 | 229 | tsk = kthread_create_on_node(ipi_bench_many_task, ba, cpu_to_node(src), ba->name); 230 | if (IS_ERR(tsk)) { 231 | printk(KERN_INFO "ipi_bench: create kthread failed\n"); 232 | return -1; 233 | } 234 | 235 | kthread_bind(tsk, src); 236 | wake_up_process(tsk); 237 | 238 | return 0; 239 | } 240 | 241 | static inline void ipi_bench_wait_all(void) 242 | { 243 | wait_event_interruptible_timeout(wait_complete, 244 | atomic64_read(&complete_run) == atomic64_read(&should_run), 245 | msecs_to_jiffies(timeoutms)); 246 | } 247 | 248 | static inline void ipi_bench_report_single(struct bench_args *ba) 249 | { 250 | int src = ba->src; 251 | int dst = ba->dst; 252 | unsigned long forked = atomic64_read(&ba->forked); 253 | unsigned long start = atomic64_read(&ba->start); 254 | unsigned long finish = atomic64_read(&ba->finish); 255 | unsigned long ipitime = atomic64_read(&ba->ipitime); 256 | unsigned long run_delay = atomic64_read(&ba->run_delay); 257 | unsigned long elapsed = finish - start; 258 | 259 | if (!finish) { 260 | printk(KERN_INFO "ipi_bench: too many loops\n"); 261 | return; 262 | } 263 | 264 | if (elapsed > run_delay) { 265 | elapsed -= run_delay; 266 | } 267 | 268 | printk(KERN_INFO "ipi_bench: CPU [%3d] [NODE%d] -> CPU [%3d] [NODE%d], wait [%d], loops [%d] " 269 | "forked [%ld], start [%ld], finish [%ld], elapsed [%ld], ipitime [%ld], run delay [%ld] in ms, " 270 | "AVG call [%ld], ipi [%ld] in ns\n", 271 | src, cpu_to_node(src), dst, cpu_to_node(dst), wait, loops, 272 | forked / 1000, start / 1000, finish / 1000, elapsed / 1000, ipitime / 1000, run_delay / 1000, 273 | elapsed / loops, ipitime / loops); 274 | } 275 | 276 | static inline void ipi_bench_report_all(struct bench_args *ba) 277 | { 278 | int src = ba->src; 279 | unsigned long forked = atomic64_read(&ba->forked); 280 | unsigned long start = atomic64_read(&ba->start); 281 | unsigned long finish = atomic64_read(&ba->finish); 282 | unsigned long run_delay = atomic64_read(&ba->run_delay); 283 | unsigned long elapsed = finish - start; 284 | 285 | if (!finish) { 286 | printk(KERN_INFO "ipi_bench: too many loops\n"); 287 | return; 288 | } 289 | 290 | if (elapsed > run_delay) { 291 | elapsed -= run_delay; 292 | } 293 | 294 | printk(KERN_INFO "ipi_bench: CPU [%3d] [NODE%d] -> all CPUs, wait [%d], loops [%d] " 295 | "forked [%ld], start [%ld], finish [%ld], elapsed [%ld], run delay [%ld] in ms " 296 | "AVG call [%ld] in ns\n", 297 | src, cpu_to_node(src), wait, loops, 298 | forked / 1000, start / 1000, finish / 1000, elapsed / 1000, run_delay / 1000, 299 | elapsed / loops); 300 | } 301 | 302 | static int ipi_bench_self(int src) 303 | { 304 | struct 
bench_args *ba; 305 | 306 | ba = kzalloc(sizeof(*ba), GFP_KERNEL); 307 | if (!ba) { 308 | printk(KERN_INFO "ipi_bench: no enough memory\n"); 309 | return -1; 310 | } 311 | 312 | ba->src = src; 313 | ba->dst = src; 314 | 315 | atomic64_set(&should_run, 1); 316 | ipi_bench_one_task(ba); 317 | ipi_bench_wait_all(); 318 | ipi_bench_report_single(ba); 319 | 320 | kfree(ba); 321 | 322 | return 0; 323 | } 324 | 325 | static int ipi_bench_single(int src, int dst) 326 | { 327 | struct bench_args *ba; 328 | 329 | ba = kzalloc(sizeof(*ba), GFP_KERNEL); 330 | if (!ba) { 331 | printk(KERN_INFO "ipi_bench: no enough memory\n"); 332 | return -1; 333 | } 334 | 335 | ba->src = src; 336 | ba->dst = dst; 337 | 338 | atomic64_set(&should_run, 1); 339 | ipi_bench_one_task(ba); 340 | ipi_bench_wait_all(); 341 | ipi_bench_report_single(ba); 342 | 343 | kfree(ba); 344 | 345 | return 0; 346 | } 347 | 348 | /* node: select an valid CPU which belongs to. ignore -1 */ 349 | static int __random_unused_cpu_in_cpumask(cpumask_var_t cpumask, int node) 350 | { 351 | int num_cpus = num_online_cpus(); 352 | int retry = 100000; 353 | int cpu = -1; 354 | 355 | while (retry--) { 356 | cpu = __random() % num_cpus; 357 | if (cpumask_test_cpu(cpu, cpumask)) { 358 | continue; 359 | } 360 | 361 | if ((node >= 0) && (cpu_to_node(cpu) != node)) { 362 | continue; 363 | } 364 | 365 | cpumask_set_cpu(cpu, cpumask); 366 | return cpu; 367 | }; 368 | 369 | return -1; 370 | } 371 | 372 | static int ipi_bench_mesh(int pairs, int acrossnuma) 373 | { 374 | cpumask_var_t cpumask; 375 | struct bench_args *bas = NULL; 376 | struct bench_args *ba; 377 | unsigned long startns, elapsed; 378 | int ret = -1, i, node = -1; 379 | 380 | zalloc_cpumask_var(&cpumask, GFP_KERNEL); 381 | bas = kzalloc(sizeof(*ba) * pairs, GFP_KERNEL); 382 | if (!bas) { 383 | printk(KERN_INFO "ipi_bench: no enough memory\n"); 384 | goto out; 385 | } 386 | 387 | atomic64_set(&should_run, pairs); 388 | 389 | /* build pairs one by one */ 390 | for (i = 0; i < pairs; i++) { 391 | ba = bas + i; 392 | ba->src = __random_unused_cpu_in_cpumask(cpumask, -1); 393 | if (!acrossnuma) { 394 | node = cpu_to_node(ba->src); 395 | } 396 | ba->dst = __random_unused_cpu_in_cpumask(cpumask, node); 397 | 398 | if ((ba->src < 0) || (ba->dst < 0)) { 399 | printk(KERN_INFO "ipi_bench: init mesh failed\n"); 400 | goto out; 401 | } 402 | } 403 | 404 | for (i = 0; i < pairs; i++) { 405 | ba = bas + i; 406 | ipi_bench_one_task(ba); 407 | } 408 | 409 | startns = getns(); 410 | ipi_bench_wait_all(); 411 | elapsed = getns() - startns; 412 | 413 | for (i = 0; i < pairs; i++) { 414 | ba = bas + i; 415 | ipi_bench_report_single(ba); 416 | } 417 | 418 | printk(KERN_INFO "ipi_bench: throughput %ld ipi/s\n", pairs * loops * 1000000000UL / elapsed); 419 | 420 | ret = 0; 421 | 422 | out: 423 | kfree(bas); 424 | free_cpumask_var(cpumask); 425 | 426 | return ret; 427 | } 428 | 429 | static int ipi_bench_all(int broadcasts) 430 | { 431 | cpumask_var_t cpumask; 432 | struct bench_args *bas = NULL; 433 | struct bench_args *ba; 434 | unsigned long startns, elapsed; 435 | int ret = -1, i; 436 | 437 | zalloc_cpumask_var(&cpumask, GFP_KERNEL); 438 | bas = kzalloc(sizeof(*ba) * broadcasts, GFP_KERNEL); 439 | if (!bas) { 440 | printk(KERN_INFO "ipi_bench: no enough memory\n"); 441 | goto out; 442 | } 443 | 444 | atomic64_set(&should_run, broadcasts); 445 | 446 | /* build broadcasts one by one */ 447 | for (i = 0; i < broadcasts; i++) { 448 | ba = bas + i; 449 | ba->src = __random_unused_cpu_in_cpumask(cpumask, -1); 450 | if 
(ba->src < 0) { 451 | printk(KERN_INFO "ipi_bench: init broadcast workers failed\n"); 452 | goto out; 453 | } 454 | } 455 | 456 | for (i = 0; i < broadcasts; i++) { 457 | ba = bas + i; 458 | ipi_bench_all_task(ba); 459 | } 460 | 461 | startns = getns(); 462 | ipi_bench_wait_all(); 463 | elapsed = getns() - startns; 464 | 465 | for (i = 0; i < broadcasts; i++) { 466 | ba = bas + i; 467 | ipi_bench_report_all(ba); 468 | } 469 | 470 | 471 | printk(KERN_INFO "ipi_bench: throughput %ld ipi/s\n", broadcasts * (num_online_cpus() - 1) * loops * 1000000000UL / elapsed); 472 | 473 | ret = 0; 474 | 475 | out: 476 | kfree(bas); 477 | free_cpumask_var(cpumask); 478 | 479 | return ret; 480 | } 481 | 482 | static void ipi_bench_options(void) 483 | { 484 | int i; 485 | 486 | printk(KERN_INFO "ipi_bench: you should run insmod ipi_bench.ko options=XX, bit flags:\n"); 487 | for (i = 0; i < sizeof(benchcases) / sizeof(benchcases[0]); i++) { 488 | printk(KERN_INFO "ipi_bench:\tbit[%d] %s\n", i, benchcases[i]); 489 | } 490 | } 491 | 492 | /* assign different src & dst cpu if unspecified */ 493 | static int ipi_bench_init_params(void) 494 | { 495 | int num_cpus = num_online_cpus(); 496 | 497 | if (num_cpus < 2) { 498 | printk(KERN_INFO "ipi_bench: total cpu num %d, no need to test\n", num_cpus); 499 | return -1; 500 | } 501 | 502 | if ((srccpu >= num_cpus) || (dstcpu >= num_cpus)) { 503 | printk(KERN_INFO "ipi_bench: cpu out of range, total cpu num %d\n", num_cpus); 504 | return -1; 505 | } 506 | 507 | if (pairs > (num_cpus / 2)) { 508 | printk(KERN_INFO "ipi_bench: pairs out of range, total cpu num %d\n", num_cpus); 509 | return -1; 510 | } 511 | 512 | while ((srccpu == -1) || (srccpu == dstcpu)) { 513 | srccpu = __random() % num_cpus; 514 | } 515 | 516 | while ((dstcpu == -1) || (srccpu == dstcpu)) { 517 | dstcpu = __random() % num_cpus; 518 | } 519 | 520 | if (pairs == -1) { 521 | pairs = num_cpus / 2; 522 | } 523 | 524 | if (broadcasts == -1) { 525 | broadcasts = 1; 526 | } 527 | 528 | return 0; 529 | } 530 | 531 | static int ipi_bench_init(void) 532 | { 533 | if (!options) { 534 | ipi_bench_options(); 535 | return -1; 536 | } 537 | 538 | if (ipi_bench_init_params() < 0) { 539 | return -1; 540 | } 541 | 542 | if (options & SELF_IPI) { 543 | ipi_bench_self(srccpu); 544 | } 545 | 546 | if (options & SINGLE_IPI) { 547 | ipi_bench_single(srccpu, dstcpu); 548 | } 549 | 550 | if (options & MESH_IPI) { 551 | ipi_bench_mesh(pairs, acrossnuma); 552 | } 553 | 554 | if (options & ALL_IPI) { 555 | ipi_bench_all(broadcasts); 556 | } 557 | 558 | return -1; 559 | } 560 | 561 | static void ipi_bench_exit(void) 562 | { 563 | /* should never run */ 564 | printk(KERN_INFO "ipi_bench: %s\n", __func__); 565 | } 566 | 567 | module_init(ipi_bench_init); 568 | module_exit(ipi_bench_exit); 569 | MODULE_LICENSE("GPL"); 570 | MODULE_AUTHOR("zhenwei pi pizhewnei@bytedance.com"); 571 | -------------------------------------------------------------------------------- /microbenchmark/msr-bench/Makefile: -------------------------------------------------------------------------------- 1 | obj-m := msr_bench.o 2 | KERNELDIR := /lib/modules/$(shell uname -r)/build 3 | #KERNELDIR := /root/source/linux-image-bm/ 4 | PWD := $(shell pwd) 5 | 6 | all: 7 | make -C $(KERNELDIR) M=$(PWD) clean 8 | make -C $(KERNELDIR) M=$(PWD) modules 9 | 10 | clean: 11 | make -C $(KERNELDIR) M=$(PWD) clean 12 | -------------------------------------------------------------------------------- /microbenchmark/msr-bench/msr.h: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2018 zhenwei pi pizhenwei@bytedance.com. 3 | */ 4 | #ifndef __MSR_H__ 5 | #define __MSR_H__ 6 | 7 | static inline void ins_wrmsr(unsigned int msr, unsigned int low, unsigned int high) 8 | { 9 | asm volatile("wrmsr\n" 10 | : : "c" (msr), "a"(low), "d" (high) : "memory"); 11 | } 12 | 13 | static inline void ins_wrmsrl(unsigned int msr, unsigned long val) 14 | { 15 | ins_wrmsr(msr, (unsigned int)(val & 0xffffffffULL), (unsigned int)(val >> 32)); 16 | } 17 | 18 | #endif 19 | -------------------------------------------------------------------------------- /microbenchmark/msr-bench/msr_bench.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2018 zhenwei pi pizhenwei@bytedance.com. 3 | */ 4 | #include 5 | #include 6 | #include "msr.h" 7 | #include "../common/rdtsc.h" 8 | 9 | /* see intel SDM, or linux source code */ 10 | #define __MSR_IA32_TSCDEADLINE 0x000006e0 11 | #define __MSR_IA32_POWER_CTL 0x000001fc 12 | 13 | #define LOOP 100000 14 | 15 | static inline void msr_bench_report(char *tag, unsigned long elapsed) 16 | { 17 | printk(KERN_INFO "msr_bench: %s loop = %d, elapsed = %ld cycles, " 18 | "average = %ld cycles\n", tag, LOOP, elapsed, elapsed / LOOP); 19 | } 20 | 21 | static inline int wrmsr_bench(char *tag, unsigned int msr, unsigned long val) 22 | { 23 | int loop; 24 | u32 low, high; 25 | 26 | low = val & 0xffffffffULL; 27 | high = val >> 32; 28 | 29 | /* check operation firstly, prefer to use asm instruction */ 30 | if (native_write_msr_safe(msr, low, high) == 0) { 31 | printk(KERN_INFO "msr_bench: wrmsr %s OK, " 32 | "benchmark asm instrucion\n", tag); 33 | 34 | /* bench loop */ 35 | for (loop = LOOP; loop > 0; loop--) 36 | ins_wrmsrl(__MSR_IA32_TSCDEADLINE, val); 37 | } else { 38 | printk(KERN_INFO "msr_bench: wrmsr %s fail, " 39 | "benchmark native safe API\n", tag); 40 | 41 | /* bench loop */ 42 | for (loop = LOOP; loop > 0; loop--) 43 | native_write_msr_safe(msr, low, high); 44 | } 45 | 46 | return 0; 47 | } 48 | 49 | static int wrmsr_bench_tscdeadline(void) 50 | { 51 | /* a little long time, make sure no timer irq within test case 52 | * but val should less than 22s to avoid soft watchdog schedule. 53 | */ 54 | unsigned long val = ins_rdtsc() + 10*1000*1000*1000UL; 55 | 56 | return wrmsr_bench("MSR_IA32_TSCDEADLINE", __MSR_IA32_TSCDEADLINE, val); 57 | } 58 | 59 | static int wrmsr_bench_power(void) 60 | { 61 | /* kvm does not support this msr, just test vm-exit. 
*/ 62 | unsigned long val = 0; 63 | 64 | return wrmsr_bench("MSR_IA32_POWER_CTL", __MSR_IA32_POWER_CTL, val); 65 | } 66 | 67 | 68 | static int msr_bench_init(void) 69 | { 70 | unsigned long starttime, elapsed; 71 | printk(KERN_INFO "msr_bench: %s start\n", __func__); 72 | 73 | /* case MSR_IA32_POWER_CTL */ 74 | starttime = ins_rdtsc(); 75 | if (wrmsr_bench_power() == 0) { 76 | 77 | elapsed = ins_rdtsc() - starttime; 78 | msr_bench_report("MSR_IA32_POWER_CTL", elapsed); 79 | } 80 | 81 | /* case MSR_IA32_TSCDEADLINE */ 82 | starttime = ins_rdtsc(); 83 | if (wrmsr_bench_tscdeadline() == 0) { 84 | 85 | elapsed = ins_rdtsc() - starttime; 86 | msr_bench_report("MSR_IA32_TSCDEADLINE", elapsed); 87 | } 88 | 89 | printk(KERN_INFO "msr_bench: %s finish\n", __func__); 90 | 91 | return -1; 92 | } 93 | 94 | static void msr_bench_exit(void) 95 | { 96 | /* should never run */ 97 | printk(KERN_INFO "msr_bench: %s\n", __func__); 98 | } 99 | 100 | module_init(msr_bench_init); 101 | module_exit(msr_bench_exit); 102 | MODULE_LICENSE("GPL"); 103 | MODULE_AUTHOR("zhenwei pi pizhewnei@bytedance.com"); 104 | -------------------------------------------------------------------------------- /microbenchmark/pio-mmio-bench/Makefile: -------------------------------------------------------------------------------- 1 | obj-m := pio_mmio_bench.o 2 | KERNELDIR := /lib/modules/$(shell uname -r)/build 3 | #KERNELDIR := /root/source/linux-image-bm/ 4 | PWD := $(shell pwd) 5 | 6 | all: 7 | make -C $(KERNELDIR) M=$(PWD) clean 8 | make -C $(KERNELDIR) M=$(PWD) modules 9 | 10 | clean: 11 | make -C $(KERNELDIR) M=$(PWD) clean 12 | -------------------------------------------------------------------------------- /microbenchmark/pio-mmio-bench/pio_mmio_bench.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2019 zhenwei pi pizhenwei@bytedance.com. 3 | */ 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "../common/rdtsc.h" 9 | 10 | #define LOOP 10000 11 | #define DEBUG(...) 12 | //#define DEBUG(fmt, ...) printk(fmt, ##__VA_ARGS__) 13 | 14 | static inline void pio_mmio_bench_report(char *tag, const char *name, unsigned long addr, unsigned long elapsed) 15 | { 16 | printk(KERN_INFO "pio_mmio_bench: %4s %18s, address = 0x%lx, elapsed = %ld cycles, average = %ld cycles\n", tag, name, addr, elapsed, elapsed / LOOP); 17 | } 18 | 19 | int pio_bench(char *tag, const char *name, unsigned long addr) 20 | { 21 | int loop; 22 | unsigned long starttime, elapsed; 23 | 24 | starttime = ins_rdtsc(); 25 | for (loop = LOOP; loop > 0; loop--) { 26 | inb(addr); 27 | } 28 | 29 | elapsed = ins_rdtsc() - starttime; 30 | 31 | pio_mmio_bench_report(tag, name, addr, elapsed); 32 | 33 | return 0; 34 | } 35 | 36 | int mmio_bench(char *tag, const char *name, unsigned long addr) 37 | { 38 | int loop; 39 | unsigned long starttime, elapsed; 40 | void *va; 41 | 42 | va = ioremap(addr, 0x1000); 43 | 44 | starttime = ins_rdtsc(); 45 | for (loop = LOOP; loop > 0; loop--) { 46 | readb((void*)va); 47 | } 48 | 49 | elapsed = ins_rdtsc() - starttime; 50 | 51 | iounmap(va); 52 | pio_mmio_bench_report(tag, name, addr, elapsed); 53 | 54 | return 0; 55 | } 56 | 57 | /* 58 | * in fact, we need hold the resource lock, but it's decleared a static 59 | * variable. we should NOT attach/detach any device during test. 
60 | */ 61 | void pio_walk_resource(void) 62 | { 63 | struct resource *res, *tmp; 64 | 65 | for (res = ioport_resource.child; res; res = res->sibling) { 66 | DEBUG("pio_walk_resource name=%s, start=%llx, end=%llx\n", res->name, res->start, res->end); 67 | /* walk PCI Bus 0000:00 */ 68 | if (!strcmp(res->name, "PCI Bus 0000:00")) { 69 | for (tmp = res->child; tmp; tmp = tmp->sibling) { 70 | DEBUG("pio_walk_resource name=%s, start=%llx, end=%llx\n", tmp->name, tmp->start, tmp->end); 71 | if (!strcmp(tmp->name, "pic1") 72 | || !(strcmp(tmp->name, "timer0"))) { 73 | pio_bench("PIO", tmp->name, tmp->start); 74 | } else if (!strcmp(tmp->name, "keyboard")) { 75 | pio_bench("PIO", "empty", tmp->start - 1); 76 | pio_bench("PIO", tmp->name, tmp->start); 77 | break; 78 | } 79 | } 80 | } 81 | } 82 | } 83 | 84 | void mmio_walk_resource(void) 85 | { 86 | struct resource *res, *tmp; 87 | 88 | for (res = iomem_resource.child; res; res = res->sibling) { 89 | DEBUG("mmio_walk_resource name=%s, start=%llx, end=%llx\n", res->name, res->start, res->end); 90 | if (!strcmp(res->name, "IOAPIC 0") 91 | || !strcmp(res->name, "Local APIC")) { 92 | mmio_bench("MMIO", res->name, res->start); 93 | } 94 | if (!strcmp(res->name, "PCI Bus 0000:00")) { 95 | mmio_bench("MMIO", res->name, res->start); 96 | for (tmp = res->child; tmp; tmp = tmp->sibling) { 97 | DEBUG("mmio_walk_resource PCI Bus name=%s, start=%llx, end=%llx\n", tmp->name, tmp->start, tmp->end); 98 | if (tmp->child) { 99 | DEBUG("mmio_walk_resource PCI Device name=%s, start=%llx, end=%llx\n", tmp->child->name, tmp->child->start, tmp->child->end); 100 | if (strstr(tmp->child->name, "vram")) { 101 | mmio_bench("MMIO", tmp->child->name, tmp->child->start); 102 | } else if (strstr(tmp->child->name, "virtio-pci-modern")) { 103 | /* only bench the first virtio device */ 104 | mmio_bench("MMIO", tmp->child->name, tmp->child->start); 105 | } 106 | } 107 | } 108 | } 109 | } 110 | } 111 | 112 | static int pio_mmio_bench_init(void) 113 | { 114 | printk(KERN_INFO "pio_mmio_bench: %s start\n", __func__); 115 | 116 | pio_walk_resource(); 117 | mmio_walk_resource(); 118 | 119 | printk(KERN_INFO "pio_mmio_bench: %s finish\n", __func__); 120 | 121 | return -1; 122 | } 123 | 124 | static void pio_mmio_bench_exit(void) 125 | { 126 | /* should never run */ 127 | printk(KERN_INFO "pio_mmio_bench: %s\n", __func__); 128 | } 129 | 130 | module_init(pio_mmio_bench_init); 131 | module_exit(pio_mmio_bench_exit); 132 | MODULE_LICENSE("GPL"); 133 | MODULE_AUTHOR("zhenwei pi pizhewnei@bytedance.com"); 134 | -------------------------------------------------------------------------------- /microbenchmark/tlb-shootdown-bench/Makefile: -------------------------------------------------------------------------------- 1 | all : 2 | gcc tlb-shootdown-bench.c -lpthread -g -o tlb-shootdown-bench 3 | 4 | clean : 5 | @rm -rf tlb-shootdown-bench 6 | -------------------------------------------------------------------------------- /microbenchmark/tlb-shootdown-bench/tlb-shootdown-bench.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #define __USE_GNU 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | pid_t __gettid(); 13 | suseconds_t __time_diff(); 14 | 15 | #define print_err_and_exit(en, msg) \ 16 | do { errno = en; perror(msg); exit(EXIT_FAILURE); } while (0) 17 | 18 | #define TRACE() \ 19 | do { printf("[TID %d]TRACE : %s\n", __gettid(), __func__); } while(0); 20 | 21 | 22 | unsigned 
long buf_size = 256*1024*1024; //64M 23 | int bench_loops = 1; 24 | int use_unmap = 0; 25 | int use_interleave = 0; 26 | 27 | 28 | struct thread_info { 29 | pthread_t thread_id; 30 | int core_id; 31 | pid_t tid; 32 | suseconds_t usec_fill; 33 | suseconds_t usec_leak; 34 | int ret; 35 | }; 36 | 37 | 38 | inline pid_t __gettid() 39 | { 40 | return syscall(__NR_gettid); 41 | } 42 | 43 | inline suseconds_t __time_diff(struct timeval *start, struct timeval *end) 44 | { 45 | return (end->tv_sec - start->tv_sec) * 1000 * 1000 + end->tv_usec - start->tv_usec; 46 | } 47 | 48 | void *__routine(void *arg) 49 | { 50 | struct thread_info *thread_info = arg; 51 | const pthread_t pid = pthread_self(); 52 | const int core_id = thread_info->core_id; 53 | struct timeval start, end; 54 | int ret; 55 | 56 | TRACE(); 57 | thread_info->tid = __gettid(); 58 | 59 | cpu_set_t cpuset; 60 | CPU_ZERO(&cpuset); 61 | CPU_SET(core_id, &cpuset); 62 | 63 | ret = pthread_setaffinity_np(pid, sizeof(cpu_set_t), &cpuset); 64 | if (ret != 0) { 65 | print_err_and_exit(ret, "pthread_setaffinity_np"); 66 | } 67 | 68 | ret = pthread_getaffinity_np(pid, sizeof(cpu_set_t), &cpuset); 69 | if (ret != 0) { 70 | print_err_and_exit(ret, "pthread_getaffinity_np"); 71 | } 72 | 73 | int index; 74 | char *p = (char*)mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 75 | 76 | int loop; 77 | for (loop = 0; loop < bench_loops; loop++) { 78 | gettimeofday(&start, NULL); 79 | for (index = 0; index < buf_size; index += 4096) 80 | *(p+index) = 0; 81 | 82 | gettimeofday(&end, NULL); 83 | thread_info->usec_fill += __time_diff(&start, &end); 84 | 85 | gettimeofday(&start, NULL); 86 | if (use_unmap) { 87 | ret = munmap(p, buf_size); 88 | if (ret != 0) { 89 | print_err_and_exit(ret, "munmap"); 90 | } 91 | //printf("munmap ret = %d\n", ret); 92 | char *p = (char*)mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 93 | } else { 94 | for (index = 0; index < buf_size; index += 4096) { 95 | ret = madvise(p+index, 4096, MADV_DONTNEED); 96 | if (ret) { 97 | perror("madvise"); 98 | thread_info->ret = ret; 99 | break; 100 | } 101 | } 102 | } 103 | gettimeofday(&end, NULL); 104 | thread_info->usec_leak += __time_diff(&start, &end); 105 | } 106 | 107 | 108 | return NULL; 109 | } 110 | 111 | void show_help() 112 | { 113 | printf("Usage :\n"); 114 | printf("\t-l NUM : bench loops\n"); 115 | printf("\t-n CPUs : bench cpus\n"); 116 | printf("\t-u : use munmap instead of madvise\n"); 117 | printf("\t-i : use interleave cpu sequence\n"); 118 | } 119 | 120 | int main(int argc, char *argv[]) 121 | { 122 | int opt; 123 | int num_threads = 8; 124 | suseconds_t fill_elapled = 0, leak_elapled; 125 | 126 | while ((opt = getopt(argc, argv, "il:n:u")) != -1) { 127 | switch (opt) { 128 | case 'i': 129 | use_interleave = 1; 130 | break; 131 | 132 | case 'l': 133 | bench_loops = atoi(optarg); 134 | break; 135 | 136 | case 'n': 137 | num_threads = atoi(optarg); 138 | break; 139 | 140 | case 'u': 141 | use_unmap = 1; 142 | break; 143 | } 144 | } 145 | 146 | printf("Test %d threads\n", num_threads); 147 | if (use_unmap) 148 | printf("\tuse_unamp flag = %d\n", use_unmap); 149 | if (use_interleave) 150 | printf("\tuse_interleave flag = %d\n", use_interleave); 151 | 152 | struct thread_info *thread_info = calloc(num_threads, sizeof(struct thread_info)); 153 | if (thread_info == NULL) { 154 | perror("calloc"); 155 | return 0; 156 | } 157 | 158 | int tnum; 159 | for (tnum = 0; tnum < num_threads; tnum++) { 160 | if 
(use_interleave) 161 | thread_info[tnum].core_id = tnum * 2; 162 | else 163 | thread_info[tnum].core_id = tnum; 164 | const int create_result = pthread_create(&thread_info[tnum].thread_id, NULL, __routine, &thread_info[tnum]); 165 | if (create_result != 0) { 166 | print_err_and_exit(create_result, "pthread_create"); 167 | } 168 | } 169 | 170 | fill_elapled = 0; 171 | leak_elapled = 0; 172 | for (tnum = 0; tnum < num_threads; tnum++) { 173 | void *res; 174 | const int join_result = pthread_join(thread_info[tnum].thread_id, &res); 175 | if (join_result != 0) { 176 | print_err_and_exit(join_result, "pthread_join"); 177 | } 178 | 179 | printf("Joined with thread %d; tid %d, ret = %d\n", thread_info[tnum].core_id, thread_info[tnum].tid, thread_info[tnum].ret); 180 | fill_elapled += thread_info[tnum].usec_fill; 181 | leak_elapled += thread_info[tnum].usec_leak; 182 | free(res); 183 | } 184 | 185 | printf("fill_elapled %ld usec, leak_elapled %ld usec\n", fill_elapled, leak_elapled); 186 | 187 | free(thread_info); 188 | return 0; 189 | } 190 | 191 | --------------------------------------------------------------------------------
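Note: tlb-shootdown-bench ships without a README. Based on its Makefile and the show_help() output in the source above, a typical run looks like the sketch below; the thread and loop counts are only illustrative, not recommended values.

~# make
~# ./tlb-shootdown-bench -n 8 -l 10        # 8 threads, 10 loops each, release pages with madvise(MADV_DONTNEED)
~# ./tlb-shootdown-bench -n 8 -l 10 -u     # same, but unmap and re-map the buffer with munmap()/mmap()
~# ./tlb-shootdown-bench -n 8 -l 10 -i     # pin threads to every other core (0, 2, 4, ...)

Each thread reports its fill time (touching every page) and its release time (madvise or munmap, which is where TLB shootdown IPIs are triggered), and the main thread prints the summed per-phase times in microseconds.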