├── .circleci ├── README.md └── config.yml ├── COPYING ├── README ├── TODO ├── USAGE-INSTALLATION ├── bandwidth-latency └── bandwidth-latency.sh ├── comm_startup_lat ├── comm_startup_lat.sh ├── gnometerm.trace ├── lowriter.trace ├── replay-startup-io.cc └── xterm.trace ├── config_params.sh ├── create_config.sh ├── def_config.sh ├── fairness └── fairness.sh ├── file-copy └── file-copy.sh ├── interleaved_io └── interleaved_io.sh ├── kern_dev_tasks-vs-rw └── kern_dev_tasks_vs_rw.sh ├── process_config.sh ├── run_multiple_benchmarks ├── run_main_benchmarks.sh └── test_responsiveness.sh ├── run_unit_tests.sh ├── throughput-sync └── throughput-sync.sh ├── unit_tests ├── get_bats.sh ├── prev_impl.bash └── test_config_scripts.bats ├── utilities ├── calc_avg_and_co.sh ├── calc_overall_stats.sh ├── check_dependencies.sh ├── lib_utils.sh ├── plot_bar_errbar_subplots.py ├── plot_stacked_bar_subplots.py ├── plot_stats.sh └── tracing.sh ├── video_playing_vs_commands ├── WALL-E HD 1080p Trailer.mp4 └── video_play_vs_comms.sh └── video_streaming ├── README ├── conf.sh ├── read_files.sh ├── reader.c ├── vlc-0.8.6.c-limit-loss-rate.patch ├── vlc-1.0.6-limit-loss-rate.patch ├── vlc-2.1.4-limit-loss-rate.patch ├── vlc_auto.sh └── vlc_test.sh /.circleci/README.md: -------------------------------------------------------------------------------- 1 | 2 | # automated testing 3 | 4 | We've enabled automated test runs for all major releases of bash that bats-core supports (bash 3.2 and above). 5 | 6 | Getting lsblk installed on old releases of linux is difficult so we use alpine containers to test bash 4.0 and 3.2 7 | 8 | | job | bash ver | bash released | df information | 9 | |--------------|----------|---------------|--------------------| 10 | | deb10buster | 5.0 | Jan 2019 | GNU coreutils 8.30 | 11 | | deb09stretch | 4.4 | Sep 2016 | GNU coreutils 8.26 | 12 | | vm | 4.3 | Feb 2014 | GNU coreutils 8.25 | 13 | | deb08jessie | 4.3 | Feb 2014 | GNU coreutils 8.23 | 14 | | cryptsetup | 4.3 | Feb 2014 | BusyBox v1.26.2 | 15 | | deb07wheezy | 4.2 | Feb 2011 | GNU coreutils 8.13 | 16 | | deb06squeeze | 4.1 | Dec 2009 | GNU coreutils 8.5 | 17 | | bash_4_0 | 4.0 | Feb 2009 | BusyBox v1.31.1 | 18 | | bash_3_2 | 3.2 | Oct 2006 | BusyBox v1.31.1 | 19 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | # inspired by github.com/sdolenc/debian-matrix 2 | 3 | version: 2 4 | jobs: 5 | download-bats-core: 6 | docker: 7 | - image: curlimages/curl 8 | # download bash unit testing framework 9 | # then persist it for other jobs. 
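# (the framework ends up in /tmp/bats-core; each test job below re-attaches that workspace at /tmp before invoking run_unit_tests.sh)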
10 | steps: 11 | - checkout 12 | - run: 13 | command: sh ./unit_tests/get_bats.sh 14 | - persist_to_workspace: 15 | root: /tmp 16 | paths: 17 | - bats-core 18 | deb10buster: 19 | docker: 20 | - image: sdolenc/debian-circleci:buster 21 | <<: &commonSteps 22 | steps: 23 | - checkout 24 | - attach_workspace: 25 | at: /tmp 26 | - run: 27 | command: | 28 | bash --version | head -2 29 | echo "" 30 | df --version 2>&1 | head -2 || true 31 | echo "" 32 | 33 | bash ./run_unit_tests.sh 34 | deb09stretch: 35 | docker: 36 | - image: sdolenc/debian-circleci:stretch 37 | <<: *commonSteps 38 | deb08jessie: 39 | docker: 40 | - image: sdolenc/debian-circleci:jessie 41 | <<: *commonSteps 42 | deb07wheezy: 43 | docker: 44 | - image: sdolenc/debian-circleci:wheezy 45 | <<: *commonSteps 46 | deb06squeeze: 47 | docker: 48 | - image: sdolenc/debian-circleci:squeeze 49 | <<: *commonSteps 50 | bash_4_0: 51 | docker: 52 | - image: bash:4.0 53 | <<: *commonSteps 54 | bash_3_2: 55 | docker: 56 | - image: bash:3.2 57 | <<: *commonSteps 58 | cryptsetup: 59 | docker: 60 | - image: sdolenc/encrypted-circleci:cryptsetup 61 | <<: *commonSteps 62 | vm: 63 | machine: 64 | image: ubuntu-1604:201903-01 65 | <<: *commonSteps 66 | 67 | workflows: 68 | version: 2 69 | all: 70 | jobs: 71 | - download-bats-core 72 | - deb10buster: 73 | requires: 74 | - download-bats-core 75 | - deb09stretch: 76 | requires: 77 | - download-bats-core 78 | - deb08jessie: 79 | requires: 80 | - download-bats-core 81 | - deb07wheezy: 82 | requires: 83 | - download-bats-core 84 | - deb06squeeze: 85 | requires: 86 | - download-bats-core 87 | - bash_4_0: 88 | requires: 89 | - download-bats-core 90 | - bash_3_2: 91 | requires: 92 | - download-bats-core 93 | - cryptsetup: 94 | requires: 95 | - download-bats-core 96 | - vm: 97 | requires: 98 | - download-bats-core 99 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | 2 | NOTE: the GPL below is copyrighted by the Free Software Foundation, 3 | but the instance of code that it refers to (the S benchmark suite) 4 | is copyrighted by me and others who actually wrote it. 5 | 6 | Also note that the only valid version of the GPL as far as the kernel 7 | is concerned is _this_ particular version of the license (ie v2, not 8 | v2.2 or v3.x or whatever), unless explicitly otherwise stated. 9 | 10 | Paolo Valente 11 | 12 | ---------------------------------------- 13 | 14 | GNU GENERAL PUBLIC LICENSE 15 | Version 2, June 1991 16 | 17 | Copyright (C) 1989, 1991 Free Software Foundation, Inc. 18 | 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 19 | Everyone is permitted to copy and distribute verbatim copies 20 | of this license document, but changing it is not allowed. 21 | 22 | Preamble 23 | 24 | The licenses for most software are designed to take away your 25 | freedom to share and change it. By contrast, the GNU General Public 26 | License is intended to guarantee your freedom to share and change free 27 | software--to make sure the software is free for all its users. This 28 | General Public License applies to most of the Free Software 29 | Foundation's software and to any other program whose authors commit to 30 | using it. (Some other Free Software Foundation software is covered by 31 | the GNU Library General Public License instead.) You can apply it to 32 | your programs, too. 33 | 34 | When we speak of free software, we are referring to freedom, not 35 | price. 
Our General Public Licenses are designed to make sure that you 36 | have the freedom to distribute copies of free software (and charge for 37 | this service if you wish), that you receive source code or can get it 38 | if you want it, that you can change the software or use pieces of it 39 | in new free programs; and that you know you can do these things. 40 | 41 | To protect your rights, we need to make restrictions that forbid 42 | anyone to deny you these rights or to ask you to surrender the rights. 43 | These restrictions translate to certain responsibilities for you if you 44 | distribute copies of the software, or if you modify it. 45 | 46 | For example, if you distribute copies of such a program, whether 47 | gratis or for a fee, you must give the recipients all the rights that 48 | you have. You must make sure that they, too, receive or can get the 49 | source code. And you must show them these terms so they know their 50 | rights. 51 | 52 | We protect your rights with two steps: (1) copyright the software, and 53 | (2) offer you this license which gives you legal permission to copy, 54 | distribute and/or modify the software. 55 | 56 | Also, for each author's protection and ours, we want to make certain 57 | that everyone understands that there is no warranty for this free 58 | software. If the software is modified by someone else and passed on, we 59 | want its recipients to know that what they have is not the original, so 60 | that any problems introduced by others will not reflect on the original 61 | authors' reputations. 62 | 63 | Finally, any free program is threatened constantly by software 64 | patents. We wish to avoid the danger that redistributors of a free 65 | program will individually obtain patent licenses, in effect making the 66 | program proprietary. To prevent this, we have made it clear that any 67 | patent must be licensed for everyone's free use or not licensed at all. 68 | 69 | The precise terms and conditions for copying, distribution and 70 | modification follow. 71 | 72 | GNU GENERAL PUBLIC LICENSE 73 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 74 | 75 | 0. This License applies to any program or other work which contains 76 | a notice placed by the copyright holder saying it may be distributed 77 | under the terms of this General Public License. The "Program", below, 78 | refers to any such program or work, and a "work based on the Program" 79 | means either the Program or any derivative work under copyright law: 80 | that is to say, a work containing the Program or a portion of it, 81 | either verbatim or with modifications and/or translated into another 82 | language. (Hereinafter, translation is included without limitation in 83 | the term "modification".) Each licensee is addressed as "you". 84 | 85 | Activities other than copying, distribution and modification are not 86 | covered by this License; they are outside its scope. The act of 87 | running the Program is not restricted, and the output from the Program 88 | is covered only if its contents constitute a work based on the 89 | Program (independent of having been made by running the Program). 90 | Whether that is true depends on what the Program does. 91 | 92 | 1. 
You may copy and distribute verbatim copies of the Program's 93 | source code as you receive it, in any medium, provided that you 94 | conspicuously and appropriately publish on each copy an appropriate 95 | copyright notice and disclaimer of warranty; keep intact all the 96 | notices that refer to this License and to the absence of any warranty; 97 | and give any other recipients of the Program a copy of this License 98 | along with the Program. 99 | 100 | You may charge a fee for the physical act of transferring a copy, and 101 | you may at your option offer warranty protection in exchange for a fee. 102 | 103 | 2. You may modify your copy or copies of the Program or any portion 104 | of it, thus forming a work based on the Program, and copy and 105 | distribute such modifications or work under the terms of Section 1 106 | above, provided that you also meet all of these conditions: 107 | 108 | a) You must cause the modified files to carry prominent notices 109 | stating that you changed the files and the date of any change. 110 | 111 | b) You must cause any work that you distribute or publish, that in 112 | whole or in part contains or is derived from the Program or any 113 | part thereof, to be licensed as a whole at no charge to all third 114 | parties under the terms of this License. 115 | 116 | c) If the modified program normally reads commands interactively 117 | when run, you must cause it, when started running for such 118 | interactive use in the most ordinary way, to print or display an 119 | announcement including an appropriate copyright notice and a 120 | notice that there is no warranty (or else, saying that you provide 121 | a warranty) and that users may redistribute the program under 122 | these conditions, and telling the user how to view a copy of this 123 | License. (Exception: if the Program itself is interactive but 124 | does not normally print such an announcement, your work based on 125 | the Program is not required to print an announcement.) 126 | 127 | These requirements apply to the modified work as a whole. If 128 | identifiable sections of that work are not derived from the Program, 129 | and can be reasonably considered independent and separate works in 130 | themselves, then this License, and its terms, do not apply to those 131 | sections when you distribute them as separate works. But when you 132 | distribute the same sections as part of a whole which is a work based 133 | on the Program, the distribution of the whole must be on the terms of 134 | this License, whose permissions for other licensees extend to the 135 | entire whole, and thus to each and every part regardless of who wrote it. 136 | 137 | Thus, it is not the intent of this section to claim rights or contest 138 | your rights to work written entirely by you; rather, the intent is to 139 | exercise the right to control the distribution of derivative or 140 | collective works based on the Program. 141 | 142 | In addition, mere aggregation of another work not based on the Program 143 | with the Program (or with a work based on the Program) on a volume of 144 | a storage or distribution medium does not bring the other work under 145 | the scope of this License. 146 | 147 | 3. 
You may copy and distribute the Program (or a work based on it, 148 | under Section 2) in object code or executable form under the terms of 149 | Sections 1 and 2 above provided that you also do one of the following: 150 | 151 | a) Accompany it with the complete corresponding machine-readable 152 | source code, which must be distributed under the terms of Sections 153 | 1 and 2 above on a medium customarily used for software interchange; or, 154 | 155 | b) Accompany it with a written offer, valid for at least three 156 | years, to give any third party, for a charge no more than your 157 | cost of physically performing source distribution, a complete 158 | machine-readable copy of the corresponding source code, to be 159 | distributed under the terms of Sections 1 and 2 above on a medium 160 | customarily used for software interchange; or, 161 | 162 | c) Accompany it with the information you received as to the offer 163 | to distribute corresponding source code. (This alternative is 164 | allowed only for noncommercial distribution and only if you 165 | received the program in object code or executable form with such 166 | an offer, in accord with Subsection b above.) 167 | 168 | The source code for a work means the preferred form of the work for 169 | making modifications to it. For an executable work, complete source 170 | code means all the source code for all modules it contains, plus any 171 | associated interface definition files, plus the scripts used to 172 | control compilation and installation of the executable. However, as a 173 | special exception, the source code distributed need not include 174 | anything that is normally distributed (in either source or binary 175 | form) with the major components (compiler, kernel, and so on) of the 176 | operating system on which the executable runs, unless that component 177 | itself accompanies the executable. 178 | 179 | If distribution of executable or object code is made by offering 180 | access to copy from a designated place, then offering equivalent 181 | access to copy the source code from the same place counts as 182 | distribution of the source code, even though third parties are not 183 | compelled to copy the source along with the object code. 184 | 185 | 4. You may not copy, modify, sublicense, or distribute the Program 186 | except as expressly provided under this License. Any attempt 187 | otherwise to copy, modify, sublicense or distribute the Program is 188 | void, and will automatically terminate your rights under this License. 189 | However, parties who have received copies, or rights, from you under 190 | this License will not have their licenses terminated so long as such 191 | parties remain in full compliance. 192 | 193 | 5. You are not required to accept this License, since you have not 194 | signed it. However, nothing else grants you permission to modify or 195 | distribute the Program or its derivative works. These actions are 196 | prohibited by law if you do not accept this License. Therefore, by 197 | modifying or distributing the Program (or any work based on the 198 | Program), you indicate your acceptance of this License to do so, and 199 | all its terms and conditions for copying, distributing or modifying 200 | the Program or works based on it. 201 | 202 | 6. Each time you redistribute the Program (or any work based on the 203 | Program), the recipient automatically receives a license from the 204 | original licensor to copy, distribute or modify the Program subject to 205 | these terms and conditions. 
You may not impose any further 206 | restrictions on the recipients' exercise of the rights granted herein. 207 | You are not responsible for enforcing compliance by third parties to 208 | this License. 209 | 210 | 7. If, as a consequence of a court judgment or allegation of patent 211 | infringement or for any other reason (not limited to patent issues), 212 | conditions are imposed on you (whether by court order, agreement or 213 | otherwise) that contradict the conditions of this License, they do not 214 | excuse you from the conditions of this License. If you cannot 215 | distribute so as to satisfy simultaneously your obligations under this 216 | License and any other pertinent obligations, then as a consequence you 217 | may not distribute the Program at all. For example, if a patent 218 | license would not permit royalty-free redistribution of the Program by 219 | all those who receive copies directly or indirectly through you, then 220 | the only way you could satisfy both it and this License would be to 221 | refrain entirely from distribution of the Program. 222 | 223 | If any portion of this section is held invalid or unenforceable under 224 | any particular circumstance, the balance of the section is intended to 225 | apply and the section as a whole is intended to apply in other 226 | circumstances. 227 | 228 | It is not the purpose of this section to induce you to infringe any 229 | patents or other property right claims or to contest validity of any 230 | such claims; this section has the sole purpose of protecting the 231 | integrity of the free software distribution system, which is 232 | implemented by public license practices. Many people have made 233 | generous contributions to the wide range of software distributed 234 | through that system in reliance on consistent application of that 235 | system; it is up to the author/donor to decide if he or she is willing 236 | to distribute software through any other system and a licensee cannot 237 | impose that choice. 238 | 239 | This section is intended to make thoroughly clear what is believed to 240 | be a consequence of the rest of this License. 241 | 242 | 8. If the distribution and/or use of the Program is restricted in 243 | certain countries either by patents or by copyrighted interfaces, the 244 | original copyright holder who places the Program under this License 245 | may add an explicit geographical distribution limitation excluding 246 | those countries, so that distribution is permitted only in or among 247 | countries not thus excluded. In such case, this License incorporates 248 | the limitation as if written in the body of this License. 249 | 250 | 9. The Free Software Foundation may publish revised and/or new versions 251 | of the General Public License from time to time. Such new versions will 252 | be similar in spirit to the present version, but may differ in detail to 253 | address new problems or concerns. 254 | 255 | Each version is given a distinguishing version number. If the Program 256 | specifies a version number of this License which applies to it and "any 257 | later version", you have the option of following the terms and conditions 258 | either of that version or of any later version published by the Free 259 | Software Foundation. If the Program does not specify a version number of 260 | this License, you may choose any version ever published by the Free Software 261 | Foundation. 262 | 263 | 10. 
If you wish to incorporate parts of the Program into other free 264 | programs whose distribution conditions are different, write to the author 265 | to ask for permission. For software which is copyrighted by the Free 266 | Software Foundation, write to the Free Software Foundation; we sometimes 267 | make exceptions for this. Our decision will be guided by the two goals 268 | of preserving the free status of all derivatives of our free software and 269 | of promoting the sharing and reuse of software generally. 270 | 271 | NO WARRANTY 272 | 273 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 274 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 275 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 276 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 277 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 278 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 279 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 280 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 281 | REPAIR OR CORRECTION. 282 | 283 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 284 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 285 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 286 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 287 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 288 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 289 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 290 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 291 | POSSIBILITY OF SUCH DAMAGES. 292 | 293 | END OF TERMS AND CONDITIONS 294 | 295 | How to Apply These Terms to Your New Programs 296 | 297 | If you develop a new program, and you want it to be of the greatest 298 | possible use to the public, the best way to achieve this is to make it 299 | free software which everyone can redistribute and change under these terms. 300 | 301 | To do so, attach the following notices to the program. It is safest 302 | to attach them to the start of each source file to most effectively 303 | convey the exclusion of warranty; and each file should have at least 304 | the "copyright" line and a pointer to where the full notice is found. 305 | 306 | 307 | Copyright (C) 308 | 309 | This program is free software; you can redistribute it and/or modify 310 | it under the terms of the GNU General Public License as published by 311 | the Free Software Foundation; either version 2 of the License, or 312 | (at your option) any later version. 313 | 314 | This program is distributed in the hope that it will be useful, 315 | but WITHOUT ANY WARRANTY; without even the implied warranty of 316 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 317 | GNU General Public License for more details. 318 | 319 | You should have received a copy of the GNU General Public License 320 | along with this program; if not, write to the Free Software 321 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 322 | 323 | 324 | Also add information on how to contact you by electronic and paper mail. 
325 | 326 | If the program is interactive, make it output a short notice like this 327 | when it starts in an interactive mode: 328 | 329 | Gnomovision version 69, Copyright (C) year name of author 330 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 331 | This is free software, and you are welcome to redistribute it 332 | under certain conditions; type `show c' for details. 333 | 334 | The hypothetical commands `show w' and `show c' should show the appropriate 335 | parts of the General Public License. Of course, the commands you use may 336 | be called something other than `show w' and `show c'; they could even be 337 | mouse-clicks or menu items--whatever suits your program. 338 | 339 | You should also get your employer (if you work as a programmer) or your 340 | school, if any, to sign a "copyright disclaimer" for the program, if 341 | necessary. Here is a sample; alter the names: 342 | 343 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 344 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 345 | 346 | , 1 April 1989 347 | Ty Coon, President of Vice 348 | 349 | This General Public License does not permit incorporating your program into 350 | proprietary programs. If your program is a subroutine library, you may 351 | consider it more useful to permit linking proprietary applications with the 352 | library. If this is what you want to do, use the GNU Library General 353 | Public License instead of this License. 354 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | Small collection of benchmarks for storage I/O - Version 3.6 2 | 3 | This suite evaluates: 4 | - responsiveness, by measuring start-up times of real applications 5 | under configurable background workloads 6 | - latency for soft real-time applications, by measuring playback 7 | quality (drop rate) of video and audio under configurable background 8 | workloads 9 | - speed of code-development tasks (make, git checkout, git merge, git 10 | grep) under configurable background workloads, plus responsiveness 11 | while one of these dev tasks is executed 12 | - minimum per-client bandwidth guaranteed to a set of clients doing 13 | configurable types of I/O 14 | - maximum per-client latency guaranteed to a set of clients doing 15 | configurable types of I/O 16 | - throughput with processes doing filesystem or raw I/O in parallel 17 | (figure of merit measured by many other suites too) 18 | - throughput with processes doing interleaved I/O, which mimics the 19 | typical I/O pattern of applications like qemu 20 | - efficiency of block layer and I/O schedulers in terms of maximum 21 | number of IOPS supported (this functionality makes it easy to 22 | profile the desired component of the block layer) 23 | 24 | See USAGE-INSTALLATION for all details. In particular, that file 25 | also contains a quick but complete example of use. 26 | -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | * Extend benchmarks to run correctly also in a virtualized environment: 2 | 3 | . add a "guest mode" to the benchmarks, for which the following 4 | functionalities are provided 5 | 6 | . choice of the host I/O scheduler 7 | 8 | . 
possibility to start a background workload on the host 9 | before starting the benchmark in the guest 10 | 11 | . the script executed in the host may send somehow a signal (for 12 | example by creating a file in some shared folder, or by executing 13 | a kill with the proper signal in the guest, ...) when the workload 14 | has settled in the host, i.e., after the usual initial sleep 15 | 16 | . after invoking the host script, the script executed in the guest 17 | blocks until the above signal is received 18 | 19 | . at the end of the benchmark, the guest script sends somehow a 20 | signal to the host script (for example by executing a kill 21 | with the right signal in the host), which shuts down the 22 | workload (hence the host script must run the workload 23 | indefinitely, till it receives the stop signal) 24 | 25 | . sync&drop or just drop caches also on the host when appropriate 26 | 27 | . make sure that the host-related actions are also logged on stdout, 28 | so that the guest log contains all the information one may need to 29 | check whether the benchmark has been correctly executed 30 | 31 | . automatically add an extra tag just after the scheduler name in 32 | the name of the output stat file, for example with the following 33 | format, where <host-sched> is the name of the host scheduler in 34 | uppercase (to help back-end scripts not get confused with the 35 | guest scheduler): 36 | 37 | . no_host_wl-<host-sched> If no workload is run on the host 38 | 39 | . host_<n>r[w<m>][_<rw-type>]-<host-sched>- If n readers and m writers 40 | of some type are run in the 41 | host, in particular w<m> is 42 | added only if m>0, and 43 | _<rw-type> is added only if it is 44 | not seq 45 | 46 | . write the above tag and not just the scheduler name in the output stat file 47 | 48 | * Using the above new functionalities of the benchmark scripts, define 49 | a new run_all_tests script for virtualized environments 50 | 51 | . for each benchmark, run consecutively the benchmark script for all 52 | possible guest and host configurations, and, for each repetition, 53 | put all output stat files in the same folder 54 | 55 | . possibly merge this run_all_tests script with the existing 56 | run_all_tests script -------------------------------------------------------------------------------- /USAGE-INSTALLATION: -------------------------------------------------------------------------------- 1 | INSTALLATION 2 | 3 | No actual installation needed, just download/clone the package; e.g., 4 | $ git clone https://github.com/Algodev-github/S 5 | 6 | The suite has some dependencies, which get installed automatically as 7 | scripts are executed. See section MAIN DEPENDENCIES for more details. 8 | 9 | FOR THE IMPATIENT 10 | 11 | Jump directly to the last section "QUICK EXAMPLE OF USE". 12 | 13 | CONTENTS 14 | 15 | To learn how to use the suite, it may be good to start by learning 16 | what it contains. 17 | 18 | In the root directory: 19 | . def_config.sh 20 | 21 | Default configuration of general parameters, driving the 22 | execution of all benchmarks. Read the comments in this file to 23 | learn what can be configured and how. On the very first 24 | execution of some benchmark script by a user, this file is 25 | copied into the user home, as .S-config.sh. This is done even 26 | if just the -h option is passed to the script. The file 27 | .S-config.sh is the one that is actually read to set the 28 | values of the parameters, regardless of whether the file has 29 | just been copied from def_config.sh, or was already present in 30 | the home directory.
Thus .S-config.sh is the file to modify, 31 | to change the configuration. 32 | . create_config.sh 33 | This script creates the file .S-config.sh in the user home, by 34 | copying def_config.sh. This is an alternative way for creating 35 | .S-config.sh, compared with just executing a benchmark script 36 | (as explained above). This script may be useful if you want to 37 | modify .S-config.sh before executing any benchmark script. 38 | 39 | Each benchmark is implemented by a bash script, stored in a separate directory. 40 | Here is the list of the directories and of the main scripts they contain: 41 | throughput-sync: throughput-sync.sh 42 | Measures aggregated throughput with parallel, greedy (i.e., 43 | continuously issuing I/O requests), sync readers and/or writers. 44 | Both readers and writers are implemented with fio, and may be 45 | sequential or random. 46 | This type of workload is also used as background in the other tests. 47 | Since the I/O is sync, every I/O request tends to cause the maximum 48 | possible overhead throughout the system. Hence the workloads generated 49 | by this script are the most demanding static workloads for reaching 50 | a high throughput. 51 | At the end of the test, the min, max, avg, std deviation, 52 | confidence interval of the read/write/total aggregated throughput 53 | values sampled during the run is reported. 54 | This benchmark can also be used to measure the maximum throughput 55 | sustainable by the block layer, as a function of the current I/O 56 | scheduler. To attain this goal, it is sufficient to: 57 | - set NULLB=yes in .S-config.sh 58 | - enable the performance profiling mode from the command line when 59 | invoking throughput-sync.sh 60 | - set raw_rand as type of I/O (or raw_seq for specific goals, if you 61 | know what you are doing) 62 | - start a number of readers equal to, or multiple of, the number of 63 | available virtual CPUs 64 | file-copy: file-copy.sh 65 | Launches parallel file copies. Only generates background workload, 66 | and does not compute any statistic. 67 | Creates the files to copy if they do not exist. Files are copied 68 | to and read from $BASE_DIR. 69 | comm_startup_lat: comm_startup_lat.sh 70 | Measures the cold-cache startup latency of the given command, 71 | launched while the same configurable workload used in the agg_thr 72 | test is running. At the end of the test it reports min, max, avg, std 73 | dev and conf interval of both the sampled latencies and the read/ 74 | write/total aggregated throughput. 75 | bandwidth-latency: bandwidth-latency.sh 76 | Measures bandwidth and latency enjoyed by processes competing for a 77 | storage device, with and without I/O control. 78 | video_playing_vs_commands: video_play_vs_comms.sh 79 | Measures the number of frames dropped while playing a video 80 | clip and, at the same time, 1) repeatedly invoking a short 81 | command and 2) serving one of the workloads of the 82 | agg_thr test. 83 | video_streaming 84 | This is a small package, made of a few scripts, programs and patches 85 | for setting up a simple experiment with a video server. 86 | In particular, a patched version of vlc is used, which also logs the 87 | frame-loss rate. This information is used by the scripts to execute 88 | the following experiment: measure the maximum number of movies that 89 | can be streamed in parallel without exceeding 1% frame loss rate.
In 90 | brief, the steps are: start streaming a new movie, in parallel 91 | with the ones already being streamed, every N seconds; stop if 1% 92 | frame-loss rate has been reached. To perturb the streaming, several 93 | intermittent file readers are run in parallel too. More details 94 | and instructions in the README within the package. 95 | kern_dev_tasks-vs-rw: kern_dev_tasks_vs_rw.sh 96 | Measures the progress of a make, git checkout or git merge task, 97 | executed while the same configurable workload used in the agg_thr 98 | test is running. At the end of the test it reports the number of lines 99 | written to stdout by make, or the progress of the file checkout 100 | phase of git merge or git checkout, plus the same statistics on I/O 101 | throughput as the other tests. 102 | This script currently works with kernel versions 2.6.30, 2.6.32 and 2.6.33. You must 103 | provide a git tree containing at least these three versions. 104 | WARNING: the make test overwrites .config, whereas the other two tests 105 | create new branches. 106 | interleaved_io: interleaved_io.sh 107 | Measures the aggregate throughput against an I/O pattern with 108 | interleaved readers. The script will spawn the desired number 109 | of parallel processes, each reading sequentially a 16KB zone 110 | of the storage. The zones are interleaved, in that the zone 111 | read by the first process is contiguous to the zone read by 112 | the second process, and so on. At the end of the test, the 113 | min, max, avg, std deviation, confidence interval of the 114 | read/write/total aggregated throughput values sampled during 115 | the run is reported. By default, the script won't create any 116 | file, but will read directly from the device on which the root 117 | directory is mounted. 118 | fairness: fairness.sh 119 | Measures how the device throughput is distributed among parallel 120 | sequential readers. This is more a work in progress than the other 121 | scripts. 122 | run_multiple_benchmarks 123 | Scripts to execute subsets of the above tests. For example, 124 | there is the script run_main_benchmarks.sh, which repeatedly 125 | executes all the tests, apart from the video playing/streaming 126 | ones, with several workloads. It can be configured only by 127 | changing its code (you may want to change the number of 128 | repetitions of each test, the schedulers used, ...). 129 | run_main_benchmarks.sh sends mail reports to let the test progress 130 | be checked without accessing the machine (and possibly 131 | perturbing the tests themselves). This service can be 132 | configured by changing some related parameters in 133 | ~/.S-config.sh 134 | utilities: several files here 135 | . lib_utils.sh 136 | Common functions used by the test scripts 137 | . calc_avg_and_co.sh 138 | Support script used by the other scripts to compute stats 139 | .
calc_overall_stats.sh 140 | Takes as input a directory and, if the directory 141 | contains at least one subdirectory containing, in its 142 | turn, the results of one of the benchmarks, then, for 143 | each of these subdirectories: 1) searches recursively, 144 | in all the directories rooted at these subdirectories, 145 | all the files named as any of the result files 146 | produced by the benchmark; 2) considers any set of 147 | files with the same name as the result files produced 148 | in a set of repetitions of the same test, and 149 | computes: a) min/max/avg/std_dev/confidence_interval 150 | statistics on any of the avg values reported in these 151 | files (hence it computes statistics over multiple 152 | repetitions of the same test), b) tables containing 153 | averages across the avg values reported in these files 154 | (output table files also contain all the information 155 | needed to generate complete plots, see plot_stats.sh 156 | below). If the directory passed as input is not the 157 | root of a tree of benchmark-subdirectories, but 158 | contains just the results of a benchmark, then the 159 | script executes the same two steps as above in the 160 | input directory. In more detail, the script tries to 161 | guess whether a directory or a subdirectory contains 162 | the results of a benchmark as a function of the name 163 | of the directory or subdirectory itself. If the name 164 | provides no hint, then the type of the results must be 165 | passed explicitly on the command line (see the usage 166 | message for more details). So far, only the 167 | directories containing the results of 168 | agg_thr-with-greedy_rw.sh, comm_startup_lat.sh or 169 | task_vs_rw.sh scripts are supported. The latter is 170 | however still to be tested. The script can collect 171 | statistics in a completely automatic way on the trees 172 | generated by the execution of run_main_benchmarks.sh. 173 | . plot_stats.sh 174 | Takes as input any table file and generates a plot 175 | from it. 176 | 177 | MAIN DEPENDENCIES 178 | 179 | All dependencies are installed automatically, except for LibreOffice 180 | Writer (needed only for some specific test). These dependencies include at 181 | least fio, iostat (from the sysstat package), awk, time and bc. 182 | For the file-copy.sh script you need pv. For the 183 | kernel-development benchmark you also need git and make, whereas 184 | gnuplot is needed to generate plots through plot_stats.sh (gnuplot-x11 185 | to handle also x11 terminals, i.e., to make plot windows), and mplayer 186 | is needed for the video-playing benchmark. To let the 187 | run_main_benchmarks.sh script send mail reports about the test 188 | progress, you must have a mail transfer agent (such as, e.g., msmtp) 189 | and a mail client (such as, e.g., mailx) installed and correctly 190 | configured to send e-mails. 191 | 192 | USAGE AND OUTPUT OF THE BENCHMARKS 193 | 194 | Option A: 195 | i) run each benchmark manually. To execute each script, first cd to 196 | the dir that contains it. Most scripts invoke commands that 197 | require root privileges. Each benchmark produces a result file 198 | that contains statistics on the quantities of interest (throughput, 199 | latency, number of lines produced by make, ...).
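   For example, to run the command-start-up benchmark by hand (this is
   the same invocation shown in the -h help of comm_startup_lat.sh):
   $ cd comm_startup_lat
   $ sudo ./comm_startup_lat.sh bfq 5 5 seq 20 "xterm /bin/true" mydir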
200 | 201 | ii) if you repeat a test more than once manually and store the result 202 | files in a given directory or in its subdirs, then you may use, 203 | first, utilities/calc_overall_stats.sh to further aggregate the 204 | results and compute statistics on the avg values across the 205 | repetitions (calc_overall_stats.sh works for most but not yet all 206 | benchmarks), and then utilities/plot_stats.sh to generate plots 207 | from the table files produced by calc_overall_stats.sh. 208 | 209 | Option B: 210 | i) run multiple benchmarks automatically through a general script like 211 | run_multiple_benchmarks/run_main_benchmarks.sh. After executing the 212 | benchmarks, this script also invokes calc_overall_stats.sh and 213 | plot_stats.sh to generate both global statistics and plots 214 | automatically (for most but not yet all benchmarks). 215 | 216 | A special case is the test_responsiveness.sh script, commented in the 217 | next section. 218 | 219 | For examples and brief help just invoke the desired script with -h. 220 | 221 | QUICK EXAMPLE OF USE 222 | 223 | If you are interested only in having an idea of the responsiveness of 224 | your system, then type: 225 | 226 | $ S/run_multiple_benchmarks/test_responsiveness.sh 227 | 228 | This will measure the time needed to start an average-size 229 | application, for each available I/O scheduler (automatically 230 | selected), and with two of the most responsiveness-unfriendly I/O 231 | workloads in the background. 232 | 233 | If, in addition to responsiveness, you also want to measure throughput 234 | with filesystem I/O, for several significant workloads, and again for 235 | each available I/O scheduler, then: 236 | 237 | $ sudo S/run_multiple_benchmarks/run_main_benchmarks.sh "throughput replayed-gnome-term-startup" 238 | 239 | Or pass only "throughput" if you want to measure only throughput. 240 | 241 | Both scripts take their measurements by creating, reading and writing 242 | some test files in the S directory itself. If this is not what you 243 | want, then invoke first 244 | 245 | $ sudo S/run_multiple_benchmarks/run_main_benchmarks.sh -h 246 | 247 | just to make sure that the file ~/.S-config.sh gets created 248 | automatically (if not present already). 249 | 250 | After that, modify the value assigned to BASE_DIR in ~/.S-config.sh if 251 | you want to change the directory in which test files are created. Or, 252 | if you want to choose explicitly the device on which to run benchmarks, 253 | set TEST_DEV to the name of that device. Read the comments on TEST_DEV 254 | in the file ~/.S-config.sh for full details and other options. -------------------------------------------------------------------------------- /comm_startup_lat/comm_startup_lat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (C) 2013 Paolo Valente 3 | # Arianna Avanzini 4 | 5 | ../utilities/check_dependencies.sh awk dd fio iostat bc /usr/include/libaio.h 6 | if [[ $? -ne 0 ]]; then 7 | exit 8 | fi 9 | 10 | export LC_NUMERIC=C 11 | export TIMEFORMAT=%R 12 | . ../config_params.sh 13 | .
../utilities/lib_utils.sh 14 | UTIL_DIR=`cd ../utilities; pwd` 15 | # Set to yes if you want also iostat to be executed in parallel 16 | IOSTAT=yes 17 | 18 | sched=$1 19 | NUM_READERS=${2-0} 20 | NUM_WRITERS=${3-0} 21 | RW_TYPE=${4-seq} 22 | NUM_ITER=${5-5} 23 | COMMAND=${6-"gnome-terminal -e /bin/true"} 24 | STAT_DEST_DIR=${7-.} 25 | MAX_STARTUP=${8-120} 26 | IDLE_DISK_LAT=$9 27 | 28 | if [[ "${10}" == "" && "$1" != "-h" ]]; then # compute MAXRATE automatically 29 | dev=$(echo $DEVS | awk '{ print $1 }') 30 | if [[ "$(cat /sys/block/$dev/queue/rotational)" == "1" ]]; then 31 | MAXRATE=4000 32 | echo Automatically limiting write rate to ${MAXRATE}KB/s 33 | else 34 | MAXRATE=0 # no write-rate limitation for flash-based storage 35 | fi 36 | else 37 | MAXRATE=${10} 38 | fi 39 | 40 | VERBOSITY=${11} 41 | 42 | if [[ "$VERBOSITY" == verbose ]]; then 43 | REDIRECT=/dev/stdout 44 | else 45 | REDIRECT=/dev/null 46 | fi 47 | 48 | function show_usage { 49 | echo "\ 50 | Usage (as root): ./comm_startup_lat.sh [\"\" | <sched_name> | cur-sched] 51 | [<num_readers>] 52 | [<num_writers>] [seq | rand | raw_seq | raw_rand] 53 | [<num_iterations>] 54 | [<command> | 55 | replay-startup-io xterm|gnometerm|lowriter] 56 | [<stat_dest_dir>] 57 | [<max_startup-time>] [<idle_device_lat>] 58 | [<max_write-kB-per-sec>] [verbose] 59 | 60 | first parameter equal to \"\" or to cur-sched -> do not change scheduler 61 | 62 | raw_seq/raw_rand -> read directly from device (no writers allowed) 63 | 64 | command | replay-startup-io -> two possibilities here: 65 | - write a generic command line (examples 66 | below of command lines that allow the 67 | command start-up times of some popular 68 | applications to be measured) 69 | - invoke the replayer of the I/O done by 70 | some popular applications during start up; 71 | this allows start-up times to be evaluated 72 | without actually needing to execute those 73 | applications. 74 | 75 | max_startup-time -> maximum duration allowed for each command 76 | invocation, in seconds; if the command does not 77 | start within the maximum duration, then the command 78 | is killed, no other iteration is performed and 79 | no output file is created. If max_startup_time 80 | is set to 0, then no control is performed. 81 | 82 | idle_device_lat -> reference command start-up time to print in each iteration, 83 | nothing printed if this option is not given or is set to \"\" 84 | 85 | max_write-kB-per-sec -> maximum total sequential write rate [kB/s], 86 | used to reduce the risk that the system 87 | becomes unresponsive. For random writers, this 88 | value is further divided by 60. If set to 0, 89 | then no limitation is enforced on the write rate. 90 | If no value is set, then a default value is 91 | computed automatically as a function of whether 92 | the device is rotational. In particular, for 93 | a rotational device, the current default value is 94 | such that the system seems still usable (at least) 95 | under bfq, with a 90 MB/s HDD. On the opposite end, 96 | no write-rate limitation is enforced for a 97 | non-rotational device. 98 | 99 | num_iterations == 0 -> infinite iterations 100 | 101 | Example: 102 | sudo ./comm_startup_lat.sh bfq 5 5 seq 20 \"xterm /bin/true\" mydir 103 | switches to bfq and, after launching 5 sequential readers and 5 sequential 104 | writers, runs \"xterm /bin/true\" 20 times. The file containing the computed 105 | statistics is stored in the mydir subdir of the current dir.
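Another example:
sudo ./comm_startup_lat.sh bfq 5 5 seq 20 \"replay-startup-io gnometerm\" mydir
does the same, but, instead of executing a real application, replays the
start-up I/O of gnome-terminal, recorded in gnometerm.trace.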
106 | 107 | Default parameter values are: \"\", $NUM_READERS, $NUM_WRITERS, $RW_TYPE, 108 | $NUM_ITER, \"$COMMAND\", $STAT_DEST_DIR, $MAX_STARTUP, $IDLE_DISK_LAT and $MAXRATE 109 | 110 | Other commands you may want to test: 111 | \"bash -c exit\", \"xterm /bin/true\", \"ssh localhost exit\", 112 | \"lowriter --terminate_after_init\"" 113 | } 114 | 115 | if [ "$1" == "-h" ]; then 116 | show_usage 117 | exit 118 | fi 119 | 120 | # keep at least three seconds, to make sure that iostat sample are enough 121 | SLEEPTIME_ITER=3 122 | 123 | function clean_and_exit { 124 | if [[ "$KILLPROC" != "" ]]; then 125 | kill -9 $KILLPROC > /dev/null 2>&1 126 | fi 127 | shutdwn 'fio iostat' 128 | cd .. 129 | # rm work dir 130 | if [ -f results-${sched}/trace ]; then 131 | cp -f results-${sched}/trace . 132 | fi 133 | rm -rf results-$sched 134 | rm -f Stop-iterations current-pid # remove possible garbage 135 | if [[ "$XHOST_CONTROL" != "" ]]; then 136 | xhost - > /dev/null 2>&1 137 | fi 138 | exit 139 | } 140 | 141 | function invoke_commands { 142 | if [[ "$IDLE_DISK_LAT" != "" ]]; then 143 | REF_TIME=$IDLE_DISK_LAT 144 | 145 | # do not tolerate an unbearable inflation of the start-up time 146 | MAX_INFLATION=$(echo "$IDLE_DISK_LAT * 500 + 1" | bc -l) 147 | GREATER=$(echo "$MAX_STARTUP > $MAX_INFLATION" | bc -l) 148 | if [[ $GREATER == 1 ]]; then 149 | MAX_STARTUP=$MAX_INFLATION 150 | echo Maximum start-up time reduced to $MAX_STARTUP seconds 151 | fi 152 | else 153 | REF_TIME=1 154 | fi 155 | 156 | rm -f Stop-iterations current-pid # remove possible garbage 157 | 158 | if (($NUM_WRITERS > 0)); then 159 | # increase difficulty by periodically syncing (in 160 | # parallel, as sync is blocking) 161 | (while true; do echo ; echo Syncing again in parallel; \ 162 | sync & sleep 2; done) > $REDIRECT & 163 | fi 164 | 165 | for ((i = 0 ; $NUM_ITER == 0 || i < $NUM_ITER ; i++)) ; do 166 | echo 167 | if (($NUM_ITER > 0)); then 168 | printf "Iteration $(($i+1)) / $NUM_ITER\n" 169 | fi 170 | 171 | if [[ $i -eq 0 || "$TAKE_GLOBAL_TRACE" == "" ]]; then 172 | init_tracing 173 | set_tracing 0 174 | set_tracing 1 175 | fi 176 | 177 | TIME=`(time sleep $SLEEPTIME_ITER) 2>&1` 178 | TOO_LONG=$(echo "$TIME > $SLEEPTIME_ITER * 10 + 10" | bc -l) 179 | if [[ "$MAX_STARTUP" != 0 && $TOO_LONG == 1 ]]; then 180 | echo Even the pre-command sleep timed out: stopping iterations 181 | clean_and_exit 182 | fi 183 | if [[ "$MAX_STARTUP" != "0" ]]; then 184 | bash -c "sleep $MAX_STARTUP && \ 185 | echo Timeout: killing command;\ 186 | cat current-pid | xargs -I pid kill -9 -pid ;\ 187 | touch Stop-iterations" & 188 | KILLPROC=$! 189 | disown 190 | fi 191 | 192 | sleep 1 # introduce a minimal pause between invocations 193 | printf "Starting \"$SHORTNAME\" with cold caches ... " 194 | 195 | # To get correct and precise results, the I/O scheduler has to 196 | # work only on the I/O generated by the command to benchmark, 197 | # plus the desired background I/O. Unfortunately, the following 198 | # sequence of commands generates a little, but misleading extra 199 | # amount of I/O, right before the start of the command to 200 | # benchmark. To mitigate this problem, the "time sleep 0.2", 201 | # in the middle of the next sequence of commands, reduces 202 | # this misleading extra I/O. It works as follows: 203 | # 1. it separates, in time, the I/O made by preceding 204 | # intructions, from the I/O made by the command under test 205 | # 2. 
it warms up the command "time", increasing the probability 206 | # that the latter will do very little, or no I/O, right 207 | # before the start of the command to benchmark 208 | COM_TIME=`setsid bash -c "echo "'$BASHPID'" > current-pid;\ 209 | echo 3 > /proc/sys/vm/drop_caches;\ 210 | { time sleep 0.2 ; } >/dev/null 2>&1; \ 211 | time ""$COMMAND" 2>&1` 212 | 213 | # If you want to exploit group scheduling, the 214 | # following trick apparently does not work well: add 215 | # the following command in the above variable 216 | # echo $BASHPID > /cgroup//cgroup.procs; 217 | # 218 | # So, as of now, the best way is to execute all this 219 | # script in a group, with 0 readers and 0 writers, and 220 | # to generate background workload with a separate 221 | # script (such as throughput_sync.sh). 222 | 223 | TIME=$(echo $COM_TIME | awk '{print $NF}') 224 | 225 | if [[ "$MAX_STARTUP" != "0" ]]; then 226 | if [[ "$KILLPROC" != "" && \ 227 | "$(ps $KILLPROC | tail -n +2)" != "" ]]; then 228 | # kill unfired timeout 229 | kill -9 $KILLPROC > /dev/null 2>&1 230 | KILLPROC= 231 | fi 232 | fi 233 | echo done 234 | echo "$TIME" >> lat-${sched} 235 | printf " Start-up time: " 236 | 237 | NUM=`echo "( $TIME / $REF_TIME ) * 2" | bc -l` 238 | NUM=`printf "%0.f" $NUM` 239 | for ((j = 0 ; $j < $NUM ; j++)); 240 | do 241 | printf \# 242 | done 243 | echo " $TIME sec" 244 | if [[ "$IDLE_DISK_LAT" != "" ]]; then 245 | echo Idle-device start-up time: \#\# $IDLE_DISK_LAT sec 246 | fi 247 | if [[ -f Stop-iterations || "$TIME" == "" || \ 248 | `echo $TIME '>' $MAX_STARTUP | bc -l` == "1" ]]; 249 | then # timeout fired 250 | echo Too long startup: stopping iterations 251 | clean_and_exit 252 | else 253 | if [[ `echo $TIME '<' 2 | bc -l` == "1" ]]; then 254 | # extra pause to let a minimum of thr stats be printed 255 | sleep 2 256 | fi 257 | fi 258 | done 259 | } 260 | 261 | function calc_latency { 262 | echo "Latency statistics:" | tee -a $1 263 | sh $UTIL_DIR/calc_avg_and_co.sh 99 < lat-${sched}\ 264 | | tee -a $1 265 | } 266 | 267 | function compute_statistics { 268 | file_name=$STAT_DEST_DIR/\ 269 | ${sched}-${NUM_READERS}r${NUM_WRITERS}w-${RW_TYPE}-lat_thr_stat.txt 270 | 271 | echo Results for $sched, $NUM_ITER $COMMAND, $NUM_READERS $RW_TYPE\ 272 | readers and $NUM_WRITERS $RW_TYPE writers | tee $file_name 273 | 274 | calc_latency $file_name 275 | 276 | if [ "$IOSTAT" == "yes" ]; then 277 | print_save_agg_thr $file_name 278 | fi 279 | } 280 | 281 | function compile_replayer 282 | { 283 | ../utilities/check_dependencies.sh g++ 284 | if [[ $? -ne 0 ]]; then 285 | echo g++ not found: I need it to compile replay-startup-io 286 | exit 287 | fi 288 | g++ -std=c++11 -pthread -Wall replay-startup-io.cc -o replay-startup-io -laio 289 | if [ $? -ne 0 ]; then 290 | echo Failed to compile replay-startup-io 291 | echo Maybe libaio-dev/libaio-devel is not properly installed? 292 | exit 293 | fi 294 | } 295 | 296 | ## Main ## 297 | 298 | if [[ "$BASE_DIR" = "" ]]; then 299 | echo Sorry, you configured the suite for not using filesystems, 300 | echo but this is a benchmark requiring the use of a filesystem. 
301 | exit 1 302 | fi 303 | 304 | FIRSTWORD=`echo $COMMAND | awk '{print $1}'` 305 | 306 | if [ "$FIRSTWORD" == replay-startup-io ]; then 307 | SHORTNAME=$COMMAND 308 | SECONDWORD=`echo $COMMAND | awk '{print $2}'` 309 | COMMAND="$PWD/replay-startup-io $PWD/$SECONDWORD.trace $BASE_DIR" 310 | else 311 | SHORTNAME=$FIRSTWORD 312 | fi 313 | 314 | if [[ "$FIRSTWORD" != replay-startup-io && $(which $SHORTNAME) == "" ]] ; then 315 | echo Command to invoke not found 316 | exit 317 | fi 318 | 319 | mkdir -p $STAT_DEST_DIR 320 | # turn to an absolute path (needed because current directory will be changed) 321 | STAT_DEST_DIR=`cd $STAT_DEST_DIR; pwd` 322 | 323 | rm -f $FILE_TO_WRITE 324 | 325 | if [ $FIRSTWORD == replay-startup-io ]; then 326 | if [[ ! -f replay-startup-io || \ 327 | replay-startup-io.cc -nt replay-startup-io ]]; then 328 | echo Compiling replay-startup-io ... 329 | compile_replayer 330 | fi 331 | # test command and create files 332 | $COMMAND create_files 333 | if [ $? -ne 0 ]; then 334 | echo Pre-execution of replay-startup-io failed 335 | echo Trying to recompile from source ... 336 | # trying to recompile 337 | compile_replayer 338 | $COMMAND create_files 339 | if [ $? -ne 0 ]; then 340 | echo Pre-execution of replay-startup-io failed 341 | exit 342 | fi 343 | fi 344 | else 345 | PATH_TO_CMD=$(dirname $(which $SHORTNAME)) 346 | 347 | # put into BACKING_DEVS the backing devices for $PATH_TO_CMD 348 | find_dev_for_dir $PATH_TO_CMD 349 | 350 | if [[ "$BACKING_DEVS" != "$DEVS" ]]; then 351 | echo Command exec is on different devices \($BACKING_DEVS\) 352 | echo from those of test files \($DEVS\) 353 | exit 354 | fi 355 | enable_X_access_and_test_cmd "$COMMAND" 356 | fi 357 | 358 | set_scheduler > $REDIRECT 359 | 360 | echo Preliminary sync to block until previous writes have been completed > $REDIRECT 361 | sync 362 | 363 | # create and enter work dir 364 | rm -rf results-${sched} 365 | mkdir -p results-$sched 366 | cd results-$sched 367 | 368 | rm -f Stop-iterations current-pid 369 | 370 | # setup a quick shutdown for Ctrl-C 371 | trap "clean_and_exit" sigint 372 | trap 'kill -HUP $(jobs -lp) >/dev/null 2>&1 || true' EXIT 373 | 374 | if (( $NUM_READERS > 0 || $NUM_WRITERS > 0)); then 375 | 376 | flush_caches 377 | start_readers_writers_rw_type $NUM_READERS $NUM_WRITERS $RW_TYPE \ 378 | $MAXRATE 379 | 380 | # wait for reader/writer start-up transitory to terminate 381 | secs=$(transitory_duration 3) 382 | 383 | while [ $secs -ge 0 ]; do 384 | echo -ne "Waiting for transitory to terminate: $secs\033[0K\r" 385 | sync & # let writes start as soon as possible 386 | sleep 1 387 | : $((secs--)) 388 | done 389 | echo 390 | fi 391 | 392 | # start logging aggthr 393 | if [ "$IOSTAT" == "yes" ]; then 394 | iostat -tmd /dev/$HIGH_LEV_DEV 3 | tee iostat.out > $REDIRECT & 395 | fi 396 | 397 | invoke_commands 398 | 399 | shutdwn 'fio iostat' 400 | 401 | if [[ "$XHOST_CONTROL" != "" ]]; then 402 | xhost - > /dev/null 2>&1 403 | fi 404 | 405 | if [[ $NUM_ITER -ge 2 ]]; then 406 | compute_statistics 407 | fi 408 | 409 | cd .. 410 | 411 | # rm work dir 412 | if [ -f results-${sched}/trace ]; then 413 | cp -f results-${sched}/trace . 
414 | fi 415 | rm -rf results-${sched} 416 | -------------------------------------------------------------------------------- /comm_startup_lat/replay-startup-io.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2018 Paolo Valente 3 | * Daniele Toschi 4 | * 5 | * Main dependency: libaio-devel 6 | * 7 | * Command line to compile: 8 | * g++ -pthread -Wall replay-startup-IO.cc -o replay-startup-IO -laio 9 | */ 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | using namespace std; 27 | 28 | #define DEB(A) 29 | 30 | bool debug_mode = false; 31 | 32 | const int OUT_FILE_SIZE = 20000000; // in bytes 33 | //const int OUT_FILE_SIZE = 200000000; // in bytes 34 | const string OUT_FILE_BASENAME = "replay_startup_file"; 35 | 36 | enum IO_req_type {SEQ, RAND}; 37 | 38 | struct IO_request_t { 39 | // id of the thread that will issue this request 40 | int thread_id; 41 | 42 | // time interval to wait before issuing this request, secs + nsecs 43 | timespec delta; 44 | 45 | // size of this request, in sectors (512 bytes per sector) 46 | unsigned long size; 47 | 48 | string action; 49 | IO_req_type type; 50 | }; 51 | 52 | vector IO_requests; 53 | 54 | struct thread_data_t { 55 | int id; 56 | pthread_cond_t cond; 57 | pthread_mutex_t mutex; 58 | bool please_start; 59 | int fd; 60 | io_context_t ctx; 61 | unsigned long long offset; 62 | unsigned int pending_io; 63 | }; 64 | 65 | pthread_t *threads; 66 | thread_data_t *thread_datas; 67 | unsigned long next_rq_idx; // global index of the next request to issue 68 | bool IO_finished; 69 | pthread_cond_t IO_fin_cond; 70 | pthread_mutex_t IO_fin_mutex; 71 | 72 | void do_sync_read(thread_data_t *data, void *odirect_buf) 73 | { 74 | DEB(cout<<"pread "<offset<fd, odirect_buf, 78 | 512 * IO_requests[next_rq_idx].size, 79 | data->offset) < 0) { 80 | cout<<"Thread "<id<<" failed reading"<fd, odirect_buf, 93 | 512 * IO_requests[next_rq_idx].size, 94 | data->offset); 95 | int res = io_submit(data->ctx, 1, &iocbs); 96 | if(res < 0) { 97 | cout<<"io_submit error for thread "<id<pending_io++; 101 | } 102 | 103 | void issue_next_rq(thread_data_t *data) 104 | { 105 | #if 0 // not sleeping for the moment, as emulation seems more accurate this way 106 | nanosleep(&IO_requests[next_rq_idx].delta, 0); 107 | #endif 108 | 109 | if (debug_mode) { 110 | cout<<"Rq "<id 112 | <<", Size "<offset >= 136 | OUT_FILE_SIZE - 512 * IO_requests[next_rq_idx].size) 137 | data->offset = 0; 138 | } else // preserve 512-byte alignment in memory 139 | data->offset = 140 | (rand() % 141 | (OUT_FILE_SIZE / 512 - 142 | IO_requests[next_rq_idx].size)) * 512; 143 | 144 | if (next_rq_idx + 1 < IO_requests.size() && 145 | IO_requests[next_rq_idx].action == "RA") 146 | do_async_read(data, odirect_buf); 147 | else { 148 | if (data->pending_io > 0) { 149 | struct io_event events[data->pending_io]; 150 | 151 | DEB(cout<<"Thread "<id<<": rq " 152 | <pending_io 154 | <<" pending IOs" 155 | <ctx, data->pending_io, 157 | data->pending_io, events, 0); 158 | if(ret < 0) { 159 | cout<<"io_getevents error "<id 161 | <<", rq "<pending_io = 0; 166 | } 167 | 168 | do_sync_read(data, odirect_buf); 169 | } 170 | data->offset += 512 * IO_requests[next_rq_idx].size; 171 | 172 | end: 173 | next_rq_idx++; 174 | } 175 | 176 | // these go one at a time, actually 177 | void *thread_worker(void *p) 178 | { 
179 | struct thread_data_t *data = (struct thread_data_t *)p; 180 | 181 | DEB(cout<<"Thread "<mutex); 184 | while (!data->please_start && !IO_finished) { 185 | DEB(cout<<"Thread "<id<<" blocked"<cond, &data->mutex); 187 | } 188 | pthread_mutex_unlock(&data->mutex); 189 | 190 | if (IO_finished) 191 | return 0; 192 | 193 | data->please_start = false; 194 | 195 | DEB(cout<<"Thread "<id<<" starting from line " 196 | <id 201 | <<": finished reading trace"<id) { 211 | DEB(cout<<"Thread "<id 212 | <<", next rq has id "< " 242 | <<" [create_files]" 243 | < procs_threads_map; 271 | 272 | while (getline(infile, line)) 273 | { 274 | stringstream ls(line); 275 | IO_request_t rq; 276 | 277 | string next_proc_name; 278 | ls>>next_proc_name; 279 | 280 | if (procs_threads_map.count(next_proc_name) == 0) { 281 | procs_threads_map[next_proc_name] = nr_threads; 282 | DEB(cout<<"Created id "<>deltaT; 291 | 292 | // store in a timespec structure, to use nanosleep 293 | double int_part; 294 | rq.delta.tv_sec = modf(deltaT, &int_part); 295 | rq.delta.tv_nsec = int_part; 296 | 297 | ls>>rq.size; 298 | 299 | string buffer; 300 | // throw away next field, containing rq position 301 | ls>>buffer; 302 | 303 | ls>>buffer; 304 | if (buffer == "Seq") 305 | rq.type = SEQ; 306 | else 307 | rq.type = RAND; 308 | 309 | // throw away two other, unused fields 310 | ls>>buffer>>buffer; 311 | 312 | ls>>rq.action; 313 | 314 | DEB(cout<<"Id "< $CONF_DEST_DIR/.S-config.sh 16 | 17 | if [ "$SUDO_USER" != "" ]; then 18 | chown $SUDO_USER:$SUDO_USER $CONF_DEST_DIR/.S-config.sh 19 | fi 20 | else 21 | sed 's/^#.*//g' $ROOTDIR/def_config.sh > def_file 22 | sed 's/^#.*//g' $CONF_DEST_DIR/.S-config.sh > user_file 23 | if [[ "$(diff -d -B def_file user_file)" != "" && \ 24 | $ROOTDIR/def_config.sh -nt $CONF_DEST_DIR/.S-config.sh ]] 25 | then 26 | echo Your config file \($CONF_DEST_DIR/.S-config.sh\) is older 27 | echo than my default config file. If this is ok for you, 28 | echo then just 29 | echo touch $CONF_DEST_DIR/.S-config.sh 30 | echo to eliminate this error. 31 | echo Otherwise 32 | echo rm $CONF_DEST_DIR/.S-config.sh 33 | echo to have your config file updated automatically 34 | echo with default values. 35 | rm def_file user_file 36 | exit 37 | fi 38 | rm def_file user_file 39 | fi 40 | -------------------------------------------------------------------------------- /def_config.sh: -------------------------------------------------------------------------------- 1 | # Default configuration file; do not edit this file, but the file .S-config.sh 2 | # in your home directory. The latter gets created on the very first execution 3 | # of some benchmark script (even if only the option -h is passed to the script). 4 | 5 | # Set to yes if you want to use SCSI_DEBUG; this will override your 6 | # possible choice for TEST_PARTITION below, and will set BASE_DIR too, 7 | # automatically, overriding your possible choice for BASE_DIR below. 8 | # scsi_debug is not a good option for performance profiling, use 9 | # nullb for that (next option). 10 | SCSI_DEBUG=no 11 | 12 | # Set to yes if you want to use the nullb device; this will override your 13 | # possible choice for TEST_PARTITION below, and will set BASE_DIR too, 14 | # automatically, overriding your possible choice for BASE_DIR below. 15 | NULLB=no 16 | 17 | # Set the following parameter to the name or the full path of a device 18 | # or of a partition, if you want to perform tests on that device or 19 | # partition. 
In the next comments, we assume for simplicity that 20 | # TEST_DEV is set to just the name of a device. 21 | # 22 | # The following two alternatives are handled differently: first, the 23 | # device contains the partition ${TEST_DEV}1 (or ${TEST_DEV}p1, 24 | # depending on the type of device) and ${TEST_DEV}1 contains a mounted 25 | # filesystem; second, the latter compound condition does not hold. In 26 | # the first case, all test files are created in that filesystem. In 27 | # the second case, the execution is simply aborted if the following 28 | # FORMAT parameter is not set to yes. If, instead, FORMAT is set to 29 | # yes, then 30 | # - the device is formatted so as to contain a partition ${TEST_DEV}1 31 | # or ${TEST_DEV}p1; 32 | # - an ext4 filesystem is made on that partition; 33 | # - that filesystem is mounted. 34 | # 35 | # If you go directly for a partition, then the latter must contain a 36 | # mounted filesystem. 37 | # 38 | # In all succesful cases, all test files are created in the filesystem 39 | # contained in the test partition. 40 | # 41 | # If TEST_DEV is set, then BASE_DIR will be built automatically, 42 | # overriding your possible choice below. 43 | # 44 | # Be careful in setting TEST_DEV manually in case of bcache or 45 | # raids. With these configurations, more then one device is involved. 46 | # The simplest option is to set BASE_DIR to a directory stored on 47 | # these devices, and let the code of the suite automatically detect 48 | # all the involved devices. 49 | TEST_DEV= 50 | 51 | # If set to yes, then $TEST_DEV is (re)formatted if needed, as 52 | # explained in detail in the comments on TEST_DEV. 53 | FORMAT=no 54 | 55 | # Directory containing files read/written during benchmarks. The path 56 | # "$PWD/../" points to S root directory. The value for BASE_DIR chosen 57 | # here is overridden if SCSI_DEBUG or TEST_DEV is set. 58 | BASE_DIR=$PWD/../workfiles 59 | 60 | # Next parameter contains the names of the devices the test files are 61 | # on (devices may be more than one in case of a RAID 62 | # configuration). Those devices are the ones for which, e.g., the I/O 63 | # scheduler is changed, if you do ask the benchmarks to select the 64 | # scheduler(s) to use. These devices are detected automatically. If 65 | # automatic detection does not work, or is not wat you want, then just 66 | # reassign the value of DEVS. 67 | # For example: DEVS=sda. 68 | # 69 | # For the same reasons pointed out for TEST_DEV, in case of bcache or 70 | # raids it may not be so easy to set DEVS correctly. It is simpler and 71 | # safer to set, instead, BASE_DIR to a directory stored on the 72 | # involved devices, and let the code of the suite automatically detect 73 | # these devices correctly. 74 | DEVS= 75 | 76 | # Size of (each of) the files to create for reading/writing, in 77 | # MiB. If left empty, then automatically set to the maximum value that 78 | # guarantees that at most 50% of the free space is used, or left to 79 | # the value used in last file creation, if lower than the latter 80 | # threshold. For random I/O with rotational devices, consider that 81 | # the size of the files may heavily influence throughput and, in 82 | # general, service properties. 83 | # 84 | # Change at your will, if you prefer a different value. 
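# A purely illustrative, commented-out example (device name and size below
# are hypothetical, not shipped defaults): to run the suite against a spare
# drive /dev/sdb with fixed-size test files, one might set, in ~/.S-config.sh,
#
#   TEST_DEV=sdb      # or /dev/sdb; a leading /dev/ is stripped anyway
#   FORMAT=no         # abort, rather than reformat, if no filesystem is found
#   FILE_SIZE_MB=500  # 500 MiB test files instead of automatic sizing
#
# For bcache or RAID setups, the safer alternative described above is to
# leave TEST_DEV empty and point BASE_DIR to a directory on the devices of
# interest.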
85 | FILE_SIZE_MB= 86 | 87 | # Portion, in 1M blocks, to read for each file, used only in 88 | # fairness.sh; make sure it is not larger than $FILE_SIZE_MB 89 | NUM_BLOCKS=2000 90 | 91 | # If equal to 1, tracing is enabled during each test 92 | TRACE=0 93 | 94 | # The kernel-development benchmarks expect a repository in the 95 | # following directory. In particular, they play with v5.1, v5.2, so 96 | # they expect these versions to be present. If you have a repository 97 | # ready for testing, then make the following parameter point to 98 | # it. Otherwise see comments on next parameter. 99 | KERN_DIR=$BASE_DIR/linux.git-for_kern_dev_benchmarks 100 | # If no repository is found in the above directory, then a repository 101 | # is cloned therein. The source URL is stored in the following 102 | # variable. 103 | KERN_REMOTE=https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 104 | 105 | # NCQ queue depth, if undefined then no script will change the current value 106 | NCQ_QUEUE_DEPTH= 107 | 108 | # Set this variable to the name of your package manager, if auto detect fails 109 | PACKAGE_MANAGER= 110 | 111 | # Mail-report parameters. A mail transfer agent (such as msmtp) and a mail 112 | # client (such as mailx) must be installed to be able to send mail reports. 113 | # The sender e-mail address will be the one configured as default in the 114 | # mail client itself. 115 | MAIL_REPORTS=0 116 | MAIL_REPORTS_RECIPIENT= 117 | -------------------------------------------------------------------------------- /fairness/fairness.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (C) 2013 Paolo Valente 3 | # Arianna Avanzini 4 | 5 | ../utilities/check_dependencies.sh awk dd fio iostat bc 6 | if [[ $? -ne 0 ]]; then 7 | exit 8 | fi 9 | 10 | . ../config_params.sh 11 | . ../utilities/lib_utils.sh 12 | CALC_AVG_AND_CO=`cd ../utilities; pwd`/calc_avg_and_co.sh 13 | 14 | # see the following string for usage, or invoke fairness -h 15 | usage_msg="\ 16 | Usage (as root):\n\ 17 | ./fairness.sh [cur-sched | bfq | cfq | ...] 
[num_files] [iterations] [file_size_MB] \n\ 18 | [seq | rand] [weights]\n\ 19 | \n\ 20 | For example:\n\ 21 | sudo ./fairness.sh bfq 2 10 100 seq 1000 500\n\ 22 | switches to bfq and launches 10 iterations of 2 sequential readers of 2 \n\ 23 | different files of 100MB each; the first reader has weight 1000, the second\n\ 24 | 500.\n\ 25 | \n\ 26 | Default parameter values are bfq, 4, 2, $NUM_BLOCKS, and 100 for every reader\n" 27 | 28 | sched=${1-bfq} 29 | NUM_FILES=${2-4} 30 | ITERATIONS=${3-2} 31 | NUM_BLOCKS=${4-$NUM_BLOCKS} 32 | R_TYPE=${5-seq} 33 | BFQ_NEW_VERSION=Y 34 | 35 | function create_reader_and_assign_to_group { 36 | WL_TYPE=$1 37 | ITER_IDX=$2 38 | GROUP_IDX=$3 39 | FNAME=$4 40 | echo $BASHPID > /cgroup/test$GROUP_IDX/tasks 41 | 42 | if [[ "$WL_TYPE" == "seq" ]]; then 43 | dd if=$FNAME of=/dev/null bs=1M \ 44 | count=$(((${NUM_BLOCKS}*${WEIGHT[$GROUP_IDX]})/$max_w)) \ 45 | 2>&1 | tee iter-$ITER_IDX/singles/reader-$GROUP_IDX 46 | else 47 | fio --name=readers --rw=randread --numjobs=1 --randrepeat=0 \ 48 | --size=$(((${NUM_BLOCKS}*${WEIGHT[$GROUP_IDX]})/$max_w))M \ 49 | --filename=$FNAME --minimal \ 50 | 2>&1 | tee iter-$ITER_IDX/singles/reader-$GROUP_IDX 51 | fi 52 | } 53 | 54 | if [ "$1" == "-h" ]; then 55 | printf "$usage_msg" 56 | exit 57 | fi 58 | 59 | # set proper group 60 | if [[ "${sched}" == "bfq" || "${sched}" == "bfq-mq" || \ 61 | "${sched}" == "bfq-sq" ]] ; then 62 | if [ "${BFQ_NEW_VERSION}" == "Y" ]; then 63 | GROUP="blkio" 64 | PREFIX="${sched}." 65 | else 66 | GROUP="bfqio" 67 | PREFIX="" 68 | fi 69 | elif [ "${sched}" == "cfq" ] ; then 70 | GROUP="blkio" 71 | PREFIX="" 72 | fi 73 | 74 | mkdir -p /cgroup 75 | umount /cgroup 76 | echo mount -t cgroup -o $GROUP none /cgroup 77 | mount -t cgroup -o $GROUP none /cgroup 78 | 79 | # load file names and create group dirs 80 | FILES="" 81 | for ((i = 0 ; $i < $NUM_FILES ; i++)) ; do 82 | mkdir -p /cgroup/test$i 83 | FILES+="${BASE_FILE_PATH}$i " 84 | done 85 | 86 | # create files to read 87 | create_files_rw_type $NUM_FILES 88 | 89 | # initialize weight array 90 | echo -n "Weights:" 91 | args=("$@") 92 | max_w=${WEIGHT[0]} 93 | for ((i = 0 ; $i < $NUM_FILES ; i++)) ; do 94 | if [ "${args[$(($i+5))]}" != "" ] ; then 95 | WEIGHT[$i]=${args[$(($i+5))]} 96 | else 97 | WEIGHT[$i]=100 98 | fi 99 | if [[ ${WEIGHT[$i]} -gt $max_w ]] ; then 100 | max_w=${WEIGHT[$i]} 101 | fi 102 | echo -n " ${WEIGHT[$i]}" 103 | echo ${WEIGHT[$i]} > /cgroup/test$i/$GROUP.${PREFIX}weight 104 | done 105 | echo 106 | 107 | # create result dir tree and cd to its root 108 | rm -rf results-${sched} 109 | mkdir -p results-$sched 110 | for ((i = 0 ; $i < ${ITERATIONS} ; i++)) ; do 111 | mkdir -p results-$sched/iter-$i/singles 112 | done 113 | cd results-$sched 114 | 115 | # switch to the desired scheduler 116 | set_scheduler 117 | 118 | # If the scheduler under test is BFQ or CFQ, then disable the 119 | # low_latency heuristics to not ditort results. 
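# (Editorial aside: low_latency is a heuristic meant to privilege newly
# started and interactive queues, which would skew a pure weight-
# proportionality measurement; its previous value is saved and restored
# below. As a quick, purely illustrative check of the weight-proportional
# sizing performed by create_reader_and_assign_to_group above:
#
#   NUM_BLOCKS=2000; WEIGHT=(1000 500); max_w=1000
#   for i in 0 1; do
#           echo "reader $i reads $(( (NUM_BLOCKS * WEIGHT[i]) / max_w )) MiB"
#   done
#
# prints 2000 and 1000, i.e. each reader is asked to read an amount of data
# proportional to its weight, so roughly equal completion times indicate
# that throughput was indeed shared in proportion to the weights.)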
120 | if [[ "$sched" == "bfq-mq" || "$sched" == "bfq" || \ 121 | "$sched" == "cfq" ]]; then 122 | for dev in $DEVS; do 123 | PREVIOUS_VALUE=$(cat /sys/block/$dev/queue/iosched/low_latency) 124 | echo "Disabling low_latency on $dev" 125 | echo 0 > /sys/block/$dev/queue/iosched/low_latency 126 | done 127 | fi 128 | 129 | function restore_low_latency 130 | { 131 | if [[ "$sched" == "bfq-mq" || "$sched" == "bfq" || \ 132 | "$sched" == "cfq" ]]; then 133 | 134 | for dev in $DEVS; do 135 | echo Restoring previous value of low_latency on $dev 136 | echo $PREVIOUS_VALUE >\ 137 | /sys/block/$dev/queue/iosched/low_latency 138 | done 139 | fi 140 | } 141 | 142 | # setup a quick shutdown for Ctrl-C 143 | trap "shutdwn 'dd fio iostat'; restore_low_latency; exit" sigint 144 | 145 | # init and turn on tracing if TRACE==1 146 | init_tracing 147 | set_tracing 1 148 | 149 | for ((i = 0 ; $i < $ITERATIONS ; i++)) ; do 150 | echo Iteration $(($i+1))/$ITERATIONS 151 | echo Flushing caches 152 | flush_caches 153 | 154 | # start readers 155 | idx=0 156 | echo $FILES 157 | for f in $FILES ; do 158 | (create_reader_and_assign_to_group $R_TYPE $i $idx $f) & 159 | idx=$(($idx+1)) 160 | done 161 | 162 | # wait a just a little bit for all the readers to start 163 | sleep 2 164 | 165 | # start logging aggregated throughput 166 | iostat -tmd /dev/$HIGH_LEV_DEV 1 | tee iter-$i/iostat.out & 167 | 168 | if [[ "$TIMEOUT" != "0" && "$TIMEOUT" != "" ]]; then 169 | bash -c "sleep $TIMEOUT && \ 170 | echo Timeout: killing readers ;\ 171 | killall -q -s USR1 dd ; sleep 1 ;\ 172 | killall -q fio dd" & 173 | KILLPROC=$! 174 | disown 175 | fi 176 | 177 | # wait for all the readers to complete 178 | for ((j = $((($i*(${NUM_FILES}+1))+1)) ; \ 179 | $j <= $((($i*(${NUM_FILES}+1))+${NUM_FILES})) ; j++)) ; do 180 | wait %$j 181 | done 182 | 183 | if [[ "$KILLPROC" != "" && "$(ps $KILLPROC | tail -n +2)" != "" ]]; 184 | then 185 | kill -9 $KILLPROC > /dev/null 2>&1 186 | fi 187 | KILLPROC= 188 | killall iostat 189 | done 190 | 191 | set_tracing 0 192 | 193 | # destroy cgroups and unmount controller 194 | for ((i = 0 ; $i < $NUM_FILES ; i++)) ; do 195 | rmdir /cgroup/test$i 196 | done 197 | umount /cgroup 198 | rm -rf /cgroup 199 | 200 | for ((i = 0 ; $i < $ITERATIONS ; i++)) ; do 201 | cd iter-$i 202 | len=$(cat iostat.out | grep ^$HIGH_LEV_DEV | wc -l) 203 | echo Aggregated Throughtput in iteration $i | tee -a ../output 204 | cat iostat.out | grep ^$HIGH_LEV_DEV | awk '{ print $3 }' | \ 205 | tail -n$(($len-3)) | head -n$(($len-3)) > iostat-aggthr 206 | $CALC_AVG_AND_CO 99 < iostat-aggthr | tee -a ../output 207 | 208 | echo reader time stats in iteration $i | tee -a ../output 209 | if [[ "$R_TYPE" == "seq" ]]; then 210 | cat singles/* | grep "copied\|copiati" | awk '{ print $6 }' \ 211 | > time 212 | else 213 | rm -f time; touch time 214 | for s in $(ls -1 singles/*); do 215 | time=$(cat $s | grep "fio-" | cut -d\; -f9) 216 | # time is expressed in msec in the 217 | # minimal output of the fio utility 218 | echo $(echo "$time/1000" | bc -l) >> time 219 | done 220 | fi 221 | $CALC_AVG_AND_CO 99 < time | tee -a ../output 222 | 223 | echo reader bandwith stats in iteration $i | tee -a ../output 224 | if [[ "$R_TYPE" == "seq" ]]; then 225 | cat singles/* | grep "copied\|copiati" | awk '{ print $8 }' \ 226 | > band 227 | else 228 | rm -f band; touch band 229 | for s in $(ls -1 singles/*); do 230 | band=$(cat $s | grep "fio-" | cut -d\; -f7) 231 | # bandwidth is expressed in KB/s in the 232 | # minimal output of the fio utility 233 | echo 
$(echo "$band/1024" | bc -l) >> band 234 | done 235 | fi 236 | $CALC_AVG_AND_CO 99 < band | tee -a ../output 237 | cd .. 238 | done 239 | 240 | restore_low_latency 241 | 242 | cd .. 243 | -------------------------------------------------------------------------------- /file-copy/file-copy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (C) 2013 Paolo Valente 3 | # Copyright (C) 2018 Quirino Leone 4 | 5 | ../utilities/check_dependencies.sh awk dd fio iostat pv 6 | if [[ $? -ne 0 ]]; then 7 | exit 8 | fi 9 | 10 | export LC_NUMERIC=C 11 | . ../config_params.sh 12 | . ../utilities/lib_utils.sh 13 | 14 | sched=$1 15 | NUM_COPIERS=${2-1} 16 | ITERATIONS=${3-10} 17 | SYNC=${4-yes} 18 | MAXRATE=${5-0} # maximum value for which the system apparently does 19 | # not risk to become unresponsive under bfq with a 90 20 | # MB/s hard disk 21 | PIPE=${6-pipe} 22 | 23 | # see the following string for usage, or invoke file-copy.sh -h 24 | usage_msg="\ 25 | Usage (as root):\n\ 26 | ./file-copy.sh [\"\" | bfq | cfq | ...] [num_copies] [num_iterations]\n\ 27 | [sync] [max_kB-per-sec] [nopipe]\n\ 28 | \n\ 29 | first parameter equal to \"\" or cur-sched -> do not change scheduler\n\ 30 | sync parameter equal to yes -> invoke sync before starting readers/writers\n\ 31 | max_kB-per-sec parameter equal to 0 -> no limitation on the maxrate\n\ 32 | nopipe parameter set -> execute only one dd command, instead of a pair\n\ 33 | with a pipe in between (only with max_kB-per-sec=0)\n\ 34 | For example:\n\ 35 | sudo ./file-copy.sh bfq 10 3 yes 10000\n\ 36 | switches to bfq and launches, for 3 times, 10 copies in parallel,\n\ 37 | with each copy reading from/writing to a distinct file, at a maximum rate\n\ 38 | equal to 10000 kB/sec.\n\ 39 | \n\ 40 | Default parameter values are \"\", ${NUM_COPIERS}, $ITERATIONS, $SYNC, $MAXRATE and $PIPE\n" 41 | 42 | if [ "$1" == "-h" ]; then 43 | printf "$usage_msg" 44 | exit 45 | fi 46 | 47 | SUFFIX=-to-copy 48 | 49 | # create and enter work dir 50 | rm -rf results-${sched} 51 | mkdir -p results-$sched 52 | cd results-$sched 53 | 54 | # switch to the desired scheduler 55 | set_scheduler 56 | 57 | # setup a quick shutdown for Ctrl-C 58 | trap "shutdwn dd; exit" sigint 59 | 60 | echo Flushing caches 61 | if [ "$SYNC" != "yes" ]; then 62 | echo 3 > /proc/sys/vm/drop_caches 63 | else 64 | flush_caches 65 | fi 66 | 67 | # create the file to copy if it doesn't exist 68 | create_files $NUM_COPIERS $SUFFIX 69 | 70 | init_tracing 71 | set_tracing 1 72 | 73 | for ((iter = 1 ; $ITERATIONS == 0 || $iter <= $ITERATIONS ; iter++)) 74 | do 75 | if [[ $ITERATIONS -gt 0 ]]; then 76 | echo Iteration $iter / $ITERATIONS 77 | fi 78 | # start $NUM_COPIES copiers 79 | for ((i = 0 ; $i < $NUM_COPIERS ; i++)) 80 | do 81 | if [[ $MAXRATE -eq 0 && "$PIPE" == "pipe" ]]; then 82 | dd if=${BASE_FILE_PATH}$SUFFIX$i 2>&1 | \ 83 | dd of=${BASE_FILE_PATH}-copy$i > /dev/null 2>&1 & 84 | elif [[ $MAXRATE -eq 0 && "$PIPE" == "nopipe" ]]; then 85 | dd if=${BASE_FILE_PATH}$SUFFIX$i 2>&1 of=${BASE_FILE_PATH}-copy$i \ 86 | > /dev/null 2>&1 & 87 | else 88 | dd if=${BASE_FILE_PATH}$SUFFIX$i 2>&1 | \ 89 | pv -q -L $(($MAXRATE / $NUM_COPIERS))k 2>&1 | \ 90 | dd of=${BASE_FILE_PATH}-copy$i > /dev/null 2>&1 & 91 | fi 92 | done 93 | echo "Copying $NUM_COPIERS file(s)" 94 | echo 95 | time wait 96 | flush_caches 97 | done 98 | 99 | shutdwn dd 100 | 101 | cd .. 
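# (Editorial aside with illustrative numbers: in the rate-limited branch
# above, MAXRATE acts as an aggregate budget that is split evenly among the
# copiers. With the usage-message example
#
#   sudo ./file-copy.sh bfq 10 3 yes 10000
#
# each of the 10 dd | pv | dd pipelines is capped by "pv -q -L 1000k",
# i.e. $((10000 / 10)) kB/s, so it is the total copy rate, not the rate of
# each single copy, that stays within 10000 kB/s.)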
102 | 103 | # rm work dir 104 | rm -rf results-${sched} 105 | -------------------------------------------------------------------------------- /interleaved_io/interleaved_io.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (C) 2013 Mauro Andreolini 3 | # Arianna Avanzini 4 | 5 | ../utilities/check_dependencies.sh awk dd fio iostat 6 | if [[ $? -ne 0 ]]; then 7 | exit 8 | fi 9 | 10 | . ../config_params.sh 11 | . ../utilities/lib_utils.sh 12 | 13 | sched=${1-bfq} 14 | NUM_READERS=${2-3} 15 | STAT_DEST_DIR=${3-.} 16 | DURATION=${4-30} 17 | DIS_LOW_LATENCY=NO # If set to YES, then also disable low latency 18 | 19 | # see the following string for usage, or invoke interleaved_io.sh -h 20 | usage_msg="\ 21 | Usage (as root):\n\ 22 | ./interleaved_io.sh [cur-sched | bfq | cfq | ...] [num_readers]\n\ 23 | [stat_dest_dir] [duration]\n\ 24 | \n\ 25 | For example:\n\ 26 | sudo ./interleaved_io.sh bfq 3 ..\n\ 27 | switches to bfq and launches 3 interleaved readers on the same device.\n\ 28 | The file containing the computed stats is stored\n\ 29 | in the .. dir with respect to the cur dir.\n\ 30 | \n\ 31 | Default parameter values are bfq, 3, . and $DURATION\n 32 | With CFQ and BFQ, it is also possible to disable low latency\n" 33 | 34 | if [ "$1" == "-h" ]; then 35 | printf "$usage_msg" 36 | exit 37 | fi 38 | 39 | mkdir -p $STAT_DEST_DIR 40 | # turn to an absolute path (needed later) 41 | STAT_DEST_DIR=`cd $STAT_DEST_DIR; pwd` 42 | 43 | # create and enter work dir 44 | rm -rf results-${sched} 45 | mkdir -p results-$sched 46 | cd results-$sched 47 | 48 | # switch to the desired scheduler 49 | set_scheduler 50 | 51 | # If the scheduler under test is BFQ or CFQ, then disable the 52 | # low_latency heuristics to not ditort results. 53 | if [[ "$DIS_LOW_LATENCY" != "NO" ]]; then 54 | if [[ "$sched" == "bfq-mq" || "$sched" == "bfq" || \ 55 | "$sched" == "cfq" ]]; then 56 | for dev in $DEVS; do 57 | PREVIOUS_VALUE=$(cat /sys/block/$dev/queue/iosched/low_latency) 58 | echo "Disabling low_latency on $dev" >/dev/$OUT 2>&1 59 | echo 0 > /sys/block/$dev/queue/iosched/low_latency 60 | done 61 | fi 62 | fi 63 | 64 | function restore_low_latency 65 | { 66 | if [[ "$sched" == "bfq-mq" || "$sched" == "bfq" || \ 67 | "$sched" == "cfq" ]]; then 68 | for dev in $DEVS; do 69 | echo Restoring previous value of low_latency on $dev 70 | echo $PREVIOUS_VALUE >\ 71 | /sys/block/$dev/queue/iosched/low_latency 72 | done 73 | fi 74 | } 75 | 76 | # setup a quick shutdown for Ctrl-C 77 | trap "shutdwn 'fio iostat' ; restore_low_latency; exit" sigint 78 | 79 | flush_caches 80 | 81 | init_tracing 82 | set_tracing 1 83 | 84 | start_interleaved_readers /dev/$HIGH_LEV_DEV ${NUM_READERS} & 85 | 86 | # wait for reader start-up transitory to terminate 87 | sleep 5 88 | 89 | # start logging interleaved test 90 | iostat -tmd /dev/$HIGH_LEV_DEV 2 | tee iostat.out & 91 | 92 | echo Test duration: $DURATION secs 93 | sleep $DURATION 94 | 95 | shutdwn 'fio iostat' 96 | 97 | mkdir -p $STAT_DEST_DIR 98 | file_name=$STAT_DEST_DIR/\ 99 | ${sched}-${NUM_READERS}r-int_io_stat.txt 100 | echo "Results for $sched, $NUM_READERS readers" | tee $file_name 101 | print_save_agg_thr $file_name 102 | 103 | cd .. 104 | 105 | # rm work dir 106 | if [ -f results-${sched}/trace ]; then 107 | cp -f results-${sched}/trace . 
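	# (Note: this branch only does something when tracing was enabled by
	# setting TRACE=1 in ~/.S-config.sh; the trace produced via
	# init_tracing/set_tracing is preserved before the work dir is removed.)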
108 | fi 109 | 110 | rm -rf results-${sched} 111 | 112 | restore_low_latency 113 | -------------------------------------------------------------------------------- /kern_dev_tasks-vs-rw/kern_dev_tasks_vs_rw.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (C) 2013 Paolo Valente 3 | # Arianna Avanzini 4 | 5 | ../utilities/check_dependencies.sh awk dd fio iostat git make 6 | if [[ $? -ne 0 ]]; then 7 | exit 8 | fi 9 | 10 | . ../config_params.sh 11 | . ../utilities/lib_utils.sh 12 | 13 | sched=$1 14 | NUM_READERS=${2-1} 15 | NUM_WRITERS=${3-1} 16 | RW_TYPE=${4-seq} 17 | TASK=${5-make} 18 | STAT_DEST_DIR=${6-.} 19 | MAXRATE=${7-4000} # maximum total sequential write rate for which the 20 | # system apparently does not risk to become 21 | # unresponsive under bfq with a 90 MB/s hard disk 22 | # (see comments in script comm_startup_lat.sh) 23 | 24 | # see the following string for usage, or invoke task_vs_rw.sh -h 25 | usage_msg="\ 26 | Usage: 27 | ./kern_dev_tasks_vs_rw.sh [\"\" | bfq | cfq | ...] [num_readers] [num_writers] 28 | [seq | rand | raw_seq | raw_rand] 29 | [make | merge | grep] [results_dir] 30 | [max_write-kB-per-sec] 31 | 32 | first parameter equal to \"\" -> do not change scheduler 33 | raw_seq/raw_rand -> read directly from device (no writers allowed) 34 | 35 | For example: 36 | sudo ./kern_dev_tasks_vs_rw.sh bfq 10 rand merge .. 37 | switches to bfq and launches 10 rand readers and 10 rand writers 38 | aganinst a kernel merge, 39 | with each reader reading from the same file. The file containing 40 | the computed stats is stored in the .. dir with respect to the cur dir. 41 | 42 | Default parameters values are \"\", $NUM_READERS, $NUM_WRITERS, \ 43 | $RW_TYPE, $TASK, $STAT_DEST_DIR and $MAXRATE. 44 | 45 | The output of the script depends on the command to be executed and is related 46 | to the fact that some tests have a preset duration. For a kernel make, the 47 | duration of the test is fixed, and the output is the number of lines given as 48 | output by the command, which represent the number of files processed during the 49 | make; this gives an idea of the completion level of the command. A more accurate 50 | output is given in case of a git merge, since the output of the command 51 | itself gives the completion percentage of the command in the fixed amount of time 52 | of the test; this is reported in the output of the script. In case of a git grep, 53 | the duration of the test is not fixed, but bounded by a maximum duration. The 54 | output of the script is the duration of the execution of the command in seconds. 55 | 56 | See the comments on KERN_* paramters in ~/.S-config.sh for details on 57 | test repositories. If ~/.S-config.sh does not exist, just invoke this 58 | script with -h to have it created. 59 | 60 | " 61 | 62 | if [ "$1" == "-h" ]; then 63 | printf "$usage_msg" 64 | exit 65 | fi 66 | 67 | function cleanup_and_exit { 68 | echo $1 69 | shutdwn 'fio iostat make git' 70 | cd .. 71 | # rm work dir 72 | if [ -f results-${sched}/trace ]; then 73 | cp -f results-${sched}/trace . 
74 | fi 75 | rm -rf results-${sched} 76 | exit 77 | } 78 | 79 | function check_timed_out { 80 | what=$1 81 | task=$2 82 | cur=$3 83 | timeout=$4 84 | 85 | echo -ne "$1-waiting time / Timeout: $cur / $timeout\033[0K\r" 86 | if [ $cur -eq $timeout ]; then 87 | cleanup_and_exit "$task timed out, shutting down and removing all files" 88 | fi 89 | } 90 | 91 | # MAIN 92 | 93 | if [ "$TASK" == checkout ]; then 94 | echo checkout temporarily disabled, because of insufficient 95 | echo output produced by newer git versions 96 | exit 97 | fi 98 | 99 | mkdir -p $STAT_DEST_DIR 100 | # turn to an absolute path (needed later) 101 | STAT_DEST_DIR=`cd $STAT_DEST_DIR; pwd` 102 | 103 | if [[ -d ${KERN_DIR}/.git ]]; then 104 | rm -f $KERN_DIR/.git/index.lock 105 | else 106 | echo No linux repository found in $KERN_DIR 107 | echo You can put one there yourself, or I can clone a remote repository for you 108 | read -p "Do you want me to clone a remote repository? " yn 109 | for yes_answer in y Y yes Yes YES; do 110 | if [ "$yn" == $yes_answer ]; then 111 | yn=y 112 | fi 113 | done 114 | if [ "$yn" != y ]; then 115 | exit 116 | fi 117 | 118 | mkdir -p ${BASE_DIR} 119 | echo Cloning into $KERN_DIR ... 120 | git clone --branch v5.1 $KERN_REMOTE $KERN_DIR 121 | fi 122 | 123 | if [[ ! -f $KERN_DIR/.config ]]; then 124 | SRC_CONF=$(ls /boot/config-$(uname -r)) 125 | cp $SRC_CONF $KERN_DIR/.config 126 | fi 127 | 128 | (cd $KERN_DIR && 129 | if [ "`git branch | grep base_branch`" == "" ]; then 130 | echo Creating the base branch &&\ 131 | git branch base_branch v5.1 ;\ 132 | fi) 133 | 134 | echo Executing $TASK prologue before actual test 135 | # task prologue 136 | case $TASK in 137 | make) 138 | (cd $KERN_DIR && 139 | if [ "`git branch | head -n 1`" != "* base_branch" ]; then 140 | echo Switching to base_branch &&\ 141 | git checkout -f base_branch ;\ 142 | else 143 | echo Already on base_branch 144 | fi 145 | make mrproper && make defconfig) 146 | echo clean finished 147 | ;; 148 | checkout) # disabled! 
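		# (Editorial note: this branch is kept for reference but is never
		# reached, because of the early exit near the top of the script;
		# newer git versions apparently no longer emit the "Checking out
		# files" progress lines that the completion-level parsing below
		# relies on.)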
149 | (cd $KERN_DIR &&\ 150 | echo Switching to base_branch &&\ 151 | git checkout -f base_branch &&\ 152 | echo Removing previous branches &&\ 153 | git branch -D test1 ;\ 154 | echo Creating the branch to switch to &&\ 155 | git branch test1 v5.2) 156 | ;; 157 | merge) 158 | (cd $KERN_DIR &&\ 159 | echo Renaming the first branch if existing &&\ 160 | git branch -M test1 to_delete;\ 161 | echo Creating first branch to merge &&\ 162 | git branch test1 v5.1 &&\ 163 | echo Switching to the first branch and cleaning &&\ 164 | git checkout -f test1 &&\ 165 | git clean -f -d ; 166 | echo Removing previous branches &&\ 167 | git branch -D to_delete test2 ;\ 168 | echo Creating second branch to merge &&\ 169 | git branch test2 v5.2) 170 | ;; 171 | grep) 172 | (cd $KERN_DIR && 173 | if [ "`git branch | head -n 1`" != "* base_branch" ]; then 174 | echo Switching to base_branch &&\ 175 | git checkout -f base_branch ;\ 176 | else 177 | echo Already on base_branch 178 | fi) 179 | ;; 180 | *) 181 | echo Wrong task name $TASK 182 | exit 183 | ;; 184 | esac 185 | echo Prologue finished 186 | 187 | set_scheduler 188 | 189 | # create and enter work dir 190 | rm -rf results-${sched} 191 | mkdir -p results-$sched 192 | cd results-$sched 193 | 194 | # setup a quick shutdown for Ctrl-C 195 | trap "shutdwn 'fio iostat make git' ; exit" sigint 196 | 197 | curr_dir=$PWD 198 | 199 | echo Flushing caches 200 | flush_caches 201 | 202 | # init and turn on tracing if TRACE==1 203 | init_tracing 204 | set_tracing 1 205 | 206 | # start task 207 | case $TASK in 208 | make) 209 | (cd $KERN_DIR && make -j5 | tee ${curr_dir}/$TASK.out) & 210 | waited_pattern="arch/x86/kernel/time\.o" 211 | ;; 212 | checkout) # disabled! 213 | (cd $KERN_DIR && echo git checkout test1 ;\ 214 | echo\ 215 | "git checkout -f test1 2>&1 |tee ${curr_dir}/$TASK.out" 216 | git checkout -f test1 2>&1 |tee ${curr_dir}/$TASK.out) & 217 | waited_pattern="(Checking out files)|(Switched)" 218 | ;; 219 | merge) 220 | (cd $KERN_DIR && echo git merge test2 ;\ 221 | echo "git merge test2 2>&1 | tee ${curr_dir}/$TASK.out" 222 | git merge test2 2>&1 | tee ${curr_dir}/$TASK.out) & 223 | waited_pattern="Checking out files" 224 | ;; 225 | grep) 226 | echo Executing grep task 227 | rm -f ${curr_dir}/timefile 228 | (cd $KERN_DIR && /usr/bin/time -f %e git grep foo > ${curr_dir}/$TASK.out 2> ${curr_dir}/timefile) & 229 | waited_pattern="Documentation/admin-guide/bug-hunting.rst" 230 | ;; 231 | esac 232 | 233 | echo 234 | echo Waiting for $TASK to start before setting the timeout. 235 | if [ $TASK == make ]; then 236 | echo In particular, for make we wait for the begininning of actual 237 | echo source compilation, to leave out the initial configuration part. 238 | echo In fact, the workload and execution time of this part may vary 239 | echo significantly, thereby distorting results with any scheduler. 240 | fi 241 | echo 242 | 243 | count=0 244 | while ! 
grep -E "$waited_pattern" $TASK.out > /dev/null 2>&1 ; do 245 | sleep 1 246 | count=$(($count+1)) 247 | check_timed_out Pattern $TASK $count 120 248 | done 249 | 250 | echo 251 | echo Pattern read 252 | 253 | if grep "Switched" $TASK.out > /dev/null ; then 254 | cleanup_and_exit "$TASK already finished, shutting down and removing all files" 255 | fi 256 | 257 | if (( $NUM_READERS > 0 || $NUM_WRITERS > 0)); then 258 | start_readers_writers_rw_type $NUM_READERS $NUM_WRITERS $RW_TYPE \ 259 | $MAXRATE 260 | 261 | # wait for reader/writer start-up transitory to terminate 262 | SLEEP=$(transitory_duration 7) 263 | echo "Waiting for transitory to terminate ($SLEEP seconds)" 264 | sleep $SLEEP 265 | fi 266 | 267 | # start logging aggthr; use a short interval as the test itself might be short 268 | iostat -tmd /dev/$HIGH_LEV_DEV 1 | tee iostat.out & 269 | 270 | # store the current number of lines, or the current completion level, 271 | # to subtract it from the total for make or merge 272 | if [ "$TASK" == "make" ]; then 273 | initial_completion_level=`cat $TASK.out | wc -l` 274 | else 275 | initial_completion_level=`sed 's/\r/\n/g' $TASK.out |\ 276 | grep "Checking out files" |\ 277 | tail -n 1 | awk '{printf "%d", $4}'` 278 | fi 279 | 280 | if [ "$TASK" != "grep" ]; then 281 | test_dur=20 282 | else 283 | test_dur=60 # git-grep test typically lasts for more than 20 seconds 284 | fi 285 | 286 | echo Test duration $test_dur secs 287 | 288 | if [ "$TASK" == "grep" ]; then 289 | count=0 290 | completion_pattern="arch/x86/include/asm/processor.h" 291 | while pgrep git > /dev/null && 292 | ! grep -E "$completion_pattern" $TASK.out > /dev/null 2>&1 ; do 293 | sleep 1 294 | count=$((count+1)) 295 | check_timed_out Completion $TASK $count $test_dur 296 | done 297 | echo 298 | else 299 | sleep $test_dur 300 | fi 301 | 302 | # test finished, shutdown what needs to 303 | shutdwn 'fio iostat make git' 304 | 305 | file_name=$STAT_DEST_DIR/\ 306 | ${sched}-${TASK}_vs_${NUM_READERS}r${NUM_WRITERS}w-${RW_TYPE}-stat.txt 307 | echo "Results for $sched, $NUM_READERS $RW_TYPE readers and $NUM_WRITERS\ 308 | $RW_TYPE against a $TASK" | tee $file_name 309 | 310 | case $TASK in 311 | make) 312 | final_completion_level=`cat $TASK.out | wc -l` 313 | ;; 314 | grep) 315 | # timefile has been filled with test completion time 316 | TIME=`cat timefile` 317 | ;; 318 | *) 319 | final_completion_level=`sed 's/\r/\n/g' $TASK.out |\ 320 | grep "Checking out files" |\ 321 | tail -n 1 | awk '{printf "%d", $4}'` 322 | ;; 323 | esac 324 | printf "Adding to $file_name -> " 325 | 326 | if [ "$TASK" == "grep" ]; then 327 | printf "$TASK completion time\n" | tee -a $file_name 328 | printf "$TIME seconds\n" | tee -a $file_name 329 | else 330 | printf "$TASK completion increment during test\n" |\ 331 | tee -a $file_name 332 | printf `expr $final_completion_level - $initial_completion_level` |\ 333 | tee -a $file_name 334 | if [ "$TASK" == "make" ]; then 335 | printf " lines\n" | tee -a $file_name 336 | else 337 | printf "%%\n" | tee -a $file_name 338 | fi 339 | fi 340 | 341 | print_save_agg_thr $file_name 342 | 343 | cd .. 344 | 345 | # rm work dir 346 | if [ -f results-${sched}/trace ]; then 347 | cp -f results-${sched}/trace . 
348 | fi 349 | rm -rf results-${sched} 350 | -------------------------------------------------------------------------------- /process_config.sh: -------------------------------------------------------------------------------- 1 | # Default configuration file; do not edit this file, but the file .S-config.sh 2 | # in your home directory. The latter gets created on the very first execution 3 | # of some benchmark script (even if only the option -h is passed to the script). 4 | 5 | # first, a little code to to automate stuff; configuration parameters 6 | # then follow 7 | 8 | if [[ "$1" != "-h" && "$(id -u)" -ne "0" && -z $BATS_VERSION ]]; then 9 | echo "You are currently executing me as $(whoami)," 10 | echo "but I need root privileges (e.g., to switch" 11 | echo "between schedulers)." 12 | echo "Please run me as root." 13 | exit 1 14 | else 15 | FIRST_PARAM=$1 16 | fi 17 | 18 | function print_dev_help 19 | { 20 | # find right path to config file, used in help messages 21 | if [ "$SUDO_USER" != "" ]; then 22 | eval REALHOME=~$SUDO_USER 23 | else 24 | eval REALHOME=~ 25 | fi 26 | CONFPATH=${REALHOME}/.S-config.sh 27 | 28 | echo 29 | echo To address this issue, you can 30 | echo - either set the parameter BASE_DIR, in $CONFPATH, to a directory 31 | echo " that you know to be in a local filesystem (such a local filesystem" 32 | echo " must be mounted on a supported physical or virtual device);" 33 | echo - or set the parameter TEST_DEV, in $CONFPATH, to \ 34 | the \(supported\) device 35 | echo " or partition you want to use for your tests." 36 | echo 37 | echo See the comments in $CONFPATH for details and more options. 38 | } 39 | 40 | function get_partition_info 41 | { 42 | PART_INFO= 43 | if [[ -e $1 ]]; then 44 | PART_INFO=$(df $1 | egrep $1) 45 | else 46 | # most likely linux live os 47 | PART_INFO=$(df | egrep $1) 48 | fi 49 | echo $PART_INFO 50 | } 51 | 52 | function find_partition_for_dir 53 | { 54 | PART=$(df "$1" | tail -1 | awk '{print $1;}') 55 | echo $PART 56 | } 57 | 58 | function find_dev_for_dir 59 | { 60 | if [[ "$PART" == "" ]]; then 61 | PART=$(find_partition_for_dir $1) 62 | fi 63 | 64 | if [[ "$PART" == "" ]]; then 65 | echo Sorry, failed to find the partition containing the directory 66 | echo $1. 67 | print_dev_help 68 | exit 69 | fi 70 | 71 | REALPATH=$PART 72 | if [[ -e $PART ]]; then 73 | REALPATH=$(readlink -f $PART) # moves to /dev/dm-X in case of device mapper 74 | if [[ "$REALPATH" == "" ]]; then 75 | echo The directory where you want me store my test files, 76 | echo namely $1, 77 | echo is contained in the following partition: 78 | echo $PART. 79 | echo Unfortunately, such a partition does not seem to correspond 80 | echo to any local partition \(it is probably a remote filesystem\). 81 | print_dev_help 82 | exit 83 | fi 84 | fi 85 | 86 | BASEPART=$(basename $PART) 87 | REALPART=$(basename $REALPATH) 88 | 89 | BACKING_DEVS= 90 | if [[ "$(echo $BASEPART | egrep loop)" != "" ]]; then 91 | # loopback device: $BASEPART is already equal to the device name 92 | BACKING_DEVS=$BASEPART 93 | elif cat /proc/1/cgroup | tail -1 | egrep -q "container"; then 94 | # is container. lsblk will return block devices of the host 95 | # so let's use the host drive. 96 | BACKING_DEVS=$(lsblk | egrep -m 1 "disk" | awk '{print $1;}') 97 | elif ! egrep -q $BASEPART /proc/partitions; then 98 | # is linux live OS. Use cd drive 99 | BACKING_DEVS=$(lsblk | egrep -m 1 "rom" | awk '{print $1;}') 100 | else 101 | # get devices from partition 102 | for dev in $(ls /sys/block/); do 103 | if ! 
lsblk /dev/$dev | egrep -q "$BASEPART|$REALPART"; then 104 | # the block device does not contain the partition we're 105 | # attempting to run benchmarks on. 106 | continue 107 | fi 108 | disk_line=$(lsblk -n -i /dev/$dev | egrep disk | egrep -v "^ |^\`|\|") 109 | if [[ "$disk_line" != "" && \ 110 | ( "$(lsblk -n -o TRAN /dev/$dev 2> /dev/null)" != "" || \ 111 | $(echo $dev | egrep "mmc|sda|nvme") != "" \ 112 | ) ]]; then 113 | BACKING_DEVS="$BACKING_DEVS $dev" 114 | 115 | if [[ "$HIGH_LEV_DEV" == "" ]]; then 116 | HIGH_LEV_DEV=$dev # make md win in setting HIGH_LEV_DEV 117 | fi 118 | fi 119 | 120 | if lsblk /dev/$dev | grep -q "md.*raid"; then 121 | if [[ "$(echo $HIGH_LEV_DEV | egrep md)" != "" ]]; then 122 | echo -n Stacked raids not supported 123 | echo " ($HIGH_LEV_DEV + $dev), sorry." 124 | print_dev_help 125 | exit 126 | fi 127 | 128 | HIGH_LEV_DEV=$dev # set unconditionally as high-level 129 | # dev (the one used, e.g., to 130 | # measure aggregate throughput) 131 | fi 132 | done 133 | fi 134 | 135 | if [[ "$BACKING_DEVS" == "" ]]; then 136 | echo Block devices for partition $BASEPART or $REALPART unrecognized. 137 | print_dev_help 138 | exit 139 | fi 140 | } 141 | 142 | function check_create_mount_part 143 | { 144 | if [[ $(echo $BACKING_DEVS | egrep "mmc|nvme") != "" ]]; then 145 | extra_char=p 146 | fi 147 | 148 | TARGET_PART=${BACKING_DEVS}${extra_char}1 149 | 150 | if [[ ! -b $TARGET_PART ]]; then 151 | ( 152 | echo o # Create a new empty DOS partition table 153 | echo n # Add a new partition 154 | echo p # Primary partition 155 | echo 1 # Partition number 156 | echo # First sector (Accept default: 1) 157 | echo # Last sector (Accept default: varies) 158 | echo w # Write changes 159 | ) | fdisk $BACKING_DEVS > /dev/null 160 | fi 161 | 162 | BASE_DIR=$1 163 | if [[ "$(mount | egrep $BASE_DIR)" == "" ]]; then 164 | fsck.ext4 -n $TARGET_PART 165 | if [[ $? -ne 0 ]]; then 166 | mkfs.ext4 -F $TARGET_PART 167 | if [ $? -ne 0 ]; then 168 | echo Filesystem creation failed, aborting. 169 | exit 170 | fi 171 | fi 172 | 173 | mkdir -p $BASE_DIR 174 | mount $TARGET_PART $BASE_DIR 175 | if [ $? -ne 0 ]; then 176 | echo Mount failed, aborting. 177 | exit 178 | fi 179 | fi 180 | BACKING_DEVS=$(basename $BACKING_DEVS) 181 | HIGH_LEV_DEV=$BACKING_DEVS 182 | } 183 | 184 | function use_nullb_dev 185 | { 186 | lsmod | grep null_blk > /dev/null 187 | 188 | if [ $? -eq 0 ]; then 189 | modprobe -r null_blk 2> /dev/null 190 | if [ $? -eq 1 ]; then # null_blk is not a module but built-in 191 | echo "ERROR: failed to unload null_blk module" 192 | exit 1 193 | fi 194 | fi 195 | 196 | modprobe null_blk queue_mode=2 irqmode=0 completion_nsec=0 \ 197 | nr_devices=1 198 | if [ $? -ne 0 ]; then 199 | echo "ERROR: failed to load null_blk module" 200 | exit 1 201 | fi 202 | 203 | BACKING_DEVS=nullb0 204 | HIGH_LEV_DEV=$BACKING_DEVS 205 | BASE_DIR= # empty, to signal that there is no fs and no file to create 206 | } 207 | 208 | function use_scsi_debug_dev 209 | { 210 | ../utilities/check_dependencies.sh lsscsi mkfs.ext4 fsck.ext4 sfdisk 211 | if [[ $? -ne 0 ]]; then 212 | exit 1 213 | fi 214 | 215 | if [[ "$(lsmod | egrep scsi_debug)" == "" ]]; then 216 | echo -n Setting up scsi_debug, this may take a little time ... 217 | sudo modprobe scsi_debug ndelay=1600000 dev_size_mb=1000 max_queue=4 218 | if [[ $? 
-ne 0 ]]; then 219 | echo 220 | echo "Failed to load scsi_debug module (maybe not installed?)" 221 | exit 1 222 | fi 223 | echo " done" 224 | fi 225 | 226 | BACKING_DEVS=$(lsscsi | egrep scsi_debug | sed 's<\(.*\)/dev/$file_size_MiB ? $file_size_MiB : $MAXSIZE_MiB )) 285 | } 286 | 287 | function find_partition { 288 | lsblk -rno MOUNTPOINT /dev/$TEST_DEV \ 289 | > mountpoints 2> /dev/null 290 | 291 | cur_line=$(tail -n +2 mountpoints | head -n 1) 292 | i=3 293 | while [[ "$cur_line" == "" && \ 294 | $i -lt $(cat mountpoints | wc -l) ]]; do 295 | cur_line=$(tail -n +$i mountpoints | head -n 1) 296 | i=$(( i+1 )) 297 | done 298 | 299 | rm mountpoints 300 | 301 | echo $cur_line 302 | } 303 | 304 | function prepare_basedir 305 | { 306 | # NOTE: the following cases are mutually exclusive 307 | 308 | if [[ "$FIRST_PARAM" == "-h" ]]; then 309 | return 310 | fi 311 | 312 | if [[ "$SCSI_DEBUG" == yes ]]; then 313 | use_scsi_debug_dev # this will set BASE_DIR 314 | return 315 | fi 316 | 317 | if [[ "$NULLB" == yes ]]; then 318 | use_nullb_dev 319 | return 320 | fi 321 | 322 | if [[ "$TEST_DEV" != "" ]]; then 323 | # strip /dev/ if present 324 | TEST_DEV=$(echo $TEST_DEV | sed 's if you want 368 | echo me to format the drive, create a fs and mount it for you. 369 | echo Aborting. 370 | exit 371 | elif [[ "$mntpoint" == "" ]]; then # implies $FORMAT_DISK == yes 372 | format_and_use_test_dev 373 | mntpoint=$BASE_DIR 374 | fi 375 | 376 | mntpoint=${mntpoint%/} # hate to see consecutive / in paths :) 377 | BASE_DIR="$mntpoint/var/lib/S" 378 | fi 379 | 380 | if [[ ! -d $BASE_DIR ]]; then 381 | mkdir -p $BASE_DIR 382 | fi 383 | 384 | if [[ ! -w $BASE_DIR && "$TEST_PARTITION" != "" ]]; then 385 | echo Sorry, $BASE_DIR not writeable for test partition $TEST_PARTITION 386 | echo Aborting. 387 | exit 388 | fi 389 | 390 | if [[ ! -w $BASE_DIR ]]; then 391 | echo "$BASE_DIR is not writeable, reverting to /tmp/test" 392 | BASE_DIR=/tmp/test 393 | mkdir -p $BASE_DIR 394 | fi 395 | 396 | if [[ "$PART" == "" ]]; then 397 | PART=$(find_partition_for_dir $BASE_DIR) 398 | fi 399 | 400 | if [[ "$(get_partition_info $PART)" == "" ]]; then # it must be /dev/root 401 | PART=/dev/root 402 | fi 403 | 404 | FREESPACE=$(get_partition_info $PART | awk '{print $4}' | head -n 1) 405 | 406 | BASE_DIR_SIZE=$(du -s $BASE_DIR | awk '{print $1}') 407 | 408 | if [[ $(( ($FREESPACE + $BASE_DIR_SIZE) / 1024 )) -lt 500 ]]; then 409 | echo Not enough free space for test files in $BASE_DIR: \ 410 | I need at least 500MB 411 | exit 412 | fi 413 | 414 | if [[ "$TEST_DEV" == "" && -d $BASE_DIR ]]; then 415 | find_dev_for_dir $BASE_DIR 416 | else 417 | # in case no path setting BACKING_DEVS has been followed: 418 | BACKING_DEVS=$TEST_DEV 419 | HIGH_LEV_DEV=$BACKING_DEVS 420 | fi 421 | } 422 | 423 | # MAIN 424 | 425 | prepare_basedir 426 | 427 | # paths of files to read/write in the background 428 | if [[ "$BASE_DIR" != "" ]]; then 429 | BASE_FILE_PATH=$BASE_DIR/largefile 430 | fi 431 | 432 | if [[ "$DEVS" == "" ]]; then 433 | DEVS=$BACKING_DEVS 434 | fi 435 | 436 | if [[ "$FIRST_PARAM" != "-h" && -z $BATS_VERSION ]]; then 437 | # test target devices 438 | for dev in $DEVS; do 439 | cat /sys/block/$dev/queue/scheduler >/dev/null 2>&1 440 | if [ $? -ne 0 ]; then 441 | echo -n "There is something wrong with the device /dev/$dev, " 442 | echo which should be 443 | echo a device on which your test directory $BASE_DIR 444 | echo is mounted. 
445 | echo -n "Try setting your target devices manually " 446 | echo \(and correctly\) in ~/.S-config.sh 447 | exit 448 | fi 449 | done 450 | fi 451 | 452 | if [[ "$FILE_SIZE_MB" == "" ]]; then 453 | FILE_SIZE_MB=$(get_max_affordable_file_size) 454 | fi 455 | -------------------------------------------------------------------------------- /run_multiple_benchmarks/test_responsiveness.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # see the following string for usage, or invoke ./test_responsiveness -h 4 | usage_msg="\ 5 | Usage:\n\ 6 | ./test_responsiveness.sh 7 | 8 | By replaying the I/O issued by gnome-terminal when it starts, this 9 | script measures the time that it takes to start gnome-terminal 10 | - for each of the I/O schedulers available in the kernel; 11 | - while each of the following two heavy workloads is being served in 12 | the background: ten parallel file reads, or five parallell file 13 | reads plus five parallel file writes. 14 | 15 | NOTE: test_responsiveness.sh invokes run_main_benchmarks.sh through sudo; so, 16 | there is no need to invoke test_responsiveness.sh with sudo. If you do, then 17 | run_main_benchmarks.sh gets executed as root, and, regardless of what user 18 | you actually are, /root is used as home (so /root/.S-config.sh is used as 19 | configuration file). 20 | " 21 | 22 | if [ "$1" == "-h" ]; then 23 | printf "$usage_msg" 24 | exit 25 | fi 26 | 27 | PREVPWD=$(pwd) 28 | cd $(dirname $0) 29 | sudo ./run_main_benchmarks.sh replayed-gnome-term-startup 30 | cd $PREVPWD 31 | -------------------------------------------------------------------------------- /run_unit_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | pushd "$(dirname "${BASH_SOURCE[0]}")/unit_tests" 5 | 6 | # get bats 7 | bash get_bats.sh 8 | 9 | # get lsblk 10 | if ! command -v lsblk >/dev/null 2>&1; then 11 | echo "installing lsblk with util-linux..." 12 | if ! apk add util-linux >/dev/null 2>&1; then 13 | apt-get update >/dev/null 2>&1 14 | apt-get install -y util-linux >/dev/null 2>&1 15 | fi 16 | command -v lsblk >/dev/null 2>&1 17 | fi 18 | 19 | # run tests 20 | bats . 21 | 22 | popd 23 | -------------------------------------------------------------------------------- /throughput-sync/throughput-sync.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (C) 2013 Paolo Valente 3 | # Arianna Avanzini 4 | # Copyright (C) 2019 Paolo Valente 5 | 6 | ../utilities/check_dependencies.sh awk dd fio iostat 7 | if [[ $? -ne 0 ]]; then 8 | exit 9 | fi 10 | 11 | . ../config_params.sh 12 | . ../utilities/lib_utils.sh 13 | 14 | sched=$1 15 | NUM_READERS=${2-1} 16 | NUM_WRITERS=${3-0} 17 | RW_TYPE=${4-seq} 18 | STAT_DEST_DIR=${5-.} 19 | DURATION=${6-10} 20 | SYNC=${7-yes} 21 | MAXRATE=${8-0} # If useful with other schedulers than bfq, 16500 22 | # is apparently the maximum value for which the 23 | # system does not risk to become unresponsive, with 24 | # sequential writers, under any scheduler with a 90 25 | # MB/s hard disk. 
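# An illustrative invocation (hypothetical values; the usage message below
# is the authoritative reference for the parameter order):
#
#   sudo ./throughput-sync.sh bfq 4 0 rand . 30 yes 0 verbose
#
# switches to bfq, runs 4 random readers and no writers for 30 seconds,
# stores the stats file in the current directory, syncs first, does not
# limit the rate (MAXRATE=0), and prints progress thanks to the ninth
# parameter, "verbose".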
26 | 27 | VERBOSITY=$9 28 | PERF_PROF=${10} 29 | 30 | if [[ "$VERBOSITY" == verbose ]]; then 31 | REDIRECT=/dev/stdout 32 | else 33 | REDIRECT=/dev/null 34 | fi 35 | 36 | # see the following string for usage, or invoke throughput-sync.sh -h 37 | usage_msg="\ 38 | Usage (as root):\n\ 39 | ./throughput-sync.sh [\"\" | cur-sched | bfq | cfq | ...]\n\ 40 | [num_readers] [num_writers]\n\ 41 | [seq | rand | raw_seq | raw_rand ]\n\ 42 | [stat_dest_dir] [duration] [sync]\n\ 43 | [max_write-kB-per-sec] [verbose]\n\ 44 | [perf_prof] 45 | \n\ 46 | first parameter equal to \"\" or cur-sched -> do not change scheduler\n\ 47 | raw_seq/raw_rand -> read directly from device (no writers allowed)\n\ 48 | sync parameter equal to yes -> invoke sync before starting readers/writers\n\ 49 | \n\ 50 | \n\ 51 | For example:\n\ 52 | sudo ./throughput-sync.sh bfq 10 0 rand ..\n\ 53 | switches to bfq and launches 10 rand readers and 10 rand writers\n\ 54 | with each reader reading from the same file. The file containing\n\ 55 | the computed stats is stored in the .. dir with respect to the cur dir.\n\ 56 | \n\ 57 | If perf_prof is different than an empty string, then the CPU is set to\n\ 58 | maximum, constant speed.\n\ 59 | \n\ 60 | Default parameter values are \"\", $NUM_WRITERS, $NUM_WRITERS, \ 61 | $RW_TYPE, $STAT_DEST_DIR, $DURATION, $SYNC, $MAXRATE, \"$VERBOSITY\" and \"$PERF_PROF\".\n" 62 | 63 | if [ "$1" == "-h" ]; then 64 | printf "$usage_msg" 65 | exit 66 | fi 67 | 68 | if [[ "$BASE_DIR" == "" && "$RW_TYPE" != raw_seq && "$RW_TYPE" != raw_rand ]]; 69 | then 70 | echo Sorry, only raw I/O allowed on $HIGH_LEV_DEV 71 | exit 1 72 | fi 73 | 74 | mkdir -p $STAT_DEST_DIR 75 | # turn to an absolute path (needed later) 76 | STAT_DEST_DIR=`cd $STAT_DEST_DIR; pwd` 77 | 78 | set_scheduler > $REDIRECT 79 | 80 | echo Preliminary sync to wait for the completion of possible previous writes > $REDIRECT 81 | sync 82 | 83 | # create and enter work dir 84 | rm -rf results-${sched} 85 | mkdir -p results-$sched 86 | cd results-$sched 87 | 88 | function reset_pm { 89 | if [[ "$PERF_PROF" != "" ]]; then 90 | cpupower frequency-set -g powersave -d 800MHz 91 | cpupower idle-set -E 92 | fi 93 | } 94 | 95 | # setup a quick shutdown for Ctrl-C 96 | trap "reset_pm; shutdwn 'fio iostat'; exit" sigint 97 | 98 | init_tracing 99 | set_tracing 1 100 | 101 | if [[ "$PERF_PROF" != "" ]]; then 102 | cpupower frequency-set -g performance -d 3.50GHz -u 3.50GHz 103 | cpupower idle-set -D 0 104 | fi 105 | 106 | start_readers_writers_rw_type $NUM_READERS $NUM_WRITERS $RW_TYPE $MAXRATE 107 | 108 | # add short sleep to avoid false bursts of creations of 109 | # processes doing I/O 110 | sleep 0.3 111 | 112 | echo Flushing caches > $REDIRECT 113 | if [ "$SYNC" != "yes" ]; then 114 | echo Not syncing > $REDIRECT 115 | echo 3 > /proc/sys/vm/drop_caches 116 | else 117 | # Flushing in parallel, otherwise sync would block for a very 118 | # long time 119 | flush_caches > $REDIRECT & 120 | fi 121 | 122 | WAIT_TRANSITORY=no 123 | if [[ $WAIT_TRANSITORY = yes && \ 124 | ($NUM_READERS -gt 0 || $NUM_WRITERS -gt 0) ]]; then 125 | 126 | # wait for reader/writer start-up transitory to terminate 127 | secs=$(transitory_duration 7) 128 | 129 | while [ $secs -ge 0 ]; do 130 | echo -ne "Waiting for transitory to terminate: $secs\033[0K\r" > $REDIRECT 131 | sleep 1 132 | : $((secs--)) 133 | done 134 | echo > $REDIRECT 135 | fi 136 | 137 | echo Measurement started, and lasting $DURATION seconds > $REDIRECT 138 | 139 | start_time=$(date +'%s') 140 | 141 | # start logging 
thr 142 | iostat -tmd /dev/$HIGH_LEV_DEV 2 | tee iostat.out > $REDIRECT & 143 | 144 | # wait for reader/writer start-up transitory to terminate 145 | secs=$DURATION 146 | 147 | while [ $secs -gt 0 ]; do 148 | echo "Remaining time: $secs" > $REDIRECT 149 | sleep 2 150 | if [[ "$SYNC" == "yes" && $NUM_WRITERS -gt 0 ]]; then 151 | echo Syncing again in parallel ... > $REDIRECT 152 | sync & 153 | fi 154 | : $((secs-=2)) 155 | done 156 | echo > $REDIRECT 157 | 158 | if [[ "$PERF_PROF" != "" ]]; then 159 | cpupower frequency-set -g powersave -d 800MHz 160 | cpupower idle-set -E 161 | fi 162 | 163 | shutdwn 'fio iostat' 164 | 165 | end_time=$(date +'%s') 166 | 167 | actual_duration=$(($(date +'%s') - $start_time)) 168 | 169 | if [ -f trace ]; then 170 | cp -f trace .. 171 | fi 172 | 173 | if [ $actual_duration -gt $(($DURATION + 10)) ]; then 174 | echo Run lasted $actual_duration seconds instead of $DURATION 175 | echo In this conditions the system, and thus the results, are not reliable 176 | echo Aborting 177 | else 178 | mkdir -p $STAT_DEST_DIR 179 | file_name=$STAT_DEST_DIR/\ 180 | ${sched}-${NUM_READERS}r${NUM_WRITERS}\ 181 | w-${RW_TYPE}-${DURATION}sec-aggthr_stat.txt 182 | echo "Results for $sched, $NUM_READERS $RW_TYPE readers and \ 183 | $NUM_WRITERS $RW_TYPE writers" | tee $file_name 184 | print_save_agg_thr $file_name 185 | fi 186 | 187 | cd .. 188 | # rm work dir 189 | rm -rf results-${sched} 190 | -------------------------------------------------------------------------------- /unit_tests/get_bats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -euo pipefail 3 | 4 | # settings 5 | test_framework="https://github.com/bats-core/bats-core" 6 | commit="87b16eb" 7 | destination_root="/tmp" 8 | destination_path="bats-core" 9 | download_destination="${destination_root}/${destination_path}" 10 | 11 | # download 12 | if [[ ! -d ${download_destination} ]]; then 13 | PREVPWD=$(pwd) 14 | cd "${destination_root}" 15 | 16 | curl -L "${test_framework}/tarball/${commit}" | tar xz 17 | mv "bats-core-bats-core-${commit}" "${destination_path}" 18 | 19 | cd ${PREVPWD} 20 | fi 21 | 22 | # install 23 | if command -v bash >/dev/null 2>&1; then 24 | if ! 
"${download_destination}/install.sh" /usr/local; then 25 | echo "retrying with root privileges" 26 | sudo "${download_destination}/install.sh" /usr/local 27 | fi 28 | command -v bats 29 | fi 30 | -------------------------------------------------------------------------------- /unit_tests/prev_impl.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # previous implementations 4 | 5 | function original_find_partition_for_dir 6 | { 7 | PART= 8 | longest_substr= 9 | mount | 10 | { 11 | while IFS= read -r var 12 | do 13 | curpart=$(echo "$var" | cut -f 1 -d " ") 14 | 15 | if [[ "$(echo $curpart | egrep '/')" == "" ]] && [[ -z "$2" ]]; then 16 | continue 17 | fi 18 | 19 | mountpoint=$(echo "$var" | \ 20 | sed 's<.* on \(.*\)<\1<' | \ 21 | sed 's<\(.*\) type.*<\1<') 22 | substr=$(printf "%s\n%s\n" "$mountpoint" "$1" | \ 23 | sed -e 'N;s/^\(.*\).*\n\1.*$/\1/') 24 | 25 | if [[ "$substr" == $mountpoint && \ 26 | ${#substr} -gt ${#longest_substr} ]] ; then 27 | longest_substr=$substr 28 | PART=$(echo "$var" | cut -f 1 -d " ") 29 | fi 30 | done 31 | echo $PART 32 | } 33 | } -------------------------------------------------------------------------------- /unit_tests/test_config_scripts.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | cd $BATS_TEST_DIRNAME 4 | 5 | # must be fully qualified (absolute) path in order to specify the .sh extension 6 | load $BATS_TEST_DIRNAME/../config_params.sh 7 | 8 | load prev_impl 9 | 10 | @test "compare find_partition_for_dir implementations" { 11 | # original implementation 12 | run original_find_partition_for_dir "$(pwd)" 13 | original_output=$output 14 | 15 | # modified original implementation 16 | run original_find_partition_for_dir "$(pwd)" "allow no slash in partition" 17 | modified_output=$output 18 | 19 | # current implementation 20 | run find_partition_for_dir "$(pwd)" 21 | current_output=$output 22 | 23 | [ "$current_output" = "$original_output" ] || [ "$current_output" = "$modified_output" ] 24 | } 25 | 26 | @test "test find_dev_for_dir" { 27 | run find_dev_for_dir "$(pwd)" 28 | 29 | # Echo and other stdout are limited to error cases in find_dev_for_dir 30 | # so one way to verify success is to assert empty output. 31 | # We could change this to [ "$status" -eq 0 ] if we add nonzero exit codes. 
32 | [ "$output" = "" ] 33 | } 34 | 35 | @test "test get_partition_info" { 36 | run find_partition_for_dir "$(pwd)" 37 | part="$output" 38 | 39 | run get_partition_info "$part" 40 | part_info="$output" 41 | 42 | [ -n "$part_info" ] 43 | 44 | # we could change this to wc --lines, but grep is more commonly available 45 | line_count=$(echo "$part_info" | grep -c '') 46 | [ "$line_count" -eq 1 ] 47 | 48 | free_space=$(echo "$part_info" | awk '{print $4}') 49 | [ -n "$free_space" ] 50 | 51 | # assert is number 52 | echo $free_space | egrep -q "^[0-9]+$" 53 | } 54 | -------------------------------------------------------------------------------- /utilities/calc_avg_and_co.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # calc_avg_and_co.sh <95..99> 3 | # computes the 95..99% confidence interval for a file with one col 4 | # 5 | # INPUT: one column of data 6 | # OUTPUT: min max mean std_dev conf 7 | # 8 | export LC_ALL=C 9 | awk ' BEGIN { 10 | n=0; 11 | sum=0; 12 | k=0; 13 | array[1,1]=6.314; array[1,2]=31.821 14 | array[2,1]=2.92; array[2,2]=6.965 15 | array[3,1]=2.353; array[3,2]=4.541 16 | array[4,1]=2.132; array[4,2]=3.747 17 | array[5,1]=2.015; array[5,2]=3.365 18 | array[6,1]=1.943; array[6,2]=3.143 19 | array[7,1]=1.895; array[7,2]=2.998 20 | array[8,1]=1.86; array[8,2]=2.896 21 | array[9,1]=1.833; array[9,2]=2.821 22 | array[10,1]=1.812; array[10,2]=2.764 23 | array[11,1]=1.796; array[11,2]=2.718 24 | array[12,1]=1.782; array[12,2]=2.681 25 | array[13,1]=1.771; array[13,2]=2.65 26 | array[14,1]=1.761; array[14,2]=2.624 27 | array[15,1]=1.753; array[15,2]=2.602 28 | array[16,1]=1.746; array[16,2]=2.583 29 | array[17,1]=1.74; array[17,2]=2.567 30 | array[18,1]=1.734; array[18,2]=2.552 31 | array[19,1]=1.729; array[19,2]=2.539 32 | array[20,1]=1.725; array[20,2]=2.528 33 | array[21,1]=1.721; array[21,2]=2.518 34 | array[22,1]=1.717; array[22,2]=2.508 35 | array[23,1]=1.714; array[23,2]=2.5 36 | array[24,1]=1.711; array[24,2]=2.492 37 | array[25,1]=1.708; array[25,2]=2.485 38 | array[26,1]=1.706; array[26,2]=2.479 39 | array[27,1]=1.703; array[27,2]=2.473 40 | array[28,1]=1.701; array[28,2]=2.467 41 | array[29,1]=1.699; array[29,2]=2.462 42 | array[30,1]=1.697; array[30,2]=2.457 43 | } 44 | 45 | { 46 | if (n == 0 || $1 < min) 47 | min = $1; 48 | if (n == 0 || $1 > max) 49 | max = $1; 50 | 51 | n++; 52 | c[n] += $1; 53 | sum +=$1; 54 | } 55 | 56 | END { 57 | if (n < 2) { 58 | printf "ERROR - too few values. 
Aborting...\n"; 59 | exit(0); 60 | } 61 | mean = sum / n; 62 | 63 | if (n > 31) 64 | lim = 31; 65 | else 66 | lim = n; 67 | 68 | if ('$1' == 95) 69 | width = array[lim-1,1]; 70 | else 71 | width = array[lim-1,2]; 72 | 73 | for (j = 1; j <= n; j++) { 74 | square_diff += (c[j] - mean) *( c[j] - mean); 75 | for (i = 1; i <= '$1'; i++) 76 | k += (c[j] - mean) * (c[j] - mean); 77 | } 78 | 79 | q = sqrt(k / (n * (n - 1))); 80 | std_dev = sqrt( square_diff / (n-1) ); 81 | printf "%12s%12s%12s%12s%12s\n", "min", "max", "avg", 82 | "std_dev", "conf'$1'%"; 83 | printf "%12g%12g%12g%12g%12g\n", min, max, mean, std_dev, width*q/2; 84 | }' 85 | 86 | -------------------------------------------------------------------------------- /utilities/check_dependencies.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check and install dependencies 4 | 5 | # Make this associative array global 6 | declare -A packages 7 | 8 | function select_packages_and_manager 9 | { 10 | declare -A rpm_packages 11 | rpm_packages=( [fio]=fio [iostat]=sysstat [/usr/bin/time]=time \ 12 | [/usr/include/libaio.h]=libaio-devel [awk]=gawk \ 13 | [dd]=coreutils [bc]=bc [fio]=fio [killall]=psmisc \ 14 | [g++]=gcc-c++ [git]=git-core [mplayer]=mplayer \ 15 | [xterm]=xterm [gnome-terminal]=gnome-terminal \ 16 | [pv]=pv [lsscsi]=lsscsi\ 17 | ) 18 | declare -A deb_packages 19 | deb_packages=( [fio]=fio [iostat]=sysstat [/usr/bin/time]=time \ 20 | [/usr/include/libaio.h]=libaio-dev [awk]=gawk \ 21 | [dd]=coreutils [bc]=bc [fio]=fio [killall]=psmisc \ 22 | [g++]=g++ [git]=git [mplayer]=mplayer \ 23 | [xterm]=xterm [gnome-terminal]=gnome-terminal \ 24 | [pv]=pv [lsscsi]=lsscsi\ 25 | ) 26 | 27 | declare -A pack_managers 28 | pack_managers[/etc/fedora-release]=dnf 29 | pack_managers[/usr/lib/os.release.d/issue-fedora]=dnf 30 | pack_managers[/etc/redhat-release]=yum 31 | pack_managers[/etc/debian_version]=apt 32 | pack_managers[/etc/issue]=apt 33 | # pack_managers[/etc/SuSE-release]=zypper not supported yet 34 | # pack_managers[/etc/arch-release]=pacman not supported yet 35 | # pack_managers[/etc/gentoo-release]=emerge not supported yet 36 | 37 | declare -A pack_formats 38 | pack_formats[/etc/fedora-release]=rpm 39 | pack_formats[/usr/lib/os.release.d/issue-fedora]=rpm 40 | pack_formats[/etc/redhat-release]=rpm 41 | pack_formats[/etc/debian_version]=deb 42 | pack_formats[/etc/issue]=deb 43 | # pack_formats[/etc/SuSE-release]=rpm not supported yet 44 | 45 | for f in ${!pack_managers[@]} 46 | do 47 | f=$(readlink -f $f) 48 | 49 | if [[ -f $f ]]; then 50 | DISTRO_FOUND=yes 51 | if [[ "$PACKAGE_MANAGER" == "" ]]; then 52 | PACKAGE_MANAGER=${pack_managers[$f]} 53 | fi 54 | type $PACKAGE_MANAGER >/dev/null 2>&1 55 | if [[ $? -ne 0 ]]; then 56 | echo Looked for $PACKAGE_MANAGER as package manager 57 | echo for installing missing commands, but not found. 58 | echo You may want to choose the package manager to use 59 | echo manually, by setting the config parameter 60 | echo PACKAGE_MANAGER 61 | 62 | return 63 | else 64 | PACK_MAN_FOUND=yes 65 | fi 66 | break 67 | fi 68 | done 69 | 70 | if [[ "$DISTRO_FOUND" != yes ]]; then 71 | echo -n Sorry, automatic dependency installation not yet supported 72 | echo for your distribution. 
73 | return 74 | fi 75 | 76 | if [[ ${pack_formats[$f]} == rpm ]]; then 77 | for k in "${!rpm_packages[@]}"; do 78 | packages[$k]=${rpm_packages[$k]}; 79 | done 80 | else 81 | for k in "${!deb_packages[@]}"; do 82 | packages[$k]=${deb_packages[$k]}; 83 | done 84 | fi 85 | } 86 | 87 | function install_commands 88 | { 89 | 90 | select_packages_and_manager 91 | 92 | if [[ "$DISTRO_FOUND" != yes || "$PACK_MAN_FOUND" != yes ]]; then 93 | return 94 | fi 95 | 96 | for comm in $MISSING_LIST; do 97 | if [[ "${packages[$comm]}" == "" ]]; then 98 | echo Sorry, no package associated with $comm in my database 99 | PARTIAL_INSTALL=yes 100 | fi 101 | PACKAGE_LIST="$PACKAGE_LIST ${packages[$comm]}" 102 | done 103 | 104 | if [[ "$PACKAGE_LIST" != "" && "$PACKAGE_LIST" != " " ]]; then 105 | echo -n "To install the above missing commands, " 106 | echo I\'m about to install the following packages: 107 | echo $PACKAGE_LIST 108 | 109 | $PACKAGE_MANAGER -y install $PACKAGE_LIST 110 | fi 111 | 112 | if [[ $? -ne 0 ]]; then 113 | echo Some packages failed to be installed 114 | else 115 | INSTALL_SUCCESS=yes 116 | fi 117 | } 118 | 119 | function check_dep 120 | { 121 | COMMAND_LIST=( "$@" ) 122 | MISSING_LIST= 123 | for i in "${COMMAND_LIST[@]}" ; do 124 | type $i >/dev/null 2>&1 || [ -f $i ] || \ 125 | { echo >&2 "$i not found."; \ 126 | MISSING_LIST="$MISSING_LIST $i"; } 127 | done 128 | 129 | if [ "$MISSING_LIST" == "" ]; then 130 | return 131 | fi 132 | 133 | install_commands "$MISSING_LIST" 134 | 135 | if [[ "$INSTALL_SUCCESS" != yes || "$PARTIAL_INSTALL" == yes ]]; then 136 | echo Please install unsolved dependencies manually, and retry. 137 | echo Aborting now. 138 | exit 1 139 | fi 140 | } 141 | 142 | if [[ "$@" == "" ]] ; then 143 | echo "Checking principal dependencies..." 144 | check_dep awk iostat bc time fio 145 | 146 | echo "Checking secondary dependencies..." 147 | check_dep pv git make 148 | else 149 | check_dep "$@" 150 | fi 151 | -------------------------------------------------------------------------------- /utilities/lib_utils.sh: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2013 Paolo Valente 2 | # Arianna Avanzini 3 | # Copyright (C) 2018 Paolo Valente 4 | 5 | CALC_AVG_AND_CO=`cd ../utilities; pwd`/calc_avg_and_co.sh 6 | FIO="fio --minimal --loops=10000" 7 | 8 | ../utilities/check_dependencies.sh bash awk bc killall 9 | if [[ $? -ne 0 ]]; then 10 | exit 11 | fi 12 | 13 | . ../utilities/tracing.sh 14 | 15 | function load_all_sched_modules { 16 | for mod in bfq-iosched bfq-mq-iosched mq-deadline kyber-iosched \ 17 | cfq-iosched deadline-iosched; do 18 | modprobe $mod > /dev/null 2>&1 19 | if [ $? != 0 ]; then 20 | echo -n Failed to load $mod, tests will be executed 21 | echo " without this scheduler" 22 | fi 23 | done 24 | } 25 | 26 | # Check whether an X display can be accessed. 27 | function test_X_access { 28 | COMMAND="$1" 29 | if [[ "$SUDO_USER" != "" ]]; then 30 | SUDO_PREFIX="sudo -u $SUDO_USER" 31 | fi 32 | 33 | ACCESS_OK=no 34 | for dis in `ls /tmp/.X11-unix | tr 'X' ':'`; do 35 | # Tentatively set display so as to allow applications with a 36 | # GUI to be started remotely too (a session must however be 37 | # open on the target machine) 38 | export DISPLAY=$dis 39 | 40 | # To run, an X application needs to access the X server. In 41 | # this respect, these scripts may be executed as root (e.g., 42 | # using sudo) by a different, non-root user. And the latter may 43 | # be the actual owner the current X session. 
To guarantee that 44 | # the X application can access the X server also in this case, 45 | # turn off access control temporarily. Before turning it 46 | # off, save previous access-control state, to re-enable it 47 | # again at the end of the test, if needed. 48 | XHOST_CONTROL=$($SUDO_PREFIX xhost 2> /dev/null |\ 49 | egrep "enabled") 50 | $SUDO_PREFIX xhost + > /dev/null 2>&1 51 | 52 | if [[ $? -ne 0 ]]; then 53 | continue 54 | fi 55 | ACCESS_OK=yes 56 | break 57 | done 58 | if [[ "$ACCESS_OK" != "yes" ]]; then 59 | echo Sorry, failed to get access to any display. 60 | return 1 61 | else 62 | if [[ "$XHOST_CONTROL" != "" ]]; then 63 | xhost - > /dev/null 2>&1 64 | fi 65 | fi 66 | return 0 67 | } 68 | 69 | 70 | # Try to open access to an X display; then set DISPLAY, plus XHOST_CONTROL, for 71 | # that display. In addition, test the execution of the command line passed as 72 | # first argument, if any is passed. 73 | function enable_X_access_and_test_cmd { 74 | COMMAND="$1" 75 | if [[ "$SUDO_USER" != "" ]]; then 76 | SUDO_PREFIX="sudo -u $SUDO_USER" 77 | fi 78 | 79 | COMM_OK=no 80 | for dis in `ls /tmp/.X11-unix | tr 'X' ':'`; do 81 | # Tentatively set display so as to allow applications with a 82 | # GUI to be started remotely too (a session must however be 83 | # open on the target machine) 84 | export DISPLAY=$dis 85 | 86 | # To run, an X application needs to access the X server. In 87 | # this respect, these scripts may be executed as root (e.g., 88 | # using sudo) by a different, non-root user. And the latter may 89 | # be the actual owner the current X session. To guarantee that 90 | # the X application can access the X server also in this case, 91 | # turn off access control temporarily. Before turning it 92 | # off, save previous access-control state, to re-enable it 93 | # again at the end of the benchmark, if needed. 94 | XHOST_CONTROL=$($SUDO_PREFIX xhost 2> /dev/null |\ 95 | egrep "enabled") 96 | $SUDO_PREFIX xhost + > /dev/null 2>&1 97 | 98 | if [[ $? -ne 0 && "$COMMAND" == "" ]]; then 99 | continue 100 | fi 101 | 102 | if [[ "$COMMAND" == "" ]]; then # => "xhost +" succeded 103 | COMM_OK=yes 104 | break 105 | fi 106 | 107 | # some X appplication, such as gnome-terminal, may need LC_ALL 108 | # set as follows 109 | export LC_ALL="en_US.UTF-8" 110 | 111 | $COMMAND >comm_out 2>&1 112 | COM_OUT=$? 113 | fail_str=$(egrep -i "fail|error|can\'t open display" comm_out) 114 | if [[ $COM_OUT -ne 0 || "$fail_str" != "" ]]; then 115 | continue 116 | fi 117 | COMM_OK=yes 118 | break 119 | done 120 | 121 | if [[ "$COMMAND" != "" && "$COMM_OK" != "yes" ]]; then 122 | echo Command \"$COMMAND\" failed on every 123 | echo display, with the following error message: 124 | echo 125 | echo ------------------------------------------------------ 126 | cat comm_out 127 | echo ------------------------------------------------------ 128 | echo 129 | echo If the problem is unsuccessful access to the X server, 130 | echo then check access permissions, and make sure that 131 | echo an X session is open for your user. In this respect, 132 | echo if you have opened a session as foo, then, as foo, you 133 | echo can successfully execute these scripts using sudo 134 | echo \(even through ssh\). 135 | echo But if you, as foo, become root using su, or if you 136 | echo logged in as root, then I\'m not able to give you 137 | echo access to the X server. 138 | echo 139 | echo Aborting. 
140 | rm comm_out 141 | if [[ "$XHOST_CONTROL" != "" ]]; then 142 | xhost - > /dev/null 2>&1 143 | fi 144 | exit 1 145 | fi 146 | rm -f comm_out 147 | if [[ "$COMM_OK" != "yes" ]]; then 148 | echo Sorry, failed to get access to any display. Aborting. 149 | exit 1 150 | fi 151 | } 152 | 153 | function flush_caches 154 | { 155 | echo Syncing and dropping caches ... 156 | sync 157 | echo 3 > /proc/sys/vm/drop_caches 158 | } 159 | 160 | function get_scheduler 161 | { 162 | dev=$(echo $DEVS | awk '{ print $1 }') 163 | cat /sys/block/$dev/queue/scheduler | sed 's/.*\[\(.*\)\].*/\1/' 164 | } 165 | 166 | function save_scheduler 167 | { 168 | SAVEDSCHED=$(get_scheduler) 169 | } 170 | 171 | function restore_scheduler 172 | { 173 | for dev in $DEVS; do 174 | echo $SAVEDSCHED /sys/block/$dev/queue/scheduler 175 | echo $SAVEDSCHED > /sys/block/$dev/queue/scheduler 2>&1 | \ 176 | echo &> /dev/null 177 | PIPE_STATUS=${PIPESTATUS[0]} 178 | NEW_SCHED=$(cat /sys/block/$dev/queue/scheduler | \ 179 | egrep "\[$SAVEDSCHED\]") 180 | if [[ $PIPE_STATUS -ne 0 || "$NEW_SCHED" == "" ]]; then 181 | echo "Restore of $SAVEDSCHED failed:" > /dev/tty 182 | cat /sys/block/$dev/queue/scheduler > /dev/tty 183 | fi 184 | done 185 | } 186 | 187 | function set_scheduler 188 | { 189 | if [[ "$sched" != "" && "$sched" != cur-sched ]] ; then 190 | # Switch to the desired scheduler 191 | echo Switching to $sched for $DEVS 192 | 193 | for dev in $DEVS; do 194 | echo $sched > /sys/block/$dev/queue/scheduler 2>&1 | \ 195 | echo &> /dev/null 196 | PIPE_STATUS=${PIPESTATUS[0]} 197 | if [[ $(cat /sys/block/$dev/queue/scheduler | wc -w) -gt 1 ]]; then 198 | NEW_SCHED=$(cat /sys/block/$dev/queue/scheduler | \ 199 | egrep "\[$sched\]") 200 | else 201 | NEW_SCHED=$(cat /sys/block/$dev/queue/scheduler) 202 | fi 203 | 204 | if [[ $PIPE_STATUS -ne 0 || "$NEW_SCHED" == "" ]]; then 205 | echo "Switch to $sched failed:" > /dev/tty 206 | cat /sys/block/$dev/queue/scheduler > /dev/tty 207 | exit 1 208 | fi 209 | done 210 | else 211 | dev=$(echo $DEVS | awk '{ print $1 }') 212 | sched=`cat /sys/block/$dev/queue/scheduler` 213 | sched=`echo $sched | sed 's/.*\[//'` 214 | sched=`echo $sched | sed 's/\].*//'` 215 | fi 216 | } 217 | 218 | function transitory_duration 219 | { 220 | OTHER_SCHEDULER_DURATION=$1 221 | dev=$(echo $DEVS | awk '{ print $1 }') 222 | if [ -f /sys/block/$dev/queue/iosched/raising_max_time ]; then 223 | FNAME=/sys/block/$dev/queue/iosched/raising_max_time 224 | else 225 | if [ -f /sys/block/$dev/queue/iosched/wr_max_time ]; 226 | then 227 | FNAME=/sys/block/$dev/queue/iosched/wr_max_time 228 | fi 229 | fi 230 | if [[ "$FNAME" != "" ]]; then 231 | MAX_RAIS_SEC=$(( $(cat $FNAME) / 1000 )) 232 | else 233 | MAX_RAIS_SEC=$OTHER_SCHEDULER_DURATION 234 | fi 235 | # the extra 6 seconds mainly follow from the fact that fio is 236 | # slow to start many jobs 237 | echo $((MAX_RAIS_SEC + 4)) 238 | } 239 | 240 | function shutdwn 241 | { 242 | set_tracing 0 243 | killall $1 2> /dev/null 244 | (kill -HUP $(jobs -lp)) >/dev/null 2>&1 || true 245 | 246 | # fio does not handle SIGTERM, and hence does not destroy 247 | # the shared memory segments on this signal 248 | num_lines=`ipcs -m | wc -l` 249 | ipcs -m | tail -n `expr $num_lines - 3` |\ 250 | for f in `cat - | awk '{ print $2 }'`; do\ 251 | ipcrm -m $f > /dev/null 2>&1; \ 252 | done 253 | } 254 | 255 | function create_file 256 | { 257 | fname=$1 258 | target_num_blocks=$2 # of 1MB each 259 | test -f ${fname} 260 | file_absent=$? 
261 | wrong_size=0 262 | if [ -f ${fname} ] ; then 263 | file_size=$(du --apparent-size -B 1024 $fname | col -x | cut -f 1 -d " ") 264 | computed_size=$(echo "${target_num_blocks} * 1024" | bc -l) 265 | if [[ "${file_size}" -ne "${computed_size}" ]]; then 266 | wrong_size=1 267 | fi 268 | fi 269 | if [[ "${file_absent}" -eq "1" || "${wrong_size}" -eq "1" ]]; then 270 | echo dd if=/dev/zero bs=1M \ 271 | count=${target_num_blocks} \ 272 | of=${fname} 273 | dd if=/dev/zero bs=1M \ 274 | count=${target_num_blocks} \ 275 | of=${fname} 276 | echo syncing after file creation 277 | flush_caches 278 | fi 279 | } 280 | 281 | function create_files 282 | { 283 | NUM_READERS=$1 284 | SUFFIX=$2 285 | 286 | if [[ "$BASE_DIR" == "" ]]; then 287 | return 288 | fi 289 | 290 | mkdir -p ${BASE_DIR} 291 | 292 | for ((i = 0 ; $i < $NUM_READERS ; i++)); do 293 | create_file ${BASE_FILE_PATH}$SUFFIX$i ${FILE_SIZE_MB} 294 | done 295 | } 296 | 297 | function create_files_rw_type 298 | { 299 | NUM_READERS=$1 300 | RW_TYPE=$2 301 | if [[ "$RW_TYPE" != "raw_seq" && "$RW_TYPE" != "raw_rand" ]]; then 302 | create_files $NUM_READERS 303 | echo 304 | else 305 | NUM_WRITERS=0 # only raw readers allowed for the moment (we use 306 | # raw readers basically for testing SSDs without 307 | # causing them to wear out quickly) 308 | fi 309 | } 310 | 311 | function start_readers_writers 312 | { 313 | NUM_READERS=$1 314 | NUM_WRITERS=$2 315 | RW_TYPE=$3 316 | MAXRATE=${4-0} 317 | ncpus=$(nproc --all) 318 | 319 | if [[ ${NUM_READERS} -eq 0 && ${NUM_WRITERS} -eq 0 ]]; then 320 | return 321 | fi 322 | 323 | printf "Started" 324 | 325 | if [[ $NUM_READERS -gt 0 ]]; then 326 | printf " $NUM_READERS $RW_TYPE reader(s)" 327 | fi 328 | if [[ $NUM_WRITERS -gt 0 ]]; then 329 | printf " $NUM_WRITERS $RW_TYPE writer(s)" 330 | if [[ $MAXRATE -gt 0 ]]; then 331 | if [[ "$RW_TYPE" != seq ]]; then 332 | MAXRATE=$(($MAXRATE / 60)) 333 | fi 334 | SETMAXRATE="rate=$(($MAXRATE / $NUM_WRITERS))k" 335 | fi 336 | fi 337 | echo 338 | 339 | if [[ "$RW_TYPE" != seq && "$RW_TYPE" != raw_seq ]]; then 340 | TYPE_PREF=rand 341 | fi 342 | if [[ "$RW_TYPE" == raw_seq || "$RW_TYPE" == raw_rand ]]; then 343 | IS_RAW=yes 344 | else 345 | IS_RAW=no 346 | fi 347 | 348 | for ((i = 0 ; $i < ${NUM_WRITERS} ; i++)) 349 | do 350 | rm -f ${BASE_FILE_PATH}_write$i 351 | done 352 | 353 | num_jobs=$(( ${NUM_READERS} + ${NUM_WRITERS} )) 354 | 355 | jobvar=" 356 | [global]\n 357 | thread=0\n 358 | invalidate=1\n 359 | \n 360 | " 361 | 362 | for ((i = 0 ; $i < ${NUM_READERS} ; i++)) 363 | do 364 | if [[ "$IS_RAW" != yes && "${BASE_FILE_PATH}" == "" ]]; then 365 | break 366 | fi 367 | jobvar=$jobvar" 368 | [${RW_TYPE}reader$i]\n 369 | readwrite=${TYPE_PREF}read\n 370 | " 371 | 372 | if [[ "$IS_RAW" == yes ]]; then 373 | jobvar=$jobvar" 374 | filename=/dev/$HIGH_LEV_DEV\n 375 | " 376 | if [[ "$TYPE_PREF" != rand ]]; then 377 | offset=$(( $i * $FILE_SIZE_MB )) 378 | jobvar=$jobvar" 379 | size=${FILE_SIZE_MB}M\n 380 | offset=${offset}M\n 381 | " 382 | fi 383 | else 384 | jobvar=$jobvar" 385 | filename=${BASE_FILE_PATH}$i\n 386 | " 387 | fi 388 | 389 | if [[ "$PERF_PROF" != "" ]]; then 390 | jobvar=$jobvar" 391 | cpus_allowed=$(( $i % $ncpus ))\n 392 | " 393 | fi 394 | 395 | done 396 | 397 | for ((i = 0 ; $i < ${NUM_WRITERS} ; i++)) 398 | do 399 | if [[ "$IS_RAW" == "yes" ]]; then 400 | break 401 | fi 402 | jobvar=$jobvar" 403 | [${RW_TYPE}writer$i]\n 404 | readwrite=${TYPE_PREF}write\n 405 | filename=${BASE_FILE_PATH}_write$i\n 406 | size=${FILE_SIZE_MB}M\n 407 | $SETMAXRATE\n 408 | 
" 409 | if [[ "$PERF_PROF" != "" ]]; then 410 | jobvar=$jobvar" 411 | cpus_allowed=$(( $i % $ncpus ))\n 412 | " 413 | fi 414 | done 415 | 416 | # add short sleep to avoid false bursts of creations of 417 | # processes doing I/O 418 | sleep 0.3 419 | 420 | echo -e $jobvar | $FIO - > /dev/null 2>&1 & 421 | } 422 | 423 | function start_readers_writers_rw_type 424 | { 425 | NUM_READERS=$1 426 | NUM_WRITERS=$2 427 | R_TYPE=$3 428 | MAXRATE=$4 429 | if [[ "$R_TYPE" != "raw_seq" && "$R_TYPE" != "raw_rand" ]]; then 430 | create_files_rw_type $NUM_READERS $RW_TYPE 431 | fi 432 | start_readers_writers $NUM_READERS $NUM_WRITERS $R_TYPE $MAXRATE 433 | } 434 | 435 | function start_interleaved_readers 436 | { 437 | READFILE=$1 438 | NUM_READERS=$2 439 | 440 | ZONE_SIZE=16384 441 | SKIP_BYTES=$[((${NUM_READERS}-1)*${ZONE_SIZE})+1] 442 | ZONE_MODE=strided 443 | 444 | echo Starting $NUM_READERS interleaved readers 445 | for ((i = 0 ; $i < $NUM_READERS ; i++)) 446 | do 447 | READ_OFFSET=$[$i*$ZONE_SIZE] 448 | $FIO --name=reader$i -rw=read --numjobs=1 \ 449 | --filename=$READFILE \ 450 | --ioengine=sync --iomem=malloc --bs=$ZONE_SIZE \ 451 | --offset=$READ_OFFSET --zonesize=$ZONE_SIZE \ 452 | --zonemode=$ZONE_MODE \ 453 | --zoneskip=$SKIP_BYTES > /dev/null & 454 | done 455 | } 456 | 457 | function print_save 458 | { 459 | thr_stat_file_name=$1 460 | message=$2 461 | command=$3 462 | extra_rm_lines=${4:-0} 463 | 464 | echo -n "$message" | tee -a ${thr_stat_file_name} 465 | len=$(cat iostat.out | grep ^$HIGH_LEV_DEV | wc -l) 466 | # collect iostat aggthr lines into one file, throwing away: 467 | # . the first sample, because it just contains a wrong value 468 | # (easy to see by letting iostat start during a steady workload) 469 | # . the last sample, because it can be influenced by the operations 470 | # performed at the end of the test 471 | cat iostat.out | grep ^$HIGH_LEV_DEV | awk "{ $command }" |\ 472 | tail -n$(($len-1-$extra_rm_lines)) | head -n$(($len-1)) > iostat-aggthr 473 | if [[ "$BW_LAT_RESULTS" == true && \ 474 | $(cat iostat-aggthr | wc -l ) -le 1 ]] 475 | then 476 | echo " ERROR - too few values." | tee -a ${thr_stat_file_name} 477 | echo " min max avg std_dev" | \ 478 | tee -a ${thr_stat_file_name} 479 | echo " X X X X" | \ 480 | tee -a ${thr_stat_file_name} 481 | else 482 | echo | tee -a ${thr_stat_file_name} 483 | sh $CALC_AVG_AND_CO 99 < iostat-aggthr | \ 484 | tee -a $thr_stat_file_name 485 | fi 486 | } 487 | 488 | function print_save_agg_thr 489 | { 490 | sed -i 's/,/\./g' iostat.out 491 | sed -i '3,6d' iostat.out 492 | print_save $1 "Aggregated throughput:" 'print $3 + $4' $2 493 | print_save $1 "Read throughput:" 'print $3' $2 494 | print_save $1 "Write throughput:" 'print $4' $2 495 | } 496 | -------------------------------------------------------------------------------- /utilities/plot_bar_errbar_subplots.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Execute me with python3! 
3 | import numpy as np 4 | import matplotlib as mpl 5 | import matplotlib.pyplot as plt 6 | import matplotlib.patches as patches 7 | import matplotlib.lines as mlines 8 | import sys 9 | import os 10 | import re 11 | 12 | if len(sys.argv) < 2: 13 | print ("Tell me the file name please") 14 | sys.exit() 15 | 16 | with open(sys.argv[1]) as f: 17 | content = [x.strip() for x in f.readlines()] 18 | 19 | fileprefix = os.path.splitext(sys.argv[1])[0] 20 | 21 | num_sub_plots=len(content)-8 22 | 23 | headline=content[7].split() 24 | 25 | scheds=headline[2:] 26 | 27 | labels = ['Average latency experienced by individual I/O operations of the target (error bars show standard deviation)'] 28 | legend_colors = ['0.5'] 29 | legend_range=1 30 | colors = ['0.5', '0.8'] 31 | 32 | ind = np.arange(len(scheds)) 33 | width = 0.5 34 | 35 | # works only with at least two subplots 36 | f, ax = plt.subplots(1, num_sub_plots, sharey=True, sharex=True, figsize=(10, 6)) 37 | 38 | plt.subplots_adjust(top=0.86) 39 | 40 | for axis in ax: 41 | axis.tick_params(axis='y', which='major', labelsize=6) 42 | axis.tick_params(axis='y', which='minor', labelsize=2) 43 | 44 | f.subplots_adjust(bottom=0.15) # make room for the legend 45 | 46 | scheds = [sched.replace('-', '\n', 1) for sched in scheds] 47 | 48 | plt.xticks(ind+width/2., scheds) 49 | 50 | plt.suptitle(content[0].replace('# ', '')) 51 | 52 | def autolabel(rects, axis, xpos='center'): 53 | """ 54 | Attach a text label above each bar in *rects*, displaying its height. 55 | 56 | *xpos* indicates which side to place the text w.r.t. the center of 57 | the bar. It can be one of the following {'center', 'right', 'left'}. 58 | """ 59 | 60 | xpos = xpos.lower() # normalize the case of the parameter 61 | ha = {'center': 'center', 'right': 'left', 'left': 'right'} 62 | offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off 63 | 64 | for rect in rects: 65 | height = rect.get_height() 66 | axis.text(rect.get_x() + rect.get_width()*offset[xpos], 67 | rect.get_y() + rect.get_height() + max_lat/80., 68 | '{:.4g}'.format(height), ha=ha[xpos], va='bottom', 69 | size=8) 70 | 71 | 72 | p = [] # list of bar properties 73 | def create_subplot(values, errors, colors, axis, title): 74 | bar_renderers = [] 75 | ind = np.arange(len(values)) 76 | 77 | r = axis.bar(ind, values, yerr=errors, width=0.5, alpha=0.6, ecolor='black', align='edge', capsize=5) 78 | autolabel(r, axis) 79 | bar_renderers.append(r) 80 | 81 | axis.set_title(title, size=10) 82 | return bar_renderers 83 | 84 | # compute max to position labels at a fixed offset above bars 85 | max_lat = 0 86 | for line in content[8:]: 87 | line_elems = line.split() 88 | numbers = line_elems[1:] 89 | 90 | values = np.asarray(numbers[::2]).astype(np.float).tolist() 91 | errors = np.asarray(numbers[1::2]).astype(np.float).tolist() 92 | 93 | sums = [a + b for a, b in zip(values, errors)] 94 | 95 | local_max_lat = np.amax(sums) 96 | if max_lat < local_max_lat: 97 | max_lat = local_max_lat 98 | 99 | i = 0 100 | for line in content[8:]: 101 | line_elems = line.split() 102 | numbers = line_elems[1:] 103 | 104 | values = np.asarray(numbers[::2]).astype(np.float).tolist() 105 | errors = np.asarray(numbers[1::2]).astype(np.float).tolist() 106 | 107 | workload_name=line_elems[0].replace('_', ' ') 108 | interferers_name = re.sub(r".*vs ", '', workload_name) 109 | target_name = re.sub(r" vs.*", '', workload_name) 110 | p.extend(create_subplot(values, errors, colors, ax[i], 111 | interferers_name + '\n' + target_name, 112 | )) 113 | i += 1 
114 | 115 | 116 | 117 | ax[0].set_ylabel('Latency [ms]') # add left y label 118 | ax[0].text(-0.02, -0.025, 'I/O policy:\nScheduler:', 119 | horizontalalignment='right', 120 | verticalalignment='top', 121 | transform=ax[0].transAxes) 122 | ax[0].text(-0.02, 1.012, 'Interferers:\nTarget:', 123 | horizontalalignment='right', 124 | verticalalignment='bottom', 125 | transform=ax[0].transAxes) 126 | 127 | 128 | f.legend(labels=labels, 129 | bbox_to_anchor=(0.5, -0.0), 130 | loc='lower center', 131 | ncol=2) 132 | 133 | # set the same scale on all subplots' y-axis 134 | y_mins, y_maxs = zip(*[axis.get_ylim() for axis in ax]) 135 | for axis in ax: 136 | axis.set_ylim((min(y_mins), max(y_maxs))) 137 | 138 | if len(sys.argv) > 2: 139 | plt.savefig(fileprefix + '.' + sys.argv[2]) 140 | else: 141 | plt.show() 142 | -------------------------------------------------------------------------------- /utilities/plot_stacked_bar_subplots.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Execute me with python3! 3 | import numpy as np 4 | import matplotlib as mpl 5 | import matplotlib.pyplot as plt 6 | import matplotlib.patches as patches 7 | import matplotlib.lines as mlines 8 | import sys 9 | import os 10 | import re 11 | 12 | if len(sys.argv) < 2: 13 | print ("Tell me the file name please") 14 | sys.exit() 15 | 16 | with open(sys.argv[1]) as f: 17 | content = [x.strip() for x in f.readlines()] 18 | 19 | fileprefix = os.path.splitext(sys.argv[1])[0] 20 | 21 | num_sub_plots=len(content)-8 22 | 23 | headline=content[7].split() 24 | 25 | scheds=headline[2:] 26 | 27 | no_pol_idx=next( (i for i, x in enumerate(scheds) if x=='none-none'), -1) 28 | 29 | if no_pol_idx != -1: 30 | del scheds[no_pol_idx] 31 | labels = ['Cumulative avg throughput of interferers', 32 | 'Avg throughput of target', 33 | 'Avg total throughput (sum of bars)', 34 | 'Avg throughput reached without any I/O control', 35 | 'Min avg throughput to be guaranteed to target' 36 | ] 37 | legend_colors = ['turquoise', 'lightcoral', 'white', 'white', 'white'] 38 | legend_range=5 39 | else: 40 | labels = ['Cumulative avg throughput of interferers', 41 | 'Avg throughput of target', 42 | 'Avg total throughput (sum of bars)', 43 | 'Min avg throughput to be guaranteed to target' 44 | ] 45 | legend_colors = ['turquoise', 'lightcoral', 'white', 'white'] 46 | legend_range=4 47 | 48 | colors = ['lightcoral', 'turquoise'] 49 | 50 | ind = np.arange(len(scheds)) 51 | width = 0.5 52 | 53 | # works only with at least two subplots 54 | f, ax = plt.subplots(1, num_sub_plots, sharey=True, sharex=True, figsize=(10, 6)) 55 | 56 | plt.subplots_adjust(top=0.86) 57 | 58 | for axis in ax: 59 | axis.tick_params(axis='y', which='major', labelsize=6) 60 | axis.tick_params(axis='y', which='minor', labelsize=2) 61 | 62 | f.subplots_adjust(bottom=0.2) # make room for the legend 63 | 64 | scheds = [sched.replace('-', '\n', 1) for sched in scheds] 65 | 66 | plt.xticks(ind+width/2., scheds) 67 | 68 | plt.suptitle(content[0].replace('# ', '')) 69 | 70 | def autolabel(rects, axis, xpos='center'): 71 | """ 72 | Attach a text label above each bar in *rects*, displaying its height. 73 | 74 | *xpos* indicates which side to place the text w.r.t. the center of 75 | the bar. It can be one of the following {'center', 'right', 'left'}. 
76 | """ 77 | 78 | xpos = xpos.lower() # normalize the case of the parameter 79 | ha = {'center': 'center', 'right': 'left', 'left': 'right'} 80 | offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off 81 | 82 | for rect in rects: 83 | labelheight = height = rect.get_height() 84 | if rect.get_y() > 0 and rect.get_y() < max_thr / 150 and labelheight < max_thr / 150: 85 | labelheight = labelheight * 12; 86 | elif rect.get_y() > 0 and rect.get_y() < max_thr / 100 and labelheight < max_thr / 100: 87 | labelheight = labelheight * 10; 88 | 89 | axis.text(rect.get_x() + rect.get_width()*offset[xpos], 90 | rect.get_y() + labelheight / 2., 91 | '{:.4g}'.format(height), ha=ha[xpos], va='bottom', 92 | size=8) 93 | 94 | 95 | p = [] # list of bar properties 96 | def create_subplot(matrix, colors, axis, title, reachable_thr): 97 | bar_renderers = [] 98 | ind = np.arange(matrix.shape[1]) 99 | 100 | bottoms = np.cumsum(np.vstack((np.zeros(matrix.shape[1]), matrix)), axis=0)[:-1] 101 | 102 | for i, row in enumerate(matrix): 103 | r = axis.bar(ind, row, width=0.5, color=colors[i], bottom=bottoms[i], 104 | align='edge') 105 | autolabel(r, axis) 106 | bar_renderers.append(r) 107 | 108 | if reachable_thr > 0: 109 | axis.axhline(y=float(reachable_thr), xmin=0.0, xmax=1, ls='dashed', dashes=(7, 7), 110 | c='black', lw=1) 111 | 112 | axis.set_title(title, size=10) 113 | return bar_renderers 114 | 115 | max_thr = 0 116 | i = 0 117 | for line in content[8:]: 118 | line_elems = line.split() 119 | numbers = line_elems[1:] 120 | 121 | first_row = np.asarray(numbers[::2]).astype(np.float).tolist() 122 | second_row = np.asarray(numbers[1::2]).astype(np.float).tolist() 123 | 124 | mat = np.array([first_row, second_row]) 125 | 126 | tot_throughput = np.amax(mat.sum(axis=0)) 127 | 128 | max_thr = max(tot_throughput, max_thr) 129 | i += 1 130 | 131 | i = 0 132 | for line in content[8:]: 133 | line_elems = line.split() 134 | numbers = line_elems[1:] 135 | 136 | first_row = np.asarray(numbers[::2]).astype(np.float).tolist() 137 | second_row = np.asarray(numbers[1::2]).astype(np.float).tolist() 138 | 139 | reachable_thr = 0 140 | if no_pol_idx != -1: 141 | reachable_thr = first_row[no_pol_idx] + second_row[no_pol_idx] 142 | del first_row[no_pol_idx] 143 | del second_row[no_pol_idx] 144 | 145 | mat = np.array([first_row, second_row]) 146 | workload_name=line_elems[0].replace('_', ' ') 147 | interferers_name = re.sub(r".*vs ", '', workload_name) 148 | target_name = re.sub(r" vs.*", '', workload_name) 149 | p.extend(create_subplot(mat, colors, ax[i], interferers_name + '\n' + target_name, 150 | reachable_thr)) 151 | i += 1 152 | 153 | 154 | ax[0].set_ylabel('Target, interferers and total throughput') # add left y label 155 | ax[0].text(-0.02, -0.025, 'I/O policy:\nScheduler:', 156 | horizontalalignment='right', 157 | verticalalignment='top', 158 | transform=ax[0].transAxes) 159 | ax[0].text(-0.02, 1.012, 'Interferers:\nTarget:', 160 | horizontalalignment='right', 161 | verticalalignment='bottom', 162 | transform=ax[0].transAxes) 163 | 164 | 165 | ref_line=content[4].split() 166 | ref_value=ref_line[-1] 167 | 168 | if ref_value.replace('.','',1).isdigit(): 169 | [axis.axhline(y=float(ref_value), xmin=0.0, xmax=1, ls='dashed', c='black', lw=1, dashes=(4, 6)) for axis in ax] 170 | else: 171 | legend_range=legend_range-1 172 | no_ref_value=True 173 | 174 | 175 | class Handler(object): 176 | def __init__(self, colors): 177 | self.colors=colors 178 | def legend_artist(self, legend, orig_handle, fontsize, handlebox): 
179 | x0, y0 = handlebox.xdescent, handlebox.ydescent 180 | width, height = handlebox.width, handlebox.height 181 | patch = plt.Rectangle([x0, y0], width, height, facecolor=self.colors[1], 182 | edgecolor='none', transform=handlebox.get_transform()) 183 | patch2 = plt.Rectangle([x0, y0], width, height/2., facecolor=self.colors[0], 184 | edgecolor='none', transform=handlebox.get_transform()) 185 | handlebox.add_artist(patch) 186 | handlebox.add_artist(patch2) 187 | return patch 188 | 189 | mpl.rcParams['hatch.linewidth'] = 10.0 190 | handles = [patches.Rectangle((0,0),1,1,ec='none', facecolor=legend_colors[i]) for i in range(legend_range)] 191 | handles[2] = patches.Rectangle((0,0),1,1) 192 | 193 | if no_pol_idx != -1: 194 | handles[3] = mlines.Line2D([], [], ls='dashed', c='black', lw=1, dashes=(7, 7)) 195 | if not no_ref_value: 196 | handles[4] = mlines.Line2D([], [], ls='dashed', c='black', lw=1, dashes=(4, 6)) 197 | else: 198 | if not no_ref_value: 199 | handles[3] = mlines.Line2D([], [], ls='dashed', c='black', lw=1, dashes=(4, 6)) 200 | 201 | f.legend(handles=handles, labels=labels, 202 | handler_map={handles[2]: Handler(colors)}, 203 | bbox_to_anchor=(0.5, -0.0), 204 | loc='lower center', 205 | ncol=2) 206 | 207 | plt.yticks(list(plt.yticks()[0]) + [10]) 208 | 209 | # set the same scale on all subplots' y-axis 210 | y_mins, y_maxs = zip(*[axis.get_ylim() for axis in ax]) 211 | for axis in ax: 212 | axis.set_ylim((min(y_mins), max(y_maxs))) 213 | 214 | if len(sys.argv) > 2: 215 | plt.savefig(fileprefix + '.' + sys.argv[2]) 216 | else: 217 | plt.show() 218 | -------------------------------------------------------------------------------- /utilities/plot_stats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export LC_NUMERIC=C 3 | dirname=plots 4 | ref_mode=${2:-"ref"} 5 | term_mode=${3:-"x11"} 6 | scaling_factor=${4:-"1.55"} 7 | 8 | if [[ "$5" == print_tables ]]; then 9 | PRINT_TABLES=yes 10 | fi 11 | 12 | plot_id=1 13 | usage_msg="\ 14 | Usage: 15 | plot_stats.sh <table_file> | <table_dir> [<ref_mode>] [<term_mode>] 16 | [<scaling_factor>] [print_tables] 17 | 18 | - if the first parameter is a directory, make a plot for each table 19 | file in the directory 20 | - ref_mode may be ref or noref 21 | - term_mode may be x11, gif or eps. 22 | 23 | " 24 | 25 | if [[ "$PRINT_TABLES" == yes ]]; then 26 | ../utilities/check_dependencies.sh bash awk bc 27 | else 28 | ../utilities/check_dependencies.sh bash awk gnuplot bc 29 | fi 30 | if [[ $? -ne 0 ]]; then 31 | exit 32 | fi 33 | 34 | .
../utilities/lib_utils.sh 35 | 36 | if [ $term_mode == "eps" ] ; then 37 | lw=3 38 | else 39 | lw=1 40 | fi 41 | 42 | function create_label_file 43 | { 44 | in_file_name=$1 45 | col_idx=$2 46 | x_offset=$3 47 | y_offset=$4 48 | label_file=$5 49 | 50 | # use two different width depending on whether the value is lower than 100 51 | awk '{ if ($'$col_idx' < 0) \ 52 | printf "set label \"X\" at %g,%g center font \"arial,'$((FONT_SIZE+3))'\""\ 53 | " front\n",\ 54 | (row++)'$x_offset$GIF_OFFSET', '$y_offset'*0.75; \ 55 | else if ($'$col_idx' < 100) \ 56 | printf "set label \"%.3f\" at %g,%g center font \"arial,'$FONT_SIZE'\""\ 57 | " front\n",\ 58 | $'$col_idx', (row++)'$x_offset$GIF_OFFSET', $'$col_idx'+'$y_offset'; \ 59 | else \ 60 | printf "set label \"%.f\" at %g,%g center font \"arial,'$FONT_SIZE'\""\ 61 | " front\n",\ 62 | $'$col_idx', (row++)'$x_offset$GIF_OFFSET', $'$col_idx'+'$y_offset'}' \ 63 | < $in_file_name > $label_file 64 | 65 | # remove leading zeros 66 | sed 's/\"0\./\"\./' $label_file > $label_file.tmp 67 | mv $label_file.tmp $label_file 68 | } 69 | 70 | # create files (loaded by gnuplot) containing the relative positions 71 | # of the labels (numbers) written on top of the bars 72 | function create_label_positions() 73 | { 74 | 75 | if [ "$term_mode" == "gif" ] ; then 76 | FONT_SIZE=10 77 | GIF_OFFSET=+.02 78 | else 79 | FONT_SIZE=15 80 | fi 81 | 82 | if [[ "$1" -gt 5 ]] ; then 83 | FONT_SIZE=$(($FONT_SIZE - 3)) 84 | if [ "$term_mode" == "eps" ] ; then 85 | FONT_SIZE=$(($FONT_SIZE - 2)) 86 | fi 87 | fi 88 | 89 | label_y_offset=`echo "$max_y/100 * 2" | bc -l` 90 | 91 | case "$1" in 92 | 1) 93 | create_label_file $in_file_name 2 -.0 $label_y_offset label_1.plt 94 | ;; 95 | 2) 96 | create_label_file $in_file_name 2 -.14 $label_y_offset label_1.plt 97 | create_label_file $in_file_name 3 +.20 $label_y_offset label_2.plt 98 | ;; 99 | 3) 100 | create_label_file $in_file_name 2 -.25 $label_y_offset label_1.plt 101 | create_label_file $in_file_name 3 -.01 $label_y_offset label_2.plt 102 | create_label_file $in_file_name 4 +.26 $label_y_offset label_3.plt 103 | ;; 104 | 4) 105 | create_label_file $in_file_name 2 -.30 $label_y_offset label_1.plt 106 | create_label_file $in_file_name 3 -.11 $label_y_offset label_2.plt 107 | create_label_file $in_file_name 4 +.10 $label_y_offset label_3.plt 108 | create_label_file $in_file_name 5 +.31 $label_y_offset label_4.plt 109 | ;; 110 | 5) 111 | create_label_file $in_file_name 2 -.14 $label_y_offset label_1.plt 112 | create_label_file $in_file_name 3 +.20 $label_y_offset label_2.plt 113 | create_label_file $in_file_name 4 +.30 $label_y_offset label_3.plt 114 | create_label_file $in_file_name 5 +.40 $label_y_offset label_4.plt 115 | create_label_file $in_file_name 6 +.50 $label_y_offset label_5.plt 116 | ;; 117 | 6) # good for four clusters 118 | create_label_file $in_file_name 2 -.39 $label_y_offset label_1.plt 119 | create_label_file $in_file_name 3 -.23 $label_y_offset label_2.plt 120 | create_label_file $in_file_name 4 -.07 $label_y_offset label_3.plt 121 | create_label_file $in_file_name 5 +.08 $label_y_offset label_4.plt 122 | create_label_file $in_file_name 6 +.21 $label_y_offset label_5.plt 123 | create_label_file $in_file_name 7 +.35 $label_y_offset label_6.plt 124 | ;; 125 | *) 126 | echo $1 bars not supported 127 | exit 128 | ;; 129 | esac 130 | } 131 | 132 | function write_basic_plot_conf() 133 | { 134 | num_bars=$1 135 | 136 | printf " 137 | set title \"$plot_title\" 138 | set style fill solid 0.8 border -1 139 | set style data 
histogram 140 | set style histogram cluster gap 1 141 | set mytics 142 | set xtics scale 0 143 | set grid y 144 | set ytics scale 2.0, 1.2 145 | set bars 3.0 146 | set boxwidth 1 147 | set pointsize 4 148 | set key samplen 1 149 | set auto fix 150 | set yrange [0:$max_y] 151 | # set size 1.4 #-> useful if the legend overlaps with some bar 152 | " >> tmp.txt 153 | } 154 | 155 | function plot_histograms() 156 | { 157 | in_file_name=$1 158 | out_file_path=$2 159 | x_label=${3:-"No x label!"} 160 | x_label_offset=${4:-0} 161 | y_label=$5 162 | num_bars=$6 163 | plot_curves=$7 164 | ref_label=$8 165 | ref_value=$9 166 | max_y=${10} 167 | 168 | type gnuplot >/dev/null 2>&1 169 | if [[ $? -ne 0 ]]; then 170 | return 171 | fi 172 | 173 | rm -f tmp.txt 174 | write_basic_plot_conf $num_bars $out_file_path 175 | printf " 176 | set xlabel \"$x_label\" offset 0,$x_label_offset 177 | set ylabel \"$y_label\" 178 | " >> tmp.txt 179 | 180 | create_label_positions $num_bars 181 | for ((i = 1; i <= $num_bars; i++)) 182 | do 183 | echo load \"label_${i}.plt\" >> tmp.txt 184 | done 185 | 186 | case $term_mode in 187 | eps) 188 | printf " 189 | set style fill pattern 1 190 | set output \"${out_file_path}.eps\" 191 | set term post eps 22 192 | " >> tmp.txt 193 | options="-mono" 194 | ;; 195 | gif) 196 | printf " 197 | #set key horizontal 8000, 30 198 | set output \"${out_file_path}.gif\" 199 | set term gif font \"arial,14\" size 1024,768 200 | " >> tmp.txt 201 | ;; 202 | *) 203 | printf " 204 | set term $term_mode $plot_id font \"arial,12\" 205 | " >> tmp.txt 206 | options="-persist" 207 | ;; 208 | esac 209 | 210 | printf "plot " >> tmp.txt 211 | 212 | if [[ "$ref_value" != "" ]] ; then 213 | printf "%f t \"$ref_label\" lw $lw, " $ref_value >> tmp.txt 214 | fi 215 | 216 | echo $plot_curves >> tmp.txt 217 | 218 | if [ $term_mode == "x11" ] ; then 219 | enable_X_access_and_test_cmd "" just_test_display 220 | fi 221 | 222 | gnuplot $options < tmp.txt 223 | 224 | if [[ "$XHOST_CONTROL" != "" ]]; then 225 | xhost - > /dev/null 2>&1 226 | XHOST_CONTROL= 227 | fi 228 | 229 | rm -f label_*.plt tmp.txt 230 | 231 | plot_id=$(($plot_id+1)) 232 | } 233 | 234 | function get_max_value 235 | { 236 | echo $1 $2 | awk '{if ($1 > $2) print $1; else print $2}' 237 | } 238 | 239 | if [[ "$1" == "-h" || "$1" == "" ]]; then 240 | printf "$usage_msg" 241 | exit 242 | fi 243 | 244 | function plot_bw_lat_bars 245 | { 246 | command=$1 247 | if [[ $term_mode == png || $term_mode == eps ]]; then 248 | file_type=$term_mode 249 | fi 250 | type python3 >/dev/null 2>&1 251 | if [[ $? -ne 0 ]]; then 252 | echo Install python3 if you want to get plots too 253 | return 254 | fi 255 | python -c "import numpy" >/dev/null 2>&1 256 | if [[ $? -ne 0 ]]; then 257 | echo Install numpy for python3 if you want to get plots too 258 | return 259 | fi 260 | ./$command $in_filename $file_type 261 | } 262 | 263 | function parse_table 264 | { 265 | in_filename=$1 266 | 267 | if [[ "$in_filename" == "" || ! 
-f $in_filename ]]; then 268 | echo Sorry, table $in_filename not found 269 | exit 270 | fi 271 | 272 | if [[ "$(echo $in_filename | \ 273 | egrep ".*latency.*-bw-table.txt")" != "" ]]; then 274 | plot_bw_lat_bars plot_stacked_bar_subplots.py 275 | return 276 | elif [[ "$(echo $in_filename | \ 277 | egrep ".*latency.*-lat-table.txt")" != "" ]]; then 278 | plot_bw_lat_bars plot_bar_errbar_subplots.py 279 | return 280 | fi 281 | 282 | sed 's/X/-1/g' $in_filename > $in_filename.tmp1 283 | sed 's/-1-Axis/X-Axis/g' $in_filename.tmp1 > $in_filename.tmp 284 | 285 | out_filepath=$in_filename 286 | out_filepath="${out_filepath%.*}" 287 | in_filename=$in_filename.tmp 288 | 289 | lines=() 290 | max_value=0 291 | while read line; do 292 | lines+=("$line") 293 | if [[ $(echo $line | grep ^#) == "" ]] ; then 294 | first_word=$(echo $line | awk '{printf $1}') 295 | rest_of_line=$(echo $line | sed 's<'$first_word' <<') 296 | 297 | for number in $rest_of_line; do 298 | tmp_max=`get_max_value $number $max_value` 299 | 300 | if [[ $tmp_max == $number ]]; then 301 | max_value=$number 302 | fi 303 | done 304 | fi 305 | done < $in_filename 306 | 307 | max_value=$(echo "$max_value * $scaling_factor" | bc -l) 308 | 309 | if [[ "$max_value" == "0" ]]; then 310 | max_value=0.01 311 | fi 312 | 313 | line_idx=0 # first line 314 | plot_title=$(echo ${lines[$line_idx]} | sed 's/# //') 315 | 316 | line_idx=1 # second line 317 | 318 | x_label=$(echo ${lines[$line_idx]} | sed 's/# First column: //') 319 | ((line_idx++)) 320 | 321 | y_label=$(echo ${lines[$line_idx]} | sed 's/# Next columns: //' \ 322 | | sed 's/\(.*\), or -1.*/\1/') 323 | y_label="$y_label, or X in case of failure" 324 | ((line_idx++)) 325 | ((line_idx++)) 326 | 327 | if [[ $ref_mode == ref ]]; then 328 | reference_case=$(echo ${lines[$line_idx]} | sed 's/# Reference case: //') 329 | 330 | reference_case_value=$(grep "^ $reference_case" $in_filename | tail -n 1 | \ 331 | awk '{print $2}') 332 | 333 | if [[ "$reference_case_value" == "" ]]; then 334 | reference_case=`echo $reference_case | sed 's/seq/raw_seq/'` 335 | reference_case_value=$(grep "^ $reference_case" $in_filename |\ 336 | tail -n 1 | awk '{print $2}') 337 | fi 338 | 339 | reference_case_label=$(echo ${lines[$(($line_idx + 1))]} | \ 340 | sed 's/# Reference-case meaning: //') 341 | else 342 | reference_case=none 343 | fi 344 | ((line_idx += 3)) 345 | 346 | first_word=$(echo ${lines[$line_idx]} | sed 's/# //' | awk '{print $1}') 347 | scheduler_string=$(echo ${lines[$line_idx]} | sed 's<# '"$first_word"' <<') 348 | 349 | schedulers=() 350 | for sched in $scheduler_string; do 351 | schedulers+=($sched) 352 | done 353 | 354 | grep -v "^ $reference_case\|^#" $in_filename > tmp_file 355 | # tmp_file could be empty if the only data in the 356 | # file-table ($in_filename) is the reference case. 357 | # Thus in that case let's plot at least the reference case 358 | if [ ! 
-s tmp_file ]; then 359 | grep -v "^#" $in_filename > tmp_file 360 | fi 361 | 362 | curves="\"tmp_file\" using 2:xticlabels(1) t \"${schedulers[0]}\"" 363 | 364 | for ((i = 1 ; i < ${#schedulers[@]} ; i++)); do 365 | curves=$curves", \"\" using $((i+2)) t \"${schedulers[$i]}\"" 366 | done 367 | 368 | plot_histograms tmp_file $out_filepath \ 369 | "$x_label" 0 "$y_label" ${#schedulers[@]} \ 370 | "$curves" "$reference_case_label" "$reference_case_value" $max_value 371 | 372 | if [[ $term_mode != "x11" && $term_mode != "aqua" && \ 373 | "$PRINT_TABLES" != yes ]] ; then 374 | echo Wrote $out_file_path.$term_mode 375 | fi 376 | 377 | rm tmp_file $in_filename ${in_filename}1 378 | } 379 | 380 | if [ -f "$1" ]; then 381 | parse_table $1 382 | else 383 | if [ -d "$1" ]; then 384 | num_tables_parsed=0 385 | for table_file in "$1"/*-table.txt; do 386 | thr_component=$(echo $table_file | egrep throughput) 387 | startup_component=$(echo $table_file | egrep startup) 388 | video_component=$(echo $table_file | egrep video) 389 | 390 | if [[ "$thr_component" != "" && \ 391 | ( "$startup_component" != "" || "$video_component" != "" ) ]] 392 | then 393 | mixed_thr_lat_table=yes 394 | else 395 | mixed_thr_lat_table=no 396 | fi 397 | 398 | if [[ -f "$table_file" ]]; then 399 | if [[ ( $term_mode != "x11" && $term_mode != "aqua" ) || \ 400 | "$mixed_thr_lat_table" != yes ]]; then 401 | parse_table $table_file 402 | fi 403 | 404 | if [[ "$PRINT_TABLES" == yes && "$mixed_thr_lat_table" != yes ]] 405 | then 406 | echo ------------------------------------------------------- 407 | cat $table_file 408 | fi 409 | 410 | num_tables_parsed=$(($num_tables_parsed+1)) 411 | fi 412 | done 413 | 414 | if (($num_tables_parsed == 0)); then 415 | echo No table found, maybe you forgot to run calc_overall_stats.sh? 416 | else 417 | if [[ "$PRINT_TABLES" == yes ]]; then 418 | echo ------------------------------------------------------- 419 | fi 420 | fi 421 | else 422 | echo $1 is not either a table file or a directory 423 | exit 424 | fi 425 | fi 426 | 427 | if [[ "$(echo $1 | \ 428 | egrep ".*latency.*-bw-table.txt")" != "" ]]; then 429 | exit 430 | fi 431 | type gnuplot >/dev/null 2>&1 432 | if [[ $? -ne 0 ]]; then 433 | echo Install gnuplot if you want to get plots too 434 | fi 435 | -------------------------------------------------------------------------------- /utilities/tracing.sh: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2019 Paolo Valente 2 | 3 | # HOW TO USE THE FOLLOWING TWO FUNCTIONS 4 | # 5 | # 1) Include this file 6 | # 2) Set TRACE=1 if you do want to activate tracing. This parameter is useful 7 | # in that it allows you to make your code take or not take traces, by just 8 | # changing the value of this parameter 9 | # 3) Set DEVS to the name of the device for which you want to take a trace; 10 | # just the name (e.g., sda), not the full path (e.g., /dev/sda) 11 | # 4) Invoke init_tracing 12 | # 5) Invoke set_tracing 1 when you want to turn tracing on 13 | # 6) Invoke set_tracing 0 when you want to turn tracing off 14 | # 7) Browse the trace in your current dir (where these functions copy it) 15 | # 16 | # Here is an example: 17 | # 18 | # . 
19 | # TRACE=1 20 | # DEVS=sda # if needed, replace with the name of the actual device to trace 21 | # init_tracing 22 | # 23 | # echo > /sys/kernel/debug/tracing/trace # empty the trace (useful if TRACE=0) 24 | # set_tracing 1 25 | # 26 | # set_tracing 0 27 | 28 | function init_tracing { 29 | if [ "$TRACE" == "1" ] ; then 30 | if [ ! -d /sys/kernel/debug/tracing ] ; then 31 | mount -t debugfs none /sys/kernel/debug 32 | fi 33 | echo nop > /sys/kernel/debug/tracing/current_tracer 34 | echo 500000 > /sys/kernel/debug/tracing/buffer_size_kb 35 | echo blk > /sys/kernel/debug/tracing/current_tracer 36 | 37 | echo > /sys/kernel/debug/tracing/trace 38 | rm -f trace 39 | fi 40 | } 41 | 42 | function copy_trace { 43 | if [[ "$1" == 0 && "$trace_needs_copying" != "" ]]; then 44 | echo -n Copying block trace to $PWD ... 45 | cp /sys/kernel/debug/tracing/trace . 46 | echo " done" 47 | fi 48 | } 49 | 50 | function set_tracing { 51 | if [ "$TRACE" == "0" ] ; then 52 | return 53 | fi 54 | 55 | trace_needs_copying= 56 | if [[ -e /sys/kernel/debug/tracing/tracing_enabled && \ 57 | $(cat /sys/kernel/debug/tracing/tracing_enabled) -ne $1 ]]; then 58 | echo "echo $1 > /sys/kernel/debug/tracing/tracing_enabled" 59 | echo $1 > /sys/kernel/debug/tracing/tracing_enabled 60 | 61 | trace_needs_copying=yes 62 | fi 63 | 64 | dev=$(echo $DEVS | awk '{ print $1 }') 65 | if [[ -e /sys/block/$dev/trace/enable && \ 66 | $(cat /sys/block/$dev/trace/enable) -ne $1 ]]; then 67 | echo "echo $1 > /sys/block/$dev/trace/enable" 68 | echo $1 > /sys/block/$dev/trace/enable 69 | 70 | trace_needs_copying=yes 71 | fi 72 | 73 | if [ "$1" == 0 ]; then 74 | for cpu_path in /sys/kernel/debug/tracing/per_cpu/cpu? 75 | do 76 | stat_file=$cpu_path/stats 77 | OVER=$(grep "overrun" $stat_file | \ 78 | grep -v "overrun: 0") 79 | if [[ "$OVER" != "" && "$trace_needs_copying" != "" ]]; then 80 | cpu=$(basename $cpu_path) 81 | echo $OVER on $cpu, please increase buffer size! 82 | trace_needs_copying= 83 | fi 84 | done 85 | 86 | copy_trace $1 87 | fi 88 | } 89 | -------------------------------------------------------------------------------- /video_playing_vs_commands/WALL-E HD 1080p Trailer.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Algodev-github/S/27fa2b229ec60a323716327ecf423990a775b3dc/video_playing_vs_commands/WALL-E HD 1080p Trailer.mp4 -------------------------------------------------------------------------------- /video_playing_vs_commands/video_play_vs_comms.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (C) 2013 Mauro Andreolini 3 | # Paolo Valente 4 | # Arianna Avanzini 5 | 6 | ../utilities/check_dependencies.sh awk dd fio iostat /usr/bin/time mplayer 7 | if [[ $? -ne 0 ]]; then 8 | exit 9 | fi 10 | 11 | . ../config_params.sh 12 | . 
../utilities/lib_utils.sh 13 | CURDIR=$(pwd) 14 | 15 | # put into BACKING_DEVS the backing device for $CURDIR 16 | find_dev_for_dir $CURDIR 17 | 18 | if [[ "$BACKING_DEVS" != "$DEVS" ]]; then 19 | echo Video file is on different devices \($BACKING_DEVS\) 20 | echo from those of test files \($DEVS\) 21 | exit 22 | fi 23 | 24 | UTIL_DIR=`cd ../utilities; pwd` 25 | 26 | sched=${1-bfq} 27 | NUM_READERS=${2-10} 28 | NUM_WRITERS=${3-0} 29 | RW_TYPE=${4-seq} 30 | NUM_ITER=${5-3} 31 | TYPE=${6-real} 32 | CACHE=${7-n} 33 | STAT_DEST_DIR=${8-.} 34 | MAXRATE=${9-4000} # maximum total sequential write rate for which the 35 | # system apparently does not risk becoming 36 | # unresponsive under bfq with a 90 MB/s hard disk 37 | # (see comm_startup_lat script) 38 | 39 | VERBOSITY=${10} 40 | 41 | if [[ "$VERBOSITY" == verbose ]]; then 42 | REDIRECT=/dev/stdout 43 | else 44 | REDIRECT=/dev/null 45 | fi 46 | 47 | 48 | enable_X_access_and_test_cmd 49 | 50 | function show_usage { 51 | echo "\ 52 | Usage (as root): 53 | ./video_play_vs_comms.sh [\"\" | cur-sched | bfq | cfq | ...] 54 | [num_readers] [num_writers] 55 | [seq | rand | raw_seq | raw_rand] [<num_iter>] 56 | [real | fake] [<cache>: y|n] [<stat_dest_dir>] 57 | [<maxrate>] [verbose] 58 | 59 | first parameter equal to \"\" or cur-sched -> do not change scheduler 60 | 61 | fake implies that \"-vo null\" and \"-ao null\" are passed to mplayer. 62 | 63 | cache toggle: if y, let mplayer use a minimum of cache (more details 64 | in the comments inside this script) 65 | 66 | raw_seq/raw_rand -> read directly from device (no writers allowed) 67 | 68 | For example: 69 | sudo ./video_play_vs_comms.sh bfq 5 5 seq 20 real n mydir 70 | switches to bfq and, after launching 5 sequential readers and 5 sequential 71 | writers, runs mplayer 20 times. During each run 72 | a custom \"dd\" command is executed every 4 seconds. The file containing the computed 73 | statistics is stored in the mydir subdir of the current dir. 74 | 75 | Default parameter values are \"\", $NUM_READERS, $NUM_WRITERS, \ 76 | $RW_TYPE, $NUM_ITER, $TYPE, $CACHE, $STAT_DEST_DIR and $MAXRATE 77 | 78 | See the comments inside this script for details about the video 79 | currently in use for this benchmark and the \"dd\" command used as noise. 80 | " 81 | } 82 | 83 | # Execute next command as noise. The command reads 15 uncached megabytes 84 | # greedily. As such it creates the maximum possible short-term interference: 85 | # it lasts little, so with BFQ it enjoys weight raising all the time, and it 86 | # does as much I/O as possible, so it interferes as much as possible 87 | COMMAND="dd if=$BASE_DIR/smallfile of=/dev/null iflag=nocache bs=1M count=15" 88 | 89 | PLAYER_CMD="mplayer" 90 | 91 | # Let mplayer provide benchmarking information, and drop late frames, so 92 | # that we can measure the effects of too high I/O latencies 93 | BENCH_OPTS="-benchmark -framedrop" 94 | if [ $TYPE == "fake" ]; then 95 | VIDEO_OPTS="-vo null" 96 | AUDIO_OPTS="-ao null" 97 | fi 98 | 99 | # Modern devices with internal queues cause latencies that no I/O 100 | # scheduler can avoid, unless the scheduler forces the device to 101 | # serve one request at a time (with obvious throughput penalties). 102 | if [[ $CACHE != y && $CACHE != Y ]]; then 103 | CACHE_OPTS="-nocache" 104 | else 105 | CACHE_OPTS="-cache 8192" 106 | fi 107 | SKIP_START_OPTS="-ss" 108 | SKIP_LENGTH_OPTS="-endpos" 109 | 110 | # The following file name is the one assigned as a default to the 111 | # trailer available at 112 | # http://www.youtube.com/watch?v=8-_9n5DtKOc 113 | # when it is downloaded.
For convenience, a copy of the video is # already present in this directory. In spite of the file name, it is 115 | # a 720p video (higher resolutions are apparently available only without 116 | # audio). 117 | VIDEO_FNAME="$CURDIR/WALL-E HD 1080p Trailer.mp4" 118 | # The following parameters let the playback of the trailer start a 119 | # few seconds before the most demanding portion of the video. 120 | SKIP_START="00:01:32" 121 | SKIP_LENGTH_SEC=20 122 | SKIP_LENGTH="00:00:${SKIP_LENGTH_SEC}" 123 | STOP_ITER_TOLERANCE_SEC=40 124 | 125 | PLAYER_OUT_FNAME=${sched}-player_out.txt 126 | DROP_DATA_FNAME=${sched}-drop_data_points.txt 127 | DROP_RATE_FNAME=${sched}-drop_rate_points.txt 128 | 129 | if [ "$1" == "-h" ]; then 130 | show_usage 131 | exit 132 | fi 133 | 134 | function clean_and_exit { 135 | shutdwn 'fio iostat mplayer' 136 | cd .. 137 | # rm work dir 138 | rm -rf results-${sched} 139 | 140 | for dev in $DEVS; do 141 | if [[ $CACHE != y && $CACHE != Y && $sched == bfq ]]; then 142 | echo "Deactivating strict_guarantees on $dev" 143 | echo 0 > /sys/block/$dev/queue/iosched/strict_guarantees 144 | fi 145 | done 146 | 147 | if [[ "$XHOST_CONTROL" != "" ]]; then 148 | xhost - > /dev/null 2>&1 149 | fi 150 | exit 151 | } 152 | 153 | function check_timed_out { 154 | cur=$1 155 | timeout=$2 156 | 157 | echo -ne "Pattern-waiting time / Timeout: $cur / $timeout\033[0K\r" 158 | if [ $cur -eq $timeout ]; then 159 | echo "Start-up timed out, shutting down and removing all files" 160 | clean_and_exit 161 | fi 162 | } 163 | 164 | function invoke_player_plus_commands { 165 | 166 | rm -f ${DROP_DATA_FNAME} 167 | 168 | M_CMD="${PLAYER_CMD} ${BENCH_OPTS} ${VIDEO_OPTS} ${AUDIO_OPTS}" 169 | M_CMD="${M_CMD} ${CACHE_OPTS}" 170 | M_CMD="${M_CMD} ${SKIP_START_OPTS} ${SKIP_START}" 171 | M_CMD="${M_CMD} ${SKIP_LENGTH_OPTS} ${SKIP_LENGTH}" 172 | M_CMD="${M_CMD} \"${VIDEO_FNAME}\"" 173 | 174 | for ((i = 0 ; i < $NUM_ITER ; i++)) ; do 175 | echo Iteration $(($i+1)) / $NUM_ITER 176 | rm -f ${PLAYER_OUT_FNAME} && touch ${PLAYER_OUT_FNAME} 177 | 178 | sleep 2 # To introduce a pause between consecutive iterations, 179 | # which better mimics user behavior. This also lets 180 | # the invocation of the player not belong to a burst 181 | # of I/O queue activations (which is not what 182 | # happens if a player is invoked by a user) 183 | 184 | eval ${M_CMD} 2>&1 | tee -a ${PLAYER_OUT_FNAME} & 185 | echo "Started ${M_CMD}" 186 | ITER_START_TIMESTAMP=`date +%s` 187 | 188 | count=0 189 | while ! grep -E "Starting playback..."
${PLAYER_OUT_FNAME} > /dev/null 2>&1 ; do 190 | sleep 1 191 | count=$(($count+1)) 192 | check_timed_out $count 30 193 | done 194 | 195 | echo > $REDIRECT 196 | echo Pattern read > $REDIRECT 197 | 198 | while true ; do 199 | sleep 4 200 | if [ `date +%s` -gt $(($ITER_START_TIMESTAMP+$SKIP_LENGTH_SEC+$STOP_ITER_TOLERANCE_SEC)) ]; then 201 | echo Timeout: stopping iterations 202 | clean_and_exit 203 | fi 204 | 205 | # increase difficulty by syncing (in parallel, as sync 206 | # is blocking) 207 | echo Syncing in parallel > $REDIRECT 208 | sync & 209 | 210 | echo Executing $COMMAND > $REDIRECT 211 | (time -p $COMMAND) 2>&1 | tee -a lat-${sched} & 212 | if [ "`pgrep ${PLAYER_CMD}`" == "" ] ; then 213 | break 214 | fi 215 | done 216 | 217 | drop=`grep -n "^BENCHMARKn:" ${PLAYER_OUT_FNAME} | tr -s " " | \ 218 | cut -f7 -d" "` 219 | total=`grep -n "^BENCHMARKn:" ${PLAYER_OUT_FNAME} | tr -s " " | \ 220 | cut -f10 -d" "` 221 | echo $drop >> ${DROP_DATA_FNAME} 222 | echo $(echo "$drop $total" | awk '{printf "%f", $1/$2*100}') >> \ 223 | ${DROP_RATE_FNAME} 224 | echo "--- DROP DATA ---" 225 | cat ${DROP_DATA_FNAME} 226 | echo "--- DROP RATE ---" 227 | cat ${DROP_RATE_FNAME} 228 | rm -f ${PLAYER_OUT_FNAME} 229 | 230 | echo 3 > /proc/sys/vm/drop_caches 231 | done 232 | } 233 | 234 | function calc_frame_drops { 235 | echo "Frame drops:" | tee -a $1 236 | sh $CALC_AVG_AND_CO 99 < ${DROP_DATA_FNAME} | tee -a $1 237 | } 238 | 239 | function calc_frame_drop_rate { 240 | echo "Frame drop rate:" | tee -a $1 241 | sh $CALC_AVG_AND_CO 99 < ${DROP_RATE_FNAME} | tee -a $1 242 | } 243 | 244 | function calc_latency { 245 | echo "Latency:" | tee -a $1 246 | len=$(cat lat-${sched} | grep ^real | wc -l) 247 | cat lat-${sched} | grep ^real | tail -n$(($len)) | \ 248 | awk '{ print $2 }' > lat-${sched}-real 249 | sh $UTIL_DIR/calc_avg_and_co.sh 99 < lat-${sched}-real\ 250 | | tee -a $1 251 | } 252 | 253 | function compute_statistics { 254 | mkdir -p $STAT_DEST_DIR 255 | file_name=$STAT_DEST_DIR/\ 256 | ${sched}-${NUM_READERS}r${NUM_WRITERS}w-${RW_TYPE}-video_playing_stat.txt 257 | 258 | echo Results for $sched, $NUM_ITER $COMMAND, $NUM_READERS $RW_TYPE\ 259 | readers and $NUM_WRITERS $RW_TYPE writers | tee $file_name 260 | 261 | calc_frame_drops $file_name 262 | calc_frame_drop_rate $file_name 263 | calc_latency $file_name 264 | 265 | print_save_agg_thr $file_name 266 | } 267 | 268 | ## Main ## 269 | 270 | mkdir -p $STAT_DEST_DIR 271 | # turn to an absolute path (needed later) 272 | STAT_DEST_DIR=`cd $STAT_DEST_DIR; pwd` 273 | 274 | set_scheduler > $REDIRECT 275 | 276 | if [[ $CACHE != y && $CACHE != Y && $sched == bfq ]]; then 277 | for dev in $DEVS; do 278 | echo "Activating strict_guarantees on $dev" 279 | echo 1 > /sys/block/$dev/queue/iosched/strict_guarantees 280 | done 281 | fi 282 | 283 | # create and enter work dir 284 | rm -rf results-${sched} 285 | mkdir -p results-$sched 286 | cd results-$sched 287 | 288 | # setup a quick shutdown for Ctrl-C 289 | trap "clean_and_exit" sigint 290 | 291 | # file read by the interfering command 292 | create_file $BASE_DIR/smallfile 15 293 | 294 | echo Preliminary cache-flush to block until previous writes have been completed\ 295 | > $REDIRECT 296 | flush_caches 297 | 298 | if (( $NUM_READERS > 0 || $NUM_WRITERS > 0)); then 299 | start_readers_writers_rw_type $NUM_READERS $NUM_WRITERS $RW_TYPE $MAXRATE 300 | 301 | # wait for reader/writer start-up transitory to terminate 302 | secs=$(transitory_duration 7) 303 | 304 | while [ $secs -ge 0 ]; do 305 | echo -ne "Waiting for 
transitory to terminate: $secs\033[0K\r" 306 | sync & 307 | sleep 1 308 | : $((secs--)) 309 | done 310 | echo 311 | fi 312 | 313 | # start logging aggthr 314 | iostat -tmd /dev/$HIGH_LEV_DEV 3 | tee iostat.out > $REDIRECT & 315 | 316 | init_tracing 317 | set_tracing 1 318 | 319 | invoke_player_plus_commands 320 | 321 | shutdwn 'fio iostat' 322 | 323 | if [[ "$XHOST_CONTROL" != "" ]]; then 324 | xhost - > /dev/null 2>&1 325 | fi 326 | 327 | if [[ $NUM_ITER -ge 2 ]]; then 328 | compute_statistics 329 | fi 330 | 331 | cd .. 332 | 333 | # rm work dir 334 | rm -rf results-${sched} 335 | -------------------------------------------------------------------------------- /video_streaming/README: -------------------------------------------------------------------------------- 1 | To run the experiments, you first need to patch vlc to add the max-loss-rate 2 | check (which simply writes a message to stdout when the target maximum loss 3 | rate is exceeded). 4 | To do so, get the vlc sources (check which patches are currently available in 5 | this directory), then cd to the root of the vlc source tree and run 6 | 7 | patch -p1 < PATH_TO_THE_PATCH/vlc-your_version-limit-loss-rate.patch 8 | 9 | After building the patched vlc, set the few parameters in conf.sh described 10 | below, and finally invoke vlc_test.sh as superuser. 11 | 12 | The parameters to set in conf.sh are: 13 | . the location of the patched vlc executable 14 | . the user to run as when executing the main tasks that do not need 15 | superuser privileges 16 | . the address of the system running the vlc server, and the user@address to use 17 | when launching the (fake) clients 18 | . the list of movies to play (the paths to the files) 19 | 20 | There are a few other configuration parameters that you may want 21 | to change. It is probably best to first familiarize yourself with the set of 22 | scripts. To that end, start by invoking 23 | vlc_test.sh -h 24 | to get an idea of the usage, and then browse the script vlc_test.sh itself to 25 | understand exactly what it does and how the other programs support it. 26 | 27 | POSSIBLE PROBLEMS 28 | 29 | One of the reasons why the scripts may fail is that your version of nc has 30 | a different syntax/semantics from the expected one.
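As a concrete illustration of the workflow above, here is a minimal sketch of one possible end-to-end setup. It assumes vlc 1.0.6 (any version for which a patch is provided works the same way) and that this test suite is checked out under ~/S; both are just example choices, so adjust versions and paths to your environment, and build vlc as you normally would for your version.

    # patch and build vlc
    cd vlc-1.0.6
    patch -p1 < ~/S/video_streaming/vlc-1.0.6-limit-loss-rate.patch
    ./configure && make

    # edit conf.sh (VLC, USER, SERVER_ADDR, CLIENT, FILES, VLC_VIDEO_DELAY, MAX_LOSS),
    # then run the main script as superuser
    cd ~/S/video_streaming
    sudo ./vlc_test.sh 1 1 bfq

Regarding the nc issue mentioned above: the fake clients are started as "nc -l -u <port> -q -1" and the telnet interface is driven through "nc 127.0.0.1 4212" (see vlc_auto.sh), so make sure that your nc variant accepts exactly these options.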
-------------------------------------------------------------------------------- /video_streaming/conf.sh: -------------------------------------------------------------------------------- 1 | # path to the vlc executable containing the patch 2 | VLC=vlc-1.0.6/vlc 3 | 4 | # user to switch to for executing the tasks that do not need root privileges 5 | USER="paolo" 6 | 7 | # ADDRESS of the vlc server 8 | SERVER_ADDR=127.0.0.1 9 | 10 | # account@machine of the fake clients (one nc sink per movie) 11 | CLIENT=paolo@127.0.0.1 12 | 13 | # list of movies to play, represented as a list of pairs 14 | # (n-th video to play (v1, v2, v3, ...): path to the avi file) 15 | # Example: 16 | # FILES="v1:path_to_movie1 v2:path_to_movie2 v3:path_to_movie1" 17 | FILES="\ 18 | v1:/condivisa/Pisa/diskdev/bfq-code/test/test_suite-stuff/test-suite/video_streaming/movie1.avi 19 | v2:/condivisa/Pisa/diskdev/bfq-code/test/test_suite-stuff/test-suite/video_streaming/movie2.avi 20 | v3:/condivisa/Pisa/diskdev/bfq-code/test/test_suite-stuff/test-suite/video_streaming/movie3.avi" 21 | 22 | # time to wait between video submissions 23 | VLC_VIDEO_DELAY=15 24 | 25 | # maximum packet loss percentage accepted (/ 1000, i.e., 1000 means 1%) 26 | MAX_LOSS=1000 27 | -------------------------------------------------------------------------------- /video_streaming/read_files.sh: -------------------------------------------------------------------------------- 1 | root_name=$1 2 | files=$2 3 | 4 | idx=0 5 | for f in $files ; do 6 | echo "./reader $f > $root_name$idx &" 7 | ./reader $f > "$root_name$idx" & 8 | idx=$(($idx+1)) ; 9 | done ; 10 | wait 11 | 12 | idx=0 13 | for f in $files ; do 14 | echo `basename $root_name$idx`: 15 | cat "$root_name$idx" 16 | real_size="$real_size `cat $root_name$idx`" 17 | idx=$(($idx+1)) ; 18 | done 19 | 20 | echo '' > /tmp/mysize 21 | for s in $real_size ; do 22 | echo $s >> /tmp/mysize 23 | done 24 | 25 | awk '{ sum+=$1 };END{ print sum / 1024.0 / 1024.0 }' /tmp/mysize > \ 26 | ${root_name}-MB_total 27 | -------------------------------------------------------------------------------- /video_streaming/reader.c: -------------------------------------------------------------------------------- 1 | #include <stdio.h> 2 | #include <stdlib.h> 3 | #include <unistd.h> 4 | #include <fcntl.h> 5 | #include <signal.h> 6 | 7 | #define MIN_SIZE (64 << 20) /* byte */ 8 | #define MAX_SIZE (256 << 20) 9 | #define MIN_SLEEP 1 /* ms */ 10 | #define MAX_SLEEP 200 11 | #define SCALED_RAND(min, max) (int)((double)rand() * \ 12 | ((max) - (min)) / RAND_MAX + (min)) 13 | 14 | #define MAX_READ_SIZE (1 << 20) 15 | unsigned char buffer[MAX_READ_SIZE]; 16 | 17 | sig_atomic_t wait_signal = 1; 18 | int fd; 19 | 20 | unsigned long long total_bytes; 21 | 22 | void generic_reader(void) 23 | { 24 | size_t size; 25 | ssize_t bytes_red; 26 | __useconds_t slpt; 27 | 28 | while (wait_signal == 1) { 29 | size = SCALED_RAND(MIN_SIZE, MAX_SIZE); 30 | while (size > 0 && wait_signal == 1) { 31 | bytes_red = read(fd, buffer, size < MAX_READ_SIZE ? 32 | size : MAX_READ_SIZE); 33 | if (bytes_red == 0) { 34 | /* assume that files are long enough that 35 | * caching effects are negligible (e.g., 36 | * with 3 files of 1GB each, after all the 37 | * readers reach the end 3GB of data have 38 | * made it through the system buffer 39 | * cache...)
*/ 40 | lseek(fd, 0, SEEK_SET); 41 | } else if (bytes_red < 0) { 42 | printf("Error while reading from input\n"); 43 | exit(-1); 44 | } 45 | total_bytes += bytes_red; 46 | size -= bytes_red; 47 | } 48 | slpt = SCALED_RAND(MIN_SLEEP, MAX_SLEEP); 49 | usleep(slpt * 1000); 50 | } 51 | printf("%llu\n", total_bytes); 52 | } 53 | 54 | void sighandler(int signum) 55 | { 56 | (void)signum; 57 | wait_signal = 0; 58 | } 59 | 60 | int main(int argc, char *argv[]) 61 | { 62 | if (argc != 2) { 63 | printf("Usage: reader \n"); 64 | exit(-1); 65 | } 66 | 67 | fd = open(argv[1], O_RDONLY); 68 | if (fd == -1) { 69 | printf("Unable to open %s\n", argv[1]); 70 | exit(-1); 71 | } 72 | 73 | if (signal(SIGUSR1, sighandler) == SIG_ERR) { 74 | printf("Unable to assign an handler to SIGUSR1\n"); 75 | exit(-1); 76 | } 77 | 78 | generic_reader(); 79 | 80 | return 0; 81 | } 82 | 83 | -------------------------------------------------------------------------------- /video_streaming/vlc-0.8.6.c-limit-loss-rate.patch: -------------------------------------------------------------------------------- 1 | diff -Naurp tmp/vlc-0.8.6.c/modules/access_output/udp.c vlc-0.8.6.c/modules/access_output/udp.c 2 | --- tmp/vlc-0.8.6.c/modules/access_output/udp.c 2007-06-16 14:25:07.000000000 +0000 3 | +++ vlc-0.8.6.c/modules/access_output/udp.c 2008-02-14 18:04:07.000000000 +0000 4 | @@ -92,6 +92,11 @@ static void Close( vlc_object_t * ); 5 | "directly, without trying to fill the MTU (ie, " \ 6 | "without trying to make the biggest possible packets " \ 7 | "in order to improve streaming)." ) 8 | +#define LOSS_TEXT ("Max loss rate (x 1000)") 9 | +#define LOSS_LONGTEXT ("Maximum loss rate accepted over Window packets.") 10 | + 11 | +#define WINDOW_TEXT ("Window (pkts)") 12 | +#define WINDOW_LONGTEXT ("Window for packet loss calculation.") 13 | 14 | vlc_module_begin(); 15 | set_description( _("UDP stream output") ); 16 | @@ -106,6 +111,10 @@ vlc_module_begin(); 17 | add_suppressed_integer( SOUT_CFG_PREFIX "late" ); 18 | add_bool( SOUT_CFG_PREFIX "raw", 0, NULL, RAW_TEXT, RAW_LONGTEXT, 19 | VLC_TRUE ); 20 | + add_integer( SOUT_CFG_PREFIX "loss", 0, NULL, LOSS_TEXT, LOSS_LONGTEXT, 21 | + VLC_TRUE ); 22 | + add_integer( SOUT_CFG_PREFIX "win", 0, NULL, WINDOW_TEXT, WINDOW_LONGTEXT, 23 | + VLC_TRUE ); 24 | 25 | set_capability( "sout access", 100 ); 26 | add_shortcut( "udp" ); 27 | @@ -122,6 +131,8 @@ static const char *ppsz_sout_options[] = 28 | "ttl", 29 | "group", 30 | "raw", 31 | + "loss", 32 | + "win", 33 | NULL 34 | }; 35 | 36 | @@ -145,6 +156,8 @@ typedef struct sout_access_thread_t 37 | int64_t i_caching; 38 | int i_group; 39 | 40 | + float i_max_loss; 41 | + 42 | block_fifo_t *p_empty_blocks; 43 | 44 | } sout_access_thread_t; 45 | @@ -281,10 +294,13 @@ static int Open( vlc_object_t *p_this ) 46 | if( val.b_bool ) p_access->pf_write = WriteRaw; 47 | else p_access->pf_write = Write; 48 | 49 | + var_Get( p_access, SOUT_CFG_PREFIX "loss", &val ); 50 | + p_sys->p_thread->i_max_loss = val.i_int / 1000.0; 51 | + 52 | p_access->pf_seek = Seek; 53 | 54 | - msg_Dbg( p_access, "udp access output opened(%s:%d)", 55 | - psz_dst_addr, i_dst_port ); 56 | + msg_Dbg( p_access, "udp access output opened(%s:%d) loss=%f", 57 | + psz_dst_addr, i_dst_port, p_sys->p_thread->i_max_loss ); 58 | 59 | free( psz_dst_addr ); 60 | 61 | @@ -500,6 +516,12 @@ static block_t *NewUDPPacket( sout_acces 62 | return p_buffer; 63 | } 64 | 65 | +/* 66 | + * maximum interarrival time (in packets) with a loss < i_max_loss 67 | + */ 68 | +#define BURST_WINDOW(p_this) 
((p_this)->i_max_loss * 100) 69 | +#define MAX_UDELAY 1000000 70 | + 71 | /***************************************************************************** 72 | * ThreadWrite: Write a packet on the network at the good time. 73 | *****************************************************************************/ 74 | @@ -509,6 +531,7 @@ static void ThreadWrite( vlc_object_t *p 75 | mtime_t i_date_last = -1; 76 | mtime_t i_to_send = p_thread->i_group; 77 | int i_dropped_packets = 0; 78 | + int i_idx = 0, i_last_bad = -1; 79 | #if defined(WIN32) || defined(UNDER_CE) 80 | char strerror_buf[WINSOCK_STRERROR_SIZE]; 81 | # define strerror( x ) winsock_strerror( strerror_buf ) 82 | @@ -555,6 +578,7 @@ static void ThreadWrite( vlc_object_t *p 83 | } 84 | } 85 | 86 | + i_idx++; 87 | i_to_send--; 88 | if( !i_to_send || (p_pk->i_flags & BLOCK_FLAG_CLOCK) ) 89 | { 90 | @@ -580,6 +604,18 @@ static void ThreadWrite( vlc_object_t *p 91 | msg_Dbg( p_thread, "packet has been sent too late (" I64Fd ")", 92 | i_sent - i_date ); 93 | } 94 | + if ( i_sent > i_date + MAX_UDELAY ) 95 | + { 96 | + msg_Dbg( p_thread, "packet %d late for %fs buffering", i_idx, MAX_UDELAY/1000000.0 ); 97 | + if( i_last_bad != -1 && 98 | + i_idx - i_last_bad < BURST_WINDOW(p_thread) ) 99 | + { 100 | + msg_Dbg( p_thread, "MAX_LOSS_RATE exceeded (%d-%d/%.0f)", 101 | + i_idx, i_last_bad, 102 | + BURST_WINDOW(p_thread) ); 103 | + } 104 | + i_last_bad = i_idx; 105 | + } 106 | #endif 107 | 108 | block_FifoPut( p_thread->p_empty_blocks, p_pk ); 109 | -------------------------------------------------------------------------------- /video_streaming/vlc-1.0.6-limit-loss-rate.patch: -------------------------------------------------------------------------------- 1 | --- vlc-1.0.6.orig//modules/access_output/udp.c 2010-12-19 06:53:43.202721600 +0000 2 | +++ vlc-1.0.6/modules/access_output/udp.c 2011-01-05 10:37:08.822512000 +0000 3 | @@ -76,6 +76,12 @@ 4 | "helps reducing the scheduling load on " \ 5 | "heavily-loaded systems." 
) 6 | 7 | +#define LOSS_TEXT ("Max loss rate (x 1000)") 8 | +#define LOSS_LONGTEXT ("Maximum loss rate accepted over Window packets.") 9 | + 10 | +#define WINDOW_TEXT ("Window (pkts)") 11 | +#define WINDOW_LONGTEXT ("Window for packet loss calculation.") 12 | + 13 | vlc_module_begin () 14 | set_description( N_("UDP stream output") ) 15 | set_shortname( "UDP" ) 16 | @@ -87,6 +93,11 @@ 17 | add_obsolete_integer( SOUT_CFG_PREFIX "late" ) 18 | add_obsolete_bool( SOUT_CFG_PREFIX "raw" ) 19 | 20 | + add_integer( SOUT_CFG_PREFIX "loss", 0, NULL, LOSS_TEXT, LOSS_LONGTEXT, 21 | + true ) 22 | + add_integer( SOUT_CFG_PREFIX "win", 0, NULL, WINDOW_TEXT, WINDOW_LONGTEXT, 23 | + true ) 24 | + 25 | set_capability( "sout access", 0 ) 26 | add_shortcut( "udp" ) 27 | set_callbacks( Open, Close ) 28 | @@ -99,6 +110,8 @@ 29 | static const char *const ppsz_sout_options[] = { 30 | "caching", 31 | "group", 32 | + "loss", 33 | + "win", 34 | NULL 35 | }; 36 | 37 | @@ -124,6 +137,7 @@ 38 | int i_handle; 39 | bool b_mtu_warning; 40 | size_t i_mtu; 41 | + float i_max_loss; 42 | 43 | block_fifo_t *p_fifo; 44 | block_fifo_t *p_empty_blocks; 45 | @@ -234,8 +248,13 @@ 46 | } 47 | 48 | p_access->pf_write = Write; 49 | + 50 | + p_sys->i_max_loss = 51 | + var_GetInteger( p_access, SOUT_CFG_PREFIX "loss" ) / 1000.0; 52 | p_access->pf_seek = Seek; 53 | p_access->pf_control = Control; 54 | + msg_Dbg( p_access, "udp access output opened(dst_port %d) loss=%f", 55 | + i_dst_port, p_sys->i_max_loss ); 56 | 57 | return VLC_SUCCESS; 58 | } 59 | @@ -400,6 +419,12 @@ 60 | return p_buffer; 61 | } 62 | 63 | +/* 64 | + * maximum interarrival time (in packets) with a loss < i_max_loss 65 | + */ 66 | +#define BURST_WINDOW(p_this) ((p_this)->i_max_loss * 100) 67 | +#define MAX_UDELAY 1000000 68 | + 69 | /***************************************************************************** 70 | * ThreadWrite: Write a packet on the network at the good time. 
71 | *****************************************************************************/ 72 | @@ -410,8 +435,9 @@ 73 | mtime_t i_date_last = -1; 74 | const unsigned i_group = var_GetInteger( p_access, 75 | SOUT_CFG_PREFIX "group" ); 76 | - mtime_t i_to_send = i_group; 77 | - unsigned i_dropped_packets = 0; 78 | + volatile mtime_t i_to_send = i_group; 79 | + volatile unsigned i_dropped_packets = 0; 80 | + volatile int i_idx = 0, i_last_bad = -1; 81 | 82 | for (;;) 83 | { 84 | @@ -442,6 +468,8 @@ 85 | } 86 | 87 | block_cleanup_push( p_pk ); 88 | + 89 | + i_idx++ ; 90 | i_to_send--; 91 | if( !i_to_send || (p_pk->i_flags & BLOCK_FLAG_CLOCK) ) 92 | { 93 | @@ -465,8 +493,22 @@ 94 | msg_Dbg( p_access, "packet has been sent too late (%"PRId64 ")", 95 | i_sent - i_date ); 96 | } 97 | + if ( i_sent > i_date + MAX_UDELAY ) 98 | + { 99 | + msg_Dbg( p_access, 100 | + "packet %d late for %fs buffering", 101 | + i_idx, MAX_UDELAY/1000000.0 ); 102 | + if( i_last_bad != -1 && 103 | + i_idx - i_last_bad < BURST_WINDOW(p_sys) ) 104 | + { 105 | + msg_Dbg( p_access, "MAX_LOSS_RATE exceeded (%d-%d/%.0f)", 106 | + i_idx, i_last_bad, 107 | + BURST_WINDOW(p_sys) ); 108 | + } 109 | + i_last_bad = i_idx; 110 | + } 111 | #endif 112 | - 113 | + 114 | block_FifoPut( p_sys->p_empty_blocks, p_pk ); 115 | 116 | i_date_last = i_date; 117 | -------------------------------------------------------------------------------- /video_streaming/vlc-2.1.4-limit-loss-rate.patch: -------------------------------------------------------------------------------- 1 | From 36f81713c2ef8015dbedfcdcded5df0a0cf469a0 Mon Sep 17 00:00:00 2001 2 | From: Bruno George Moraes 3 | Date: Wed, 30 Jul 2014 02:43:16 +0200 4 | Subject: [PATCH] modules/udp.c: limit loss rate 5 | 6 | Signed-off-by: Paolo Valente 7 | Signed-off-by: Bruno George Moraes 8 | --- 9 | modules/access_output/udp.c | 46 +++++++++++++++++++++++++++++++++++++++++++-- 10 | 1 file changed, 44 insertions(+), 2 deletions(-) 11 | 12 | diff --git a/modules/access_output/udp.c b/modules/access_output/udp.c 13 | index 5c04c50..1ea40ac 100644 14 | --- a/modules/access_output/udp.c 15 | +++ b/modules/access_output/udp.c 16 | @@ -73,6 +73,12 @@ static void Close( vlc_object_t * ); 17 | "helps reducing the scheduling load on " \ 18 | "heavily-loaded systems." 
) 19 | 20 | +#define LOSS_TEXT ("Max loss rate (x 1000)") 21 | +#define LOSS_LONGTEXT ("Maximum loss rate accepted over Window packets.") 22 | + 23 | +#define WINDOW_TEXT ("Window (pkts)") 24 | +#define WINDOW_LONGTEXT ("Window for packet loss calculation.") 25 | + 26 | vlc_module_begin () 27 | set_description( N_("UDP stream output") ) 28 | set_shortname( "UDP" ) 29 | @@ -82,6 +88,11 @@ vlc_module_begin () 30 | add_integer( SOUT_CFG_PREFIX "group", 1, GROUP_TEXT, GROUP_LONGTEXT, 31 | true ) 32 | 33 | + add_integer( SOUT_CFG_PREFIX "loss", 0, LOSS_TEXT, LOSS_LONGTEXT, 34 | + true ) 35 | + add_integer( SOUT_CFG_PREFIX "win", 0, WINDOW_TEXT, WINDOW_LONGTEXT, 36 | + true ) 37 | + 38 | set_capability( "sout access", 0 ) 39 | add_shortcut( "udp" ) 40 | set_callbacks( Open, Close ) 41 | @@ -94,6 +105,8 @@ vlc_module_end () 42 | static const char *const ppsz_sout_options[] = { 43 | "caching", 44 | "group", 45 | + "loss", 46 | + "win", 47 | NULL 48 | }; 49 | 50 | @@ -118,6 +131,7 @@ struct sout_access_out_sys_t 51 | int i_handle; 52 | bool b_mtu_warning; 53 | size_t i_mtu; 54 | + float i_max_loss; 55 | 56 | block_fifo_t *p_fifo; 57 | block_fifo_t *p_empty_blocks; 58 | @@ -228,8 +242,13 @@ static int Open( vlc_object_t *p_this ) 59 | } 60 | 61 | p_access->pf_write = Write; 62 | + 63 | + p_sys->i_max_loss = 64 | + var_GetInteger( p_access, SOUT_CFG_PREFIX "loss" ) / 1000.0; 65 | p_access->pf_seek = Seek; 66 | p_access->pf_control = Control; 67 | + msg_Dbg( p_access, "udp access output opened(dst_port %d) loss=%f", 68 | + i_dst_port, p_sys->i_max_loss ); 69 | 70 | return VLC_SUCCESS; 71 | } 72 | @@ -394,6 +413,12 @@ static block_t *NewUDPPacket( sout_access_out_t *p_access, mtime_t i_dts) 73 | return p_buffer; 74 | } 75 | 76 | +/* 77 | + * maximum interarrival time (in packets) with a loss < i_max_loss 78 | + */ 79 | +#define BURST_WINDOW(p_this) ((p_this)->i_max_loss * 100) 80 | +#define MAX_UDELAY 1000000 81 | + 82 | /***************************************************************************** 83 | * ThreadWrite: Write a packet on the network at the good time. 
84 | *****************************************************************************/ 85 | @@ -404,8 +429,9 @@ static void* ThreadWrite( void *data ) 86 | mtime_t i_date_last = -1; 87 | const unsigned i_group = var_GetInteger( p_access, 88 | SOUT_CFG_PREFIX "group" ); 89 | - mtime_t i_to_send = i_group; 90 | - unsigned i_dropped_packets = 0; 91 | + volatile mtime_t i_to_send = i_group; 92 | + volatile unsigned i_dropped_packets = 0; 93 | + volatile int i_idx = 0, i_last_bad = -1; 94 | 95 | for (;;) 96 | { 97 | @@ -436,6 +462,8 @@ static void* ThreadWrite( void *data ) 98 | } 99 | 100 | block_cleanup_push( p_pk ); 101 | + 102 | + i_idx++ ; 103 | i_to_send--; 104 | if( !i_to_send || (p_pk->i_flags & BLOCK_FLAG_CLOCK) ) 105 | { 106 | @@ -459,6 +487,20 @@ static void* ThreadWrite( void *data ) 107 | msg_Dbg( p_access, "packet has been sent too late (%"PRId64 ")", 108 | i_sent - i_date ); 109 | } 110 | + if ( i_sent > i_date + MAX_UDELAY ) 111 | + { 112 | + msg_Dbg( p_access, 113 | + "packet %d late for %fs buffering", 114 | + i_idx, MAX_UDELAY/1000000.0 ); 115 | + if( i_last_bad != -1 && 116 | + i_idx - i_last_bad < BURST_WINDOW(p_sys) ) 117 | + { 118 | + msg_Dbg( p_access, "MAX_LOSS_RATE exceeded (%d-%d/%.0f)", 119 | + i_idx, i_last_bad, 120 | + BURST_WINDOW(p_sys) ); 121 | + } 122 | + i_last_bad = i_idx; 123 | + } 124 | #endif 125 | 126 | block_FifoPut( p_sys->p_empty_blocks, p_pk ); 127 | -- 128 | 2.0.3 129 | 130 | -------------------------------------------------------------------------------- /video_streaming/vlc_auto.sh: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2013 Fabio Checconi 2 | # Paolo Valente 3 | # 4 | # start both vlc server and fake clients, automatically called by vlc_test.sh 5 | 6 | . conf.sh 7 | 8 | out_file=$1 9 | 10 | # number of sarted video playbacks 11 | # 12 | num_started=0 13 | 14 | # start to broadcast $1, using netcat to send data towards the 15 | # telnet interface 16 | # 17 | function play() { 18 | echo -e "videolan\ncontrol $1 play\nquit\n" | nc 127.0.0.1 4212 > /dev/null 19 | } 20 | 21 | function shutdown() { 22 | killall vlc 23 | ssh ${CLIENT} killall nc 24 | rm -f vlm_bcast.cfg 25 | rm -f vlc_listen.sh 26 | } 27 | 28 | # check if there is any underrun, if so, print the maximum number of 29 | # videos that were playing BEFORE trying to add the last one 30 | # 31 | function check() { 32 | if grep "MAX_LOSS_RATE exceeded" vlc.log > /dev/null ; then 33 | shutdown 34 | echo $num_started VIDS 35 | echo $num_started VIDS > $out_file 36 | exit 0 37 | fi 38 | 39 | num_started=$((num_started+1)) 40 | } 41 | 42 | echo Cleaning up and preventively shutting down possible still alive processes ... 43 | shutdown 44 | 45 | for f in $FILES ; do 46 | echo $f 47 | nr=${f/:*/} 48 | ofs=${nr/v/} 49 | vid=${f/*:/} 50 | echo new $nr broadcast enabled >> vlm_bcast.cfg 51 | echo setup $nr input $vid >> vlm_bcast.cfg 52 | echo "setup $nr output #std{access=udp,mux=ts,dst=${SERVER_ADDR}:$((5554+$ofs))}" >> vlm_bcast.cfg 53 | echo "nc -l -u $((5554+$ofs)) -q -1 > /dev/null 2>&1 &" >> vlc_listen.sh 54 | # echo control $nr play >> vlm_bcast.cfg 55 | done 56 | 57 | echo "Starting remote listeners (fake clients) ..." 58 | scp vlc_listen.sh ${CLIENT}: 59 | ssh -f ${CLIENT} 'bash vlc_listen.sh' 60 | 61 | echo Starting VLC... 
62 | 63 | COMMAND="$VLC --ttl 12 -vvv --vlm-conf=vlm_bcast.cfg \ 64 | -I telnet --telnet-password videolan --sout-udp-loss $MAX_LOSS" 65 | echo $COMMAND 66 | $COMMAND > vlc.log 2>&1 & 67 | 68 | echo Waiting for vlc to bring up telnet interface ... 69 | while ! grep "telnet interface: telnet interface started" vlc.log > /dev/null ; do 70 | sleep 1 71 | done 72 | 73 | # the readers also start as soon as the telnet interface is up, so, to let them 74 | # settle, here we wait for a few seconds after they have been started too 75 | while ! [ -f noise_started ] ; do 76 | sleep 1 77 | done 78 | 79 | # let the readers settle 80 | sleep 10 81 | 82 | # finally, start streaming movies one after the other, provided that 83 | # the maximum loss rate is not reached 84 | for f in $FILES ; do 85 | nr=${f/:*/} 86 | echo Starting $nr 87 | play $nr 88 | sleep $VLC_VIDEO_DELAY 89 | check 90 | done 91 | 92 | echo NO MORE MOVIES TO PLAY 93 | echo Shutting down... 94 | shutdown 95 | echo PLAYED ALL AVAILABLE MOVIES > $out_file 96 | 97 | -------------------------------------------------------------------------------- /video_streaming/vlc_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (C) 2013 Fabio Checconi 3 | # Paolo Valente 4 | # 5 | # main script, run me as super-user to run the whole test 6 | # super-user privileges are needed to switch between schedulers and the like; 7 | # the other tasks, e.g., the vlc server itself, are executed as 8 | # the (possibly unprivileged) user you prefer; set the USER parameter in conf.sh 9 | # to the username you prefer 10 | 11 | . conf.sh 12 | . ../config_params.sh 13 | . ../utilities/lib_utils.sh 14 | 15 | # PARAMS: size budget nfiles sched 16 | function start_noise() { 17 | size=$1 18 | budget=$2 19 | nfiles=$3 20 | sched=$4 21 | 22 | rm -f vlc.log noise_started # cleanup in case these files were not 23 | # properly removed at the right moment 24 | out=log-${size}M-b${budget} 25 | 26 | if [ $file_location != $BASE_DIR ]; then 27 | for f in ${files[$nfiles]} ; do 28 | umount `dirname $f` 29 | done 30 | for f in ${files[$nfiles]} ; do 31 | mount `dirname $f` 32 | done 33 | else 34 | flush_caches 35 | fi 36 | 37 | # do not start before vlc is ready 38 | while ! grep "telnet interface: telnet interface started" vlc.log > \ 39 | /dev/null 2>&1 40 | do 41 | sleep 1 42 | done 43 | touch noise_started 44 | 45 | for dev in $DEVS; do 46 | echo $budget > /sys/block/$dev/queue/iosched/$sysfs_par 47 | done 48 | 49 | echo -- File size = $size MB / Budget = $budget -- 50 | echo $out 51 | /usr/bin/time -f %e --output="$rootdir/delay_$out" \ 52 | sh read_files.sh "$rootdir/single_logs/read_bytes-$out-file" \ 53 | "${files[$nfiles]}" 54 | delay=`cat "$rootdir/delay_$out"` 55 | rm noise_started 56 | 57 | echo noise duration: $delay 58 | echo ---------------------------------------------------------- 59 | echo 60 | } 61 | 62 | function stop_noise() { 63 | killall -USR1 reader 64 | } 65 | 66 | function show_usage() { 67 | echo Usage: vlc_test.sh start_iteration start_num_files sched 68 | echo " [end_iteration (default: start_iteration)]" 69 | echo " [end_num_files (default: start_num_files)]" 70 | echo " [bfq_max_budget (used only with bfq, default: 0)]" 71 | echo " [location of files to read (default: $BASE_DIR)]" 72 | echo " [stat dest dir (default: .)]" 73 | 74 | echo "Example: sudo ./vlc_test.sh 1 1 cfq 2 2 0 /tmp/test ."
75 | 76 | } 77 | 78 | start_iter=$1 79 | start_nfiles=$2 80 | sched=$3 81 | end_iter=${4:-$start_iter} 82 | end_nfiles=${5:-$start_nfiles} 83 | bfq_max_budget=${6-0} 84 | file_location=${7:-$BASE_DIR} 85 | out_dir=${8:-.} 86 | 87 | if [ $1 == "-h" ] ; then 88 | show_usage 89 | exit 90 | fi 91 | 92 | if [ $# -lt 3 ] ; then 93 | echo Too few parameters! 94 | show_usage 95 | exit 1 96 | fi 97 | 98 | if [ $sched != "bfq" ] && [ $sched != "cfq" ] ; then 99 | echo Unsupported scheduler type $sched 100 | exit 2 101 | fi 102 | 103 | if [ ! -f ./reader ]; then 104 | echo reader executable not present in current dir, compiling it ... 105 | echo gcc reader.c -o reader 106 | gcc reader.c -o reader 107 | fi 108 | if [ ! -f ./reader ]; then 109 | echo errors in creating reader, try to fix them 110 | echo aborting 111 | exit 3 112 | fi 113 | 114 | # kill possible still alive readers 115 | killall -9 reader > /dev/null 2>&1 116 | 117 | umask 2 118 | 119 | printf "start_iter end_iter start_nfiles end_nfiles bfq_max_budget sched\n" 120 | printf "%9d %8d %12d %10d %14d %5s\n\n" $start_iter $end_iter $start_nfiles \ 121 | $end_nfiles $bfq_max_budget $sched 122 | 123 | ver=${out_dir}/video_streaming_`date +%Y%m%d-%H%M` 124 | 125 | #measured in MB 126 | sizes="$(($FILE_SIZE_MB ))" 127 | max_size=$(($FILE_SIZE_MB)) 128 | 129 | echo Creating needed files $f 130 | if [ $file_location != $BASE_DIR ]; then 131 | # create files if needed 132 | for f in ${files[5]} ; do 133 | mount `dirname $f` 134 | if ! [ -f $f ] ; then 135 | echo Preparing $f 136 | dd if=/dev/zero of=$f bs=1M count=$max_size; 137 | else 138 | echo $f already exists 139 | fi 140 | umount `dirname $f` 141 | done 142 | 143 | files[1]="/mnt/${HIGH_LEV_DEV}20/1GB_file" 144 | files[2]="/mnt/${HIGH_LEV_DEV}5/1GB_file /mnt/${HIGH_LEV_DEV}34/1GB_file" 145 | files[3]="/mnt/${HIGH_LEV_DEV}5/1GB_file /mnt/${HIGH_LEV_DEV}20/1GB_file /mnt/${HIGH_LEV_DEV}34/1GB_file" 146 | files[4]="/mnt/${HIGH_LEV_DEV}5/1GB_file /mnt/${HIGH_LEV_DEV}12/1GB_file /mnt/${HIGH_LEV_DEV}20/1GB_file \ 147 | /mnt/${HIGH_LEV_DEV}34/1GB_file" 148 | files[5]="/mnt/${HIGH_LEV_DEV}5/1GB_file /mnt/${HIGH_LEV_DEV}12/1GB_file /mnt/${HIGH_LEV_DEV}20/1GB_file \ 149 | /mnt/${HIGH_LEV_DEV}27/1GB_file /mnt/${HIGH_LEV_DEV}34/1GB_file" 150 | 151 | else 152 | create_files 5 # at most five files are read in parallel at the moment 153 | flush_caches 154 | 155 | files[1]="${BASE_FILE_PATH}0" 156 | files[2]="${BASE_FILE_PATH}0 ${BASE_FILE_PATH}4" 157 | files[3]="${BASE_FILE_PATH}0 ${BASE_FILE_PATH}2 ${BASE_FILE_PATH}4" 158 | files[4]="${BASE_FILE_PATH}0 ${BASE_FILE_PATH}1 ${BASE_FILE_PATH}2 ${BASE_FILE_PATH}4" 159 | files[5]="${BASE_FILE_PATH}0 ${BASE_FILE_PATH}1 ${BASE_FILE_PATH}2 ${BASE_FILE_PATH}3 ${BASE_FILE_PATH}4" 160 | 161 | fi 162 | 163 | echo Recall: files should currently be $max_size MB long 164 | 165 | # unused at the moment: 166 | # echo noop > /sys/block/$DEV/queue/scheduler 167 | # rmmod $sched-iosched 168 | # modprobe $sched-iosched 169 | 170 | for dev in $DEVS; do 171 | echo echo "$sched > /sys/block/$dev/queue/scheduler" 172 | echo $sched > /sys/block/$dev/queue/scheduler 173 | done 174 | 175 | # set scheduler parameters 176 | if [ $sched == "bfq" ] ; then 177 | # in sectors 178 | start_budget=4096 179 | max_budget=$bfq_max_budget 180 | if [ $max_budget -lt $start_budget ] ; then 181 | start_budget=$max_budget 182 | fi 183 | sysfs_par="max_budget" 184 | elif [ $sched == "cfq" ] ; then 185 | # in ms 186 | dev=$(echo $DEVS | awk '{ print $1 }') 187 | start_budget=`cat
/sys/block/$dev/queue/iosched/slice_sync` 188 | max_budget=$start_budget 189 | sysfs_par="slice_sync" 190 | fi 191 | 192 | echo Starting tests ... 193 | # do the test 194 | for ((iteration = $start_iter; iteration <= $end_iter; \ 195 | iteration++)) ; do 196 | for ((nfiles = $start_nfiles; nfiles <= $end_nfiles; \ 197 | nfiles++)) ; do 198 | rootdir="$ver/repetition${iteration}/nfiles${nfiles}/$sched" 199 | mkdir -p "$rootdir/single_logs" 200 | for size in $sizes ; do 201 | for ((budget = $start_budget; budget <= $max_budget; \ 202 | budget *= 2)) ; do 203 | 204 | echo 205 | echo Repetition $iteration, num_readers $nfiles, budget $budget 206 | out=log-${size}M-b${budget} 207 | start_noise $size $budget $nfiles $sched & 208 | iostat -tmd /dev/$HIGH_LEV_DEV 5 > $rootdir/${out}_iostat & 209 | echo su $USER -c "bash vlc_auto.sh /tmp/videos" 210 | su $USER -c "bash vlc_auto.sh /tmp/videos" 211 | stop_noise 212 | killall iostat 213 | mv /tmp/videos $rootdir/${out}_videos 214 | 215 | sleep 4 216 | if ((max_budget==0)) ; then 217 | break # one iteration is enough 218 | fi 219 | done 220 | done 221 | done 222 | done 223 | 224 | rm vlc.log 225 | --------------------------------------------------------------------------------
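For a quick sanity check of the whole streaming test, a typical invocation might look like the sketch below; the parameter values are only examples (see show_usage in vlc_test.sh for the full argument list), and conf.sh is assumed to be already filled in for your system.

    # from the video_streaming/ directory: one repetition, ramping from 1 to 3
    # parallel noise readers, with bfq and the default max_budget (0)
    sudo ./vlc_test.sh 1 1 bfq 1 3

Results end up under ${out_dir}/video_streaming_<date>/repetition<i>/nfiles<n>/<sched>/, together with the per-run iostat logs and the outcome written by vlc_auto.sh ("<N> VIDS" if the maximum loss rate was exceeded after N movies, or "PLAYED ALL AVAILABLE MOVIES" otherwise).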