├── .gitignore ├── LICENSE ├── README.md ├── bart ├── __init__.py ├── common │ ├── Analyzer.py │ ├── Utils.py │ ├── __init__.py │ └── signal.py ├── sched │ ├── SchedAssert.py │ ├── SchedMatrix.py │ ├── SchedMultiAssert.py │ ├── __init__.py │ ├── functions.py │ └── pelt.py ├── thermal │ ├── ThermalAssert.py │ └── __init__.py └── version.py ├── docs ├── api_reference │ ├── .gitignore │ ├── Makefile │ ├── conf.py │ └── index.rst ├── examples │ └── thermal.py └── notebooks │ ├── sched │ └── SchedDeadline.ipynb │ └── thermal │ └── Thermal.ipynb ├── requirements.txt ├── setup.cfg ├── setup.py └── tests ├── pelt.py ├── raw_trace.dat ├── test_common_utils.py ├── test_pelt_sim.py ├── test_sched_assert.py ├── test_sched_functions.py ├── test_signal.py ├── trace.raw.txt ├── trace.txt └── utils_tests.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .ipynb_checkpoints 3 | example_trace_dat* 4 | /dist/ 5 | /build/ 6 | /bart_py.egg-info/ 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. 
For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | BART [![Build Status](https://travis-ci.org/ARM-software/bart.svg?branch=master)](https://travis-ci.org/ARM-software/bart) [![Version](https://img.shields.io/pypi/v/bart-py.svg)](https://pypi.python.org/pypi/bart-py) 2 | ==== 3 | 4 | The Behavioural Analysis and Regression Toolkit is based on 5 | [TRAPpy](https://github.com/ARM-software/trappy). The primary goal is to assert 6 | behaviours using the FTrace output from the kernel. 7 | 8 | ## Target Audience 9 | 10 | The framework is designed to cater to a wide range of audience. Aiding 11 | developers as well as automating the testing of "difficult to test" behaviours. 12 | 13 | #### Kernel Developers 14 | 15 | Making sure that the code that you are writing is doing the right thing. 16 | 17 | #### Performance Engineers 18 | 19 | Plotting/Asserting performance behaviours between different revisions of the 20 | kernel. 
21 | 22 | #### Quality Assurance/Release Engineers 23 | 24 | Verifying behaviours when different components/patches are integrated. 25 | 26 | # Installation 27 | 28 | The following instructions are for Ubuntu 14.04 LTS but they should 29 | also work with Debian jessie. Older versions of Ubuntu or Debian 30 | (e.g. Ubuntu 12.04 or Debian wheezy) will likely require to install 31 | more packages from pip as the ones present in Ubuntu 12.04 or Debian 32 | wheezy will probably be too old. 33 | 34 | ## Required dependencies 35 | 36 | #### Install additional tools required for some tests and functionalities 37 | 38 | $ sudo apt install trace-cmd kernelshark 39 | 40 | #### Install the Python package manager 41 | 42 | $ sudo apt install python-pip python-dev 43 | 44 | #### Install required python packages 45 | 46 | $ sudo apt install libfreetype6-dev libpng12-dev python-nose 47 | $ sudo pip install hypothesis numpy matplotlib pandas ipython[all] 48 | $ sudo pip install --upgrade trappy 49 | 50 | `ipython[all]` will install [IPython 51 | Notebook](http://ipython.org/notebook.html), a web based interactive 52 | python programming interface. It is required if you plan to use interactive 53 | plotting in BART. 54 | 55 | #### Install BART 56 | 57 | $ sudo pip install --upgrade bart-py 58 | 59 | # For developers 60 | 61 | Instead of installing TRAPpy and BART using `pip` you should clone the repositories: 62 | 63 | $ git clone git@github.com:ARM-software/bart.git 64 | $ git clone git@github.com:ARM-software/trappy.git 65 | 66 | Add the directories to your PYTHONPATH 67 | 68 | $ export PYTHONPATH=$BASE_DIR/bart:$BASE_DIR/trappy:$PYTHONPATH 69 | 70 | 71 | # Trace Analysis Language 72 | 73 | BART also provides a generic Trace Analysis Language, which allows the user to 74 | construct complex relation statements on trace data and assert their expected 75 | behaviours. 
The usage of the Analyzer module can be seen for the thermal 76 | behaviours 77 | [here](https://github.com/ARM-software/bart/blob/master/docs/notebooks/thermal/Thermal.ipynb) 78 | 79 | # Scheduler Assertions 80 | 81 | Enables assertion and the calculation of the following parameters: 82 | 83 | #### Runtime 84 | 85 | The total time that the task spent on a CPU executing. 86 | 87 | #### Switch 88 | 89 | Assert that a task switched between CPUs/Clusters in a given window of time. 90 | 91 | #### Duty Cycle 92 | 93 | The ratio of the execution time to the total time. 94 | 95 | #### Period 96 | 97 | The average difference between two switch-in or two switch-out events of a 98 | task. 99 | 100 | #### First CPU 101 | 102 | The first CPU that a task ran on. 103 | 104 | #### Residency 105 | 106 | Calculate and assert the total residency of a task on a CPU or cluster. 107 | 108 | #### Examples 109 | 110 | The Scheduler assertions also use TRAPpy's EventPlot to provide a `kernelshark` 111 | like timeline for the tasks under consideration. (in IPython notebooks). 112 | 113 | A notebook explaining the usage of the framework for asserting the deadline 114 | scheduler behaviours can be seen 115 | [here](https://rawgit.com/sinkap/0abbcc4918eb228b8887/raw/a1b4d6e0079f4ea0368d595d335bc340616501ff/SchedDeadline.html). 116 | 117 | # API reference 118 | 119 | The API reference can be found in https://pythonhosted.org/bart-py 120 | -------------------------------------------------------------------------------- /bart/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | 16 | """Initialization for bart""" 17 | 18 | import bart.sched 19 | import bart.common 20 | import bart.thermal 21 | from bart.version import __version__ 22 | -------------------------------------------------------------------------------- /bart/common/Analyzer.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | 16 | """Allow the user to assert various conditions 17 | based on the grammar defined in trappy.stats.grammar. The class is 18 | also intended to have aggregator based functionality. This is not 19 | implemented yet. 
class Analyzer(object):

    """Assert conditions on trace data using the grammar defined in
    :mod:`trappy.stats.grammar`.

    :param data: TRAPpy FTrace Object
    :type data: :mod:`trappy.ftrace.FTrace`

    :param config: A dictionary of variables, classes
        and functions that can be used in the statements
    :type config: dict
    """

    def __init__(self, data, config, **kwargs):
        self._parser = Parser(data, config, **kwargs)

    def assertStatement(self, statement, select=None):
        """Solve the statement for a boolean result

        :param statement: A string representing a valid
            :mod:`trappy.stats.grammar` statement
        :type statement: str

        :param select: If the result represents a boolean
            mask and the data was derived from a TRAPpy event
            with a pivot value. The :code:`select` can be
            used to select a particular pivot value
        :type select: :mod:`pandas.DataFrame` column

        :return: a truth value when the statement solves to a boolean
            or a boolean mask; otherwise the raw result is returned
            and a warning is emitted
        """

        result = self.getStatement(statement, select=select)

        if isinstance(result, pd.DataFrame):
            # Reduce a boolean mask to a single truth value: every
            # element of every column must be True
            result = result.all().all()
        elif not isinstance(result, (bool, np.bool_)):  # pylint: disable=no-member
            warnings.warn("solution of {} is not boolean".format(statement))

        return result

    def getStatement(self, statement, reference=False, select=None):
        """Evaluate the statement and return the raw result

        :param statement: A string representing a valid
            :mod:`trappy.stats.grammar` statement
        :type statement: str

        :param reference: Pass the selected result through the
            parser's reference handling
        :type reference: bool

        :param select: Column used to select a particular pivot
            value from a pivoted result
        :type select: :mod:`pandas.DataFrame` column
        """

        result = self._parser.solve(statement)

        # Scalar solutions (including numpy scalars) are returned as-is
        # pylint: disable=no-member
        if np.isscalar(result):
            return result
        # pylint: enable=no-member

        if select is not None and len(result):
            result = result[select]
            if reference:
                result = self._parser.ref(result)

        return result
# Text type compatibility: "basestring" only exists on Python 2; on
# Python 3 plain str covers all text.
try:
    _STRING_TYPES = basestring  # pylint: disable=undefined-variable
except NameError:
    _STRING_TYPES = str


def listify(to_select):
    """Utility function to handle both single and list inputs.

    :param to_select: a single value or a list of values
    :return: *to_select* unchanged if it is already a list,
        otherwise a single-element list wrapping it
    """

    if not isinstance(to_select, list):
        to_select = [to_select]

    return to_select


def init_ftrace(trace):
    """Initialize the FTrace Object

    :param trace: Path for the trace file
        or a trace object
    :type trace: str, :mod:`trappy.ftrace.FTrace`

    :raises ValueError: if *trace* is neither a path nor a trace object
    """

    if isinstance(trace, _STRING_TYPES):
        return trappy.FTrace(trace)

    elif isinstance(trace, trappy.BareTrace):
        return trace

    raise ValueError("Invalid trace Object")


def select_window(series, window):
    """Helper Function to select a portion of
    a pandas time series

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A (start, stop) tuple indicating a time window;
        a falsy window returns the series unchanged
    :type window: tuple
    """

    if not window:
        return series

    start, stop = window
    ix = series.index
    # Inclusive on both ends of the window
    selector = ((ix >= start) & (ix <= stop))
    window_series = series[selector]
    return window_series


def area_under_curve(series, sign=None, method="trapz", step="post"):
    """Return the area under the time series curve (Integral)

    :param series: The time series to be integrated
    :type series: :mod:`pandas.Series`

    :param sign: Clip the data for the area in positive
        or negative regions. Can have two values

        - `"+"`: clip negative values to zero
        - `"="`: clip positive values to zero
    :type sign: str

    :param method: The method for area calculation. This can
        be any of the integration methods supported in `numpy`
        or `rect`
    :type method: str

    :param step: The step behaviour for the `rect` method
        (`"post"` or `"pre"`)
    :type step: str

    *Rectangular Method*

    Consider the series :code:`[0, 0, 2, 2, 2, 1, 1]` with a
    uniform unit-spaced index:

    - Step "post": each value holds until the *next* sample

      .. math::

          \\sum_{k=0}^{N-2} (x_{k+1} - x_k) \\times f(x_k) \\\\
          (2 \\times 3) + (1 \\times 1) = 7

    - Step "pre": each value holds since the *previous* sample

      .. math::

          \\sum_{k=1}^{N-1} (x_k - x_{k-1}) \\times f(x_k) \\\\
          (2 \\times 3) + (1 \\times 2) = 8
    """

    # clip_lower/clip_upper were removed from pandas; the keyword
    # form is available on both old and new versions
    if sign == "+":
        series = series.clip(lower=0)
    elif sign == "=":
        series = series.clip(upper=0)

    series = series.dropna()

    if method == "rect":

        if step == "post":
            # Each value extends forward to the next sample
            values = series.values[:-1]
        elif step == "pre":
            # Each value extends backward to the previous sample
            values = series.values[1:]
        else:
            raise ValueError("Invalid Value for step: {}".format(step))

        return float((values * np.diff(series.index)).sum())

    # numpy >= 2.0 renamed trapz to trapezoid; keep the old name
    # working for callers
    if method == "trapz" and not hasattr(np, "trapz"):
        method = "trapezoid"

    if hasattr(np, method):
        np_integ_method = getattr(np, method)
        return np_integ_method(series.values, series.index)
    else:
        raise ValueError("Invalid method: {}".format(method))


def interval_sum(series, value=None, step="post"):
    """Return the sum of the intervals where the value of the series
    is equal to the expected value.

    .. note::

        The time/index values, in general, may not be uniform, which
        causes the result to differ between the **step-post** and
        **step-pre** behaviours.

    .. code::

        import pandas

        values = [0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1]
        index = [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12]
        series = pandas.Series(values, index=index)

    For the value 1:

    - **Step-Post** (a value holds until the next sample):

      .. math::

          (8-2) + (10-9) + (12-11) = 6 + 1 + 1 = 8

    - **Step-Pre** (a value holds since the previous sample):

      .. math::

          (5-1) + (9-8) + (12-10) = 4 + 1 + 2 = 7

    :param series: The time series data
    :type series: :mod:`pandas.Series`

    :param value: The value to be checked for in the series. If the
        value is None, the truth value of the elements in the
        series will be used
    :type value: element

    :param step: The step behaviour as described above
        ::

            step="post"
            step="pre"
    :type step: str

    :raises ValueError: if *step* is neither "post" nor "pre"
    """

    index = series.index
    array = series.values

    # Indices of the last sample of each run of equal values; the
    # final sample is always a run boundary
    time_splits = np.append(np.where(np.diff(array) != 0), len(array) - 1)

    prev = 0
    time = 0

    if step == "post":
        step_post = True
    elif step == "pre":
        step_post = False
    else:
        raise ValueError("Invalid value for step: {}".format(step))

    for split in time_splits:

        first_val = series.iloc[split]
        # Compare against value explicitly when one is given (value=0
        # is a legitimate target); only fall back to truthiness when
        # value is None
        check = (first_val == value) if value is not None else first_val
        if check:
            start = prev
            end = split

            if step_post:
                # The run extends forward to the next sample, unless
                # it ends at the final sample of the series
                end = split + 1 if split < len(series) - 1 else split
            else:
                # The run extends backward to the previous sample,
                # unless it starts at the first sample of the series
                start = prev - 1 if prev > 0 else prev

            time += index[end] - index[start]

        prev = split + 1

    return float(time)
-------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | 16 | """Initialization for bart.common""" 17 | 18 | 19 | from bart.common import Utils 20 | from bart.common import Analyzer 21 | -------------------------------------------------------------------------------- /bart/common/signal.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | 16 | """ 17 | **Signals** 18 | 19 | - Definition 20 | 21 | A signal is a string representation of a TRAPpy event and the 22 | column in the same event. The signal can be of two types: 23 | 24 | - *Pivoted Signal* 25 | 26 | A pivoted signal has a pivot specified in its event class. 
class SignalCompare(object):

    """Compare two TRAPpy signals, optionally decomposed per pivot value.

    :param data: TRAPpy FTrace Object
    :type data: :mod:`trappy.ftrace.FTrace`

    :param sig_a: The first signal
    :type sig_a: str

    :param sig_b: The second signal
    :type sig_b: str

    :param config: A dictionary of variables, classes
        and functions that can be used in the statements
    :type config: dict

    :param method: The method to be used for reindexing data
        This can be one of the standard :mod:`pandas.DataFrame`
        methods (eg. pad, bfill, nearest). The default is pad
        or use the last valid observation.
    :type method: str

    :param limit: The number of indices a value will be propagated
        when reindexing. The default is None
    :type limit: int

    :param fill: Whether to fill the NaNs in the data.
        The default value is True.
    :type fill: bool

    .. note::

        Both the signals must have the same pivots. For example:

        - Signal A has a pivot as :code:`"cpu"` which means that
          the trappy event (:mod:`trappy.base.Base`) has a pivot
          parameter which is equal to :code:`"cpu"`. Then the signal B
          should also have :code:`"cpu"` as its pivot.

        - Signal A and B can both have undefined or None
          as their pivots
    """

    def __init__(self, data, sig_a, sig_b, **kwargs):

        self._parser = Parser(
            data,
            config=kwargs.pop(
                "config",
                None),
            **kwargs)
        self._a = sig_a
        self._b = sig_b
        self._pivot_vals, self._pivot = self._get_signal_pivots()

        # Solve both signals up front: this validates them against the
        # grammar and caches their (possibly pivoted) data for the
        # comparison methods below.
        self._a_data = self._parser.solve(sig_a)
        self._b_data = self._parser.solve(sig_b)

    def _get_signal_pivots(self):
        """Internal function to check pivot conditions and
        return an intersection of pivot on the signals.

        :return: a ``(pivot_values, pivot)`` tuple. ``pivot`` is the
            common pivot column name, or ``False`` for non-pivoted
            signals (in which case ``pivot_values`` holds the grammar's
            default pivot placeholder).
        :raises RuntimeError: if the two signals have different pivots
        """

        sig_a_info = self._parser.inspect(self._a)
        sig_b_info = self._parser.inspect(self._b)

        if sig_a_info["pivot"] != sig_b_info["pivot"]:
            # BUGFIX: the two literals were previously joined without a
            # separating space, producing "signalsshould" in the message.
            raise RuntimeError("The pivot column for both signals "
                               "should be same (%s,%s)"
                               % (sig_a_info["pivot"], sig_b_info["pivot"]))

        if sig_a_info["pivot"]:
            # Only pivot values present in both signals can be compared
            pivot_vals = set(
                sig_a_info["pivot_values"]).intersection(sig_b_info["pivot_values"])
            pivoted = sig_a_info["pivot"]
        else:
            pivot_vals = [StatConf.GRAMMAR_DEFAULT_PIVOT]
            pivoted = False

        return pivot_vals, pivoted

    def conditional_compare(self, condition, **kwargs):
        """Conditionally compare two signals

        The conditional comparison of signals has two components:

        - **Value Coefficient** :math:`\\alpha_{v}` which measures the difference in values of
          of the two signals when the condition is true:

          .. math::

                \\alpha_{v} = \\frac{area\\_under\\_curve(S_A\\ |\\ C(t)\\ is\\ true)}
                {area\\_under\\_curve(S_B\\ |\\ C(t)\\ is\\ true)} \\\\

                \\alpha_{v} = \\frac{\\int S_A(\\{t\\ |\\ C(t)\\})dt}{\\int S_B(\\{t\\ |\\ C(t)\\})dt}

        - **Time Coefficient** :math:`\\alpha_{t}` which measures the time during which the
          condition holds true.

          .. math::

                \\alpha_{t} = \\frac{T_{valid}}{T_{total}}

        :param condition: A condition that returns a truth value and obeys the grammar syntax
            ::

                "event_x:sig_a > event_x:sig_b"

        :type condition: str

        :param method: The method for area calculation. This can
            be any of the integration methods supported in `numpy`
            or `rect`
        :type method: str

        :param step: The step behaviour for area and time
            summation calculation
        :type step: str

        Consider the two signals A and B as follows:

        .. code::

            A = [0, 0, 0, 3, 3, 0, 0, 0]
            B = [0, 0, 2, 2, 2, 2, 1, 1]


        .. code::


                             A = xxxx
            3                *xxxx*xxxx+             B = ----
                             |         |
            2           *----*----*----+
                        |    |         |
            1           |    |         *----*----+
                        |    |         |
            0  *x-x-*x-x+xxxx+         +xxxx*xxxx+
               0    1    2    3    4    5    6    7

        The condition:

        .. math::

            A > B

        is valid between T=3 and T=5. Therefore,

        .. math::

            \\alpha_v=1.5 \\\\
            \\alpha_t=\\frac{2}{7}

        :returns: There are two cases:

            - **Pivoted Signals**
              ::

                    {
                        "pivot_name" : {
                                "pval_1" : (v1,t1),
                                "pval_2" : (v2, t2)
                        }
                    }
            - **Non Pivoted Signals**

              The tuple of :math:`(\\alpha_v, \\alpha_t)`
        """

        if self._pivot:
            result = {self._pivot: {}}

        # Truth mask of the condition, indexed like the signal data
        mask = self._parser.solve(condition)
        step = kwargs.get("step", "post")

        for pivot_val in self._pivot_vals:

            a_piv = self._a_data[pivot_val]
            b_piv = self._b_data[pivot_val]

            # Value coefficient: ratio of the areas where the condition holds
            area = area_under_curve(a_piv[mask[pivot_val]], **kwargs)
            try:
                area /= area_under_curve(b_piv[mask[pivot_val]], **kwargs)
            except ZeroDivisionError:
                area = float("nan")

            # Time coefficient: fraction of the overlapping valid window
            # during which the condition holds
            duration = min(a_piv.last_valid_index(), b_piv.last_valid_index())
            duration -= max(a_piv.first_valid_index(),
                            b_piv.first_valid_index())
            duration = interval_sum(mask[pivot_val], step=step) / duration

            if self._pivot:
                result[self._pivot][pivot_val] = area, duration
            else:
                result = area, duration

        return result

    def get_overshoot(self, **kwargs):
        """Special case for :func:`conditional_compare`
        where the condition is:
        ::

            "sig_a > sig_b"

        :param method: The method for area calculation. This can
            be any of the integration methods supported in `numpy`
            or `rect`
        :type method: str

        :param step: The step behaviour for calculation of area
            and time summation
        :type step: str

        .. seealso::

            :func:`conditional_compare`
        """

        condition = " ".join([self._a, ">", self._b])
        return self.conditional_compare(condition, **kwargs)

    def get_undershoot(self, **kwargs):
        """Special case for :func:`conditional_compare`
        where the condition is:
        ::

            "sig_a < sig_b"

        :param method: The method for area calculation. This can
            be any of the integration methods supported in `numpy`
            or `rect`
        :type method: str

        :param step: The step behaviour for calculation of area
            and time summation
        :type step: str

        .. seealso::

            :func:`conditional_compare`
        """

        condition = " ".join([self._a, "<", self._b])
        return self.conditional_compare(condition, **kwargs)
20 | """ 21 | 22 | import trappy 23 | import itertools 24 | import math 25 | from trappy.stats.Aggregator import MultiTriggerAggregator 26 | from bart.sched import functions as sched_funcs 27 | from bart.common import Utils 28 | import numpy as np 29 | 30 | # pylint: disable=invalid-name 31 | # pylint: disable=too-many-arguments 32 | class SchedAssert(object): 33 | 34 | """The primary focus of this class is to assert and verify 35 | predefined scheduler scenarios. This does not compare parameters 36 | across runs 37 | 38 | :param ftrace: A single trappy.FTrace object 39 | or a path that can be passed to trappy.FTrace 40 | :type ftrace: :mod:`trappy.ftrace.FTrace` 41 | 42 | :param topology: A topology that describes the arrangement of 43 | CPU's on a system. This is useful for multi-cluster systems 44 | where data needs to be aggregated at different topological 45 | levels 46 | :type topology: :mod:`trappy.stats.Topology.Topology` 47 | 48 | :param execname: The execname of the task to be analysed 49 | 50 | .. note:: 51 | 52 | There should be only one PID that maps to the specified 53 | execname. If there are multiple PIDs :mod:`bart.sched.SchedMultiAssert` 54 | should be used 55 | 56 | :type execname: str 57 | 58 | :param pid: The process ID of the task to be analysed 59 | :type pid: int 60 | 61 | .. note: 62 | 63 | One of pid or execname is mandatory. 
If only execname 64 | is specified, The current implementation will fail if 65 | there are more than one processes with the same execname 66 | """ 67 | 68 | def __init__(self, ftrace, topology, execname=None, pid=None): 69 | 70 | ftrace = Utils.init_ftrace(ftrace) 71 | 72 | if not execname and not pid: 73 | raise ValueError("Need to specify at least one of pid or execname") 74 | 75 | self.execname = execname 76 | self._ftrace = ftrace 77 | self._pid = self._validate_pid(pid) 78 | self._aggs = {} 79 | self._topology = topology 80 | self._triggers = sched_funcs.sched_triggers(self._ftrace, self._pid, 81 | trappy.sched.SchedSwitch) 82 | self.name = "{}-{}".format(self.execname, self._pid) 83 | 84 | def _validate_pid(self, pid): 85 | """Validate the passed pid argument""" 86 | 87 | if not pid: 88 | pids = sched_funcs.get_pids_for_process(self._ftrace, 89 | self.execname) 90 | 91 | if len(pids) != 1: 92 | raise RuntimeError( 93 | "There should be exactly one PID {0} for {1}".format( 94 | pids, 95 | self.execname)) 96 | 97 | return pids[0] 98 | 99 | elif self.execname: 100 | 101 | pids = sched_funcs.get_pids_for_process(self._ftrace, 102 | self.execname) 103 | if pid not in pids: 104 | raise RuntimeError( 105 | "PID {0} not mapped to {1}".format( 106 | pid, 107 | self.execname)) 108 | else: 109 | self.execname = sched_funcs.get_task_name(self._ftrace, pid) 110 | 111 | return pid 112 | 113 | def _aggregator(self, aggfunc): 114 | """ 115 | Return an aggregator corresponding to the 116 | aggfunc, the aggregators are memoized for performance 117 | 118 | :param aggfunc: Function parameter that 119 | accepts a :mod:`pandas.Series` object and 120 | returns a vector/scalar 121 | 122 | :type: function(:mod:`pandas.Series`) 123 | """ 124 | 125 | if aggfunc not in self._aggs.keys(): 126 | self._aggs[aggfunc] = MultiTriggerAggregator(self._triggers, 127 | self._topology, 128 | aggfunc) 129 | return self._aggs[aggfunc] 130 | 131 | def getResidency(self, level, node, window=None, 
percent=False): 132 | """ 133 | Residency of the task is the amount of time it spends executing 134 | a particular group of a topological level. For example: 135 | :: 136 | 137 | from trappy.stats.Topology import Topology 138 | 139 | big = [1, 2] 140 | little = [0, 3, 4, 5] 141 | 142 | topology = Topology(clusters=[little, big]) 143 | 144 | s = SchedAssert(trace, topology, pid=123) 145 | s.getResidency("cluster", big) 146 | 147 | This will return the residency of the task on the big cluster. If 148 | percent is specified it will be normalized to the total runtime 149 | of the task 150 | 151 | :param level: The topological level to which the group belongs 152 | :type level: str 153 | 154 | :param node: The group of CPUs for which residency 155 | needs to calculated 156 | :type node: list 157 | 158 | :param window: A (start, end) tuple to limit the scope of the 159 | residency calculation. 160 | :type window: tuple 161 | 162 | :param percent: If true the result is normalized to the total runtime 163 | of the task and returned as a percentage 164 | :type percent: bool 165 | 166 | .. math:: 167 | 168 | R = \\frac{T_{group} \\times 100}{T_{total}} 169 | 170 | .. 
seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertResidency` 171 | """ 172 | 173 | # Get the index of the node in the level 174 | node_index = self._topology.get_index(level, node) 175 | 176 | agg = self._aggregator(sched_funcs.residency_sum) 177 | level_result = agg.aggregate(level=level, window=window) 178 | 179 | node_value = level_result[node_index] 180 | 181 | if percent: 182 | total = agg.aggregate(level="all", window=window)[0] 183 | node_value = node_value * 100 184 | node_value = node_value / total 185 | 186 | return node_value 187 | 188 | def assertResidency( 189 | self, 190 | level, 191 | node, 192 | expected_value, 193 | operator, 194 | window=None, 195 | percent=False): 196 | """ 197 | :param level: The topological level to which the group belongs 198 | :type level: str 199 | 200 | :param node: The group of CPUs for which residency 201 | needs to calculated 202 | :type node: list 203 | 204 | :param expected_value: The expected value of the residency 205 | :type expected_value: double 206 | 207 | :param operator: A binary operator function that returns 208 | a boolean. For example: 209 | :: 210 | 211 | import operator 212 | op = operator.ge 213 | assertResidency(level, node, expected_value, op) 214 | 215 | Will do the following check: 216 | :: 217 | 218 | getResidency(level, node) >= expected_value 219 | 220 | A custom function can also be passed: 221 | :: 222 | 223 | THRESHOLD=5 224 | def between_threshold(a, expected): 225 | return abs(a - expected) <= THRESHOLD 226 | 227 | :type operator: function 228 | 229 | :param window: A (start, end) tuple to limit the scope of the 230 | residency calculation. 231 | :type window: tuple 232 | 233 | :param percent: If true the result is normalized to the total runtime 234 | of the task and returned as a percentage 235 | :type percent: bool 236 | 237 | .. 
seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getResidency` 238 | """ 239 | node_value = self.getResidency(level, node, window, percent) 240 | return operator(node_value, expected_value) 241 | 242 | def getStartTime(self): 243 | """ 244 | :return: The first time the task ran across all the CPUs 245 | """ 246 | 247 | agg = self._aggregator(sched_funcs.first_time) 248 | result = agg.aggregate(level="all", value=sched_funcs.TASK_RUNNING) 249 | return min(result[0]) 250 | 251 | def getEndTime(self): 252 | """ 253 | :return: The first last time the task ran across 254 | all the CPUs 255 | """ 256 | 257 | agg = self._aggregator(sched_funcs.first_time) 258 | agg = self._aggregator(sched_funcs.last_time) 259 | result = agg.aggregate(level="all", value=sched_funcs.TASK_RUNNING) 260 | return max(result[0]) 261 | 262 | def _relax_switch_window(self, series, direction, window): 263 | """ 264 | direction == "left" 265 | return the last time the task was running 266 | if no such time exists in the window, 267 | extend the window's left extent to 268 | getStartTime 269 | 270 | direction == "right" 271 | return the first time the task was running 272 | in the window. 
If no such time exists in the 273 | window, extend the window's right extent to 274 | getEndTime() 275 | 276 | The function returns a None if 277 | len(series[series == TASK_RUNNING]) == 0 278 | even in the extended window 279 | """ 280 | 281 | series = series[series == sched_funcs.TASK_RUNNING] 282 | w_series = sched_funcs.select_window(series, window) 283 | start, stop = window 284 | 285 | if direction == "left": 286 | if len(w_series): 287 | return w_series.index.values[-1] 288 | else: 289 | start_time = self.getStartTime() 290 | w_series = sched_funcs.select_window( 291 | series, 292 | window=( 293 | start_time, 294 | start)) 295 | 296 | if not len(w_series): 297 | return None 298 | else: 299 | return w_series.index.values[-1] 300 | 301 | elif direction == "right": 302 | if len(w_series): 303 | return w_series.index.values[0] 304 | else: 305 | end_time = self.getEndTime() 306 | w_series = sched_funcs.select_window(series, window=(stop, end_time)) 307 | 308 | if not len(w_series): 309 | return None 310 | else: 311 | return w_series.index.values[0] 312 | else: 313 | raise ValueError("direction should be either left or right") 314 | 315 | def assertSwitch( 316 | self, 317 | level, 318 | from_node, 319 | to_node, 320 | window, 321 | ignore_multiple=True): 322 | """ 323 | This function asserts that there is context switch from the 324 | :code:`from_node` to the :code:`to_node`: 325 | 326 | :param level: The topological level to which the group belongs 327 | :type level: str 328 | 329 | :param from_node: The node from which the task switches out 330 | :type from_node: list 331 | 332 | :param to_node: The node to which the task switches 333 | :type to_node: list 334 | 335 | :param window: A (start, end) tuple to limit the scope of the 336 | residency calculation. 
337 | :type window: tuple 338 | 339 | :param ignore_multiple: If true, the function will ignore multiple 340 | switches in the window, If false the assert will be true if and 341 | only if there is a single switch within the specified window 342 | :type ignore_multiple: bool 343 | """ 344 | 345 | from_node_index = self._topology.get_index(level, from_node) 346 | to_node_index = self._topology.get_index(level, to_node) 347 | 348 | agg = self._aggregator(sched_funcs.csum) 349 | level_result = agg.aggregate(level=level) 350 | 351 | from_node_result = level_result[from_node_index] 352 | to_node_result = level_result[to_node_index] 353 | 354 | from_time = self._relax_switch_window(from_node_result, "left", window) 355 | if ignore_multiple: 356 | to_time = self._relax_switch_window(to_node_result, "left", window) 357 | else: 358 | to_time = self._relax_switch_window( 359 | to_node_result, 360 | "right", window) 361 | 362 | if from_time and to_time: 363 | if from_time < to_time: 364 | return True 365 | 366 | return False 367 | 368 | def getRuntime(self, window=None, percent=False): 369 | """Return the Total Runtime of a task 370 | 371 | :param window: A (start, end) tuple to limit the scope of the 372 | residency calculation. 373 | :type window: tuple 374 | 375 | :param percent: If True, the result is returned 376 | as a percentage of the total execution time 377 | of the run. 378 | :type percent: bool 379 | 380 | .. 
seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertRuntime` 381 | """ 382 | 383 | agg = self._aggregator(sched_funcs.residency_sum) 384 | run_time = agg.aggregate(level="all", window=window)[0] 385 | 386 | if percent: 387 | 388 | if window: 389 | begin, end = window 390 | total_time = end - begin 391 | else: 392 | total_time = self._ftrace.get_duration() 393 | 394 | run_time = run_time * 100 395 | run_time = run_time / total_time 396 | 397 | return run_time 398 | 399 | def assertRuntime( 400 | self, 401 | expected_value, 402 | operator, 403 | window=None, 404 | percent=False): 405 | """Assert on the total runtime of the task 406 | 407 | :param expected_value: The expected value of the runtime 408 | :type expected_value: double 409 | 410 | :param operator: A binary operator function that returns 411 | a boolean. For example: 412 | :: 413 | 414 | import operator 415 | op = operator.ge 416 | assertRuntime(expected_value, op) 417 | 418 | Will do the following check: 419 | :: 420 | 421 | getRuntime() >= expected_value 422 | 423 | A custom function can also be passed: 424 | :: 425 | 426 | THRESHOLD=5 427 | def between_threshold(a, expected): 428 | return abs(a - expected) <= THRESHOLD 429 | 430 | :type operator: function 431 | 432 | :param window: A (start, end) tuple to limit the scope of the 433 | residency calculation. 434 | :type window: tuple 435 | 436 | :param percent: If True, the result is returned 437 | as a percentage of the total execution time 438 | of the run. 439 | :type percent: bool 440 | 441 | .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getRuntime` 442 | """ 443 | 444 | run_time = self.getRuntime(window, percent) 445 | return operator(run_time, expected_value) 446 | 447 | def getPeriod(self, window=None, align="start"): 448 | """Return the period of the task in (ms) 449 | 450 | Let's say a task started execution at the following times: 451 | 452 | .. math:: 453 | 454 | T_1, T_2, ...T_n 455 | 456 | The period is defined as: 457 | 458 | .. 
math:: 459 | 460 | Median((T_2 - T_1), (T_4 - T_3), ....(T_n - T_{n-1})) 461 | 462 | :param window: A (start, end) tuple to limit the scope of the 463 | residency calculation. 464 | :type window: tuple 465 | 466 | :param align: 467 | :code:`"start"` aligns period calculation to switch-in events 468 | :code:`"end"` aligns the calculation to switch-out events 469 | :type param: str 470 | 471 | .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertPeriod` 472 | """ 473 | 474 | agg = self._aggregator(sched_funcs.period) 475 | deltas = agg.aggregate(level="all", window=window)[0] 476 | 477 | if not len(deltas): 478 | return float("NaN") 479 | else: 480 | return np.median(deltas) * 1000 481 | 482 | def assertPeriod( 483 | self, 484 | expected_value, 485 | operator, 486 | window=None, 487 | align="start"): 488 | """Assert on the period of the task 489 | 490 | :param expected_value: The expected value of the runtime 491 | :type expected_value: double 492 | 493 | :param operator: A binary operator function that returns 494 | a boolean. For example: 495 | :: 496 | 497 | import operator 498 | op = operator.ge 499 | assertPeriod(expected_value, op) 500 | 501 | Will do the following check: 502 | :: 503 | 504 | getPeriod() >= expected_value 505 | 506 | A custom function can also be passed: 507 | :: 508 | 509 | THRESHOLD=5 510 | def between_threshold(a, expected): 511 | return abs(a - expected) <= THRESHOLD 512 | 513 | :param window: A (start, end) tuple to limit the scope of the 514 | calculation. 515 | :type window: tuple 516 | 517 | :param align: 518 | :code:`"start"` aligns period calculation to switch-in events 519 | :code:`"end"` aligns the calculation to switch-out events 520 | :type param: str 521 | 522 | .. 
seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getPeriod` 523 | """ 524 | 525 | period = self.getPeriod(window, align) 526 | return operator(period, expected_value) 527 | 528 | def getDutyCycle(self, window): 529 | """Return the duty cycle of the task 530 | 531 | :param window: A (start, end) tuple to limit the scope of the 532 | calculation. 533 | :type window: tuple 534 | 535 | Duty Cycle: 536 | The percentage of time the task spends executing 537 | in the given window of time 538 | 539 | .. math:: 540 | 541 | \delta_{cycle} = \\frac{T_{exec} \\times 100}{T_{window}} 542 | 543 | .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertDutyCycle` 544 | """ 545 | 546 | return self.getRuntime(window, percent=True) 547 | 548 | def assertDutyCycle(self, expected_value, operator, window): 549 | """ 550 | :param operator: A binary operator function that returns 551 | a boolean. For example: 552 | :: 553 | 554 | import operator 555 | op = operator.ge 556 | assertPeriod(expected_value, op) 557 | 558 | Will do the following check: 559 | :: 560 | 561 | getPeriod() >= expected_value 562 | 563 | A custom function can also be passed: 564 | :: 565 | 566 | THRESHOLD=5 567 | def between_threshold(a, expected): 568 | return abs(a - expected) <= THRESHOLD 569 | 570 | :param window: A (start, end) tuple to limit the scope of the 571 | calculation. 572 | :type window: tuple 573 | 574 | .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getDutyCycle` 575 | 576 | """ 577 | return self.assertRuntime( 578 | expected_value, 579 | operator, 580 | window, 581 | percent=True) 582 | 583 | def getFirstCpu(self, window=None): 584 | """ 585 | :return: The first CPU the task ran on 586 | 587 | .. 
seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertFirstCPU` 588 | """ 589 | 590 | agg = self._aggregator(sched_funcs.first_cpu) 591 | result = agg.aggregate(level="cpu", window=window) 592 | result = list(itertools.chain.from_iterable(result)) 593 | 594 | min_time = min(result) 595 | if math.isinf(min_time): 596 | return -1 597 | index = result.index(min_time) 598 | return self._topology.get_node("cpu", index)[0] 599 | 600 | def assertFirstCpu(self, cpus, window=None): 601 | """ 602 | Check if the Task started (first ran on in the duration 603 | of the trace) on a particular CPU(s) 604 | 605 | :param cpus: A list of acceptable CPUs 606 | :type cpus: int, list 607 | 608 | .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getFirstCPU` 609 | """ 610 | 611 | first_cpu = self.getFirstCpu(window=window) 612 | cpus = Utils.listify(cpus) 613 | return first_cpu in cpus 614 | 615 | def getLastCpu(self, window=None): 616 | """Return the last CPU the task ran on""" 617 | 618 | agg = self._aggregator(sched_funcs.last_cpu) 619 | result = agg.aggregate(level="cpu", window=window) 620 | result = list(itertools.chain.from_iterable(result)) 621 | 622 | end_time = max(result) 623 | if not end_time: 624 | return -1 625 | 626 | return result.index(end_time) 627 | 628 | def generate_events(self, level, start_id=0, window=None): 629 | """Generate events for the trace plot 630 | 631 | .. 
note:: 632 | This is an internal function accessed by the 633 | :mod:`bart.sched.SchedMultiAssert` class for plotting data 634 | """ 635 | 636 | agg = self._aggregator(sched_funcs.trace_event) 637 | result = agg.aggregate(level=level, window=window) 638 | events = [] 639 | 640 | for idx, level_events in enumerate(result): 641 | if not len(level_events): 642 | continue 643 | events += np.column_stack((level_events, np.full(len(level_events), idx))).tolist() 644 | 645 | return sorted(events, key = lambda x : x[0]) 646 | 647 | def plot(self, level="cpu", window=None, xlim=None): 648 | """ 649 | :return: :mod:`trappy.plotter.AbstractDataPlotter` instance 650 | Call :func:`view` to draw the graph 651 | """ 652 | 653 | if not xlim: 654 | if not window: 655 | xlim = [0, self._ftrace.get_duration()] 656 | else: 657 | xlim = list(window) 658 | 659 | events = {} 660 | events[self.name] = self.generate_events(level, window) 661 | names = [self.name] 662 | num_lanes = self._topology.level_span(level) 663 | lane_prefix = level.upper() + ": " 664 | return trappy.EventPlot(events, names, xlim, 665 | lane_prefix=lane_prefix, 666 | num_lanes=num_lanes) 667 | -------------------------------------------------------------------------------- /bart/sched/SchedMatrix.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # 15 | 16 | """ 17 | The SchedMatrix provides an ability to compare two executions 18 | of benchmarks with multiple processes. 19 | 20 | For example, consider a benchmark that spawns 4 identical threads 21 | and any two threads should exhibit a certain behaviours and the 22 | remaining another identical but different behaviour. 23 | 24 | SchedMatrix creates a Matrix of Scheduler Waveform Correlations 25 | 26 | A = Reference Execution 27 | B = Execution to be Evaluated 28 | 29 | .. code:: 30 | 31 | +---+ +---+ 32 | | | | | 33 | A1, B3 +---+ +--+ +--------------+ 34 | +---+ +---+ 35 | | | | | 36 | A2, B4 +--------------+ +--+ +---+ 37 | +---+ +---+ 38 | | | | | 39 | A3, B1 +---+ +--+ +--------------+ 40 | +---+ +---+ 41 | | | | | 42 | A4, B2 +--------------+ +--+ +---+ 43 | 44 | 45 | **Correlation Matrix** 46 | 47 | === ==== ==== ==== ==== 48 | B1 B2 B3 B4 49 | === ==== ==== ==== ==== 50 | A1 1 0 1 0 51 | A2 0 1 0 1 52 | A3 1 0 1 0 53 | A4 0 1 0 1 54 | === ==== ==== ==== ==== 55 | 56 | 57 | Thus a success criteria can be defined as A1 having two similar threads in the 58 | evaluated execution 59 | :: 60 | 61 | assertSiblings(A1, 2, operator.eq) 62 | assertSiblings(A2, 2, operator.eq) 63 | assertSiblings(A3, 2, operator.eq) 64 | assertSiblings(A4, 2, operator.eq) 65 | """ 66 | 67 | 68 | import sys 69 | import trappy 70 | import numpy as np 71 | from trappy.stats.Aggregator import MultiTriggerAggregator 72 | from trappy.stats.Correlator import Correlator 73 | from bart.sched import functions as sched_funcs 74 | from bart.common import Utils 75 | 76 | POSITIVE_TOLERANCE = 0.80 77 | 78 | # pylint: disable=invalid-name 79 | # pylint: disable=too-many-arguments 80 | 81 | 82 | class SchedMatrix(object): 83 | 84 | """ 85 | :param reference_trace: The trace file path/ftrace object 86 | to be used as a reference 87 | :type reference_trace: str, :mod:`trappy.ftrace.FTrace` 88 | 89 | :param trace: The trace file path/ftrace object 90 | to be verified 91 | :type trace: str, 
:mod:`trappy.ftrace.FTrace` 92 | 93 | :param topology: A topology that describes the arrangement of 94 | CPU's on a system. This is useful for multi-cluster systems 95 | where data needs to be aggregated at different topological 96 | levels 97 | :type topology: :mod:`trappy.stats.Topology.Topology` 98 | 99 | :param execnames: The execnames of the task to be analysed 100 | 101 | A single execname or a list of execnames can be passed. 102 | There can be multiple processes associated with a single 103 | execname parameter. The execnames are searched using a prefix 104 | match. 105 | :type execname: list, str 106 | 107 | Consider the following processes which need to be analysed: 108 | 109 | * **Reference Trace** 110 | 111 | ===== ============== 112 | PID execname 113 | ===== ============== 114 | 11 task_1 115 | 22 task_2 116 | 33 task_3 117 | ===== ============== 118 | 119 | * **Trace to be verified** 120 | 121 | ===== ============== 122 | PID execname 123 | ===== ============== 124 | 77 task_1 125 | 88 task_2 126 | 99 task_3 127 | ===== ============== 128 | 129 | 130 | A :mod:`bart.sched.SchedMatrix.SchedMatrix` instance be created 131 | following different ways: 132 | 133 | - Using execname prefix match 134 | :: 135 | 136 | SchedMatrix(r_trace, trace, topology, 137 | execnames="task_") 138 | 139 | - Individual Task names 140 | :: 141 | 142 | SchedMatrix(r_trace, trace, topology, 143 | execnames=["task_1", "task_2", "task_3"]) 144 | 145 | """ 146 | 147 | def __init__( 148 | self, 149 | reference_trace, 150 | trace, 151 | topology, 152 | execnames, 153 | aggfunc=sched_funcs.csum): 154 | 155 | run = Utils.init_ftrace(trace) 156 | reference_run = Utils.init_ftrace(reference_trace) 157 | 158 | self._execnames = Utils.listify(execnames) 159 | self._reference_pids = self._populate_pids(reference_run) 160 | self._pids = self._populate_pids(run) 161 | self._dimension = len(self._pids) 162 | self._topology = topology 163 | self._matrix = self._generate_matrix(run, 
reference_run, aggfunc) 164 | 165 | if len(self._pids) != len(self._reference_pids): 166 | raise RuntimeError( 167 | "The runs do not have the same number of PIDs for {0}".format( 168 | str(execnames))) 169 | 170 | def _populate_pids(self, run): 171 | """Populate the qualifying PIDs from the run""" 172 | 173 | if len(self._execnames) == 1: 174 | return sched_funcs.get_pids_for_process(run, self._execnames[0]) 175 | 176 | pids = [] 177 | 178 | for proc in self._execnames: 179 | pids += sched_funcs.get_pids_for_process(run, proc) 180 | 181 | return list(set(pids)) 182 | 183 | def _generate_matrix(self, run, reference_run, aggfunc): 184 | """Generate the Correlation Matrix""" 185 | 186 | reference_aggs = [] 187 | aggs = [] 188 | 189 | for idx in range(self._dimension): 190 | 191 | reference_aggs.append( 192 | MultiTriggerAggregator( 193 | sched_funcs.sched_triggers( 194 | reference_run, 195 | self._reference_pids[idx], 196 | trappy.sched.SchedSwitch 197 | ), 198 | self._topology, 199 | aggfunc)) 200 | 201 | aggs.append( 202 | MultiTriggerAggregator( 203 | sched_funcs.sched_triggers( 204 | run, 205 | self._pids[idx], 206 | trappy.sched.SchedSwitch 207 | ), 208 | self._topology, 209 | aggfunc)) 210 | 211 | agg_pair_gen = ((r_agg, agg) 212 | for r_agg in reference_aggs for agg in aggs) 213 | 214 | # pylint fails to recognize numpy members. 215 | # pylint: disable=no-member 216 | matrix = np.zeros((self._dimension, self._dimension)) 217 | # pylint: enable=no-member 218 | 219 | for (ref_result, test_result) in agg_pair_gen: 220 | i = reference_aggs.index(ref_result) 221 | j = aggs.index(test_result) 222 | corr = Correlator( 223 | ref_result, 224 | test_result, 225 | corrfunc=sched_funcs.binary_correlate, 226 | filter_gaps=True) 227 | _, total = corr.correlate(level="cluster") 228 | 229 | matrix[i][j] = total 230 | 231 | return matrix 232 | 233 | def print_matrix(self): 234 | """Print the correlation matrix""" 235 | 236 | # pylint fails to recognize numpy members. 
237 | # pylint: disable=no-member 238 | np.set_printoptions(precision=5) 239 | np.set_printoptions(suppress=False) 240 | np.savetxt(sys.stdout, self._matrix, "%5.5f") 241 | # pylint: enable=no-member 242 | 243 | def getSiblings(self, pid, tolerance=POSITIVE_TOLERANCE): 244 | """Return the number of processes in the 245 | reference trace that have a correlation 246 | greater than tolerance 247 | 248 | :param pid: The PID of the process in the reference 249 | trace 250 | :type pid: int 251 | 252 | :param tolerance: A correlation value > tolerance 253 | will classify the resultant process as a sibling 254 | :type tolerance: float 255 | 256 | .. seealso:: :mod:`bart.sched.SchedMatrix.SchedMatrix.assertSiblings` 257 | """ 258 | 259 | ref_pid_idx = self._reference_pids.index(pid) 260 | pid_result = self._matrix[ref_pid_idx] 261 | return len(pid_result[pid_result > tolerance]) 262 | 263 | def assertSiblings(self, pid, expected_value, operator, 264 | tolerance=POSITIVE_TOLERANCE): 265 | """Assert that the number of siblings in the reference 266 | trace match the expected value and the operator 267 | 268 | :param pid: The PID of the process in the reference 269 | trace 270 | :type pid: int 271 | 272 | :param operator: A binary operator function that returns 273 | a boolean. For example: 274 | :: 275 | 276 | import operator 277 | op = operator.eq 278 | getSiblings(pid, expected_value, op) 279 | 280 | Will do the following check: 281 | :: 282 | 283 | getSiblings(pid) == expected_value 284 | 285 | :param tolerance: A correlation value > tolerance 286 | will classify the resultant process as a sibling 287 | :type tolerance: float 288 | 289 | .. 
# Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""A library for asserting scheduler scenarios based on the
statistics aggregation framework"""

import re
import inspect
import trappy
from bart.sched import functions as sched_funcs
from bart.sched.SchedAssert import SchedAssert
from bart.common import Utils

class SchedMultiAssert(object):
    """This is a vector assertion class built on top of
    :mod:`bart.sched.SchedAssert.SchedAssert`

    :param ftrace: A single trappy.FTrace object
        or a path that can be passed to trappy.FTrace
    :type ftrace: :mod:`trappy.ftrace.FTrace`

    :param topology: A topology that describes the arrangement of
        CPU's on a system. This is useful for multi-cluster systems
        where data needs to be aggregated at different topological
        levels
    :type topology: :mod:`trappy.stats.Topology.Topology`

    :param execnames: The execnames of the task to be analysed

        A single execname or a list of execnames can be passed.
        There can be multiple processes associated with a single
        execname parameter. The execnames are searched using a prefix
        match.
    :type execname: list, str

    :param pids: The process IDs of the tasks to be analysed
    :type pids: list, int

    Consider the following processes which need to be analysed

    ===== ==============
     PID    execname
    ===== ==============
     11     task_1
     22     task_2
     33     task_3
    ===== ==============

    A :mod:`bart.sched.SchedMultiAssert.SchedMultiAssert` instance can be
    created in the following different ways:

    - Using execname prefix match
      ::

        SchedMultiAssert(ftrace, topology, execnames="task_")

    - Individual Task names
      ::

        SchedMultiAssert(ftrace, topology, execnames=["task_1", "task_2", "task_3"])

    - Using Process IDs
      ::

        SchedMultiAssert(ftrace, topology, pids=[11, 22, 33])


    All the functionality provided in :mod:`bart.sched.SchedAssert.SchedAssert`
    is available in this class with the addition of handling vector
    assertions.

    For example consider the use of :func:`getDutyCycle`
    ::

        >>> s = SchedMultiAssert(ftrace, topology, execnames="task_")
        >>> s.getDutyCycle(window=(start, end))
        {
            "11": {
                "task_name": "task_1",
                "dutycycle": 10.0
            },
            "22": {
                "task_name": "task_2",
                "dutycycle": 20.0
            },
            "33": {
                "task_name": "task_3",
                "dutycycle": 30.0
            },
        }

    The assertions can be used in a similar way
    ::

        >>> import operator as op
        >>> s = SchedMultiAssert(ftrace, topology, execnames="task_")
        >>> s.assertDutyCycle(15, op.ge, window=(start, end))
        {
            "11": {
                "task_name": "task_1",
                "dutycycle": False
            },
            "22": {
                "task_name": "task_2",
                "dutycycle": True
            },
            "33": {
                "task_name": "task_3",
                "dutycycle": True
            },
        }

    The above result can be coalesced using a :code:`rank` parameter.
    As we know that only 2 processes have duty cycles greater than 15%
    we can do the following:
    ::

        >>> import operator as op
        >>> s = SchedMultiAssert(ftrace, topology, execnames="task_")
        >>> s.assertDutyCycle(15, op.ge, window=(start, end), rank=2)
        True

    See :mod:`bart.sched.SchedAssert.SchedAssert` for the available
    functionality
    """

    def __init__(self, ftrace, topology, execnames=None, pids=None):

        self._ftrace = Utils.init_ftrace(ftrace)
        self._topology = topology

        if execnames and pids:
            # This branch previously raised the same message as the
            # "neither specified" branch below, which was misleading.
            raise ValueError('Only one of pids or execnames can be specified')
        if execnames:
            self._execnames = Utils.listify(execnames)
            self._pids = self._populate_pids()
        elif pids:
            self._pids = pids
        else:
            raise ValueError('One of PIDs or execnames must be specified')

        self._asserts = self._populate_asserts()
        self._populate_methods()

    def _populate_asserts(self):
        """Create one SchedAssert per PID"""

        asserts = {}

        for pid in self._pids:
            asserts[pid] = SchedAssert(self._ftrace, self._topology, pid=pid)

        return asserts

    def _populate_pids(self):
        """Map the input execnames to PIDs"""

        if len(self._execnames) == 1:
            return sched_funcs.get_pids_for_process(self._ftrace, self._execnames[0])

        pids = []
        for proc in self._execnames:
            pids += sched_funcs.get_pids_for_process(self._ftrace, proc)

        # A PID can match more than one execname entry; de-duplicate
        return list(set(pids))

    def _create_method(self, attr_name):
        """A wrapper function to create a dispatch function"""

        return lambda *args, **kwargs: self._dispatch(attr_name, *args, **kwargs)

    def _populate_methods(self):
        """Populate Methods from SchedAssert"""

        for attr_name in dir(SchedAssert):
            attr = getattr(SchedAssert, attr_name)

            valid_method = attr_name.startswith("get") or \
                           attr_name.startswith("assert")
            # inspect.ismethod() does not match unbound methods on
            # Python 3 (they are plain functions there); isroutine()
            # covers both Python 2 and Python 3.
            if inspect.isroutine(attr) and valid_method:
                func = self._create_method(attr_name)
                setattr(self, attr_name, func)

    def get_task_name(self, pid):
        """Get task name for the PID"""
        return self._asserts[pid].execname


    def _dispatch(self, func_name, *args, **kwargs):
        """The dispatch function to call into the SchedAssert
        Method
        """

        assert_func = func_name.startswith("assert")
        num_true = 0

        rank = kwargs.pop("rank", None)
        result = kwargs.pop("result", {})
        # Key under which to store the per-PID result, derived from the
        # method name ("assertDutyCycle" / "getDutyCycle" -> "dutycycle")
        param = kwargs.pop("param", re.sub(r"assert|get", "", func_name, count=1).lower())

        for pid in self._pids:

            if pid not in result:
                result[pid] = {}
                result[pid]["task_name"] = self.get_task_name(pid)

            attr = getattr(self._asserts[pid], func_name)
            result[pid][param] = attr(*args, **kwargs)

            if assert_func and result[pid][param]:
                num_true += 1

        # With a rank, an assertion collapses to a single boolean:
        # "exactly `rank` PIDs satisfied the assertion"
        if assert_func and rank:
            return num_true == rank
        else:
            return result

    def getCPUBusyTime(self, level, node, window=None, percent=False):
        """Get the amount of time the cpus in the system were busy executing the
        tasks

        :param level: The topological level to which the group belongs
        :type level: string

        :param node: The group of CPUs for which to calculate busy time
        :type node: list

        :param window: A (start, end) tuple to limit the scope of the
            calculation.
        :type window: tuple

        :param percent: If True the result is normalized to the total
            time of the period, either the window or the full length of
            the trace.
        :type percent: bool

        .. math::

            R = \\frac{T_{busy} \\times 100}{T_{total}}

        """
        residencies = self.getResidency(level, node, window=window)

        # dict.values() works on both Python 2 and 3 (itervalues() does not)
        busy_time = sum(v["residency"] for v in residencies.values())

        if percent:
            if window:
                total_time = window[1] - window[0]
            else:
                total_time = self._ftrace.get_duration()
            num_cpus = len(node)
            return busy_time / (total_time * num_cpus) * 100
        else:
            return busy_time

    def generate_events(self, level, window=None):
        """Generate Events for the trace plot

        .. note::
            This is an internal function for plotting data
        """

        events = {}
        for s_assert in self._asserts.values():
            events[s_assert.name] = s_assert.generate_events(level, window=window)

        return events

    def plot(self, level="cpu", window=None, xlim=None):
        """
        :return: :mod:`trappy.plotter.AbstractDataPlotter` instance
            Call :func:`view` to draw the graph
        """

        if not xlim:
            if not window:
                xlim = [0, self._ftrace.get_duration()]
            else:
                xlim = list(window)

        events = self.generate_events(level, window)
        names = [s.name for s in self._asserts.values()]
        num_lanes = self._topology.level_span(level)
        lane_prefix = level.upper() + ": "
        return trappy.EventPlot(events, names, xlim,
                                lane_prefix=lane_prefix,
                                num_lanes=num_lanes)
14 | # 15 | 16 | """Initialization for bart.sched""" 17 | 18 | 19 | from bart.sched import SchedAssert 20 | from bart.sched import SchedMultiAssert 21 | from bart.sched import SchedMatrix 22 | 23 | from bart.sched import pelt 24 | -------------------------------------------------------------------------------- /bart/sched/functions.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | 16 | """Scheduler specific Functionality for the 17 | stats framework 18 | 19 | The Scheduler stats aggregation is based on a signal 20 | which is generated by the combination of two triggers 21 | from the events with the following parameters 22 | 23 | ========================= ============ ============= 24 | EVENT VALUE FILTERS 25 | ========================= ============ ============= 26 | :func:`sched_switch` 1 next_pid 27 | :func:`sched_switch` -1 prev_pid 28 | ========================= ============ ============= 29 | 30 | Both these Triggers are provided by the event 31 | :mod:`trappy.sched.SchedSwitch` which correspond to 32 | the :code:`sched_switch` unique word in the trace 33 | 34 | .. seealso:: :mod:`trappy.stats.Trigger.Trigger` 35 | 36 | Using the above information the following signals are 37 | generated. 
38 | 39 | **EVENT SERIES** 40 | 41 | This is a combination of the two triggers as specified 42 | above and has alternating +/- 1 values and is merely 43 | a representation of the position in time when the process 44 | started or stopped running on a CPU 45 | 46 | **RESIDENCY SERIES** 47 | 48 | This series is a cumulative sum of the event series and 49 | is a representation of the continuous residency of the 50 | process on a CPU 51 | 52 | The pivot for the aggregators is the CPU on which the 53 | event occurred on. If N is the number of CPUs in the 54 | system, N signal for each CPU are generated. These signals 55 | can then be aggregated by specifying a Topology 56 | 57 | .. seealso:: :mod:`trappy.stats.Topology.Topology` 58 | """ 59 | 60 | import numpy as np 61 | from trappy.stats.Trigger import Trigger 62 | 63 | WINDOW_SIZE = 0.0001 64 | """A control config for filter events. Some analyses 65 | may require ignoring of small interruptions""" 66 | 67 | # Trigger Values 68 | SCHED_SWITCH_IN = 1 69 | """Value of the event when a task is **switch in** 70 | or scheduled on a CPU""" 71 | SCHED_SWITCH_OUT = -1 72 | """Value of the event when a task is **switched out** 73 | or relinquishes a CPU""" 74 | NO_EVENT = 0 75 | """Signifies no event on an event trace""" 76 | 77 | # Field Names 78 | CPU_FIELD = "__cpu" 79 | """The column in the sched_switch event that 80 | indicates the CPU on which the event occurred 81 | """ 82 | NEXT_PID_FIELD = "next_pid" 83 | """The column in the sched_switch event that 84 | indicates the PID of the next process to be scheduled 85 | """ 86 | PREV_PID_FIELD = "prev_pid" 87 | """The column in the sched_switch event that 88 | indicates the PID of the process that was scheduled 89 | in 90 | """ 91 | TASK_RUNNING = 1 92 | """The column in the sched_switch event that 93 | indicates the CPU on which the event occurred 94 | """ 95 | TASK_NOT_RUNNING = 0 96 | """In a residency series, a zero indicates 97 | that the task is not running 98 | """ 99 | 
TIME_INVAL = -1
"""Standard Value to indicate invalid time data"""
SERIES_SANTIZED = "_sched_sanitized"
"""A memoized flag which is set when an event series
is checked for boundary conditions
"""


def sanitize_asymmetry(series, window=None):
    """Sanitize the cases when a :code:`SWITCH_OUT`
    happens before a :code:`SWITCH_IN`. (The case when
    a process is already running before the trace started)

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple
    """

    # The result is memoized on the series object itself so repeated
    # calls (e.g. from residency_sum and trace_event) skip the check.
    if not hasattr(series, SERIES_SANTIZED):

        events = series[series != 0]
        if len(series) >= 2 and len(events):
            # Leading edge: task was already running when the trace
            # (or window) started
            if series.values[0] == SCHED_SWITCH_OUT:
                series.values[0] = TASK_NOT_RUNNING

            elif events.values[0] == SCHED_SWITCH_OUT:
                series.values[0] = SCHED_SWITCH_IN
                if window:
                    series.index.values[0] = window[0]

            # Trailing edge: task was still running when the trace
            # (or window) ended
            if series.values[-1] == SCHED_SWITCH_IN:
                series.values[-1] = TASK_NOT_RUNNING

            elif events.values[-1] == SCHED_SWITCH_IN:
                series.values[-1] = SCHED_SWITCH_OUT
                if window:
                    series.index.values[-1] = window[1]

        # No point if the series just has one value and
        # one event. We do not have sufficient data points
        # for any calculation. We should ideally never reach
        # here.
        elif len(series) == 1:
            series.values[0] = 0

        setattr(series, SERIES_SANTIZED, True)

    return series


def csum(series, window=None, filter_gaps=False):
    """:func:`aggfunc` for the cumulative sum of the
    input series data

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    :param filter_gaps: If set, a process being switched out
        for :mod:`bart.sched.functions.WINDOW_SIZE` is
        ignored. This is helpful when small interruptions need
        to be ignored to compare overall correlation
    :type filter_gaps: bool
    """

    if filter_gaps:
        series = filter_small_gaps(series)

    series = series.cumsum()
    return select_window(series, window)

def filter_small_gaps(series):
    """A helper function that does filtering of gaps
    in residency series < :mod:`bart.sched.functions.WINDOW_SIZE`

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`
    """

    start = None
    # Series.items() instead of the removed-in-pandas-2.x iteritems()
    for index, value in series.items():

        if value == SCHED_SWITCH_IN:
            # A SWITCH_IN with no preceding SWITCH_OUT: nothing to filter
            if start is None:
                continue

            if index - start < WINDOW_SIZE:
                # Erase the short OUT/IN pair so the gap disappears
                series[start] = NO_EVENT
                series[index] = NO_EVENT

            start = None

        if value == SCHED_SWITCH_OUT:
            start = index

    return series

def first_cpu(series, window=None):
    """:func:`aggfunc` to calculate the time of
    the first switch in event in the series
    This is returned as a vector of unit length
    so that it can be aggregated and reduced across
    nodes to find the first cpu of a task

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple
    """
    series = select_window(series, window)
    series = series[series == SCHED_SWITCH_IN]
    if len(series):
        return [series.index.values[0]]
    else:
        return [float("inf")]
def last_cpu(series, window=None):
    """:func:`aggfunc` to calculate the time of
    the last switch out event in the series
    This is returned as a vector of unit length
    so that it can be aggregated and reduced across
    nodes to find the last cpu of a task

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple
    """
    series = select_window(series, window)
    series = series[series == SCHED_SWITCH_OUT]

    if len(series):
        return [series.index.values[-1]]
    else:
        return [0]

def select_window(series, window):
    """Helper Function to select a portion of
    pandas time series.  The window is inclusive
    at both ends; a falsy window returns the
    series unchanged.

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A (start, stop) tuple indicating a time window
    :type window: tuple
    """

    if not window:
        return series

    start, stop = window
    ix = series.index
    selector = ((ix >= start) & (ix <= stop))
    return series[selector]

def residency_sum(series, window=None):
    """:func:`aggfunc` to calculate the total
    residency


    The input series is processed for
    intervals between a :mod:`bart.sched.functions.SCHED_SWITCH_OUT`
    and :mod:`bart.sched.functions.SCHED_SWITCH_IN` to track
    additive residency of a task

    .. math::

        S_{in} = i_{1}, i_{2}...i_{N} \\\\
        S_{out} = o_{1}, o_{2}...o_{N} \\\\
        R_{total} = \\sum_{k}^{N}\\Delta_k = \\sum_{k}^{N}(o_{k} - i_{k})

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    :return: A scalar float value
    """

    if not len(series):
        return 0.0

    org_series = series
    series = select_window(series, window)
    series = sanitize_asymmetry(series, window)

    s_in = series[series == SCHED_SWITCH_IN]
    s_out = series[series == SCHED_SWITCH_OUT]

    if not (len(s_in) and len(s_out)):
        # No switch events inside the window: the task may have been
        # continuously running across the whole window.  This is a
        # best-effort check -- any failure (e.g. window is None) simply
        # falls through to the length check below.
        try:
            org_series = sanitize_asymmetry(org_series)
            running = select_window(org_series.cumsum(), window)
            if running.values[0] == TASK_RUNNING and running.values[-1] == TASK_RUNNING:
                return window[1] - window[0]
        except Exception:
            pass

    if len(s_in) != len(s_out):
        raise RuntimeError(
            "Unexpected Lengths: s_in={}, s_out={}".format(
                len(s_in),
                len(s_out)))

    # Sum of (switch-out time - switch-in time) over all intervals
    return np.sum(s_out.index.values - s_in.index.values)


def first_time(series, value, window=None):
    """:func:`aggfunc` to:

    - Return the first index where the
      series == value

    - If no such index is found
      +inf is returned

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    :return: A vector of Unit Length
    """

    series = select_window(series, window)
    series = series[series == value]

    if not len(series):
        return [float("inf")]

    return [series.index.values[0]]


def period(series, align="start", window=None):
    """This :func:`aggfunc` returns the list
    of durations between two successive triggers:

    - When :code:`align=start` the :code:`SCHED_IN`
      trigger is used

    - When :code:`align=end` the :code:`SCHED_OUT`
      trigger is used


    .. math::

        E = e_{1}, e_{2}...e_{N} \\\\
        T_p = \\frac{\\sum_{j}^{\\lfloor N/2 \\rfloor}(e_{2j + 1} - e_{2j})}{N}

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    :return:
        A list of deltas of successive starts/stops
        of a task

    """

    series = select_window(series, window)
    series = sanitize_asymmetry(series, window)

    if align == "start":
        series = series[series == SCHED_SWITCH_IN]
    elif align == "end":
        series = series[series == SCHED_SWITCH_OUT]

    # NOTE(review): truncating to a single element for an even number of
    # events makes np.diff return an empty list; this looks suspicious
    # (series[:-1] may have been intended) -- confirm before changing.
    if len(series) % 2 == 0:
        series = series[:1]

    if not len(series):
        return []

    return list(np.diff(series.index.values))
def last_time(series, value, window=None):
    """:func:`aggfunc` to:

    - Return the last index where the
      series == value

    - If no such index is found
      :mod:`bart.sched.functions.TIME_INVAL`
      is returned

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    :return: A vector of Unit Length
    """

    series = select_window(series, window)
    series = series[series == value]
    if not len(series):
        return [TIME_INVAL]

    return [series.index.values[-1]]


def binary_correlate(series_x, series_y):
    """Helper function to Correlate binary Data

    Both the series should have same indices

    For binary time series data:

    .. math::

        \\alpha_{corr} = \\frac{N_{agree} - N_{disagree}}{N}

    :param series_x: First time Series data
    :type series_x: :mod:`pandas.Series`

    :param series_y: Second time Series data
    :type series_y: :mod:`pandas.Series`
    """

    if len(series_x) != len(series_y):
        # Single-line message: the previous backslash continuation
        # embedded raw source indentation inside the string.
        raise ValueError("Cannot compute binary correlation for unequal vectors")

    agree = len(series_x[series_x == series_y])
    disagree = len(series_x[series_x != series_y])

    return (agree - disagree) / float(len(series_x))

def get_pids_for_process(ftrace, execname, cls=None):
    """Get the PIDs for a given process

    :param ftrace: A ftrace object with a sched_switch
        event
    :type ftrace: :mod:`trappy.ftrace.FTrace`

    :param execname: The name of the process, compared for an exact
        match against the ``next_comm`` field
    :type execname: str

    :param cls: The SchedSwitch event class (required if
        a different event is to be used)
    :type cls: :mod:`trappy.base.Base`

    :return: The set of PIDs for the execname

    .. note:: NOTE(review): this performs an *exact* comm match, while
        the SchedMatrix/SchedMultiAssert docstrings advertise prefix
        matching -- confirm which behaviour is intended.
    """

    if not cls:
        try:
            df = ftrace.sched_switch.data_frame
        except AttributeError:
            raise ValueError("SchedSwitch event not found in ftrace")

        if len(df) == 0:
            raise ValueError("SchedSwitch event not found in ftrace")
    else:
        event = getattr(ftrace, cls.name)
        df = event.data_frame

    # Vectorized comparison instead of the former per-row
    # apply(lambda ...) -- identical result, less overhead.
    mask = df["next_comm"] == execname
    return list(np.unique(df[mask]["next_pid"].values))

def get_task_name(ftrace, pid, cls=None):
    """Returns the execname for pid

    :param ftrace: A ftrace object with a sched_switch
        event
    :type ftrace: :mod:`trappy.ftrace.FTrace`

    :param pid: The PID of the process
    :type pid: int

    :param cls: The SchedSwitch event class (required if
        a different event is to be used)
    :type cls: :mod:`trappy.base.Base`

    :return: The execname for the PID, or the empty string if the PID
        never appears in the event data
    """

    if not cls:
        try:
            df = ftrace.sched_switch.data_frame
        except AttributeError:
            raise ValueError("SchedSwitch event not found in ftrace")
    else:
        event = getattr(ftrace, cls.name)
        df = event.data_frame

    df = df[df["next_pid"] == pid]
    if not len(df):
        return ""
    else:
        return df["next_comm"].values[0]
def sched_triggers(ftrace, pid, sched_switch_class):
    """Returns the list of sched_switch triggers

    :param ftrace: A ftrace object with a sched_switch
        event
    :type ftrace: :mod:`trappy.ftrace.FTrace`

    :param pid: The PID of the associated process
    :type pid: int

    :param sched_switch_class: The SchedSwitch event class
    :type sched_switch_class: :mod:`trappy.base.Base`

    :return: List of triggers, such that
        ::

            triggers[0] = switch_in_trigger
            triggers[1] = switch_out_trigger
    """

    if not hasattr(ftrace, "sched_switch"):
        raise ValueError("SchedSwitch event not found in ftrace")

    return [
        sched_switch_in_trigger(ftrace, pid, sched_switch_class),
        sched_switch_out_trigger(ftrace, pid, sched_switch_class),
    ]

def sched_switch_in_trigger(ftrace, pid, sched_switch_class):
    """
    :param ftrace: A ftrace object with a sched_switch
        event
    :type ftrace: :mod:`trappy.ftrace.FTrace`

    :param pid: The PID of the associated process
    :type pid: int

    :param sched_switch_class: The SchedSwitch event class
    :type sched_switch_class: :mod:`trappy.base.Base`

    :return: :mod:`trappy.stats.Trigger.Trigger` on
        the SchedSwitch: IN for the given PID
    """

    return Trigger(ftrace,
                   sched_switch_class,        # trappy Event Class
                   {NEXT_PID_FIELD: pid},     # Filter Dictionary
                   SCHED_SWITCH_IN,           # Trigger Value
                   CPU_FIELD)                 # Primary Pivot

def sched_switch_out_trigger(ftrace, pid, sched_switch_class):
    """
    :param ftrace: A ftrace object with a sched_switch
        event
    :type ftrace: :mod:`trappy.ftrace.FTrace`

    :param pid: The PID of the associated process
    :type pid: int

    :param sched_switch_class: The SchedSwitch event class
    :type sched_switch_class: :mod:`trappy.base.Base`

    :return: :mod:`trappy.stats.Trigger.Trigger` on
        the SchedSwitch: OUT for the given PID
    """

    return Trigger(ftrace,
                   sched_switch_class,        # trappy Event Class
                   {PREV_PID_FIELD: pid},     # Filter Dictionary
                   SCHED_SWITCH_OUT,          # Trigger Value
                   CPU_FIELD)                 # Primary Pivot


def trace_event(series, window=None):
    """
    :func:`aggfunc` to be used for plotting
    the process residency data using
    :mod:`trappy.plotter.EventPlot`

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    :return: A list of events
        of the type:
        ::

            [
                [start_time_1, stop_time_1],
                [start_time_2, stop_time_2],
                #
                #
                [start_time_N, stop_time_N],
            ]
    """
    windowed = select_window(series, window)
    windowed = sanitize_asymmetry(windowed, window)

    starts = windowed[windowed == SCHED_SWITCH_IN]
    stops = windowed[windowed == SCHED_SWITCH_OUT]

    # No switch-in events: nothing to draw
    if not len(starts):
        return []

    if len(starts) != len(stops):
        raise RuntimeError(
            "Unexpected Lengths: s_in={}, s_out={}".format(
                len(starts),
                len(stops)))

    # One [start, stop] row per residency interval
    return np.column_stack((starts.index.values, stops.index.values))
# Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A thermal specific library to assert certain thermal
behaviours
"""

from bart.common import Utils
from bart.common.Analyzer import Analyzer
import numpy as np


# pylint: disable=invalid-name
# pylint: disable=too-many-arguments
class ThermalAssert(object):

    """Wraps a TRAPpy FTrace object and provides assertions
    on thermal behaviours

    :param ftrace: A path to the trace file or a TRAPpy FTrace object
    :type ftrace: str, :mod:`trappy.ftrace.FTrace`
    """

    def __init__(self, ftrace, config=None):

        self._ftrace = Utils.init_ftrace(ftrace)
        self._analyzer = Analyzer(self._ftrace, config)

    def getThermalResidency(self, temp_range, window, percent=False):
        """Return the total time spent in a given temperature range

        :param temp_range: A tuple of (low_temp, high_temp)
            which specifies the range of temperature that
            one intends to calculate the residency for.
        :type temp_range: tuple

        :param window: A (start, end) tuple to limit the scope of the
            residency calculation.
        :type window: tuple

        :param percent: Returns the residency as a percentage of the total
            duration of the trace
        :type percent: bool

        .. seealso:

            :mod:`bart.thermal.ThermalAssert.ThermalAssert.assertThermalResidency`
        """

        # Pivoted thermal temperature data obtained via the grammar
        data = self._analyzer.getStatement("trappy.thermal.Thermal:temp")

        result = {}
        for pivot, frame in data.groupby(axis=1, level=0):

            temps = Utils.select_window(frame[pivot], window)
            in_range = (temps >= temp_range[0]) & (temps <= temp_range[1])

            timestamps = temps.index.values
            # Each sample's duration is the delta to the previous
            # timestamp; the first sample is measured from time 0.
            # pylint fails to recognize numpy members.
            # pylint: disable=no-member
            previous = np.roll(timestamps, 1)
            # pylint: enable=no-member
            previous[0] = 0

            result[pivot] = sum((timestamps - previous)[in_range.values])

            if percent:
                result[pivot] = (
                    result[pivot] * 100.0) / self._ftrace.get_duration()

        return result

    def assertThermalResidency(
            self,
            expected_value,
            operator,
            temp_range,
            window,
            percent=False):
        """
        :param expected_value: The expected value of the residency
        :type expected_value: double

        :param operator: A binary operator function that returns
            a boolean. For example:
            ::

                import operator
                op = operator.ge
                assertThermalResidency(temp_range, expected_value, op)

            Will do the following check:
            ::

                getThermalResidency(temp_range) >= expected_value

            A custom function can also be passed:
            ::

                THRESHOLD=5
                def between_threshold(a, expected):
                    return abs(a - expected) <= THRESHOLD

        :param temp_range: A tuple of (low_temp, high_temp)
            which specifies the range of temperature that
            one intends to calculate the residency for.
        :type temp_range: tuple

        :param window: A (start, end) tuple to limit the scope of the
            residency calculation.
        :type window: tuple

        :param percent: Returns the residency as a percentage of the total
            duration of the trace
        :type percent: bool

        .. seealso:

            :mod:`bart.thermal.ThermalAssert.ThermalAssert.assertThermalResidency`
        """

        residency = self.getThermalResidency(temp_range, window, percent)
        return operator(residency, expected_value)
14 | # 15 | 16 | """Initialization for bart.thermal""" 17 | 18 | 19 | import bart.thermal.ThermalAssert 20 | -------------------------------------------------------------------------------- /bart/version.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | 16 | __version__ = "2.0.0" 17 | -------------------------------------------------------------------------------- /docs/api_reference/.gitignore: -------------------------------------------------------------------------------- 1 | _build 2 | *.rst 3 | !index.rst 4 | -------------------------------------------------------------------------------- /docs/api_reference/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. 
If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the 
documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | ls *.rst | grep -v index.rst | xargs rm -f 54 | 55 | reference: 56 | sphinx-apidoc -f -e -o . ../../bart 57 | 58 | html: reference 59 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 60 | @echo 61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 62 | 63 | dirhtml: reference 64 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 65 | @echo 66 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 67 | 68 | singlehtml: reference 69 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 70 | @echo 71 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 72 | 73 | pickle: reference 74 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 75 | @echo 76 | @echo "Build finished; now you can process the pickle files." 77 | 78 | json: reference 79 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 80 | @echo 81 | @echo "Build finished; now you can process the JSON files." 82 | 83 | htmlhelp: reference 84 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 85 | @echo 86 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 87 | ".hhp project file in $(BUILDDIR)/htmlhelp." 88 | 89 | qthelp: reference 90 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 91 | @echo 92 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 93 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 94 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/BART.qhcp" 95 | @echo "To view the help file:" 96 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/BART.qhc" 97 | 98 | applehelp: reference 99 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 100 | @echo 101 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 102 | @echo "N.B. 
You won't be able to view it unless you put it in" \ 103 | "~/Library/Documentation/Help or install it in your application" \ 104 | "bundle." 105 | 106 | devhelp: reference 107 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 108 | @echo 109 | @echo "Build finished." 110 | @echo "To view the help file:" 111 | @echo "# mkdir -p $$HOME/.local/share/devhelp/BART" 112 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/BART" 113 | @echo "# devhelp" 114 | 115 | epub: reference 116 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 117 | @echo 118 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 119 | 120 | latex: reference 121 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 122 | @echo 123 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 124 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 125 | "(use \`make latexpdf' here to do that automatically)." 126 | 127 | latexpdf: reference 128 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 129 | @echo "Running LaTeX files through pdflatex..." 130 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 131 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 132 | 133 | latexpdfja: reference 134 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 135 | @echo "Running LaTeX files through platex and dvipdfmx..." 136 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 137 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 138 | 139 | text: reference 140 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 141 | @echo 142 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 143 | 144 | man: reference 145 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 146 | @echo 147 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 
148 | 149 | texinfo: reference 150 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 151 | @echo 152 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 153 | @echo "Run \`make' in that directory to run these through makeinfo" \ 154 | "(use \`make info' here to do that automatically)." 155 | 156 | info: reference 157 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 158 | @echo "Running Texinfo files through makeinfo..." 159 | make -C $(BUILDDIR)/texinfo info 160 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 161 | 162 | gettext: reference 163 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 164 | @echo 165 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 166 | 167 | changes: reference 168 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 169 | @echo 170 | @echo "The overview file is in $(BUILDDIR)/changes." 171 | 172 | linkcheck: reference 173 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 174 | @echo 175 | @echo "Link check complete; look for any errors in the above output " \ 176 | "or in $(BUILDDIR)/linkcheck/output.txt." 177 | 178 | doctest: reference 179 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 180 | @echo "Testing of doctests in the sources finished, look at the " \ 181 | "results in $(BUILDDIR)/doctest/output.txt." 182 | 183 | coverage: reference 184 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 185 | @echo "Testing of coverage in the sources finished, look at the " \ 186 | "results in $(BUILDDIR)/coverage/python.txt." 187 | 188 | xml: reference 189 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 190 | @echo 191 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 192 | 193 | pseudoxml: reference 194 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 195 | @echo 196 | @echo "Build finished. 
The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 197 | -------------------------------------------------------------------------------- /docs/api_reference/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright 2015-2016 ARM Limited 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | # BART documentation build configuration file, created by 18 | # sphinx-quickstart on Fri Sep 4 11:30:35 2015. 19 | # 20 | # This file is execfile()d with the current directory set to its 21 | # containing dir. 22 | # 23 | # Note that not all possible configuration values are present in this 24 | # autogenerated file. 25 | # 26 | # All configuration values have a default; values that are commented out 27 | # serve to show the default. 28 | 29 | import sys 30 | import os 31 | import shlex 32 | 33 | this_dir = os.path.dirname(__file__) 34 | sys.path.insert(0, os.path.join(this_dir, '../..')) 35 | import bart 36 | 37 | # If extensions (or modules to document with autodoc) are in another directory, 38 | # add these directories to sys.path here. If the directory is relative to the 39 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
40 | #sys.path.insert(0, os.path.abspath('.')) 41 | 42 | # -- General configuration ------------------------------------------------ 43 | 44 | # If your documentation needs a minimal Sphinx version, state it here. 45 | #needs_sphinx = '1.0' 46 | 47 | # Add any Sphinx extension module names here, as strings. They can be 48 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 49 | # ones. 50 | extensions = [ 51 | 'sphinx.ext.autodoc', 52 | 'sphinx.ext.todo', 53 | 'sphinx.ext.coverage', 54 | 'sphinx.ext.mathjax', 55 | 'sphinx.ext.ifconfig', 56 | 'sphinx.ext.viewcode'] 57 | 58 | # Update MathJax path to use the cdnjs using HTTPS 59 | mathjax_path = "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML" 60 | 61 | # Add any paths that contain templates here, relative to this directory. 62 | templates_path = ['_templates'] 63 | 64 | # The suffix(es) of source filenames. 65 | # You can specify multiple suffix as a list of string: 66 | # source_suffix = ['.rst', '.md'] 67 | source_suffix = '.rst' 68 | 69 | # The encoding of source files. 70 | #source_encoding = 'utf-8-sig' 71 | 72 | # The master toctree document. 73 | master_doc = 'index' 74 | 75 | # General information about the project. 76 | project = u'BART' 77 | copyright = u'2016, ARM Ltd.' 78 | author = u'Kapileshwar Singh(KP), Javi Merino' 79 | 80 | # The version info for the project you're documenting, acts as replacement for 81 | # |version| and |release|, also used in various other places throughout the 82 | # built documents. 83 | # 84 | # The short X.Y version. Drop everything after the last "." 85 | version = bart.__version__[:bart.__version__.rindex(".")] 86 | # The full version, including alpha/beta/rc tags. 87 | release = bart.__version__ 88 | 89 | # The language for content autogenerated by Sphinx. Refer to documentation 90 | # for a list of supported languages. 91 | # 92 | # This is also used if you do content translation via gettext catalogs. 
93 | # Usually you set "language" from the command line for these cases. 94 | language = 'en' 95 | 96 | # There are two options for replacing |today|: either, you set today to some 97 | # non-false value, then it is used: 98 | #today = '' 99 | # Else, today_fmt is used as the format for a strftime call. 100 | #today_fmt = '%B %d, %Y' 101 | 102 | # List of patterns, relative to source directory, that match files and 103 | # directories to ignore when looking for source files. 104 | exclude_patterns = ['_build'] 105 | 106 | # The reST default role (used for this markup: `text`) to use for all 107 | # documents. 108 | #default_role = None 109 | 110 | # If true, '()' will be appended to :func: etc. cross-reference text. 111 | #add_function_parentheses = True 112 | 113 | # If true, the current module name will be prepended to all description 114 | # unit titles (such as .. function::). 115 | #add_module_names = True 116 | 117 | # If true, sectionauthor and moduleauthor directives will be shown in the 118 | # output. They are ignored by default. 119 | #show_authors = False 120 | 121 | # The name of the Pygments (syntax highlighting) style to use. 122 | pygments_style = 'sphinx' 123 | 124 | # A list of ignored prefixes for module index sorting. 125 | #modindex_common_prefix = [] 126 | 127 | # If true, keep warnings as "system message" paragraphs in the built documents. 128 | #keep_warnings = False 129 | 130 | # If true, `todo` and `todoList` produce output, else they produce nothing. 131 | todo_include_todos = True 132 | 133 | 134 | # -- Options for HTML output ---------------------------------------------- 135 | 136 | # The theme to use for HTML and HTML Help pages. See the documentation for 137 | # a list of builtin themes. 138 | html_theme = 'classic' 139 | 140 | # Theme options are theme-specific and customize the look and feel of a theme 141 | # further. For a list of options available for each theme, see the 142 | # documentation. 
143 | #html_theme_options = {} 144 | 145 | # Add any paths that contain custom themes here, relative to this directory. 146 | #html_theme_path = [] 147 | 148 | # The name for this set of Sphinx documents. If None, it defaults to 149 | # " v documentation". 150 | #html_title = None 151 | 152 | # A shorter title for the navigation bar. Default is the same as html_title. 153 | #html_short_title = None 154 | 155 | # The name of an image file (relative to this directory) to place at the top 156 | # of the sidebar. 157 | #html_logo = None 158 | 159 | # The name of an image file (within the static path) to use as favicon of the 160 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 161 | # pixels large. 162 | #html_favicon = None 163 | 164 | # Add any paths that contain custom static files (such as style sheets) here, 165 | # relative to this directory. They are copied after the builtin static files, 166 | # so a file named "default.css" will overwrite the builtin "default.css". 167 | html_static_path = ['_static'] 168 | 169 | # Add any extra paths that contain custom files (such as robots.txt or 170 | # .htaccess) here, relative to this directory. These files are copied 171 | # directly to the root of the documentation. 172 | #html_extra_path = [] 173 | 174 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 175 | # using the given strftime format. 176 | #html_last_updated_fmt = '%b %d, %Y' 177 | 178 | # If true, SmartyPants will be used to convert quotes and dashes to 179 | # typographically correct entities. 180 | #html_use_smartypants = True 181 | 182 | # Custom sidebar templates, maps document names to template names. 183 | #html_sidebars = {} 184 | 185 | # Additional templates that should be rendered to pages, maps page names to 186 | # template names. 187 | #html_additional_pages = {} 188 | 189 | # If false, no module index is generated. 
190 | #html_domain_indices = True 191 | 192 | # If false, no index is generated. 193 | #html_use_index = True 194 | 195 | # If true, the index is split into individual pages for each letter. 196 | #html_split_index = False 197 | 198 | # If true, links to the reST sources are added to the pages. 199 | #html_show_sourcelink = True 200 | 201 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 202 | #html_show_sphinx = True 203 | 204 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 205 | #html_show_copyright = True 206 | 207 | # If true, an OpenSearch description file will be output, and all pages will 208 | # contain a tag referring to it. The value of this option must be the 209 | # base URL from which the finished HTML is served. 210 | #html_use_opensearch = '' 211 | 212 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 213 | #html_file_suffix = None 214 | 215 | # Language to be used for generating the HTML full-text search index. 216 | # Sphinx supports the following languages: 217 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 218 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 219 | #html_search_language = 'en' 220 | 221 | # A dictionary with options for the search language support, empty by default. 222 | # Now only 'ja' uses this config value 223 | #html_search_options = {'type': 'default'} 224 | 225 | # The name of a javascript file (relative to the configuration directory) that 226 | # implements a search results scorer. If empty, the default will be used. 227 | #html_search_scorer = 'scorer.js' 228 | 229 | # Output file base name for HTML help builder. 230 | htmlhelp_basename = 'BARTdoc' 231 | 232 | # -- Options for LaTeX output --------------------------------------------- 233 | 234 | latex_elements = { 235 | # The paper size ('letterpaper' or 'a4paper'). 236 | #'papersize': 'letterpaper', 237 | 238 | # The font size ('10pt', '11pt' or '12pt'). 
239 | #'pointsize': '10pt', 240 | 241 | # Additional stuff for the LaTeX preamble. 242 | #'preamble': '', 243 | 244 | # Latex figure (float) alignment 245 | #'figure_align': 'htbp', 246 | } 247 | 248 | # Grouping the document tree into LaTeX files. List of tuples 249 | # (source start file, target name, title, 250 | # author, documentclass [howto, manual, or own class]). 251 | latex_documents = [ 252 | (master_doc, 'BART.tex', u'BART Documentation', 253 | u'Kapileshwar Singh(KP), Javi Merino', 'manual'), 254 | ] 255 | 256 | # The name of an image file (relative to this directory) to place at the top of 257 | # the title page. 258 | #latex_logo = None 259 | 260 | # For "manual" documents, if this is true, then toplevel headings are parts, 261 | # not chapters. 262 | #latex_use_parts = False 263 | 264 | # If true, show page references after internal links. 265 | #latex_show_pagerefs = False 266 | 267 | # If true, show URL addresses after external links. 268 | #latex_show_urls = False 269 | 270 | # Documents to append as an appendix to all manuals. 271 | #latex_appendices = [] 272 | 273 | # If false, no module index is generated. 274 | #latex_domain_indices = True 275 | 276 | 277 | # -- Options for manual page output --------------------------------------- 278 | 279 | # One entry per manual page. List of tuples 280 | # (source start file, name, description, authors, manual section). 281 | man_pages = [ 282 | (master_doc, 'bart', u'BART Documentation', 283 | [author], 1) 284 | ] 285 | 286 | # If true, show URL addresses after external links. 287 | #man_show_urls = False 288 | 289 | 290 | # -- Options for Texinfo output ------------------------------------------- 291 | 292 | # Grouping the document tree into Texinfo files. 
List of tuples 293 | # (source start file, target name, title, author, 294 | # dir menu entry, description, category) 295 | texinfo_documents = [ 296 | (master_doc, 'BART', u'BART Documentation', 297 | author, 'BART', 'One line description of project.', 298 | 'Miscellaneous'), 299 | ] 300 | 301 | # Documents to append as an appendix to all manuals. 302 | #texinfo_appendices = [] 303 | 304 | # If false, no module index is generated. 305 | #texinfo_domain_indices = True 306 | 307 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 308 | #texinfo_show_urls = 'footnote' 309 | 310 | # If true, do not generate a @detailmenu in the "Top" node's menu. 311 | #texinfo_no_detailmenu = False 312 | 313 | 314 | # -- Options for Epub output ---------------------------------------------- 315 | 316 | # Bibliographic Dublin Core info. 317 | epub_title = project 318 | epub_author = author 319 | epub_publisher = author 320 | epub_copyright = copyright 321 | 322 | # The basename for the epub file. It defaults to the project name. 323 | #epub_basename = project 324 | 325 | # The HTML theme for the epub output. Since the default themes are not optimized 326 | # for small screen space, using the same theme for HTML and epub output is 327 | # usually not wise. This defaults to 'epub', a theme designed to save visual 328 | # space. 329 | #epub_theme = 'epub' 330 | 331 | # The language of the text. It defaults to the language option 332 | # or 'en' if the language is not set. 333 | #epub_language = '' 334 | 335 | # The scheme of the identifier. Typical schemes are ISBN or URL. 336 | #epub_scheme = '' 337 | 338 | # The unique identifier of the text. This can be a ISBN number 339 | # or the project homepage. 340 | #epub_identifier = '' 341 | 342 | # A unique identification for the text. 343 | #epub_uid = '' 344 | 345 | # A tuple containing the cover image and cover page html template filenames. 
346 | #epub_cover = () 347 | 348 | # A sequence of (type, uri, title) tuples for the guide element of content.opf. 349 | #epub_guide = () 350 | 351 | # HTML files that should be inserted before the pages created by sphinx. 352 | # The format is a list of tuples containing the path and title. 353 | #epub_pre_files = [] 354 | 355 | # HTML files shat should be inserted after the pages created by sphinx. 356 | # The format is a list of tuples containing the path and title. 357 | #epub_post_files = [] 358 | 359 | # A list of files that should not be packed into the epub file. 360 | epub_exclude_files = ['search.html'] 361 | 362 | # The depth of the table of contents in toc.ncx. 363 | #epub_tocdepth = 3 364 | 365 | # Allow duplicate toc entries. 366 | #epub_tocdup = True 367 | 368 | # Choose between 'default' and 'includehidden'. 369 | #epub_tocscope = 'default' 370 | 371 | # Fix unsupported image types using the Pillow. 372 | #epub_fix_images = False 373 | 374 | # Scale large images. 375 | #epub_max_image_width = 0 376 | 377 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 378 | #epub_show_urls = 'inline' 379 | 380 | # If false, no index is generated. 381 | #epub_use_index = True 382 | -------------------------------------------------------------------------------- /docs/api_reference/index.rst: -------------------------------------------------------------------------------- 1 | .. BART documentation master file, created by 2 | sphinx-quickstart on Fri Sep 4 12:40:17 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to BART's documentation! 7 | ================================== 8 | 9 | Contents: 10 | 11 | .. 
toctree:: 12 | :maxdepth: 4 13 | 14 | bart 15 | 16 | 17 | Indices and tables 18 | ================== 19 | 20 | * :ref:`genindex` 21 | * :ref:`modindex` 22 | * :ref:`search` 23 | -------------------------------------------------------------------------------- /docs/examples/thermal.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | 16 | """ 17 | An example file for usage of Analyzer for thermal assertions 18 | """ 19 | from bart.common.Analyzer import Analyzer 20 | from trappy.stats.Topology import Topology 21 | import unittest 22 | import trappy 23 | 24 | 25 | class TestThermal(unittest.TestCase): 26 | 27 | @classmethod 28 | def setUpClass(cls): 29 | # We can run a workload invocation script here 30 | # Which then copies the required traces for analysis to 31 | # the host. 
32 | trace_file = "update_a_trace_path_here" 33 | ftrace = trappy.FTrace(trace_file, "test_run") 34 | 35 | # Define the parameters that you intend to use in the grammar 36 | config = {} 37 | config["THERMAL"] = trappy.thermal.Thermal 38 | config["OUT"] = trappy.cpu_power.CpuOutPower 39 | config["IN"] = trappy.cpu_power.CpuInPower 40 | config["PID"] = trappy.pid_controller.PIDController 41 | config["GOVERNOR"] = trappy.thermal.ThermalGovernor 42 | config["CONTROL_TEMP"] = 77000 43 | config["SUSTAINABLE_POWER"] = 2500 44 | config["EXPECTED_TEMP_QRT"] = 95 45 | config["EXPECTED_STD_PCT"] = 5 46 | 47 | # Define a Topology 48 | cls.BIG = '000000f0' 49 | cls.LITTLE = '0000000f' 50 | cls.tz = 0 51 | cls.analyzer = Analyzer(ftrace, config) 52 | 53 | def test_temperature_quartile(self): 54 | """Assert Temperature quartile""" 55 | 56 | self.assertTrue(self.analyzer.assertStatement( 57 | "numpy.percentile(THERMAL:temp, EXPECTED_TEMP_QRT) < (CONTROL_TEMP + 5000)")) 58 | 59 | def test_average_temperature(self): 60 | """Assert Average temperature""" 61 | 62 | self.assertTrue(self.analyzer.assertStatement( 63 | "numpy.mean(THERMAL:temp) < CONTROL_TEMP", select=self.tz)) 64 | 65 | def test_temp_stdev(self): 66 | """Assert StdDev(temp) as % of mean""" 67 | 68 | self.assertTrue(self.analyzer.assertStatement( 69 | "(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\ 70 | < EXPECTED_STD_PCT", select=self.tz)) 71 | 72 | def test_zero_load_input_power(self): 73 | """Test power demand when load is zero""" 74 | 75 | zero_load_power_big = self.analyzer.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \ 76 | & (IN:dynamic_power > 0)", reference=True, select=self.BIG) 77 | self.assertEquals(len(zero_load_power_big), 0) 78 | 79 | zero_load_power_little = self.analyzer.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \ 80 | & (IN:dynamic_power > 0)", reference=True, select=self.LITTLE) 81 | self.assertEquals(len(zero_load_power_little), 0) 82 | 83 | 
def test_sustainable_power(self): 84 | """temp > control_temp, allocated_power < sustainable_power""" 85 | 86 | self.analyzer.getStatement("(GOVERNOR:current_temperature > CONTROL_TEMP) &\ 87 | (PID:output > SUSTAINABLE_POWER)", reference=True, select=0) 88 | -------------------------------------------------------------------------------- /docs/notebooks/sched/SchedDeadline.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "kernelspec": { 4 | "display_name": "Python 2", 5 | "language": "python", 6 | "name": "python2" 7 | }, 8 | "language_info": { 9 | "codemirror_mode": { 10 | "name": "ipython", 11 | "version": 2 12 | }, 13 | "file_extension": ".py", 14 | "mimetype": "text/x-python", 15 | "name": "python", 16 | "nbconvert_exporter": "python", 17 | "pygments_lexer": "ipython2", 18 | "version": "2.7.9" 19 | }, 20 | "name": "" 21 | }, 22 | "nbformat": 3, 23 | "nbformat_minor": 0, 24 | "worksheets": [ 25 | { 26 | "cells": [ 27 | { 28 | "cell_type": "heading", 29 | "level": 1, 30 | "metadata": {}, 31 | "source": [ 32 | "Setup" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "collapsed": false, 38 | "input": [ 39 | "from trappy.stats.Topology import Topology\n", 40 | "from bart.sched.SchedMultiAssert import SchedMultiAssert\n", 41 | "from bart.sched.SchedAssert import SchedAssert\n", 42 | "import trappy\n", 43 | "import os\n", 44 | "import operator\n", 45 | "import json\n", 46 | "\n", 47 | "#Define a CPU Topology (for multi-cluster systems)\n", 48 | "BIG = [1, 2]\n", 49 | "LITTLE = [0, 3, 4, 5]\n", 50 | "CLUSTERS = [BIG, LITTLE]\n", 51 | "topology = Topology(clusters=CLUSTERS)\n", 52 | "\n", 53 | "BASE_PATH = \"/Users/kapileshwarsingh/AnalysisRawData/LPC/sched_deadline/\"\n", 54 | "\n", 55 | "THRESHOLD = 10.0\n", 56 | "def between_threshold(a, b):\n", 57 | " return abs(((a - b) * 100.0) / b) < THRESHOLD" 58 | ], 59 | "language": "python", 60 | "metadata": {}, 61 | "outputs": [], 62 | "prompt_number": 3 
63 | }, 64 | { 65 | "cell_type": "heading", 66 | "level": 1, 67 | "metadata": {}, 68 | "source": [ 69 | "Periodic Yield" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "metadata": {}, 75 | "source": [ 76 | "The thread periodic_yeild is woken up at 30ms intervals where it calls sched_yield and relinquishes its time-slice.\n", 77 | "The expectation is that the task will have a duty cycle < 1% and a period of 30ms.\n", 78 | "\n", 79 | "There are two threads, and the rank=1 conveys that the condition is true for one of the threads with the name \"periodic_yeild\"\n" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "collapsed": false, 85 | "input": [ 86 | "TRACE_FILE = os.path.join(BASE_PATH, \"yield\")\n", 87 | "ftrace = trappy.FTrace(TRACE_FILE, \"cpuhog\")\n", 88 | "\n", 89 | "# Assert Period\n", 90 | "s = SchedMultiAssert(ftrace, topology, execnames=\"periodic_yield\")\n", 91 | "if s.assertPeriod(30, between_threshold, rank=1):\n", 92 | " print \"PASS: Period\"\n", 93 | " print json.dumps(s.getPeriod(), indent=3)\n", 94 | "\n", 95 | "print \"\"\n", 96 | " \n", 97 | "# Assert DutyCycle \n", 98 | "if s.assertDutyCycle(1, operator.lt, window=(0,4), rank=2):\n", 99 | " print \"PASS: DutyCycle\"\n", 100 | " print json.dumps(s.getDutyCycle(window=(0,4)), indent=3)" 101 | ], 102 | "language": "python", 103 | "metadata": {}, 104 | "outputs": [ 105 | { 106 | "output_type": "stream", 107 | "stream": "stdout", 108 | "text": [ 109 | "PASS: Period\n", 110 | "{\n", 111 | " \"1844\": {\n", 112 | " \"period\": 1.0085000000401578, \n", 113 | " \"task_name\": \"periodic_yield\"\n", 114 | " }, \n", 115 | " \"1845\": {\n", 116 | " \"period\": 29.822017857142669, \n", 117 | " \"task_name\": \"periodic_yield\"\n", 118 | " }\n", 119 | "}\n", 120 | "\n", 121 | "PASS: DutyCycle\n", 122 | "{\n", 123 | " \"1844\": {\n", 124 | " \"task_name\": \"periodic_yield\", \n", 125 | " \"dutycycle\": 0.074749999998857675\n", 126 | " }, \n", 127 | " \"1845\": {\n", 128 | " \"task_name\": 
\"periodic_yield\", \n", 129 | " \"dutycycle\": 0.03862499999343072\n", 130 | " }\n", 131 | "}\n" 132 | ] 133 | } 134 | ], 135 | "prompt_number": 10 136 | }, 137 | { 138 | "cell_type": "heading", 139 | "level": 1, 140 | "metadata": {}, 141 | "source": [ 142 | "CPU Hog" 143 | ] 144 | }, 145 | { 146 | "cell_type": "markdown", 147 | "metadata": {}, 148 | "source": [ 149 | "The reservation of a CPU hogging task is set to 10ms for every 100ms. The assertion ensures a duty cycle of 10%" 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "collapsed": false, 155 | "input": [ 156 | "TRACE_FILE = os.path.join(BASE_PATH, \"cpuhog\")\n", 157 | "ftrace = trappy.FTrace(TRACE_FILE, \"cpuhog\")\n", 158 | "s = SchedMultiAssert(ftrace, topology, execnames=\"cpuhog\")\n", 159 | "s.plot().view()\n", 160 | "\n", 161 | "# Assert DutyCycle\n", 162 | "if s.assertDutyCycle(10, between_threshold, window=(0, 5), rank=1):\n", 163 | " print \"PASS: DutyCycle\"\n", 164 | " print json.dumps(s.getDutyCycle(window=(0, 5)), indent=3)" 165 | ], 166 | "language": "python", 167 | "metadata": {}, 168 | "outputs": [ 169 | { 170 | "html": [ 171 | "\n", 317 | "
\n", 318 | " \n", 340 | "
" 341 | ], 342 | "metadata": {}, 343 | "output_type": "display_data", 344 | "text": [ 345 | "" 346 | ] 347 | }, 348 | { 349 | "output_type": "stream", 350 | "stream": "stdout", 351 | "text": [ 352 | "PASS: DutyCycle\n", 353 | "{\n", 354 | " \"1852\": {\n", 355 | " \"task_name\": \"cpuhog\", \n", 356 | " \"dutycycle\": 10.050119999991693\n", 357 | " }\n", 358 | "}\n" 359 | ] 360 | } 361 | ], 362 | "prompt_number": 11 363 | }, 364 | { 365 | "cell_type": "heading", 366 | "level": 1, 367 | "metadata": {}, 368 | "source": [ 369 | "Changing Reservations" 370 | ] 371 | }, 372 | { 373 | "cell_type": "markdown", 374 | "metadata": {}, 375 | "source": [ 376 | "A CPU hogging task has reservations set in the increasing order starting from 10% followed by a 2s period of normal execution" 377 | ] 378 | }, 379 | { 380 | "cell_type": "code", 381 | "collapsed": false, 382 | "input": [ 383 | "TRACE_FILE = os.path.join(BASE_PATH, \"cancel_dl_timer\")\n", 384 | "ftrace = trappy.FTrace(TRACE_FILE, \"cpuhog\")\n", 385 | "s = SchedAssert(ftrace, topology, execname=\"cpuhog\")\n", 386 | "s.plot().view()\n", 387 | "\n", 388 | "NUM_PHASES = 10\n", 389 | "PHASE_DURATION = 2\n", 390 | "start = s.getStartTime()\n", 391 | "DUTY_CYCLE_FACTOR = 10\n", 392 | "\n", 393 | "\n", 394 | "for phase in range(NUM_PHASES + 1):\n", 395 | " window = (start + (phase * PHASE_DURATION),\n", 396 | " start + ((phase + 1) * PHASE_DURATION))\n", 397 | " \n", 398 | " if phase % 2 == 0:\n", 399 | " DUTY_CYCLE = (phase + 2) * DUTY_CYCLE_FACTOR / 2\n", 400 | " else:\n", 401 | " DUTY_CYCLE = 100\n", 402 | "\n", 403 | "\n", 404 | " print \"WINDOW -> [{:.2f}, {:.2f}]\".format(window[0],\n", 405 | " window[1])\n", 406 | " \n", 407 | " \n", 408 | " \n", 409 | " if s.assertDutyCycle(DUTY_CYCLE, between_threshold, window=window):\n", 410 | " print \"PASS: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n", 411 | " s.getDutyCycle(window=window),\n", 412 | " THRESHOLD)\n", 413 | " else:\n", 414 | " print \"FAIL: 
Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n", 415 | " s.getDutyCycle(window=window),\n", 416 | " THRESHOLD)\n", 417 | " \n", 418 | " print \"\"" 419 | ], 420 | "language": "python", 421 | "metadata": {}, 422 | "outputs": [ 423 | { 424 | "html": [ 425 | "\n", 571 | "
\n", 572 | " \n", 594 | "
" 595 | ], 596 | "metadata": {}, 597 | "output_type": "display_data", 598 | "text": [ 599 | "" 600 | ] 601 | }, 602 | { 603 | "output_type": "stream", 604 | "stream": "stdout", 605 | "text": [ 606 | "WINDOW -> [0.00, 2.00]\n", 607 | "PASS: Expected=10 Actual=10.38 THRESHOLD=10.0\n", 608 | "\n", 609 | "WINDOW -> [2.00, 4.00]\n", 610 | "PASS: Expected=100 Actual=99.60 THRESHOLD=10.0\n", 611 | "\n", 612 | "WINDOW -> [4.00, 6.00]\n", 613 | "PASS: Expected=20 Actual=21.06 THRESHOLD=10.0\n", 614 | "\n", 615 | "WINDOW -> [6.00, 8.00]\n", 616 | "PASS: Expected=100 Actual=95.69 THRESHOLD=10.0\n", 617 | "\n", 618 | "WINDOW -> [8.00, 10.00]\n", 619 | "PASS: Expected=30 Actual=31.78 THRESHOLD=10.0\n", 620 | "\n", 621 | "WINDOW -> [10.00, 12.00]\n", 622 | "PASS: Expected=100 Actual=98.23 THRESHOLD=10.0\n", 623 | "\n", 624 | "WINDOW -> [12.00, 14.00]\n", 625 | "PASS: Expected=40 Actual=40.74 THRESHOLD=10.0\n", 626 | "\n", 627 | "WINDOW -> [14.00, 16.00]\n", 628 | "PASS: Expected=100 Actual=97.58 THRESHOLD=10.0\n", 629 | "\n", 630 | "WINDOW -> [16.00, 18.00]\n", 631 | "PASS: Expected=50 Actual=52.51 THRESHOLD=10.0\n", 632 | "\n", 633 | "WINDOW -> [18.00, 20.00]\n", 634 | "PASS: Expected=100 Actual=96.38 THRESHOLD=10.0\n", 635 | "\n", 636 | "WINDOW -> [20.00, 22.00]\n", 637 | "PASS: Expected=60 Actual=60.71 THRESHOLD=10.0\n", 638 | "\n" 639 | ] 640 | } 641 | ], 642 | "prompt_number": 4 643 | } 644 | ], 645 | "metadata": {} 646 | } 647 | ] 648 | } -------------------------------------------------------------------------------- /docs/notebooks/thermal/Thermal.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "kernelspec": { 4 | "display_name": "Python 2", 5 | "language": "python", 6 | "name": "python2" 7 | }, 8 | "language_info": { 9 | "codemirror_mode": { 10 | "name": "ipython", 11 | "version": 2 12 | }, 13 | "file_extension": ".py", 14 | "mimetype": "text/x-python", 15 | "name": "python", 16 | "nbconvert_exporter": 
"python", 17 | "pygments_lexer": "ipython2", 18 | "version": "2.7.6" 19 | }, 20 | "name": "", 21 | "signature": "sha256:59ef0b9fe2847e77f9df55deeb6df1f94f4fe2a3a0f99e13cba99854e8bf66ed" 22 | }, 23 | "nbformat": 3, 24 | "nbformat_minor": 0, 25 | "worksheets": [ 26 | { 27 | "cells": [ 28 | { 29 | "cell_type": "heading", 30 | "level": 1, 31 | "metadata": {}, 32 | "source": [ 33 | "Configuration" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "collapsed": false, 39 | "input": [ 40 | "import trappy\n", 41 | "import numpy\n", 42 | "\n", 43 | "config = {}\n", 44 | "\n", 45 | "# TRAPpy Events\n", 46 | "config[\"THERMAL\"] = trappy.thermal.Thermal\n", 47 | "config[\"OUT\"] = trappy.cpu_power.CpuOutPower\n", 48 | "config[\"IN\"] = trappy.cpu_power.CpuInPower\n", 49 | "config[\"PID\"] = trappy.pid_controller.PIDController\n", 50 | "config[\"GOVERNOR\"] = trappy.thermal.ThermalGovernor\n", 51 | "\n", 52 | "# Control Temperature\n", 53 | "config[\"CONTROL_TEMP\"] = 77000\n", 54 | "\n", 55 | "# A temperature margin of 2.5 degrees Celsius\n", 56 | "config[\"TEMP_MARGIN\"] = 2500\n", 57 | "\n", 58 | "# The Sustainable power at the control Temperature\n", 59 | "config[\"SUSTAINABLE_POWER\"] = 2500\n", 60 | "\n", 61 | "# Expected percentile of CONTROL_TEMP + TEMP_MARGIN\n", 62 | "config[\"EXPECTED_TEMP_QRT\"] = 95\n", 63 | "\n", 64 | "# Maximum expected Standard Deviation as a percentage\n", 65 | "# of mean temperature\n", 66 | "config[\"EXPECTED_STD_PCT\"] = 5\n" 67 | ], 68 | "language": "python", 69 | "metadata": {}, 70 | "outputs": [], 71 | "prompt_number": 1 72 | }, 73 | { 74 | "cell_type": "heading", 75 | "level": 1, 76 | "metadata": {}, 77 | "source": [ 78 | "Get the Trace" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "collapsed": false, 84 | "input": [ 85 | "import urllib\n", 86 | "import os\n", 87 | "\n", 88 | "TRACE_DIR = \"example_trace_dat_thermal\"\n", 89 | "TRACE_FILE = os.path.join(TRACE_DIR, 'bart_thermal_trace.dat')\n", 90 | "TRACE_URL = 
'http://cdn.rawgit.com/sinkap/4e0a69cbff732b57e36f/raw/7dd0ed74bfc17a34a3bd5ea6b9eb3a75a42ddbae/bart_thermal_trace.dat'\n", 91 | "\n", 92 | "if not os.path.isdir(TRACE_DIR):\n", 93 | " os.mkdir(TRACE_DIR)\n", 94 | "\n", 95 | "if not os.path.isfile(TRACE_FILE):\n", 96 | " print \"Fetching trace file..\"\n", 97 | " urllib.urlretrieve(TRACE_URL, filename=TRACE_FILE)" 98 | ], 99 | "language": "python", 100 | "metadata": {}, 101 | "outputs": [], 102 | "prompt_number": 2 103 | }, 104 | { 105 | "cell_type": "heading", 106 | "level": 1, 107 | "metadata": {}, 108 | "source": [ 109 | "FTrace Object" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "collapsed": false, 115 | "input": [ 116 | "# Create a Trace object\n", 117 | "\n", 118 | "ftrace = trappy.FTrace(TRACE_FILE, \"SomeBenchMark\")" 119 | ], 120 | "language": "python", 121 | "metadata": {}, 122 | "outputs": [], 123 | "prompt_number": 3 124 | }, 125 | { 126 | "cell_type": "heading", 127 | "level": 1, 128 | "metadata": {}, 129 | "source": [ 130 | "Assertions" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "collapsed": false, 136 | "input": [ 137 | "# Create an Assertion Object\n", 138 | "\n", 139 | "from bart.common.Analyzer import Analyzer\n", 140 | "t = Analyzer(ftrace, config)\n", 141 | "\n", 142 | "BIG = '000000f0'\n", 143 | "LITTLE = '0000000f'" 144 | ], 145 | "language": "python", 146 | "metadata": {}, 147 | "outputs": [], 148 | "prompt_number": 4 149 | }, 150 | { 151 | "cell_type": "heading", 152 | "level": 2, 153 | "metadata": {}, 154 | "source": [ 155 | "Assertion: Load and Dynamic Power" 156 | ] 157 | }, 158 | { 159 | "cell_type": "markdown", 160 | "metadata": {}, 161 | "source": [ 162 | "\n", 163 | "This assertion makes sure that the dynamic power for the each cluster is zero when the sum of the \"loads\" of each CPU is 0\n", 164 | "\n", 165 | " $$\\forall\\ t\\ |\\ Load(t) = \\sum\\limits_{i=0}^{cpus} Load_i(t) = 0 \\implies dynamic\\ power(t)=0 $$\n", 166 | " \n", 167 | "" 168 | ] 169 | 
}, 170 | { 171 | "cell_type": "code", 172 | "collapsed": false, 173 | "input": [ 174 | "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", 175 | " & (IN:dynamic_power > 0)\",reference=True, select=BIG)\n", 176 | "if len(result):\n", 177 | " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the BIG cluster\"\n", 178 | "else:\n", 179 | " print \"PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\"\n", 180 | "\n", 181 | " \n", 182 | "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", 183 | " & (IN:dynamic_power > 0)\",reference=True, select=LITTLE)\n", 184 | "if len(result):\n", 185 | " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the LITTLE cluster\"\n", 186 | "else:\n", 187 | " print \"PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\"" 188 | ], 189 | "language": "python", 190 | "metadata": {}, 191 | "outputs": [ 192 | { 193 | "output_type": "stream", 194 | "stream": "stdout", 195 | "text": [ 196 | "PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\n", 197 | "PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\n" 198 | ] 199 | } 200 | ], 201 | "prompt_number": 5 202 | }, 203 | { 204 | "cell_type": "heading", 205 | "level": 2, 206 | "metadata": {}, 207 | "source": [ 208 | "Assertion: Control Temperature and Sustainable Power" 209 | ] 210 | }, 211 | { 212 | "cell_type": "markdown", 213 | "metadata": {}, 214 | "source": [ 215 | "\n", 216 | "\n", 217 | "When the temperature is greater than the control temperature, the total power granted to all cooling devices should be less than sustainable_power\n", 218 | "\n", 219 | "$$\\forall\\ t\\ |\\ Temperature(t) > control\\_temp \\implies Total\\ Granted\\ Power(t) < sustainable\\_power$$\n", 220 | "\n", 221 | "" 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "collapsed": false, 227 | "input": [ 228 | "result = 
t.getStatement(\"(GOVERNOR:current_temperature > CONTROL_TEMP) &\\\n", 229 | " (PID:output > SUSTAINABLE_POWER)\", reference=True, select=0)\n", 230 | "\n", 231 | "if len(result):\n", 232 | " print \"FAIL: The Governor is allocating power > sustainable when T > CONTROL_TEMP\"\n", 233 | "else:\n", 234 | " print \"PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\" " 235 | ], 236 | "language": "python", 237 | "metadata": {}, 238 | "outputs": [ 239 | { 240 | "output_type": "stream", 241 | "stream": "stdout", 242 | "text": [ 243 | "PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\n" 244 | ] 245 | } 246 | ], 247 | "prompt_number": 6 248 | }, 249 | { 250 | "cell_type": "heading", 251 | "level": 1, 252 | "metadata": {}, 253 | "source": [ 254 | "Statistics" 255 | ] 256 | }, 257 | { 258 | "cell_type": "markdown", 259 | "metadata": {}, 260 | "source": [ 261 | "Check if 95% of the temperature readings are below CONTROL_TEMP + MARGIN" 262 | ] 263 | }, 264 | { 265 | "cell_type": "code", 266 | "collapsed": false, 267 | "input": [ 268 | "t.assertStatement(\"numpy.percentile(THERMAL:temp, 95) < (CONTROL_TEMP + TEMP_MARGIN)\")" 269 | ], 270 | "language": "python", 271 | "metadata": {}, 272 | "outputs": [ 273 | { 274 | "metadata": {}, 275 | "output_type": "pyout", 276 | "prompt_number": 7, 277 | "text": [ 278 | "True" 279 | ] 280 | } 281 | ], 282 | "prompt_number": 7 283 | }, 284 | { 285 | "cell_type": "markdown", 286 | "metadata": {}, 287 | "source": [ 288 | "Check if the mean temperauture is less than CONTROL_TEMP" 289 | ] 290 | }, 291 | { 292 | "cell_type": "code", 293 | "collapsed": false, 294 | "input": [ 295 | "t.assertStatement(\"numpy.mean(THERMAL:temp) <= CONTROL_TEMP\", select=0)" 296 | ], 297 | "language": "python", 298 | "metadata": {}, 299 | "outputs": [ 300 | { 301 | "metadata": {}, 302 | "output_type": "pyout", 303 | "prompt_number": 8, 304 | "text": [ 305 | "True" 306 | ] 307 | } 308 | ], 309 | "prompt_number": 8 
310 | }, 311 | { 312 | "cell_type": "markdown", 313 | "metadata": {}, 314 | "source": [ 315 | "We can also use getStatement to get the absolute values. Here we are getting the standard deviation expressed as a percentage of the mean" 316 | ] 317 | }, 318 | { 319 | "cell_type": "code", 320 | "collapsed": false, 321 | "input": [ 322 | "t.getStatement(\"(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\", select=0)" 323 | ], 324 | "language": "python", 325 | "metadata": {}, 326 | "outputs": [ 327 | { 328 | "metadata": {}, 329 | "output_type": "pyout", 330 | "prompt_number": 9, 331 | "text": [ 332 | "2.2390646863105119" 333 | ] 334 | } 335 | ], 336 | "prompt_number": 9 337 | }, 338 | { 339 | "cell_type": "heading", 340 | "level": 1, 341 | "metadata": {}, 342 | "source": [ 343 | "Thermal Residency" 344 | ] 345 | }, 346 | { 347 | "cell_type": "code", 348 | "collapsed": false, 349 | "input": [ 350 | "from bart.thermal.ThermalAssert import ThermalAssert\n", 351 | "\n", 352 | "t_assert = ThermalAssert(ftrace)\n", 353 | "end = ftrace.get_duration()\n", 354 | "\n", 355 | "LOW = 0\n", 356 | "HIGH = 78000\n", 357 | "\n", 358 | "# The thermal residency gives the percentage (or absolute time) spent in the\n", 359 | "# specified temperature range. 
\n", 360 | "\n", 361 | "result = t_assert.getThermalResidency(temp_range=(0, 78000),\n", 362 | " window=(0, end),\n", 363 | " percent=True)\n", 364 | "\n", 365 | "for tz_id in result:\n", 366 | " print \"Thermal Zone: {} spends {:.2f}% time in the temperature range [{}, {}]\".format(tz_id, \n", 367 | " result[tz_id],\n", 368 | " LOW/1000,\n", 369 | " HIGH/1000)\n", 370 | " pct_temp = numpy.percentile(t.getStatement(\"THERMAL:temp\")[tz_id], result[tz_id])\n", 371 | " \n", 372 | " print \"The {:.2f}th percentile temperature is {:.2f}\".format(result[tz_id], pct_temp / 1000.0)\n", 373 | " " 374 | ], 375 | "language": "python", 376 | "metadata": {}, 377 | "outputs": [ 378 | { 379 | "output_type": "stream", 380 | "stream": "stdout", 381 | "text": [ 382 | "Thermal Zone: 0 spends 86.58% time in the temperature range [0, 78]\n", 383 | "The 86.58th percentile temperature is 78.28\n" 384 | ] 385 | } 386 | ], 387 | "prompt_number": 10 388 | } 389 | ], 390 | "metadata": {} 391 | } 392 | ] 393 | } -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | TRAPpy>=3.0 2 | hypothesis>=3.0 3 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [upload_sphinx] 2 | upload-dir = docs/api_reference/_build/html 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2015-2016 ARM Limited 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | from setuptools import setup, find_packages 17 | 18 | 19 | execfile("bart/version.py") 20 | 21 | LONG_DESCRIPTION = """Behavioural Analysis involves the expressing the general 22 | expectation of the state of the system while targeting a single or set of heuristics. 23 | This is particularly helpful when there are large number of factors that can change 24 | the behaviour of the system and testing all permutations of these input parameters 25 | is impossible. In such a scenario an assertion of the final expectation can be 26 | useful in managing performance and regression. 27 | 28 | The Behavioural Analysis and Regression Toolkit is based on TRAPpy. 
The primary goal is 29 | to assert behaviours using the FTrace output from the kernel 30 | """ 31 | 32 | REQUIRES = [ 33 | "TRAPpy>=3.0", 34 | "hypothesis>=3.0", 35 | ] 36 | 37 | setup(name='bart-py', 38 | version=__version__, 39 | license="Apache v2", 40 | author="ARM-BART", 41 | author_email="bart@arm.com", 42 | description="Behavioural Analysis and Regression Toolkit", 43 | long_description=LONG_DESCRIPTION, 44 | url="http://arm-software.github.io/bart", 45 | packages=find_packages(), 46 | include_package_data=True, 47 | classifiers=[ 48 | "Development Status :: 5 - Production/Stable", 49 | "Environment :: Web Environment", 50 | "Environment :: Console", 51 | "License :: OSI Approved :: Apache Software License", 52 | "Operating System :: POSIX :: Linux", 53 | "Programming Language :: Python :: 2.7", 54 | # As we depend on trace data from the Linux Kernel/FTrace 55 | "Topic :: System :: Operating System Kernels :: Linux", 56 | "Topic :: Scientific/Engineering :: Visualization" 57 | ], 58 | install_requires=REQUIRES 59 | ) 60 | -------------------------------------------------------------------------------- /tests/pelt.py: -------------------------------------------------------------------------------- 1 | from hypothesis import given 2 | from hypothesis.strategies import integers, tuples, none, one_of 3 | import unittest 4 | from sys import maxint 5 | 6 | from bart.sched.pelt import * 7 | 8 | # Required to use `int` not `long` henx ma=maxint 9 | nonneg_ints = lambda mi=0, ma=maxint: integers(min_value=mi, max_value=ma) 10 | 11 | # Generate a Simulator 12 | simulator_args = lambda: tuples( 13 | nonneg_ints(0, 1024), # init_value 14 | nonneg_ints(1, 256), # half_life_ms 15 | one_of(nonneg_ints(), none()) # decay_cap_ms 16 | ) 17 | 18 | 19 | # Generate args for PeriodicTask::__init__ args using run_samples 20 | periodic_task_args_samples = lambda: tuples( 21 | nonneg_ints(1), # period_samples 22 | nonneg_ints(), # start_sample 23 | nonneg_ints(), # run_samples 24 | 
none(), # duty_cycle_pct 25 | ).filter(lambda (period, _, run, __): run <= period) 26 | 27 | # Generate args for PeriodicTask::__init__ args using duty_cycle_pct 28 | periodic_task_args_pct = lambda: tuples( 29 | nonneg_ints(1), # period_samples 30 | nonneg_ints(), # start_sample 31 | none(), # run_samples 32 | integers(0, 100), # duty_cycle_pct 33 | ) 34 | 35 | # Generate a PeriodicTask using args from one of the above two strategies 36 | periodic_task_args = lambda: one_of(periodic_task_args_samples(), 37 | periodic_task_args_pct()) 38 | 39 | # Generate a tuple of ordered integers less than 20 40 | signal_range = lambda: tuples(nonneg_ints(0, 200), 41 | nonneg_ints(0, 200)).map(sorted) 42 | 43 | class TestSimulator(unittest.TestCase): 44 | @given(periodic_task_args(), simulator_args()) 45 | def test_stable_range_range(self, task_args, sim_args): 46 | """Test that the stable range's max_value is within expected bounds""" 47 | task = PeriodicTask(*task_args) 48 | sim = Simulator(*sim_args) 49 | 50 | stable_range = sim.stableRange(task) 51 | self.assertLessEqual(stable_range.max_value, sim._signal_max) 52 | self.assertGreaterEqual(stable_range.min_value, 0) 53 | 54 | @given(periodic_task_args(), simulator_args()) 55 | def test_signal_within_range(self, task_args, sim_args): 56 | """Test that the simulated signal falls within the expected bounds""" 57 | task = PeriodicTask(*task_args) 58 | sim = Simulator(*sim_args) 59 | 60 | signal = sim.getSignal(task) 61 | signal_max = signal.max()['pelt_value'] 62 | signal_min = signal.min()['pelt_value'] 63 | self.assertLessEqual(signal_max, sim._signal_max) 64 | self.assertGreaterEqual(signal_min, 0) 65 | 66 | @given(periodic_task_args(), simulator_args(), signal_range()) 67 | def test_signal_time_range(self, task_args, sim_args, signal_range): 68 | """Test that the result of getSignal covers the requested range""" 69 | task = PeriodicTask(*task_args) 70 | sim = Simulator(*sim_args) 71 | start_s, end_s = signal_range 72 | 73 | 
signal = sim.getSignal(task, start_s, end_s) 74 | 75 | # Should start no earlier than 1 sample before start_s 76 | earliest_start = min(0, start_s - (sim._sample_us / 1.e6)) 77 | self.assertGreaterEqual(signal.index[0], earliest_start) 78 | # Should start no later than start_s 79 | self.assertLessEqual(signal.index[0], start_s) 80 | 81 | # Should start no earlier than end_s 82 | self.assertGreaterEqual(signal.index[-1], end_s) 83 | # Should end no later than 1 sample after end_s 84 | latest_start = end_s + (sim._sample_us / 1.e6) 85 | self.assertLessEqual(signal.index[-1], latest_start) 86 | 87 | if __name__ == "__main__": 88 | unittest.main() 89 | -------------------------------------------------------------------------------- /tests/raw_trace.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ARM-software/bart/d70f8126ba3f6b3ecd0832542771495740a8c593/tests/raw_trace.dat -------------------------------------------------------------------------------- /tests/test_common_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # 15 | 16 | from bart.common import Utils 17 | from bart.common.Analyzer import Analyzer 18 | import unittest 19 | import pandas as pd 20 | import trappy 21 | 22 | 23 | class TestCommonUtils(unittest.TestCase): 24 | 25 | def __init__(self, *args, **kwargs): 26 | super(TestCommonUtils, self).__init__(*args, **kwargs) 27 | 28 | def test_interval_sum(self): 29 | """Test Utils Function: interval_sum""" 30 | 31 | # A series with a non uniform index 32 | # Refer to the example illustrations in the 33 | # the interval sum docs-strings which explains 34 | # the difference between step-post and ste-pre 35 | # calculations 36 | values = [0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1] 37 | index = [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12] 38 | series = pd.Series(values, index=index) 39 | 40 | self.assertEqual(Utils.interval_sum(series, 1, step="post"), 8) 41 | self.assertEqual(Utils.interval_sum(series, 1, step="pre"), 7) 42 | 43 | # check left boundary 44 | array = [1, 1, 0, 0] 45 | series = pd.Series(array) 46 | 47 | self.assertEqual(Utils.interval_sum(series, 1, step="post"), 2) 48 | self.assertEqual(Utils.interval_sum(series, 1, step="pre"), 1) 49 | 50 | # check right boundary 51 | array = [0, 0, 1, 1] 52 | series = pd.Series(array) 53 | 54 | self.assertEqual(Utils.interval_sum(series, 1, step="post"), 1) 55 | self.assertEqual(Utils.interval_sum(series, 1, step="pre"), 2) 56 | 57 | array = [False, False, True, True, True, True, False, False] 58 | series = pd.Series(array) 59 | self.assertEqual(Utils.interval_sum(series), 4) 60 | 61 | def test_area_under_curve(self): 62 | """Test Utils function: area_under_curve""" 63 | 64 | array = [0, 0, 2, 2, 2, 1, 1, 1] 65 | series = pd.Series(array) 66 | 67 | # Area under curve post stepping 68 | self.assertEqual( 69 | Utils.area_under_curve( 70 | series, 71 | method="rect", 72 | step="post"), 73 | 8) 74 | 75 | # Area under curve pre stepping 76 | self.assertEqual( 77 | Utils.area_under_curve( 78 | series, 79 | method="rect", 80 | step="pre"), 81 | 
9) 82 | 83 | array = [1] 84 | series = pd.Series(array) 85 | 86 | # Area under curve post stepping, edge case 87 | self.assertEqual( 88 | Utils.area_under_curve( 89 | series, 90 | method="rect", 91 | step="post"), 92 | 0) 93 | 94 | # Area under curve pre stepping, edge case 95 | self.assertEqual( 96 | Utils.area_under_curve( 97 | series, 98 | method="rect", 99 | step="pre"), 100 | 0) 101 | 102 | 103 | class TestAnalyzer(unittest.TestCase): 104 | 105 | def test_assert_statement_bool(self): 106 | """Check that asssertStatement() works with a simple boolean case""" 107 | 108 | rolls_dfr = pd.DataFrame({"results": [1, 3, 2, 6, 2, 4]}) 109 | trace = trappy.BareTrace() 110 | trace.add_parsed_event("dice_rolls", rolls_dfr) 111 | config = {"MAX_DICE_NUMBER": 6} 112 | 113 | t = Analyzer(trace, config) 114 | statement = "numpy.max(dice_rolls:results) <= MAX_DICE_NUMBER" 115 | self.assertTrue(t.assertStatement(statement, select=0)) 116 | 117 | def test_assert_statement_dataframe(self): 118 | """assertStatement() works if the generated statement creates a pandas.DataFrame of bools""" 119 | 120 | rolls_dfr = pd.DataFrame({"results": [1, 3, 2, 6, 2, 4]}) 121 | trace = trappy.BareTrace() 122 | trace.add_parsed_event("dice_rolls", rolls_dfr) 123 | config = {"MIN_DICE_NUMBER": 1, "MAX_DICE_NUMBER": 6} 124 | t = Analyzer(trace, config) 125 | 126 | statement = "(dice_rolls:results <= MAX_DICE_NUMBER) & (dice_rolls:results >= MIN_DICE_NUMBER)" 127 | self.assertTrue(t.assertStatement(statement)) 128 | 129 | statement = "dice_rolls:results == 3" 130 | self.assertFalse(t.assertStatement(statement)) 131 | -------------------------------------------------------------------------------- /tests/test_pelt_sim.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | 16 | from bart.sched.pelt import * 17 | from hypothesis import given 18 | from hypothesis.strategies import integers, tuples, none, one_of 19 | from sys import maxint 20 | from utils_tests import TestBART 21 | 22 | # Required to use `int` not `long` henx ma=maxint 23 | nonneg_ints = lambda mi=0, ma=maxint: integers(min_value=mi, max_value=ma) 24 | 25 | # Generate a Simulator 26 | simulator_args = lambda: tuples( 27 | nonneg_ints(0, 1024), # init_value 28 | nonneg_ints(1, 256), # half_life_ms 29 | one_of(nonneg_ints(), none()) # decay_cap_ms 30 | ) 31 | 32 | 33 | # Generate args for PeriodicTask::__init__ args using run_samples 34 | periodic_task_args_samples = lambda: tuples( 35 | nonneg_ints(1), # period_samples 36 | nonneg_ints(), # start_sample 37 | nonneg_ints(), # run_samples 38 | none(), # duty_cycle_pct 39 | ).filter(lambda (period, _, run, __): run <= period) 40 | 41 | # Generate args for PeriodicTask::__init__ args using duty_cycle_pct 42 | periodic_task_args_pct = lambda: tuples( 43 | nonneg_ints(1), # period_samples 44 | nonneg_ints(), # start_sample 45 | none(), # run_samples 46 | integers(0, 100), # duty_cycle_pct 47 | ) 48 | 49 | # Generate a PeriodicTask using args from one of the above two strategies 50 | periodic_task_args = lambda: one_of(periodic_task_args_samples(), 51 | periodic_task_args_pct()) 52 | 53 | # Generate a tuple of ordered integers less than 200 54 | signal_range = lambda: tuples(nonneg_ints(0, 200), 55 | nonneg_ints(0, 200)).filter(lambda t: t[0] < t[1]) 56 | 57 | class 
TestSimulator(TestBART): 58 | 59 | def __init__(self, *args, **kwargs): 60 | super(TestSimulator, self).__init__(*args, **kwargs) 61 | 62 | @given(periodic_task_args(), simulator_args()) 63 | def test_stable_range_range(self, task_args, sim_args): 64 | """Test that the stable range's max_value is within expected bounds""" 65 | task = PeriodicTask(*task_args) 66 | sim = Simulator(*sim_args) 67 | 68 | signal = sim.getSignal(task) 69 | stable_range = sim.stableRange(task) 70 | self.assertLessEqual(stable_range.max_value, sim._signal_max) 71 | self.assertGreaterEqual(stable_range.min_value, 0) 72 | 73 | @given(periodic_task_args(), simulator_args()) 74 | def test_signal_within_range(self, task_args, sim_args): 75 | """Test that the simulated signal falls within the expected bounds""" 76 | task = PeriodicTask(*task_args) 77 | sim = Simulator(*sim_args) 78 | 79 | signal = sim.getSignal(task) 80 | signal_max = signal.max()['pelt_value'] 81 | signal_min = signal.min()['pelt_value'] 82 | self.assertLessEqual(signal_max, sim._signal_max) 83 | self.assertGreaterEqual(signal_min, 0) 84 | 85 | @given(periodic_task_args(), simulator_args(), signal_range()) 86 | def test_signal_time_range(self, task_args, sim_args, signal_range): 87 | """Test that the result of getSignal covers the requested range""" 88 | task = PeriodicTask(*task_args) 89 | sim = Simulator(*sim_args) 90 | start_s, end_s = signal_range 91 | 92 | signal = sim.getSignal(task, start_s, end_s) 93 | 94 | # Should start no earlier than 1 sample before start_s 95 | earliest_start = min(0, start_s - (sim._sample_us / 1.e6)) 96 | self.assertGreaterEqual(signal.index[0], earliest_start) 97 | # Should start no later than start_s 98 | self.assertLessEqual(signal.index[0], start_s) 99 | 100 | # Should start no earlier than end_s 101 | self.assertGreaterEqual(signal.index[-1], end_s) 102 | # Should end no later than 1 sample after end_s 103 | latest_start = end_s + (sim._sample_us / 1.e6) 104 | 
self.assertLessEqual(signal.index[-1], latest_start) 105 | 106 | @given(periodic_task_args(), simulator_args()) 107 | def test_signal_mean_value(self, task_args, sim_args): 108 | """Test that the mean value of the signal corresponds to the duty cycle 109 | percentage of the maximum capacity (1024).""" 110 | task = PeriodicTask(*task_args) 111 | sim = Simulator(*sim_args) 112 | 113 | signal = sim.getSignal(task) 114 | stats = sim.getStats() 115 | 116 | expected_mean = (task.duty_cycle_pct * 1024) / 100 117 | 118 | self.assertEqual(stats.pelt_avg, expected_mean) 119 | 120 | -------------------------------------------------------------------------------- /tests/test_sched_assert.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # 15 | 16 | 17 | from bart.sched.SchedAssert import SchedAssert 18 | from bart.sched.SchedMultiAssert import SchedMultiAssert 19 | import trappy 20 | from trappy.stats.Topology import Topology 21 | import unittest 22 | 23 | import utils_tests 24 | 25 | 26 | @unittest.skipUnless(utils_tests.trace_cmd_installed(), 27 | "trace-cmd not installed") 28 | class TestSchedAssert(utils_tests.SetupDirectory): 29 | 30 | def __init__(self, *args, **kwargs): 31 | 32 | self.BIG = [1,2] 33 | self.LITTLE = [0, 3, 4, 5] 34 | self.clusters = [self.BIG, self.LITTLE] 35 | self.topology = Topology(clusters=self.clusters) 36 | super(TestSchedAssert, self).__init__( 37 | [("raw_trace.dat", "trace.dat")], 38 | *args, 39 | **kwargs) 40 | 41 | def test_get_runtime(self): 42 | 43 | r = trappy.FTrace() 44 | # The ls process is process we are 45 | # testing against with pre calculated 46 | # values 47 | process = "ls" 48 | 49 | # Complete duration 50 | expected_time = 0.0034740000264719129 51 | s = SchedAssert(r, self.topology, execname=process) 52 | self.assertAlmostEqual(s.getRuntime(), expected_time, places=9) 53 | self.assertAlmostEqual(s.getRuntime(), expected_time, places=9) 54 | 55 | # Non Interrupted Window 56 | window = (0.0034, 0.003525) 57 | expected_time = 0.000125 58 | self.assertAlmostEqual(s.getRuntime(window=window), expected_time, 59 | places=9) 60 | 61 | # Interrupted Window 62 | window = (0.0030, 0.0032) 63 | expected_time = 0.000166 64 | self.assertAlmostEqual(s.getRuntime(window=window), expected_time, 65 | places=9) 66 | 67 | # A window with multiple interruptions 68 | window = (0.0027, 0.0036) 69 | expected_time = 0.000817 70 | self.assertAlmostEqual(s.getRuntime(window=window), expected_time, 71 | places=9) 72 | 73 | def test_get_last_cpu(self): 74 | """SchedAssert.getLastCpu() gives you the last cpu in which a task ran""" 75 | expected_last_cpu = 5 76 | 77 | sa = SchedAssert("trace.dat", self.topology, execname="ls") 78 | self.assertEqual(sa.getLastCpu(), 
expected_last_cpu) 79 | 80 | class TestSchedMultiAssert(utils_tests.SetupDirectory): 81 | def __init__(self, *args, **kwargs): 82 | self.big = [1,2] 83 | self.little = [0, 3, 4, 5] 84 | self.clusters = [self.big, self.little] 85 | self.all_cpus = sorted(self.big + self.little) 86 | self.topology = Topology(clusters=self.clusters) 87 | super(TestSchedMultiAssert, self).__init__( 88 | [("raw_trace.dat", "trace.dat")], 89 | *args, 90 | **kwargs) 91 | 92 | def test_cpu_busy_time(self): 93 | """SchedMultiAssert.getCPUBusyTime() work""" 94 | 95 | # precalculated values against these processes in the trace 96 | pids = [4729, 4734] 97 | first_time = .000214 98 | last_time = .003171 99 | 100 | tr = trappy.FTrace() 101 | sma = SchedMultiAssert(tr, self.topology, pids=pids) 102 | 103 | expected_busy_time = 0.0041839999754810708 104 | busy_time = sma.getCPUBusyTime("all", self.all_cpus, window=(first_time, last_time)) 105 | self.assertAlmostEqual(busy_time, expected_busy_time) 106 | 107 | # percent calculation 108 | expected_busy_pct = 23.582459561949445 109 | busy_pct= sma.getCPUBusyTime("all", self.all_cpus, percent=True, 110 | window=(first_time, last_time)) 111 | self.assertAlmostEqual(busy_pct, expected_busy_pct) 112 | 113 | # percent without a window 114 | expected_busy_pct = 23.018818156540004 115 | busy_pct= sma.getCPUBusyTime("cluster", self.little, percent=True) 116 | self.assertAlmostEqual(busy_pct, expected_busy_pct) 117 | -------------------------------------------------------------------------------- /tests/test_sched_functions.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | 16 | import trappy 17 | 18 | import utils_tests 19 | 20 | class TestSchedFunctions(utils_tests.SetupDirectory): 21 | def __init__(self, *args, **kwargs): 22 | super(TestSchedFunctions, self).__init__([], *args, **kwargs) 23 | 24 | def test_get_pids_for_processes_no_sched_switch(self): 25 | """get_pids_for_processes() raises an exception if the trace doesn't have a sched_switch event""" 26 | from bart.sched.functions import get_pids_for_process 27 | 28 | trace_file = "trace.txt" 29 | raw_trace_file = "trace.raw.txt" 30 | 31 | with open(trace_file, "w") as fout: 32 | fout.write("") 33 | 34 | with open(raw_trace_file, "w") as fout: 35 | fout.write("") 36 | 37 | trace = trappy.FTrace(trace_file) 38 | with self.assertRaises(ValueError): 39 | get_pids_for_process(trace, "foo") 40 | 41 | def test_get_pids_for_process_funny_process_names(self): 42 | """get_pids_for_process() works when a process name is a substring of another""" 43 | from bart.sched.functions import get_pids_for_process 44 | 45 | trace_file = "trace.txt" 46 | raw_trace_file = "trace.raw.txt" 47 | in_data = """ -0 [001] 10826.894644: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=0 next_comm=rt-app next_pid=3268 next_prio=120 48 | wmig-3268 [001] 10826.894778: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=rt-app next_pid=3269 next_prio=120 49 | wmig1-3269 [001] 10826.905152: sched_switch: prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=1 next_comm=wmig next_pid=3268 next_prio=120 50 | wmig-3268 
[001] 10826.915384: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=swapper/1 next_pid=0 next_prio=120 51 | -0 [005] 10826.995169: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=0 next_comm=wmig1 next_pid=3269 next_prio=120 52 | wmig1-3269 [005] 10827.007064: sched_switch: prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=0 next_comm=wmig next_pid=3268 next_prio=120 53 | wmig-3268 [005] 10827.019061: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=0 next_comm=wmig1 next_pid=3269 next_prio=120 54 | wmig1-3269 [005] 10827.031061: sched_switch: prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=0 next_comm=wmig next_pid=3268 next_prio=120 55 | wmig-3268 [005] 10827.050645: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=swapper/5 next_pid=0 next_prio=120 56 | """ 57 | 58 | # We create an empty trace.txt to please trappy ... 59 | with open(trace_file, "w") as fout: 60 | fout.write("") 61 | 62 | # ... but we only put the sched_switch events in the raw trace 63 | # file because that's where trappy is going to look for 64 | with open(raw_trace_file, "w") as fout: 65 | fout.write(in_data) 66 | 67 | trace = trappy.FTrace(trace_file) 68 | 69 | self.assertEquals(get_pids_for_process(trace, "wmig"), [3268]) 70 | -------------------------------------------------------------------------------- /tests/test_signal.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import pandas as pd
import trappy
from utils_tests import TestBART
from bart.common.signal import SignalCompare
import numpy as np


class TestSignalCompare(TestBART):
    """Tests for bart.common.signal.SignalCompare against hand-computed values."""

    def __init__(self, *args, **kwargs):
        super(TestSignalCompare, self).__init__(*args, **kwargs)

    def test_conditional_compare(self):
        """Test conditional_compare"""

        # Refer to the example in
        # bart.common.signal.SignalCompare.conditional_compare
        # doc-strings which explains the calculation for the
        # data set below
        A = [0, 0, 0, 3, 3, 0, 0, 0]
        B = [0, 0, 2, 2, 2, 2, 1, 1]

        trace = trappy.BareTrace()
        df = pd.DataFrame({"A": A, "B": B})
        trace.add_parsed_event("event", df)

        s = SignalCompare(trace, "event:A", "event:B")
        expected = (1.5, 2.0 / 7)
        self.assertEqual(
            s.conditional_compare(
                "event:A > event:B",
                method="rect"),
            expected)

    def test_get_overshoot(self):
        """Test get_overshoot"""

        A = [0, 0, 0, 3, 3, 0, 0, 0]
        B = [0, 0, 2, 2, 2, 2, 1, 1]

        trace = trappy.BareTrace()
        df = pd.DataFrame({"A": A, "B": B})
        trace.add_parsed_event("event", df)

        s = SignalCompare(trace, "event:A", "event:B")
        expected = (1.5, 2.0 / 7)
        self.assertEqual(
            s.get_overshoot(method="rect"),
            expected)

        # Here A never exceeds B, so there is no overshoot: the value part
        # of the result is NaN (checked with np.isnan since NaN != NaN)
        # and the duration part is 0.
        A = [0, 0, 0, 1, 1, 0, 0, 0]
        B = [0, 0, 2, 2, 2, 2, 1, 1]

        df = pd.DataFrame({"A": A, "B": B})
        # Swap the event's data in place and rebuild the comparator
        trace.event.data_frame = df
        s = SignalCompare(trace, "event:A", "event:B")

        expected = (float("nan"), 0.0)
        result = s.get_overshoot(method="rect")
        self.assertTrue(np.isnan(result[0]))
        self.assertEqual(result[1], expected[1])

    def test_get_undershoot(self):
        """Test get_undershoot"""

        A = [0, 0, 0, 1, 1, 1, 1, 1]
        B = [2, 2, 2, 2, 2, 2, 2, 2]

        trace = trappy.BareTrace()
        df = pd.DataFrame({"A": A, "B": B})
        trace.add_parsed_event("event", df)

        s = SignalCompare(trace, "event:A", "event:B")
        expected = (4.0 / 14.0, 1.0)
        self.assertEqual(
            s.get_undershoot(method="rect"),
            expected)

        # Here A is always above B, so there is no undershoot: NaN value,
        # zero duration.
        A = [3, 3, 3, 3, 3, 3, 3, 3]
        B = [2, 2, 2, 2, 2, 2, 1, 1]

        df = pd.DataFrame({"A": A, "B": B})
        trace.event.data_frame = df
        s = SignalCompare(trace, "event:A", "event:B")

        expected = (float("nan"), 0.0)
        result = s.get_undershoot(method="rect")
        self.assertTrue(np.isnan(result[0]))
        self.assertEqual(result[1], expected[1])
--------------------------------------------------------------------------------
/tests/trace.raw.txt:
--------------------------------------------------------------------------------
version = 6
CPU 3 is empty
CPU 4 is empty
cpus=6
ls-4734 [002] 106439.675591: sched_switch: prev_comm=trace-cmd prev_pid=4734 prev_prio=120 prev_state=1024 next_comm=migration/2 next_pid=18 next_prio=0
migration/2-18 [002] 106439.675613: sched_switch: prev_comm=migration/2 prev_pid=18 prev_prio=0 prev_state=1 next_comm=trace-cmd next_pid=4732 next_prio=120
trace-cmd-4730 [001] 106439.675718: sched_switch: prev_comm=trace-cmd prev_pid=4730 prev_prio=120 prev_state=1 next_comm=trace-cmd next_pid=4729 next_prio=120
--------------------------------------------------------------------------------
/tests/trace.txt:
--------------------------------------------------------------------------------
version = 6
CPU 3 is empty
CPU 4 is empty 4 | cpus=6 5 | ls-4734 [002] 106439.675591: sched_switch: trace-cmd:4734 [120] R ==> migration/2:18 [0] 6 | migration/2-18 [002] 106439.675613: sched_switch: migration/2:18 [0] S ==> trace-cmd:4732 [120] 7 | trace-cmd-4731 [001] 106439.675698: sched_switch: trace-cmd:4731 [120] S ==> trace-cmd:4730 [120] 8 | -------------------------------------------------------------------------------- /tests/utils_tests.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 ARM Limited 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
#


import unittest
import os
import shutil
import subprocess
import tempfile

# Directory containing this file; trace fixtures are resolved relative to it
# before being copied into each test's temporary working directory.
TESTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))


def trace_cmd_installed():
    """Return true if trace-cmd is installed, false otherwise"""
    # BUGFIX: devnull is used as the child's stdout, so it must be opened
    # for writing -- previously it was opened read-only, handing the child
    # a file descriptor it could not write to.
    with open(os.devnull, "w") as devnull:
        try:
            subprocess.check_call(["trace-cmd", "options"], stdout=devnull)
        except (OSError, subprocess.CalledProcessError):
            # OSError: the binary is not present at all.
            # CalledProcessError: present but not usable -- either way,
            # report trace-cmd as unavailable instead of crashing the
            # skip-check.
            return False

    return True

class SetupDirectory(unittest.TestCase):
    """TestCase that runs each test inside a fresh temporary directory.

    ``files_to_copy`` is a list of ``(src, dst)`` pairs: each ``src``
    (relative to the tests directory) is copied into the temporary
    directory as ``dst`` before the test runs.
    """

    def __init__(self, files_to_copy, *args, **kwargs):
        self.files_to_copy = files_to_copy
        super(SetupDirectory, self).__init__(*args, **kwargs)

    def setUp(self):
        # Remember the current directory so tearDown() can restore it
        self.previous_dir = os.getcwd()

        self.out_dir = tempfile.mkdtemp()
        os.chdir(self.out_dir)

        for src_fname, dst_fname in self.files_to_copy:
            src_fname = os.path.join(TESTS_DIRECTORY, src_fname)
            shutil.copy(src_fname, os.path.join(self.out_dir, dst_fname))

    def tearDown(self):
        # Restore the original working directory and discard the sandbox
        os.chdir(self.previous_dir)
        shutil.rmtree(self.out_dir)


class TestBART(SetupDirectory):
    """SetupDirectory preloaded with the canned trace.txt/trace.raw.txt pair."""

    def __init__(self, *args, **kwargs):
        super(TestBART, self).__init__(
            [
                ("./trace.txt", "trace.txt"),
                ("./trace.raw.txt", "trace.raw.txt")
            ],
            *args,
            **kwargs)