├── .github └── workflows │ └── python-publish.yml ├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── elf_size_analyze ├── __init__.py ├── __main__.py ├── argument_parser.py ├── color.py ├── html │ ├── gen.py │ ├── index.js │ └── styles.css ├── misc.py ├── section.py ├── symbol.py ├── symbol_tree.py └── tree.py ├── example.jpg └── pyproject.toml /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | deploy: 20 | 21 | runs-on: ubuntu-latest 22 | 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python 26 | uses: actions/setup-python@v3 27 | with: 28 | python-version: '3.x' 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install build 33 | - name: Build package 34 | run: python -m build 35 | - name: Publish package 36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 37 | with: 38 | password: ${{ secrets.PYPI_API_TOKEN }} 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 
14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include elf_size_analyze/html/index.js 2 | include elf_size_analyze/html/styles.css -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Elf size report 2 | 3 | This script is based on [size_report](https://github.com/zephyrproject-rtos/zephyr/blob/master/scripts/footprint/size_report) from Zephyr Project scripts. It has been almost fully rewritten but the idea is the same. It uses binutils programs (readelf, nm, c++filt) to extract information about symbols and sections from an ELF file and filters them. Information is presented in a tree based on paths to files where the symbols have been defined. 
4 | 5 | ![Example output](https://raw.githubusercontent.com/jedrzejboczar/elf-size-analyze/master/example.jpg) 6 | 7 | ## Requirements 8 | 9 | * Python 3 10 | * binutils: readelf, nm, c++filt (optional) 11 | 12 | ## Installation 13 | 14 | For normal usage it's best to install from [PyPI](https://pypi.org/project/elf-size-analyze/): 15 | ``` 16 | pip install elf-size-analyze 17 | ``` 18 | 19 | For development it's recommended to install from sources in virtual environment in editable mode: 20 | ``` 21 | python -m venv venv 22 | source ./venv/bin/activate 23 | git clone https://github.com/jedrzejboczar/elf-size-analyze.git 24 | pip install -e ./elf-size-analyze 25 | ``` 26 | 27 | ## Usage 28 | 29 | Select the ELF file to be analyzed. To be able to extract path information about symbols from the ELF file, the program should be compiled with debug information (`gcc -g`). 30 | 31 | If installed using `pip` then the package provides an entry point and you can just use the `elf-size-analyze` command. 32 | Otherwise use `python -m elf_size_analyze` from the source directory. 
33 | 34 | Example usage: 35 | ``` 36 | elf-size-analyze -t arm-none-eabi- -w 120 -HaF build/myapp 37 | ``` 38 | 39 | For more options see help: 40 | ``` 41 | elf-size-analyze -h 42 | ``` 43 | 44 | For HTML output: 45 | ``` 46 | elf-size-analyze -t arm-none-eabi- -w 120 -HaF build/myapp -W > /tmp/index.html 47 | firefox /tmp/index.html # or other browser / xdg-open 48 | ``` 49 | -------------------------------------------------------------------------------- /elf_size_analyze/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jedrzejboczar/elf-size-analyze/5aac86c7c4da60875d6c5b4f58f0b17bcc893b09/elf_size_analyze/__init__.py -------------------------------------------------------------------------------- /elf_size_analyze/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Copyright (c) 2016, Intel Corporation 4 | # 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | # Based on a script by: 8 | # Chereau, Fabien 9 | 10 | # ^originial comments before my modifications 11 | # This script is based on 'size_report' from Zephyr Project scripts: 12 | # https://github.com/zephyrproject-rtos/zephyr/blob/master/scripts/footprint/size_report 13 | # 14 | # It has been modified to be more flexible for different (also not-bare-metal) ELF files, 15 | # and adds some more data visualization options. Parsing has been updated to use 16 | # regular expressions as it is much more robust solution. 
import itertools
import json
import logging
import math
import os
import platform
import shutil
import sys

from elf_size_analyze.argument_parser import parse_args
from elf_size_analyze.section import Section
from elf_size_analyze.symbol import (Symbol, add_fileinfo_to_symbols,
                                     demangle_symbol_names,
                                     extract_elf_symbols_fileinfo)
from elf_size_analyze.symbol_tree import SymbolsTreeByPath
from elf_size_analyze.html.gen import generate_html_output

# default logging configuration
log = logging.getLogger('elf-size-analyze')
console = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname)s] %(message)s')
console.setFormatter(formatter)
log.setLevel(logging.ERROR)
log.addHandler(console)


def main():
    """Entry point: parse arguments, extract symbols/sections from the ELF
    file and print the requested report (tree / JSON / HTML).

    Returns a process exit code: 0 on success, 1 on argument errors
    (may also sys.exit(1) when no symbols match the selected sections).
    """
    result = 1
    args = parse_args()

    # adjust verbosity: each -v lowers the logging threshold by one level,
    # clamped at DEBUG
    if args.verbose:
        level = log.level - 10 * args.verbose
        log.setLevel(max(level, logging.DEBUG))

    # prepare arguments
    if not os.path.isfile(args.elf):
        print('ELF file %s does not exist' % args.elf, file=sys.stderr)
        return result

    if not any([args.rom, args.ram, args.print_sections, args.use_sections]):
        print('No memory type action specified (RAM/ROM or special). See -h for help.')
        return result

    def get_exe(name):
        # Resolve a binutils tool name, honoring the toolchain triplet/path
        # prefix and the Windows '.exe' suffix.
        cmd = args.toolchain_triplet + name
        if 'Windows' == platform.system():
            cmd = cmd + '.exe'
        assert shutil.which(cmd) is not None, \
            'Executable "%s" could not be found!' % cmd
        # Fix: return the exact command that was validated above; previously
        # the Windows '.exe' suffix was dropped on return.
        return cmd

    # process symbols
    symbols = Symbol.extract_elf_symbols_info(args.elf, get_exe('readelf'))
    fileinfo = extract_elf_symbols_fileinfo(args.elf, get_exe('nm'))
    add_fileinfo_to_symbols(fileinfo, symbols)

    # demangle only after fileinfo extraction!
    if not args.no_demangle:
        demangle_symbol_names(symbols, get_exe('c++filt'))

    # load section info
    sections = Section.extract_sections_info(args.elf, get_exe('readelf'))
    sections_dict = {sec.num: sec for sec in sections}

    def prepare_tree(symbols):
        # Build the by-path symbol tree and apply the requested
        # merging/accumulation/sorting transformations.
        tree = SymbolsTreeByPath(symbols)
        if not args.no_merge_paths:
            tree.merge_paths(args.fish_paths)
        # JSON/HTML outputs always need cumulative sizes
        if not args.no_cumulative_size or args.json or args.html:
            tree.accumulate_sizes()
        if args.sort_by_name:
            tree.sort(key=lambda symbol: symbol.name, reverse=False)
        else:  # sort by size
            tree.sort(key=lambda symbol: symbol.size, reverse=True)
        if not args.no_totals:
            tree.calculate_total_size()

        return tree

    def print_tree(header, tree):
        min_size = math.inf if args.files_only else args.min_size
        lines = tree.generate_printable_lines(
            header=header, colors=not args.no_color, human_readable=args.human_readable,
            max_width=args.max_width, min_size=min_size, alternating_colors=args.alternating_colors)
        for line in lines:
            line.print()

    def print_json(header, tree):
        # NOTE: header is intentionally unused - JSON output contains only
        # the node dictionary.
        min_size = math.inf if args.files_only else args.min_size
        nodedict = tree._generate_node_dict(min_size=min_size)

        print(json.dumps(nodedict))

    def print_html(header, tree):
        min_size = math.inf if args.files_only else args.min_size
        nodedict = tree._generate_node_dict(min_size=min_size)
        title = f"ELF size information for {os.path.basename(args.elf)} - {header}"
        html = generate_html_output(nodedict, title, args.css)
        print(html)

    def filter_symbols(section_key):
        # Keep only symbols whose section satisfies section_key; exit with an
        # error if nothing survives the filter.
        secs = filter(section_key, sections)
        secs_str = ', '.join(s.name for s in secs)
        log.info('Considering sections: ' + secs_str)
        filtered = filter(lambda symbol: section_key(sections_dict.get(symbol.section, None)),
                          symbols)
        # tee so we can test for emptiness without consuming the result
        out, test = itertools.tee(filtered)
        if len(list(test)) == 0:
            print("""
ERROR: No symbols from given section found or all were ignored!
Sections were: %s
""".strip() % secs_str, file=sys.stderr)
            sys.exit(1)
        return out

    if args.print_sections:
        Section.print(sections)

    if args.json:
        print_func = print_json
    elif args.html:
        print_func = print_html
    else:
        print_func = print_tree

    if args.rom:
        print_func('ROM', prepare_tree(filter_symbols(lambda sec: sec and sec.occupies_rom())))

    if args.ram:
        print_func('RAM', prepare_tree(filter_symbols(lambda sec: sec and sec.occupies_ram())))

    if args.use_sections:
        nums = list(map(int, args.use_sections))
        name = 'SECTIONS: %s' % ','.join(map(str, nums))
        print_func(name, prepare_tree(filter_symbols(lambda sec: sec and sec.num in nums)))

    return 0


if __name__ == "__main__":
    sys.exit(main())
def parse_args():
    """Build the command-line interface and parse sys.argv.

    Returns the argparse.Namespace with all options; see the argument
    groups below for the available flags.
    """
    parser = argparse.ArgumentParser(description="""
Prints report of memory usage of the given executable.
Shows how different source files contribute to the total size.
Uses information contained in ELF executable and binutils programs.
For best results the program should be compiled with maximum debugging information
(e.g. GCC flag: `-g`, or more: `-ggdb3`).
""", epilog="""
This script is based on 'size_report' script from Zephyr Project:
https://github.com/zephyrproject-rtos/zephyr (scripts/footprint/size_report).
""")

    parser.add_argument('elf', metavar='ELF_FILE',
                        help='path to the examined ELF file')

    memory_group = parser.add_argument_group(
        'Memory type', """
Specifies memory types for which statistics should be printed.
Choosing at least one of these options is required.
RAM/ROM options may be oversimplified for some targets, under the hood they just filter the symbols
by sections in the following manner:
sections must have ALLOC flag and: for RAM - have WRITE flag, for ROM - not have NOBITS type.
""")
    memory_group.add_argument('-R', '--ram', action='store_true',
                              help='print RAM statistics')
    memory_group.add_argument('-F', '--rom', action='store_true',
                              help='print ROM statistics ("Flash")')
    memory_group.add_argument('-P', '--print-sections', action='store_true',
                              help='print section headers that can be used for filtering symbols with -S option'
                              + ' (output is almost identical to `readelf -WS ELF_FILE`)')
    memory_group.add_argument('-S', '--use-sections', nargs='+', metavar='NUMBER',
                              help='manually select sections from which symbols will be used (by number)')

    basic_group = parser.add_argument_group(
        'Basic arguments')
    basic_group.add_argument('-t', '--toolchain-triplet', '--toolchain-path',
                             default='', metavar='PATH',
                             help='toolchain triplet/path to prepend to binutils program names,'
                             + ' this is important for examining cross-compiled ELF files,'
                             + ' e.g `arm-none-eabi-` or `/my/path/arm-none-eabi-` or `/my/path/`')
    basic_group.add_argument('-v', '--verbose', action='count',
                             help='increase verbosity, can be specified up to 3 times'
                             + ' (verbosity levels: ERROR -> WARNING -> INFO -> DEBUG)')

    printing_group = parser.add_argument_group(
        'Printing options', 'Options for changing the output formatting.')
    printing_group.add_argument('-w', '--max-width', default=80, type=int,
                                help='set maximum output width, 0 for unlimited width (default 80)')
    printing_group.add_argument('-m', '--min-size', default=0, type=int,
                                help='do not print symbols with size below this value')
    printing_group.add_argument('-f', '--fish-paths', action='store_true',
                                help='when merging paths, use fish-like method to shrink them')
    printing_group.add_argument('-s', '--sort-by-name', action='store_true',
                                help='sort symbols by name instead of sorting by size')
    printing_group.add_argument('-H', '--human-readable', action='store_true',
                                help='print sizes in human readable format')
    printing_group.add_argument('-o', '--files-only', action='store_true',
                                help='print only files (to be used with cumulative size enabled)')
    printing_group.add_argument('-a', '--alternating-colors', action='store_true',
                                help='use alternating colors when printing symbols')
    printing_group.add_argument('-c', '--css',
                                help='path to custom css for HTML output')

    # JSON and HTML outputs are mutually exclusive
    output_type = printing_group.add_mutually_exclusive_group()
    output_type.add_argument('-j', '--json', action='store_true',
                             help='create json output')
    output_type.add_argument('-W', '--html', action='store_true',
                             help='create HTML output')

    printing_group.add_argument('--no-demangle', action='store_true',
                                help='disable demangling of C++ symbol names')
    printing_group.add_argument('--no-merge-paths', action='store_true',
                                help='disable merging paths in the table')
    printing_group.add_argument('--no-color', action='store_true',
                                help='disable colored output')
    printing_group.add_argument('--no-cumulative-size', action='store_true',
                                help='disable printing of cumulative sizes for paths')
    printing_group.add_argument('--no-totals', action='store_true',
                                help='disable printing the total symbols size')

    args = parser.parse_args()

    return args
-------------------------------------------------------------------------------- /elf_size_analyze/color.py: -------------------------------------------------------------------------------- 1 | """ 2 | The color class 3 | """ 4 | 5 | import sys 6 | 7 | 8 | class Color: 9 | """ 10 | Class for easy color codes manipulations. 11 | """ 12 | 13 | _base_string = '\033[%sm' 14 | _colors = { 15 | 'BLACK': 0, 16 | 'RED': 1, 17 | 'GREEN': 2, 18 | 'YELLOW': 3, 19 | 'BLUE': 4, 20 | 'MAGENTA': 5, 21 | 'CYAN': 6, 22 | 'GRAY': 7, 23 | } 24 | 25 | def __init__(self, color_codes=[]): 26 | try: 27 | self.color_codes = set(color_codes) 28 | except TypeError: 29 | self.color_codes = set([color_codes]) 30 | 31 | def __add__(self, other): 32 | if isinstance(other, Color): 33 | return Color(self.color_codes.union(other.color_codes)) 34 | elif isinstance(other, str): 35 | return str(self) + other 36 | return NotImplemented 37 | 38 | def __radd__(self, other): 39 | if isinstance(other, str): 40 | return other + str(self) 41 | return NotImplemented 42 | 43 | def __str__(self): 44 | return self._base_string % ';'.join(str(c) for c in self.color_codes) 45 | 46 | def __repr__(self): 47 | return 'Color(%s)' % self.color_codes 48 | 49 | 50 | # should probably be done in a metaclass or something 51 | for name, value in Color._colors.items(): 52 | # regular color 53 | setattr(Color, name, Color(value + 30)) 54 | # lighter version 55 | setattr(Color, 'L_%s' % name, Color(value + 90)) 56 | # background 57 | setattr(Color, 'BG_%s' % name, Color(value + 40)) 58 | # lighter background 59 | setattr(Color, 'BG_L_%s' % name, Color(value + 100)) 60 | 61 | setattr(Color, 'RESET', Color(0)) 62 | setattr(Color, 'BOLD', Color(1)) 63 | setattr(Color, 'DIM', Color(2)) 64 | setattr(Color, 'UNDERLINE', Color(4)) 65 | setattr(Color, 'BLINK', Color(5)) 66 | setattr(Color, 'REVERSE', Color(7)) # swaps background and forground 67 | setattr(Color, 'HIDDEN', Color(8)) 68 | 69 | 70 | def test__colors(): 71 | for attr in 
dir(Color): 72 | if attr.isupper() and not attr.startswith('_'): 73 | print(getattr(Color, attr) + 'attribute %s' % attr + Color.RESET) 74 | sys.exit(0) 75 | 76 | 77 | # test__colors() 78 | -------------------------------------------------------------------------------- /elf_size_analyze/html/gen.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generator functions for HTML output 3 | """ 4 | 5 | import os 6 | 7 | THIS_DIR = os.path.dirname(os.path.abspath(os.path.realpath(__file__))) 8 | DEFAULT_CSS = os.path.join(THIS_DIR, 'styles.css') 9 | JAVASCRIPT = os.path.join(THIS_DIR, 'index.js') 10 | 11 | def generate_html_output(node_dict, title, custom_css=None): 12 | table_content = "" 13 | 14 | custom_css = custom_css or DEFAULT_CSS 15 | with open(custom_css, encoding='utf-8') as f: 16 | css_styles = f.read() 17 | 18 | with open(JAVASCRIPT, encoding='utf-8') as f: 19 | javascript = f.read() 20 | 21 | def _print_children(node, level=0): 22 | nonlocal table_content 23 | for x, y in node.items(): 24 | table_content += f""" 25 | 26 | {x} 27 | {y['cumulative_size']} 28 | 29 | """ 30 | 31 | if "children" in y: 32 | _print_children(y["children"], level + 1) 33 | 34 | _print_children(node_dict) 35 | 36 | overall_size = 0 37 | for x,y in node_dict.items(): 38 | overall_size = overall_size + y["cumulative_size"] 39 | 40 | html_output = f""" 41 | 42 | 43 | 44 | {title} 45 | 46 | 47 | 48 | 49 | 50 | 51 |

{title}

52 |
53 | Collapse 54 | 55 | 56 | 57 | 58 | or click on rows 59 |
60 | {table_content} 61 | 62 | 63 | 64 | 65 |
Overall size in bytes{overall_size}
66 | 67 | 68 | """ 69 | 70 | return html_output 71 | -------------------------------------------------------------------------------- /elf_size_analyze/html/index.js: -------------------------------------------------------------------------------- 1 | // Build a tree out of table rows to easily manipulate rows as tree elements 2 | function buildTree() { 3 | const tree = { elem: null, parent: null, children: [] }; 4 | const current = { 5 | node: tree, 6 | level: 0, 7 | max_level: 0, 8 | }; 9 | 10 | for (const elem of document.getElementsByClassName('collapsible')) { 11 | const level = getLevel(elem); 12 | if (level == undefined) continue; 13 | 14 | current.max_level = Math.max(current.max_level, level); 15 | 16 | if (level > current.level) { 17 | if (level != current.level + 1) throw Error('Expected level+1 - invalid rows list'); 18 | current.level = level; 19 | current.node = current.node.children[current.node.children.length - 1]; 20 | if (!current.node) throw Error('what?') 21 | } else if (level < current.level) { 22 | for (let i = 0; i < current.level - level; i++) { 23 | current.node = current.node.parent; 24 | } 25 | current.level = level; 26 | } 27 | 28 | const node = { element: elem, parent: current.node, children: [] }; 29 | current.node.children.push(node); 30 | } 31 | 32 | return { tree, max_level: current.max_level }; 33 | } 34 | 35 | function updateChildren(node, collapsed) { 36 | for (const child of node.children) { 37 | // Remove this class because it is used for the visible element with collapsed children 38 | child.element.classList.remove('collapsed'); 39 | child.element.hidden = collapsed; 40 | updateChildren(child, collapsed); 41 | } 42 | } 43 | 44 | function setCollapsed(node, collapsed) { 45 | if (node.children.length == 0) return; 46 | const method = collapsed ? 
'add' : 'remove'; 47 | node.element.classList[method]('collapsed'); 48 | updateChildren(node, collapsed); 49 | } 50 | 51 | function addOnClick(node) { 52 | // root node has no parent, nor elements 53 | if (node.element) { 54 | node.element.addEventListener('click', () => { 55 | setCollapsed(node, !node.element.classList.contains('collapsed')); 56 | }) 57 | } 58 | 59 | node.children.forEach(addOnClick); 60 | } 61 | 62 | // Find row level from element classes 63 | function getLevel(elem) { 64 | const pattern = /level-(\d+)/ 65 | for (const cls of elem.classList) { 66 | const match = cls.match(pattern); 67 | if (match) { 68 | return Number(match[1]); 69 | } 70 | } 71 | return undefined; 72 | } 73 | 74 | function collapseAtLevel(tree, level) { 75 | updateChildren(tree, false); 76 | 77 | const collapse = (node) => { 78 | if (node.element && getLevel(node.element) == level) { 79 | setCollapsed(node, true); 80 | } 81 | for (const child of node.children) { 82 | collapse(child, level); 83 | } 84 | } 85 | 86 | collapse(tree); 87 | } 88 | 89 | function onLoaded() { 90 | const { tree, max_level } = buildTree(); 91 | addOnClick(tree); 92 | 93 | let level = max_level; 94 | 95 | const class_change = { 96 | more: () => (level = Math.max(level - 1, 0)), 97 | less: () => (level = Math.min(level + 1, max_level)), 98 | all: () => (level = 0), 99 | none: () => (level = max_level), 100 | }; 101 | 102 | const buttons = document.getElementsByClassName('collapse-buttons')[0]; 103 | for (const cls of Object.keys(class_change)) { 104 | const action = class_change[cls]; 105 | for (const elem of buttons.getElementsByClassName(cls)) { 106 | elem.addEventListener('click', () => { 107 | action(); 108 | collapseAtLevel(tree, level); 109 | }); 110 | 111 | } 112 | } 113 | } 114 | 115 | document.addEventListener('DOMContentLoaded', onLoaded); 116 | -------------------------------------------------------------------------------- /elf_size_analyze/html/styles.css: 
"""
Miscellaneous helper functions
"""


def named_group(name, regex):
    """Wrap *regex* in a Python named capture group called *name*."""
    return r'(?P<{}>{})'.format(name, regex)


def sizeof_fmt(num, suffix='B'):
    """
    Return *num* formatted as a human readable binary size, e.g. '1.5 KiB'.

    https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size

    FIX: refactored to a single format/return site (the original duplicated
    the `suffix_str` + return logic in three places); output is identical.
    """
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            break
        num /= 1024.0
    else:
        # fell through every unit: anything >= 1024 Zi is reported in Yi
        unit = 'Yi'
    return "%3.1f %-3s" % (num, unit + suffix)
elf_size_analyze.misc import named_group, sizeof_fmt 11 | 12 | log = logging.getLogger('elf-size-analyze') 13 | 14 | # Some nice info about sections in ELF files: 15 | # http://www.sco.com/developers/gabi/2003-12-17/ch4.sheader.html#sh_flags 16 | class Section: 17 | """Represents an ELF file section as read by `readelf -WS`.""" 18 | 19 | # Regex for parsing readelf sections information 20 | # Example output: 21 | # Section Headers: 22 | # [Nr] Name Type Addr Off Size ES Flg Lk Inf Al 23 | # [ 0] NULL 00000000 000000 000000 00 0 0 0 24 | # [ 1] .isr_vector PROGBITS 08000000 010000 000188 00 A 0 0 1 25 | # [ 2] .text PROGBITS 08000190 010190 00490c 00 AX 0 0 16 26 | # [ 3] .rodata PROGBITS 08004aa0 014aa0 000328 00 A 0 0 8 27 | # Regex test: https://regex101.com/r/N3YQYw/1 28 | pattern_fields = [ 29 | r'\s*', 30 | r'\[\s*', named_group('num', r'\d+'), r'\]', 31 | r'\s+', 32 | named_group('name', r'\S+'), 33 | r'\s+', 34 | named_group('type', r'\S+'), 35 | r'\s+', 36 | named_group('address', r'[0-9a-fA-F]+'), 37 | r'\s+', 38 | named_group('offset', r'[0-9a-fA-F]+'), 39 | r'\s+', 40 | named_group('size', r'[0-9a-fA-F]+'), 41 | r'\s+', 42 | named_group('entry_size', r'[0-9a-fA-F]+'), # whatever it is we don't need it 43 | r'\s+', 44 | named_group('flags', r'\S*'), 45 | r'\s+', 46 | named_group('link', r'[0-9a-fA-F]+'), # whatever it is we don't need it 47 | r'\s+', 48 | named_group('info', r'[0-9a-fA-F]+'), # whatever it is we don't need it 49 | r'\s+', 50 | named_group('alignment', r'[0-9a-fA-F]+'), # whatever it is we don't need it 51 | r'\s*' 52 | ] 53 | pattern = r'^{}$'.format(r''.join(pattern_fields)) 54 | pattern = re.compile(pattern) 55 | 56 | class Flag: 57 | # key to flags 58 | WRITE = 'W' 59 | ALLOC = 'A' 60 | EXECUTE = 'X' 61 | MERGE = 'M' 62 | STRINGS = 'S' 63 | INFO = 'I' 64 | LINK_ORDER = 'L' 65 | EXTRA_OS_PROCESSING_REQUIRED = 'O' 66 | GROUP = 'G' 67 | TLS = 'T' 68 | COMPRESSED = 'C' 69 | UNKNOWN = 'x' 70 | OS_SPECIFIC = 'o' 71 | EXCLUDE = 'E' 72 | 
PURECODE = 'y' 73 | PROCESSOR_SPECIFIC = 'p' 74 | PPC_VLE = 'v' 75 | GNU_MBIND = 'D' 76 | X86_64_LARGE = 'l' 77 | GNU_RETAIN = 'R' 78 | 79 | @classmethod 80 | def to_string(cls, flag): 81 | for name, value in vars(cls).items(): 82 | if not name.startswith('_'): 83 | if value == flag: 84 | return name 85 | return None 86 | 87 | def __init__(self, **kwargs): 88 | self.num = kwargs['num'] 89 | self.name = kwargs['name'] 90 | self.type = kwargs['type'] 91 | self.address = kwargs['address'] 92 | self.offset = kwargs['offset'] 93 | self.size = kwargs['size'] 94 | self.entry_size = kwargs['entry_size'] 95 | self.flags = kwargs['flags'] 96 | self.link = kwargs['link'] 97 | self.info = kwargs['info'] 98 | self.alignment = kwargs['alignment'] 99 | 100 | def is_writable(self): 101 | return self.Flag.WRITE in self.flags 102 | 103 | def occupies_memory(self): 104 | # these are the only relevant sections for us 105 | return self.Flag.ALLOC in self.flags 106 | 107 | # these two methods are probably a big simplification 108 | # as they may be true only for small embedded systems 109 | def occupies_rom(self): 110 | return self.occupies_memory() and \ 111 | self.type not in ['NOBITS'] 112 | 113 | def occupies_ram(self): 114 | return self.occupies_memory() and self.is_writable() 115 | 116 | @classmethod 117 | def from_readelf_line(cls, line): 118 | """ 119 | Create a Section from a line of `readelf -WS` output. 
120 | """ 121 | m = cls.pattern.match(line) 122 | if not m: 123 | log.debug('no match: ' + line.strip()) 124 | return None 125 | 126 | # convert non-string values 127 | m = m.groupdict() 128 | m['num'] = int(m['num']) 129 | m['address'] = int(m['address'], 16) 130 | m['offset'] = int(m['offset'], 16) 131 | m['size'] = int(m['size'], 16) 132 | m['entry_size'] = int(m['entry_size'], 16) 133 | # not sure if these are base-16 or base-10 134 | m['link'] = int(m['link'], 10) 135 | m['info'] = int(m['info'], 10) 136 | m['alignment'] = int(m['alignment'], 10) 137 | 138 | return Section(**m) 139 | 140 | @classmethod 141 | def print(cls, sections): 142 | lines = [] 143 | for s in sections: 144 | fields = [str(s.num), s.name, s.type, 145 | hex(s.address), sizeof_fmt(s.size), 146 | ','.join(cls.Flag.to_string(f) for f in s.flags)] 147 | lines.append(fields) 148 | sizes = [max(len(l[i]) for l in lines) for i in range(6)] 149 | h_fmt = '{:%d} {:%d} {:%d} {:%d} {:%d} {:%d}' % (*sizes, ) 150 | fmt = '{:>%d} {:%d} {:%d} {:>%d} {:>%d} {:%d}' % (*sizes, ) 151 | header = h_fmt.format('N', 'Name', 'Type', 'Addr', 'Size', 'Flags') 152 | separator = '=' * len(header) 153 | top_header = '{:=^{size}s}'.format(' SECTIONS ', size=len(separator)) 154 | print(Color.BOLD + top_header + Color.RESET) 155 | print(Color.BOLD + header + Color.RESET) 156 | print(Color.BOLD + separator + Color.RESET) 157 | for line in lines: 158 | print(fmt.format(*line)) 159 | print(Color.BOLD + separator + Color.RESET) 160 | 161 | @classmethod 162 | def extract_sections_info(cls, elf_file, readelf_exe='readelf'): 163 | """ 164 | Uses binutils 'readelf' to find info about all sections from an ELF file. 
165 | """ 166 | flags = ['--wide', '--section-headers'] 167 | readelf_proc = subprocess.Popen([readelf_exe, *flags, elf_file], 168 | stdout=subprocess.PIPE, universal_newlines=True) 169 | 170 | # parse lines 171 | log.info('Using readelf sections regex: %s' % cls.pattern.pattern) 172 | sections = [Section.from_readelf_line(l) for l in readelf_proc.stdout] 173 | sections = list(filter(None, sections)) 174 | 175 | if readelf_proc.wait(3) != 0: 176 | raise subprocess.CalledProcessError(readelf_proc.returncode, 177 | readelf_proc.args) 178 | 179 | return sections 180 | -------------------------------------------------------------------------------- /elf_size_analyze/symbol.py: -------------------------------------------------------------------------------- 1 | """ 2 | The symbol class 3 | """ 4 | 5 | import logging 6 | import os 7 | import re 8 | import subprocess 9 | 10 | from elf_size_analyze.misc import named_group 11 | 12 | log = logging.getLogger('elf-size-analyze') 13 | 14 | class Symbol: 15 | """ 16 | Represents a linker symbol in an ELF file. Attributes are as in the output 17 | of readelf command. Additionally, has optional file path and line number. 18 | """ 19 | 20 | def __init__(self, num, name, value, size, type, bind, visibility, section, 21 | file=None, line=None): 22 | self.num = num 23 | self.name = name 24 | self.value = value 25 | self.size = size 26 | self.type = type 27 | self.bind = bind 28 | self.visibility = visibility 29 | self.section = section 30 | self.file = file 31 | self.line = line 32 | 33 | def __repr__(self): 34 | return 'Symbol(%s)' % (self.name, ) 35 | 36 | # Regex for parsing readelf output lines 37 | # Readelf output should look like the following: 38 | # Symbol table '.symtab' contains 623 entries: 39 | # Num: Value Size Type Bind Vis Ndx Name 40 | # 0: 00000000 0 NOTYPE LOCAL DEFAULT UND 41 | # ... 
42 | # 565: 08002bf9 2 FUNC WEAK DEFAULT 2 TIM2_IRQHandler 43 | # 566: 200002a8 88 OBJECT GLOBAL DEFAULT 8 hspi1 44 | pattern_fields = [ 45 | r'\s*', 46 | named_group('num', r'\d+'), r':', 47 | r'\s+', 48 | named_group('value', r'[0-9a-fA-F]+'), 49 | r'\s+', 50 | named_group('size', r'(0x)?[0-9A-Fa-f][0-9A-Fa-f]*'), # accept dec & hex numbers 51 | r'\s+', 52 | named_group('type', r'\S+'), 53 | r'\s+', 54 | named_group('bind', r'\S+'), 55 | r'\s+', 56 | named_group('visibility', r'\S+'), 57 | r'\s+', 58 | named_group('section', r'\S+'), 59 | r'\s+', 60 | named_group('name', r'.*'), 61 | ] 62 | pattern = r'^{}$'.format(r''.join(pattern_fields)) 63 | pattern = re.compile(pattern) 64 | 65 | @classmethod 66 | def from_readelf_line(cls, line, 67 | ignored_types=['NOTYPE', 'SECTION', 'FILE'], 68 | ignore_zero_size=True): 69 | """ 70 | Create a Symbol from a line of `readelf -Ws` output. 71 | """ 72 | m = cls.pattern.match(line) 73 | if not m: 74 | log.debug('no match: ' + line.strip()) 75 | return None 76 | 77 | # convert non-string values 78 | m = m.groupdict() 79 | m['num'] = int(m['num']) 80 | m['value'] = int(m['value'], 16) 81 | m['size'] = int(m['size']) if m['size'].isdecimal() else int(m['size'], 16) 82 | try: # for numeric sections 83 | m['section'] = int(m['section']) 84 | except ValueError: 85 | pass 86 | 87 | # ignore if needed 88 | if not m['name'].strip() \ 89 | or m['type'].lower() in map(str.lower, ignored_types) \ 90 | or (ignore_zero_size and m['size'] == 0): 91 | log.debug('ignoring: ' + line.strip()) 92 | return None 93 | 94 | # create the Symbol 95 | s = Symbol(**m) 96 | 97 | return s 98 | 99 | @classmethod 100 | def extract_elf_symbols_info(cls, elf_file, readelf_exe='readelf'): 101 | """ 102 | Uses binutils 'readelf' to find info about all symbols from an ELF file. 
103 | """ 104 | flags = ['--wide', '--syms'] 105 | readelf_proc = subprocess.Popen([readelf_exe, *flags, elf_file], 106 | stdout=subprocess.PIPE, universal_newlines=True) 107 | 108 | # parse lines 109 | log.info('Using readelf symbols regex: %s' % cls.pattern.pattern) 110 | symbols = [Symbol.from_readelf_line(l) for l in readelf_proc.stdout] 111 | n_ignored = len(list(filter(lambda x: x is None, symbols))) 112 | symbols = list(filter(None, symbols)) 113 | 114 | if readelf_proc.wait(3) != 0: 115 | raise subprocess.CalledProcessError(readelf_proc.returncode, 116 | readelf_proc.args) 117 | 118 | log.info('ignored %d/%d symbols' % (n_ignored, len(symbols) + n_ignored)) 119 | 120 | return symbols 121 | 122 | 123 | def detect_nm_is_llvm(nm_exe): 124 | proc = subprocess.run([nm_exe, '--version'], 125 | check=True, capture_output=True, universal_newlines=True) 126 | if proc.stdout.lower().find('llvm') >= 0: 127 | return True 128 | # startswith(), not find() because llvm-nm contains "compatible with GNU nm" 129 | if not proc.stdout.lower().strip().startswith('gnu nm'): 130 | log.warning('Could not detect nm version, assuming GNU nm') 131 | return False 132 | 133 | 134 | def extract_elf_symbols_fileinfo(elf_file, nm_exe='nm'): 135 | """ 136 | Uses binutils 'nm' to find files and lines where symbols from an ELF 137 | executable were defined. 138 | """ 139 | # Regex for parsing nm output lines 140 | # We use Posix mode, so lines should be in form: 141 | # NAME TYPE VALUE SIZE[\tFILE[:LINE]] 142 | # e.g. 
143 | # MemManage_Handler T 08004130 00000002 /some/path/file.c:80 144 | # memset T 08000bf0 00000010 145 | gnu_flags = ['--portability', '--line-numbers'] 146 | llvm_flags = ['--portability', '--print-file-name'] 147 | gnu_fields = [ 148 | named_group('name', r'\S+'), 149 | r'\s+', 150 | named_group('type', r'\S+'), 151 | r'\s+', 152 | named_group('value', r'[0-9a-fA-F]+'), 153 | r'\s+', 154 | named_group('size', r'[0-9a-fA-F]+'), 155 | named_group('fileinfo', r'.*'), 156 | ] 157 | # llvm-nm version of output: 158 | # /some/path/file.c: memset t 800a2ea 6e 159 | llvm_fields = [ 160 | named_group('fileinfo', r'[^:]*'), 161 | r':\s+', 162 | named_group('name', r'\S+'), 163 | r'\s+', 164 | named_group('type', r'\S+'), 165 | r'\s+', 166 | named_group('value', r'[0-9a-fA-F]+'), 167 | r'\s+', 168 | named_group('size', r'[0-9a-fA-F]+'), 169 | ] 170 | 171 | is_llvm = detect_nm_is_llvm(nm_exe) 172 | flags, fields = (llvm_flags, llvm_fields) if is_llvm else (gnu_flags, gnu_fields) 173 | 174 | pattern = r'^{}$'.format(r''.join(fields)) 175 | pattern = re.compile(pattern) 176 | log.info('Using nm symbols regex: %s' % pattern.pattern) 177 | 178 | nm_proc = subprocess.Popen([nm_exe, *flags, elf_file], 179 | stdout=subprocess.PIPE, universal_newlines=True) 180 | 181 | # process nm output 182 | fileinfo_dict = {} 183 | for line in nm_proc.stdout: 184 | m = pattern.match(line) 185 | if not m: 186 | continue 187 | 188 | # parse the file info 189 | file, line = None, None 190 | fileinfo = m.group('fileinfo').strip() 191 | if len(fileinfo) > 0: 192 | # check for line number 193 | line_i = fileinfo.rfind(':') 194 | if line_i >= 0: 195 | file = fileinfo[:line_i] 196 | line = int(fileinfo[line_i + 1]) 197 | else: 198 | file = fileinfo 199 | # try to make the path more readable 200 | file = os.path.normpath(file) 201 | 202 | fileinfo_dict[m.group('name')] = file, line 203 | 204 | if nm_proc.wait(3) != 0: 205 | raise subprocess.CalledProcessError(nm_proc.returncode, 206 | nm_proc.args) 
207 | 208 | return fileinfo_dict 209 | 210 | 211 | def add_fileinfo_to_symbols(fileinfo_dict, symbols_list): 212 | # use dictionary for faster access (probably) 213 | symbols_dict = {s.name: s for s in symbols_list} 214 | for symbol_name, (file, line) in fileinfo_dict.items(): 215 | if file is None and line is None: 216 | continue 217 | if symbol_name in symbols_dict: 218 | symbol = symbols_dict[symbol_name] 219 | symbol.file = file 220 | symbol.line = line 221 | else: 222 | log.warning('nm found fileinfo for symbol "%s", which has not been found by readelf' 223 | % symbol_name) 224 | 225 | 226 | def demangle_symbol_names(symbols, cppfilt_exe='c++filt'): 227 | """ 228 | Use c++filt to demangle symbol names in-place. 229 | """ 230 | flags = [] 231 | cppfilt_proc = subprocess.Popen( 232 | [cppfilt_exe, *flags], stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) 233 | 234 | for symbol in symbols: 235 | # write the line and flush it 236 | # not super-efficient but writing all at once for large list of symbols 237 | # can block the program (probably due to buffering) 238 | cppfilt_proc.stdin.write((symbol.name + ' \n')) 239 | cppfilt_proc.stdin.flush() 240 | new_name = cppfilt_proc.stdout.readline().strip() 241 | symbol.name = new_name 242 | cppfilt_proc.stdin.close() 243 | 244 | if cppfilt_proc.wait(3) != 0: 245 | raise subprocess.CalledProcessError(cppfilt_proc.returncode, 246 | cppfilt_proc.args) 247 | -------------------------------------------------------------------------------- /elf_size_analyze/symbol_tree.py: -------------------------------------------------------------------------------- 1 | """ 2 | The symbol tree class 3 | """ 4 | 5 | import itertools 6 | import logging 7 | import math 8 | import os 9 | import pathlib 10 | 11 | from elf_size_analyze.color import Color 12 | from elf_size_analyze.misc import sizeof_fmt 13 | from elf_size_analyze.symbol import Symbol 14 | from elf_size_analyze.tree import TreeNode 15 | 16 | log = 
logging.getLogger('elf-size-analyze') 17 | 18 | class SymbolsTreeByPath: 19 | """A tree built from symbols grouped by paths. Nodes can be symbols or paths.""" 20 | 21 | class Node(TreeNode): 22 | def __init__(self, data, is_dir=False, *args, **kwargs): 23 | self.data = data 24 | self._is_dir = is_dir 25 | self.cumulative_size = None # used for accumulating symbol sizes in paths 26 | super().__init__(*args, **kwargs) 27 | 28 | def is_symbol(self): 29 | return isinstance(self.data, Symbol) 30 | 31 | def is_root(self): 32 | return self.data is None 33 | 34 | def is_path(self): 35 | return not self.is_root() and not self.is_symbol() 36 | 37 | def is_dir(self): 38 | return self.is_path() and self._is_dir 39 | 40 | def is_file(self): 41 | return self.is_path() and not self._is_dir 42 | 43 | def __repr__(self): 44 | string = self.data.name if self.is_symbol() else self.data 45 | return 'Node(%s)' % string 46 | 47 | def __init__(self, symbols=[]): 48 | self.tree_root = self.Node(None) 49 | self.orphans = self.Node('?') 50 | self.tree_root.add(self.orphans) 51 | for symbol in symbols: 52 | self.add(symbol) 53 | self.total_size = None 54 | 55 | def add(self, symbol): 56 | assert isinstance(symbol, Symbol), "Only instances of Symbol can be added!" 57 | if symbol.file is None: 58 | self.orphans.add(self.Node(symbol)) 59 | else: 60 | if not os.path.isabs(symbol.file): 61 | log.warning('Symbol\'s path is not absolute: %s: %s' 62 | % (symbol, symbol.file)) 63 | self._add_symbol_with_path(symbol) 64 | 65 | def _add_symbol_with_path(self, symbol): 66 | """ 67 | Adds the given symbol by creating nodes for each path component 68 | before adding symbol as the last ("leaf") node. 
69 | """ 70 | path = pathlib.Path(symbol.file) 71 | node = self.tree_root 72 | for part in path.parts: 73 | # find it the part exists in children 74 | path_children = filter(self.Node.is_path, node.children) 75 | path_child = list(filter(lambda node: node.data == part, path_children)) 76 | assert len(path_child) <= 1 77 | # if it does not exsits, then create it and add 78 | if len(path_child) == 0: 79 | path_child = self.Node(part, is_dir=True) 80 | node.add(path_child) 81 | else: 82 | path_child = path_child[0] 83 | # go 'into' this path part's node 84 | node = path_child 85 | # remove directory signature from last path part 86 | node._is_dir = False 87 | # last, add the symbol, the "tree leaf" 88 | node.add(self.Node(symbol)) 89 | 90 | def merge_paths(self, fish_like=False): 91 | """Merges all path componenets that have only one child into single nodes.""" 92 | for node, depth in self.tree_root.pre_order(): 93 | # we want only path nodes that have only one path node 94 | if node.is_path() and len(node.children) == 1: 95 | child = node.children[0] 96 | if child.is_path(): 97 | # add this node's path to its child 98 | this_path = node.data 99 | if fish_like: 100 | head, tail = os.path.split(this_path) 101 | this_path = os.path.join(head, tail[:1]) 102 | child.data = os.path.join(this_path, child.data) 103 | # remove this node and reparent its child 104 | node.parent.children.remove(node) 105 | node.parent.add(child) 106 | 107 | def sort(self, key, reverse=False): 108 | """ 109 | Sort all symbol lists by the given key - function that takes a Symbol as an argument. 110 | sort_paths_by_name - if specified, then paths are sorted by name (directories first). 
111 | reverse - applies to symbols 112 | reverse_paths - appliesto paths (still, directories go first) 113 | """ 114 | # to avoid sorting the same list many times, gather them first 115 | nodes_with_children = [] 116 | for node, depth in self.tree_root.pre_order(): 117 | if len(node.children) > 1: 118 | nodes_with_children.append(node) 119 | for node in nodes_with_children: 120 | # we need tee to split generators into many so that filter will work as expected 121 | ch1, ch2 = itertools.tee(node.children) 122 | symbols = filter(self.Node.is_symbol, ch1) 123 | non_symbols = filter(lambda n: not n.is_symbol(), ch2) 124 | # sort others by size if available else by name, directories first 125 | # add - to size, as we need reverse sorting for path names 126 | path_key = lambda node: -node.cumulative_size if node.cumulative_size is not None else node.data 127 | ns1, ns2, ns3 = itertools.tee(non_symbols, 3) 128 | dirs = filter(self.Node.is_dir, ns1) 129 | files = filter(self.Node.is_file, ns2) 130 | others = filter(lambda n: not n.is_file() and not n.is_dir(), ns3) 131 | non_symbols = sorted(dirs, key=path_key) \ 132 | + sorted(files, key=path_key) + list(others) 133 | symbols = sorted(symbols, key=lambda node: key(node.data), reverse=reverse) 134 | children = list(non_symbols) + list(symbols) 135 | node.children = children 136 | 137 | def accumulate_sizes(self, reset=True): 138 | """ 139 | Traverse tree bottom-up to accumulate symbol sizes in paths. 
140 | """ 141 | if reset: 142 | for node, depth in self.tree_root.pre_order(): 143 | node.cumulative_size = None 144 | # Avoid errors when there are no orphans but the root Node('?') 145 | self.orphans.cumulative_size = 0 146 | for node, depth in self.tree_root.post_order(): 147 | if node.parent is None: 148 | continue 149 | if node.parent.cumulative_size is None: 150 | node.parent.cumulative_size = 0 151 | if node.is_symbol(): 152 | node.cumulative_size = node.data.size 153 | node.parent.cumulative_size += node.cumulative_size 154 | 155 | def calculate_total_size(self): 156 | # calculate the total size 157 | all_nodes = (node for node, _ in self.tree_root.pre_order()) 158 | all_symbols = filter(self.Node.is_symbol, all_nodes) 159 | self.total_size = sum(s.data.size for s in all_symbols) 160 | 161 | class Protoline: 162 | def __init__(self, depth=0, node=None, string=None, colors=None): 163 | self.depth = depth 164 | self.node = node 165 | self.string = string 166 | self.field_strings = [] 167 | self.colors = colors or [] # avoid creating one list shared by all objects 168 | 169 | def print(self): 170 | if len(self.colors) > 0: 171 | print(sum(self.colors, Color()) + self.string + Color.RESET) 172 | else: 173 | print(self.string) 174 | 175 | def generate_printable_lines(self, *, max_width=80, min_size=0, header=None, indent=2, 176 | colors=True, alternating_colors=False, trim=True, human_readable=False): 177 | """ 178 | Creates printable output in form of Protoline objects. 179 | Handles RIDICULLOUSLY complex printing. Someone could probably implement it easier. 
180 | """ 181 | # create and initially fill the lines 182 | protolines = self._generate_protolines(min_size) 183 | self._add_field_strings(protolines, indent, human_readable) 184 | # formatting string 185 | h_fmt = '{:{s0}} {:{s1}} {:{s2}}' 186 | fmt = '{:{s0}} {:>{s1}} {:>{s2}}' 187 | t_fmt = '{:{s0}} {:>{s1}} {:>{s2}}' 188 | table_headers = ('Symbol', 'Size', '%') 189 | # calculate sizes 190 | field_sizes = self._calculate_field_sizes(protolines, max_width=max_width, 191 | initial=[len(h) for h in table_headers]) 192 | # trim too long strings 193 | if trim: 194 | self._trim_strings(protolines, field_sizes) 195 | # prepare sizes dict 196 | sizes_dict = {'s%d' % i: s for i, s in enumerate(field_sizes)} 197 | # "render" the strings 198 | for line in protolines: 199 | if line.string is None: 200 | if len(line.field_strings) == 0: 201 | line.string = '' 202 | else: 203 | line.string = fmt.format(*line.field_strings, **sizes_dict) 204 | # preopare table header 205 | header_lines = self._create_header_protolines(h_fmt, table_headers, sizes_dict, header) 206 | for l in reversed(header_lines): 207 | protolines.insert(0, l) 208 | # prepare totals 209 | if self.total_size is not None: 210 | totals_lines = self._create_totals_protolines(t_fmt, sizes_dict, human_readable) 211 | protolines.extend(totals_lines) 212 | # add colors 213 | if colors: 214 | self._add_colors(protolines, alternating_colors) 215 | return protolines 216 | 217 | def _generate_protolines(self, min_size): 218 | # generate list of nodes with indent to be printed 219 | protolines = [] 220 | for node, depth in self.tree_root.pre_order(): 221 | # we never print root so subtract its depth 222 | depth = depth - 1 223 | if node.is_root(): 224 | continue 225 | elif not (node.is_symbol() or node.is_path()): 226 | raise Exception('Wrong symbol type encountered') 227 | elif node.is_symbol() and node.data.size < min_size: 228 | continue 229 | protolines.append(self.Protoline(depth, node)) 230 | return protolines 231 | 
232 | def _generate_node_dict(self, min_size): 233 | # generate dict of nodes 234 | nodeDict = dict() 235 | get_key = lambda node: node.data.name if node.is_symbol() else node.data 236 | 237 | for node, depth in self.tree_root.pre_order(): 238 | if node.is_root(): 239 | continue 240 | elif not (node.is_symbol() or node.is_path()): 241 | raise Exception('Wrong symbol type encountered') 242 | elif node.is_symbol() and node.data.size < min_size: 243 | continue 244 | 245 | nodePath = list() 246 | iterNode = node 247 | while iterNode.parent is not None: 248 | nodePath.append(iterNode) 249 | iterNode = iterNode.parent 250 | nodePath.reverse() 251 | 252 | children = nodeDict 253 | for n in nodePath[:-1]: 254 | children = children[get_key(n)]['children'] 255 | 256 | key = get_key(node) 257 | children[key] = { 258 | 'name': get_key(node), 259 | 'cumulative_size': node.cumulative_size, 260 | } 261 | if not node.is_symbol(): 262 | children[key]['children'] = {} 263 | 264 | return nodeDict 265 | 266 | def _add_field_strings(self, protolines, indent, human_readable): 267 | for line in protolines: 268 | indent_str = ' ' * indent * line.depth 269 | if line.node.is_path(): 270 | size_str, percent_str = '-', '-' 271 | if line.node.cumulative_size is not None: 272 | size_str = self._size_string(line.node.cumulative_size, human_readable) 273 | if self.total_size is not None: 274 | percent_str = '%.2f' % (line.node.cumulative_size / self.total_size * 100) 275 | fields = [indent_str + line.node.data, size_str, percent_str] 276 | elif line.node.is_symbol(): 277 | percent_str = '-' 278 | if self.total_size is not None: 279 | percent_str = '%.2f' % (line.node.data.size / self.total_size * 100) 280 | size_str = self._size_string(line.node.data.size, human_readable) 281 | fields = [indent_str + line.node.data.name, size_str, percent_str] 282 | else: 283 | raise Exception('Wrong symbol type encountered') 284 | line.field_strings = fields 285 | 286 | def _calculate_field_sizes(self, 
protolines, initial, max_width=0): 287 | field_sizes = initial 288 | for line in protolines: 289 | for i, s, in enumerate(line.field_strings): 290 | field_sizes[i] = max(len(s), field_sizes[i]) 291 | # trim the fields if max_width is > 0 292 | if max_width > 0: 293 | if sum(field_sizes) > max_width: 294 | field_sizes[0] -= sum(field_sizes) - max_width 295 | return field_sizes 296 | 297 | def _trim_strings(self, protolines, field_sizes): 298 | for line in protolines: 299 | for i, s, in enumerate(line.field_strings): 300 | if len(s) > field_sizes[i]: 301 | line.field_strings[i] = s[:field_sizes[i] - 3] + '...' 302 | 303 | def _create_header_protolines(self, header_fmt, table_headers, sizes_dict, header): 304 | table_header = header_fmt.format(*table_headers, **sizes_dict) 305 | separator = self._separator_string(len(table_header)) 306 | if header is None: 307 | header = separator 308 | else: 309 | h = ' %s ' % header 310 | mid = len(separator) // 2 311 | before, after = int(math.ceil(len(h)/2)), int(math.floor(len(h)/2)) 312 | header = separator[:mid - before] + h + separator[mid+after:] 313 | header_protolines = [self.Protoline(string=s) for s in [header, table_header, separator]] 314 | return header_protolines 315 | 316 | def _create_totals_protolines(self, fmt, sizes_dict, human_readable): 317 | totals = fmt.format('Symbols total', self._size_string(self.total_size, human_readable), '', 318 | **sizes_dict) 319 | separator = self._separator_string(len(totals)) 320 | return [self.Protoline(string=s) for s in [separator, totals, separator]] 321 | 322 | def _separator_string(self, length): 323 | return '=' * length 324 | 325 | def _add_colors(self, protolines, alternating_colors): 326 | second_symbol_color = False 327 | for line in protolines: 328 | c = [] 329 | if line.node is None: # header lines 330 | c = [Color.BOLD, Color.BLUE] 331 | elif line.node.is_file(): 332 | c = [Color.L_BLUE] 333 | elif line.node.is_dir(): 334 | c = [Color.BLUE] 335 | elif 
 line.node.is_symbol():
336 |                 if second_symbol_color and alternating_colors:  # alternate yellow/green for consecutive symbols when enabled
337 |                     c = [Color.L_GREEN]
338 |                     second_symbol_color = False
339 |                 else:
340 |                     c = [Color.L_YELLOW]
341 |                     second_symbol_color = True
342 |             line.colors += c
343 | 
344 |     def _size_string(self, size, human_readable):
345 |         if human_readable:
346 |             return sizeof_fmt(size)  # delegate human-readable formatting to sizeof_fmt
347 |         return str(size)
348 | 
--------------------------------------------------------------------------------
/elf_size_analyze/tree.py:
--------------------------------------------------------------------------------
1 | """
2 | The tree node class
3 | """
4 | 
5 | import itertools
6 | import sys
7 | 
8 | 
9 | class TreeNode:
10 |     """
11 |     Simple implementation of a tree with dynamic number of nodes.
12 |     Provides a depth-first iterator. Someone could actually call this
13 |     class TreeNode, as every object represents a single node.
14 |     """
15 | 
16 |     def __init__(self, parent=None):
17 |         self.parent = parent  # None marks a root node
18 |         self.children = []
19 | 
20 |     def add(self, children):
21 |         # accept a single node as well as a list/tuple of nodes
22 |         if not isinstance(children, (list, tuple)):
23 |             children = (children, )
24 |         for child in children:
25 |             self.children.append(child)
26 |             child.parent = self
27 | 
28 |     def pre_order(self):
29 |         """Iterator that yields tuples (node, depth). Depth-first, pre-order traversal."""
30 |         return self.PreOrderIterator(self)
31 | 
32 |     def post_order(self):
33 |         """Iterator that yields tuples (node, depth).
 Depth-first, post-order traversal."""
33 |         return self.PostOrderIterator(self)
34 | 
35 |     def __iter__(self):
36 |         # iterating a node yields only its direct children
37 |         for child in self.children:
38 |             yield child
39 | 
40 |     class TreeIterator:
41 |         def __init__(self, root, depth=0):
42 |             self.root = root
43 |             self.depth = depth
44 | 
45 |         def __iter__(self):
46 |             raise NotImplementedError('Should yield pairs (node, depth)')
47 | 
48 |     # depth-first tree iterators
49 |     class PreOrderIterator(TreeIterator):
50 |         def __init__(self, *args, **kwargs):
51 |             # NOTE(review): this override only forwards to the base class — redundant but harmless
52 |             super().__init__(*args, **kwargs)
53 | 
54 |         def __iter__(self):
55 |             yield self.root, self.depth  # pre-order: node before its subtrees
56 |             children_iters = map(lambda child:
57 |                                  self.__class__(child, self.depth + 1), self.root)
58 |             for node in itertools.chain(*children_iters):
59 |                 yield node
60 | 
61 |     class PostOrderIterator(TreeIterator):
62 |         def __init__(self, *args, **kwargs):
63 |             super().__init__(*args, **kwargs)
64 | 
65 |         def __iter__(self):
66 |             children_iters = map(lambda child:
67 |                                  self.__class__(child, self.depth + 1), self.root)
68 |             for node in itertools.chain(*children_iters):
69 |                 yield node
70 |             yield self.root, self.depth  # post-order: node after its subtrees
71 | 
72 | 
73 | # only for testing the implementation
74 | def test__TreeNode():
75 |     class NameTree(TreeNode):
76 |         def __init__(self, name, *args, **kwargs):
77 |             self.name = name
78 |             super().__init__(*args, **kwargs)
79 | 
80 |         def __repr__(self):
81 |             return 'Node(%s)' % self.name
82 | 
83 |     def create_tree():
84 |         root = NameTree('root')
85 |         root.add([NameTree('n1'), NameTree('n2'), NameTree('n3')])
86 |         root.children[0].add([NameTree('n1n1'), NameTree('n1n2'), NameTree('n1n3')])
87 |         root.children[1].add([NameTree('n2n1'), NameTree('n2n2')])
88 |         root.children[2].add([NameTree('n3n1')])
89 |         root.children[2].children[0].add([NameTree('n3n1n1')])
90 |         return root
91 | 
92 |     root = create_tree()
93 |     print('\nIterate over a node (root node):')
94 |     for node in root:
95 |         print(' |%s' % node)
96 | 
97 |     methods = [TreeNode.pre_order, TreeNode.post_order]
98 |     for method
 in methods:
97 |         print('\nIterate over tree (%s):' % method.__name__)
98 |         for node, depth in method(root):
99 |             print(' |%s%-30s parent=%s' % (' ' * depth, node, node.parent))
100 | 
101 |     sys.exit(0)
102 | 
103 | 
104 | # test__TreeNode()
--------------------------------------------------------------------------------
/example.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jedrzejboczar/elf-size-analyze/5aac86c7c4da60875d6c5b4f58f0b17bcc893b09/example.jpg
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=61.0", "setuptools-git-versioning"]
3 | build-backend = "setuptools.build_meta"
4 | 
5 | [tool.setuptools-git-versioning]
6 | enabled = true
7 | 
8 | [project]
9 | name = "elf-size-analyze"
10 | description = "Tool to extract information about symbols and sections from an ELF file and filter them."
11 | dynamic = ["version"] 12 | authors = [ 13 | {name = "Jedrzej Boczar"}, 14 | {name = "Dominic Kuschmierz"}, 15 | {name = "Adel Mamin"}, 16 | {name = "Amir Gonnen"} 17 | ] 18 | readme = "README.md" 19 | requires-python = ">=3.7" 20 | license = {text = "Apache-2.0"} 21 | classifiers = [ 22 | "Programming Language :: Python :: 3", 23 | "License :: OSI Approved :: Apache Software License", 24 | "Operating System :: OS Independent", 25 | "Topic :: Software Development :: Compilers", 26 | "Topic :: Software Development :: Debuggers" 27 | ] 28 | 29 | [project.optional-dependencies] 30 | dev = ["pylint==2.6.0", "pytest==7.2.0", "black==20.8b1"] 31 | 32 | [project.urls] 33 | "Homepage" = "https://github.com/jedrzejboczar/elf-size-analyze" 34 | "Bug Tracker" = "https://github.com/jedrzejboczar/elf-size-analyze/issues" 35 | 36 | [project.scripts] 37 | elf-size-analyze = "elf_size_analyze.__main__:main" --------------------------------------------------------------------------------