├── src └── ffpuppet │ ├── py.typed │ ├── __main__.py │ ├── cmds.gdb │ ├── __init__.py │ ├── exceptions.py │ ├── test_display.py │ ├── test_job_object.py │ ├── test_checks.py │ ├── display.py │ ├── job_object.py │ ├── resources │ ├── testff.py │ └── tree.py │ ├── test_sanitizer_util.py │ ├── sanitizer_util.py │ ├── test_main.py │ ├── checks.py │ ├── lsof.py │ ├── bootstrapper.py │ ├── test_bootstrapper.py │ ├── test_helpers.py │ ├── test_puppet_logger.py │ ├── puppet_logger.py │ ├── test_profile.py │ ├── profile.py │ ├── test_minidump_parser.py │ ├── test_process_tree.py │ ├── main.py │ ├── minidump_parser.py │ └── process_tree.py ├── .github ├── CODEOWNERS └── workflows │ └── ci.yml ├── MANIFEST.in ├── CODE_OF_CONDUCT.md ├── setup.cfg ├── tox.ini ├── .gitignore ├── .pre-commit-config.yaml ├── pyproject.toml └── README.md /src/ffpuppet/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @MozillaSecurity/fuzzing-team-reviewers 2 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include src/ffpuppet/cmds.gdb 2 | include src/ffpuppet/py.typed 3 | -------------------------------------------------------------------------------- /src/ffpuppet/__main__.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | """FFPuppet module main""" 5 | 6 | from .main import main 7 | 8 | main() 9 | -------------------------------------------------------------------------------- /src/ffpuppet/cmds.gdb: -------------------------------------------------------------------------------- 1 | define quit_with_code 2 | if $_siginfo 3 | quit 128+$_siginfo.si_signo 4 | else 5 | quit $_exitcode 6 | end 7 | end 8 | 9 | handle SIG38 nostop noprint pass 10 | set breakpoint pending on 11 | set confirm off 12 | set prompt 13 | maint set internal-error quit yes 14 | maint set internal-error corefile no 15 | set backtrace limit 25 16 | set print elements 10 17 | set python print-stack full 18 | set trace-commands on 19 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Community Participation Guidelines 2 | 3 | This repository is governed by Mozilla's code of conduct and etiquette guidelines. 4 | For more details, please read the 5 | [Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). 6 | 7 | ## How to Report 8 | For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page. 9 | 10 | 16 | -------------------------------------------------------------------------------- /src/ffpuppet/__init__.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | """FFPuppet module""" 5 | 6 | from .core import Debugger, FFPuppet, Reason 7 | from .display import DisplayMode 8 | from .exceptions import ( 9 | BrowserExecutionError, 10 | BrowserTerminatedError, 11 | BrowserTimeoutError, 12 | LaunchError, 13 | ) 14 | from .sanitizer_util import SanitizerOptions 15 | 16 | __all__ = ( 17 | "BrowserExecutionError", 18 | "BrowserTerminatedError", 19 | "BrowserTimeoutError", 20 | "Debugger", 21 | "DisplayMode", 22 | "FFPuppet", 23 | "LaunchError", 24 | "Reason", 25 | "SanitizerOptions", 26 | ) 27 | __author__ = "Tyson Smith" 28 | -------------------------------------------------------------------------------- /src/ffpuppet/exceptions.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ffpuppet exceptions""" 5 | 6 | 7 | class LaunchError(Exception): 8 | """ 9 | Raised when the browser process does not appear to be in a functional state 10 | during launch. 11 | """ 12 | 13 | 14 | class BrowserExecutionError(LaunchError): 15 | """ 16 | Raised when the browser binary cannot be executed. 17 | """ 18 | 19 | 20 | class BrowserTerminatedError(LaunchError): 21 | """ 22 | Raised when the browser process goes away during launch. 23 | """ 24 | 25 | 26 | class BrowserTimeoutError(LaunchError): 27 | """ 28 | Raised when the browser process appears to hang during launch. 29 | """ 30 | 31 | 32 | class InvalidPrefs(LaunchError): 33 | """ 34 | Raised when an invalid prefs.js file is used. 35 | """ 36 | 37 | 38 | class TerminateError(Exception): 39 | """ 40 | Raised when attempts to terminate the browser fail. 
41 | """ 42 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | author = Tyson Smith 3 | author_email = twsmith@mozilla.com 4 | classifiers = 5 | Intended Audience :: Developers 6 | License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0) 7 | Programming Language :: Python :: 3 8 | Topic :: Software Development :: Testing 9 | description = A Python module that aids in the automation of Firefox at the process level 10 | keywords = automation firefox fuzz fuzzing security test testing 11 | license = MPL 2.0 12 | long_description = file: README.md 13 | long_description_content_type = text/markdown 14 | maintainer = Mozilla Fuzzing Team 15 | maintainer_email = fuzzing@mozilla.com 16 | name = ffpuppet 17 | url = https://github.com/MozillaSecurity/ffpuppet 18 | 19 | [options] 20 | include_package_data = True 21 | install_requires = 22 | psutil >= 5.9.0 23 | xvfbwrapper >= 0.2.10; sys_platform == "linux" 24 | package_dir = 25 | = src 26 | packages = 27 | ffpuppet 28 | python_requires = >=3.9 29 | zip_safe = False 30 | 31 | [options.entry_points] 32 | console_scripts = 33 | ffpuppet = ffpuppet.main:main 34 | ffpuppet-create-profile = ffpuppet.profile:create_profile 35 | 36 | [options.extras_require] 37 | dev = 38 | pre-commit 39 | tox 40 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py{39,310,311,312,313},lint 3 | skip_missing_interpreters = true 4 | tox_pip_extensions_ext_venv_update = true 5 | 6 | [testenv] 7 | commands = pytest -v --cache-clear --cov={toxinidir} --cov-config={toxinidir}/pyproject.toml --cov-report=term-missing --basetemp={envtmpdir} {posargs} --disable-pytest-warnings 8 | deps = 9 | pytest 10 | pytest-cov 11 | pytest-mock 12 | passenv = 13 | BUILD_CACHE 14 
| CI 15 | CI_* 16 | CODECOV_* 17 | TOXENV 18 | TRAVIS 19 | TRAVIS_* 20 | TWINE_* 21 | VCS_* 22 | usedevelop = true 23 | 24 | [testenv:codecov] 25 | commands = 26 | codecov upload-process 27 | deps = 28 | codecov-cli 29 | coverage[toml] 30 | skip_install = true 31 | 32 | [testenv:lint] 33 | commands = 34 | pre-commit run -a {posargs} 35 | deps = 36 | pre-commit 37 | skip_install = true 38 | 39 | [testenv:mypy] 40 | commands = 41 | mypy --install-types --non-interactive {posargs} 42 | deps = 43 | mypy==v1.17.1 44 | usedevelop = true 45 | 46 | [testenv:pylint] 47 | commands = 48 | pylint -j 0 {posargs} 49 | deps = 50 | pylint==3.3.7 51 | usedevelop = true 52 | 53 | [testenv:pypi] 54 | commands = 55 | python -m build 56 | twine upload --skip-existing dist/* 57 | deps = 58 | build 59 | twine 60 | skip_install = true 61 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | 91 | # VSCode settings 92 | .vscode/ 93 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/astral-sh/ruff-pre-commit 3 | rev: v0.12.7 4 | hooks: 5 | - id: ruff 6 | args: [--fix] 7 | - id: ruff-format 8 | - repo: https://github.com/pre-commit/pre-commit-hooks 9 | rev: v5.0.0 10 | hooks: 11 | - id: check-added-large-files 12 | - id: check-ast 13 | - id: check-case-conflict 14 | - id: check-docstring-first 15 | - id: check-executables-have-shebangs 16 | - id: check-merge-conflict 17 | - id: check-symlinks 18 | - id: check-json 19 | - id: check-toml 20 | - id: check-yaml 21 | - id: debug-statements 22 | - id: end-of-file-fixer 23 | - id: mixed-line-ending 24 | - id: name-tests-test 25 | args: ["--django"] 26 | - id: requirements-txt-fixer 27 | - id: trailing-whitespace 28 | - repo: https://github.com/codespell-project/codespell 29 | rev: v2.4.1 30 | hooks: 31 | - id: codespell 32 | 
exclude_types: [json] 33 | exclude: asan_symbolize\.py 34 | - repo: meta 35 | hooks: 36 | - id: check-useless-excludes 37 | - repo: https://github.com/jorisroovers/gitlint 38 | rev: v0.19.1 39 | hooks: 40 | - id: gitlint 41 | args: [--contrib=contrib-title-conventional-commits, --ignore=body-is-missing, --msg-filename] 42 | stages: [ commit-msg ] 43 | - repo: local 44 | hooks: 45 | - id: mypy 46 | name: mypy 47 | entry: tox -e mypy -- 48 | language: system 49 | require_serial: true 50 | exclude: /test_.*\.py$ 51 | types: [python] 52 | - id: pylint 53 | name: pylint 54 | entry: tox -e pylint -- 55 | language: system 56 | require_serial: true 57 | types: [python] 58 | 59 | default_language_version: 60 | python: python3 61 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools >= 43", "wheel", "setuptools_scm[toml] >= 3.4"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [tool.coverage.run] 6 | omit = [ 7 | "*/asan_symbolize.py", 8 | "*/__main__.py", 9 | "*/test_*", 10 | "*/build/*", 11 | "*/dist/*", 12 | "*/resources/*", 13 | "*/.tox/*", 14 | "*/.egg/*", 15 | ] 16 | 17 | [tool.coverage.report] 18 | exclude_lines = [ 19 | "if __name__ == .__main__.:", 20 | "if TYPE_CHECKING:", 21 | "pragma: no cover", 22 | ] 23 | 24 | [tool.mypy] 25 | ignore_missing_imports = true 26 | strict = true 27 | show_error_codes = true 28 | 29 | [tool.pylint] 30 | ignore = [ 31 | "asan_symbolize.py", 32 | ] 33 | 34 | [tool.pylint.format] 35 | max-line-length = 88 36 | 37 | [tool.pylint.messages_control] 38 | disable = [ 39 | "duplicate-code", 40 | "fixme", 41 | "too-few-public-methods", 42 | "too-many-arguments", 43 | "too-many-branches", 44 | "too-many-instance-attributes", 45 | "too-many-lines", 46 | "too-many-locals", 47 | "too-many-nested-blocks", 48 | "too-many-positional-arguments", 49 | "too-many-statements", 50 
| ] 51 | 52 | [tool.pylint.typecheck] 53 | ignored-modules = ["pytest"] 54 | 55 | [tool.pytest.ini_options] 56 | log_level = "DEBUG" 57 | 58 | [tool.ruff] 59 | exclude = ["asan_symbolize.py"] 60 | fix = true 61 | target-version = "py39" 62 | 63 | [tool.ruff.lint] 64 | select = [ 65 | # flake8-comprehensions 66 | "C4", 67 | # pycodestyle 68 | "E", 69 | # Pyflakes 70 | "F", 71 | # Flynt 72 | "FLY", 73 | # isort 74 | "I", 75 | # Perflint 76 | "PERF", 77 | # Ruff-specific rules 78 | "RUF", 79 | # flake8-simplify 80 | "SIM", 81 | # flake8-type-checking 82 | "TCH", 83 | # pyupgrade 84 | "UP", 85 | # pycodestyle 86 | "W", 87 | ] 88 | 89 | [tool.setuptools_scm] 90 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Python CI 2 | 3 | on: 4 | pull_request: 5 | branches: [master] 6 | push: 7 | branches: [master] 8 | release: 9 | types: [released] 10 | 11 | jobs: 12 | test: 13 | name: Python ${{ matrix.python-version }} (${{ matrix.platform }}) 14 | runs-on: ${{ matrix.platform }} 15 | 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | include: 20 | - python-version: "3.9" 21 | platform: ubuntu-latest 22 | toxenv: py39 23 | - python-version: "3.10" 24 | platform: ubuntu-latest 25 | toxenv: py310 26 | - python-version: "3.11" 27 | platform: ubuntu-latest 28 | toxenv: py311 29 | - python-version: "3.12" 30 | platform: ubuntu-latest 31 | toxenv: py312 32 | - python-version: "3.13" 33 | platform: ubuntu-latest 34 | toxenv: py313 35 | - python-version: "3.12" 36 | platform: macos-latest 37 | toxenv: py312 38 | - python-version: "3.12" 39 | platform: windows-latest 40 | toxenv: py312 41 | 42 | steps: 43 | - uses: actions/checkout@v4 44 | 45 | - name: Set up Python ${{ matrix.python-version }} 46 | uses: actions/setup-python@v5 47 | with: 48 | python-version: ${{ matrix.python-version }} 49 | 50 | - name: Install tox 51 | run: python 
-m pip install --upgrade tox 52 | 53 | - name: Run lint 54 | run: tox -e lint 55 | 56 | - name: Run tests 57 | run: tox -e ${{ matrix.toxenv }} 58 | 59 | - name: Run Codecov 60 | env: 61 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 62 | run: tox -e codecov 63 | 64 | publish: 65 | name: Build & Publish to PyPI 66 | if: github.event_name == 'release' 67 | needs: test 68 | runs-on: ubuntu-latest 69 | 70 | steps: 71 | - uses: actions/checkout@v4 72 | 73 | - name: Set up Python 74 | uses: actions/setup-python@v5 75 | with: 76 | python-version: "3.12" 77 | 78 | - name: Install tox 79 | run: python -m pip install --upgrade tox 80 | 81 | - name: Publish to PyPI 82 | env: 83 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 84 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 85 | run: tox -e pypi 86 | -------------------------------------------------------------------------------- /src/ffpuppet/test_display.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | """display.py tests""" 5 | 6 | from platform import system 7 | from subprocess import TimeoutExpired 8 | 9 | from pytest import mark, raises 10 | 11 | from .display import DISPLAYS, Display, DisplayMode, HeadlessDisplay, XvfbDisplay 12 | 13 | 14 | @mark.parametrize("mode", tuple(x for x in DisplayMode)) 15 | def test_displays(mocker, mode): 16 | """test Displays()""" 17 | if system() == "Linux": 18 | mocker.patch("ffpuppet.display.Xvfb", autospec=True) 19 | display = DISPLAYS[mode]() 20 | assert display 21 | try: 22 | if mode.name == "DEFAULT": 23 | assert isinstance(display, Display) 24 | elif mode.name == "HEADLESS": 25 | assert isinstance(display, HeadlessDisplay) 26 | elif mode.name == "XVFB": 27 | assert isinstance(display, XvfbDisplay) 28 | else: 29 | raise AssertionError(f"Unknown DisplayMode: {mode.name}") 30 | finally: 31 | display.close() 32 | 33 | 34 | @mark.skipif(system() != "Linux", reason="Only supported on Linux") 35 | def test_xvfb_missing_deps(mocker): 36 | """test XvfbDisplay() missing deps""" 37 | mocker.patch("ffpuppet.display.Xvfb", side_effect=NameError("test")) 38 | with raises(NameError): 39 | XvfbDisplay() 40 | 41 | 42 | @mark.skipif(system() != "Linux", reason="Only supported on Linux") 43 | @mark.parametrize( 44 | "resolution, expected_width, expected_height", 45 | ( 46 | (None, 1280, 1024), 47 | ("1920x1080", 1920, 1080), 48 | ("a", 1280, 1024), 49 | ), 50 | ) 51 | def test_xvfb_resolution(mocker, resolution, expected_width, expected_height): 52 | """test XvfbDisplay() XVFB_RESOLUTION""" 53 | xvfb = mocker.patch("ffpuppet.display.Xvfb", autospec=True) 54 | mocker.patch.dict( 55 | "ffpuppet.display.environ", 56 | {} if resolution is None else {"XVFB_RESOLUTION": resolution}, 57 | ) 58 | XvfbDisplay() 59 | assert xvfb.return_value.start.call_count == 1 60 | xvfb.assert_called_with(width=expected_width, height=expected_height, timeout=60) 61 | 62 | 63 | @mark.skipif(system() != "Linux", reason="Only supported on Linux") 64 | def 
test_xvfb_stop_hang(mocker): 65 | """test XvfbDisplay.stop hang""" 66 | xvfb = mocker.patch("ffpuppet.display.Xvfb") 67 | xvfb.return_value.stop.side_effect = TimeoutExpired(["foo"], 1) 68 | display = XvfbDisplay() 69 | display.close() 70 | assert xvfb.return_value.proc.kill.call_count == 1 71 | -------------------------------------------------------------------------------- /src/ffpuppet/test_job_object.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ffpuppet job object tests""" 5 | 6 | from platform import system 7 | from subprocess import PIPE, Popen 8 | from sys import executable 9 | from time import sleep 10 | 11 | from pytest import skip 12 | 13 | if system() == "Windows": 14 | from .core import CREATE_SUSPENDED 15 | from .job_object import config_job_object, resume_suspended_process 16 | else: 17 | skip("skipping windows-only tests", allow_module_level=True) 18 | 19 | 20 | def test_job_object_01(): 21 | """test config_job_object() set limit higher than usage""" 22 | with Popen([executable, "-c", "input()"], stdin=PIPE, stderr=PIPE) as proc: 23 | # pylint: disable=no-member,protected-access,possibly-used-before-assignment 24 | config_job_object(proc._handle, 1024 * 1024 * 1024) 25 | proc.communicate(input=b"a", timeout=10) 26 | assert proc.wait(10) == 0 27 | 28 | 29 | def test_job_object_02(): 30 | """test config_job_object() enforce limit""" 31 | with Popen( 32 | [executable, "-c", "input(); a = ['A' * 1024 * 1024 for _ in range(50)]"], 33 | stdin=PIPE, 34 | stderr=PIPE, 35 | ) as proc: 36 | # pylint: disable=no-member,protected-access,possibly-used-before-assignment 37 | config_job_object(proc._handle, 32 * 1024 * 1024) 38 | _, err = proc.communicate(input=b"a", timeout=10) 39 | assert proc.wait(10) == 1 40 
| assert b"MemoryError" in err 41 | 42 | 43 | def test_thread_resume(): 44 | """test that suspended process is created in job""" 45 | # the test function creates a subprocess to show that the parent process 46 | # is suspended on launch. if creationflags=CREATE_SUSPENDED is omitted, 47 | # the test should fail (no MemoryError) 48 | with Popen( 49 | [ 50 | executable, 51 | "-c", 52 | "from subprocess import run; import sys;" 53 | "run([sys.executable, '-c', " 54 | "\"input(); a = ['A' * 1024 * 1024 for _ in range(50)]\"], check=True)", 55 | ], 56 | # pylint: disable=possibly-used-before-assignment 57 | creationflags=CREATE_SUSPENDED, 58 | stdin=PIPE, 59 | stderr=PIPE, 60 | ) as proc: 61 | sleep(0.1) 62 | # pylint: disable=no-member,protected-access,possibly-used-before-assignment 63 | config_job_object(proc._handle, 32 * 1024 * 1024) 64 | resume_suspended_process(proc.pid) 65 | _, err = proc.communicate(input=b"a", timeout=10) 66 | assert proc.wait(10) == 1 67 | assert b"MemoryError" in err 68 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | FFPuppet 2 | ======== 3 | 4 | [![CI](https://github.com/MozillaSecurity/ffpuppet/actions/workflows/ci.yml/badge.svg)](https://github.com/MozillaSecurity/ffpuppet/actions/workflows/ci.yml) 5 | [![codecov](https://codecov.io/gh/MozillaSecurity/ffpuppet/branch/master/graph/badge.svg)](https://codecov.io/gh/MozillaSecurity/ffpuppet) 6 | [![Matrix](https://img.shields.io/badge/chat-%23fuzzing-green?logo=matrix)](https://matrix.to/#/#fuzzing:mozilla.org) 7 | [![PyPI](https://img.shields.io/pypi/v/ffpuppet)](https://pypi.org/project/ffpuppet) 8 | 9 | FFPuppet is a Python module that automates browser process related tasks to aid in fuzzing. Happy bug hunting! 10 | 11 | Are you [fuzzing](https://firefox-source-docs.mozilla.org/tools/fuzzing/index.html) the browser? 
[Grizzly](https://github.com/MozillaSecurity/grizzly) can help. 12 | 13 | Installation 14 | ------------ 15 | 16 | ##### To install the latest version from PyPI 17 | 18 | pip install ffpuppet 19 | 20 | ##### Xvfb on Linux 21 | 22 | On Linux `xvfb` can be used in order to run headless (this is not the same as Firefox's `-headless` mode). 23 | 24 | To install `xvfb` on Ubuntu run: 25 | 26 | apt-get install xvfb 27 | 28 | ##### Install minidump-stackwalk 29 | 30 | `minidump-stackwalk` is used to collect crash reports from minidump files. More 31 | information can be found [here](https://lib.rs/crates/minidump-stackwalk). 32 | 33 | Browser Builds 34 | -------------- 35 | 36 | If you are looking for builds to use with FFPuppet there are a few options. 37 | 38 | ##### Download a build 39 | 40 | [fuzzfetch](https://github.com/MozillaSecurity/fuzzfetch) is the recommended method for obtaining builds and is also very helpful in automation. 41 | 42 | Taskcluster has a collection of many different build types for multiple platforms and branches. 43 | An index of the latest mozilla-central builds can be found [here](https://firefox-ci-tc.services.mozilla.com/tasks/index/gecko.v2.mozilla-central.latest.firefox/). 44 | 45 | ##### Create your own build 46 | 47 | If you would like to compile your own, build instructions can be found [here](https://firefox-source-docs.mozilla.org/setup/index.html). When using `minidump-stackwalk` 48 | breakpad [symbols](https://firefox-source-docs.mozilla.org/setup/building_with_debug_symbols.html#building-with-debug-symbols) are required for symbolized stacks. 49 | 50 | Usage 51 | ----- 52 | 53 | Once installed FFPuppet can be run using the following command: 54 | 55 | ffpuppet <firefox_binary> 56 | 57 | ##### Replaying a test case 58 | 59 | ffpuppet <firefox_binary> -p <prefs.js> -d -u <testcase> 60 | This will open the provided test case file in Firefox using the provided prefs.js file. Any log data (stderr, stdout, ASan logs... etc) will be dumped to the console if a failure is detected. 
[Grizzly Replay](https://github.com/MozillaSecurity/grizzly/wiki/Grizzly-Replay) is recommended for replaying test cases. 62 | 63 | ##### Prefs.js files 64 | 65 | prefs.js files that can be used for fuzzing or other automated testing can be generated with [PrefPicker](https://github.com/MozillaSecurity/prefpicker). 66 | -------------------------------------------------------------------------------- /src/ffpuppet/test_checks.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """checks.py tests""" 5 | 6 | from os import getpid 7 | from re import compile as re_compile 8 | 9 | from psutil import Process 10 | 11 | from .checks import CheckLogContents, CheckLogSize, CheckMemoryUsage 12 | 13 | 14 | def test_check_01(mocker, tmp_path): 15 | """test CheckLogContents()""" 16 | test_log = tmp_path / "test.log" 17 | # input contains token 18 | test_log.write_bytes(b"\xf0\x9f\x91\x8dblah\nfoo\ntest\n123") 19 | checker = CheckLogContents([str(test_log)], [re_compile("test")]) 20 | assert checker.check() 21 | with test_log.open("wb") as lfp: 22 | checker.dump_log(lfp) 23 | assert lfp.tell() 24 | # input does not contains token 25 | checker = CheckLogContents([str(test_log)], [re_compile("no_token")]) 26 | assert not checker.check() 27 | # check a 2nd time 28 | assert not checker.check() 29 | with test_log.open("wb") as lfp: 30 | checker.dump_log(lfp) 31 | assert not lfp.tell() 32 | # log does not exist 33 | checker = CheckLogContents(["missing_log"], [re_compile("no_token")]) 34 | assert not checker.check() 35 | with test_log.open("wb") as lfp: 36 | checker.dump_log(lfp) 37 | assert not lfp.tell() 38 | # input exceeds chunk_size 39 | with test_log.open("w") as lfp_txt: 40 | lfp_txt.write("A" * (CheckLogContents.buf_limit - 2)) 41 | 
lfp_txt.write("test123") 42 | lfp_txt.write("A" * 20) 43 | checker = CheckLogContents([str(test_log)], [re_compile("test123")]) 44 | mocker.patch( 45 | "ffpuppet.checks.CheckLogContents.chunk_size", CheckLogContents.buf_limit 46 | ) 47 | assert not checker.check() 48 | assert checker.check() 49 | with test_log.open("wb") as lfp: 50 | checker.dump_log(lfp) 51 | assert lfp.tell() 52 | 53 | 54 | def test_check_02(tmp_path): 55 | """test CheckLogSize()""" 56 | stde = tmp_path / "stderr" 57 | stde.write_text("test\n") 58 | stdo = tmp_path / "stdout" 59 | stdo.write_text("test\n") 60 | # exceed limit 61 | checker = CheckLogSize(1, str(stde), str(stdo)) 62 | assert checker.check() 63 | with (tmp_path / "log").open("wb") as lfp: 64 | checker.dump_log(lfp) 65 | assert lfp.tell() 66 | # don't exceed limit 67 | checker = CheckLogSize(12, str(stde), str(stdo)) 68 | assert not checker.check() 69 | with (tmp_path / "log").open("wb") as lfp: 70 | checker.dump_log(lfp) 71 | assert not lfp.tell() 72 | 73 | 74 | def test_check_03(tmp_path): 75 | """test CheckMemoryUsage()""" 76 | 77 | def get_procs(): 78 | yield Process(getpid()) 79 | 80 | checker = CheckMemoryUsage(getpid(), 300 * 1024 * 1024, get_procs) 81 | # don't exceed limit 82 | assert not checker.check() 83 | with (tmp_path / "log").open("wb") as lfp: 84 | checker.dump_log(lfp) 85 | assert not lfp.tell() 86 | checker = CheckMemoryUsage(getpid(), 10, get_procs) 87 | # exceed limit 88 | assert checker.check() 89 | with (tmp_path / "log").open("wb") as lfp: 90 | checker.dump_log(lfp) 91 | assert lfp.tell() 92 | -------------------------------------------------------------------------------- /src/ffpuppet/display.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | """ffpuppet display module""" 5 | 6 | from __future__ import annotations 7 | 8 | from enum import Enum, auto, unique 9 | from logging import getLogger 10 | from os import environ 11 | from platform import system 12 | from subprocess import TimeoutExpired 13 | from types import MappingProxyType 14 | from typing import TYPE_CHECKING 15 | 16 | if system() == "Linux": 17 | from xvfbwrapper import Xvfb # pylint: disable=import-error 18 | 19 | if TYPE_CHECKING: 20 | from collections.abc import Mapping, Sequence 21 | 22 | 23 | LOG = getLogger(__name__) 24 | 25 | 26 | @unique 27 | class DisplayMode(Enum): 28 | """Supported display modes.""" 29 | 30 | DEFAULT = auto() 31 | HEADLESS = auto() 32 | if system() == "Linux": 33 | XVFB = auto() 34 | 35 | 36 | class Display: 37 | """Default display mode. 38 | 39 | Attributes: 40 | args: Extra command line arguments to pass to Firefox. 41 | env: Extra environment variables to set. 42 | mode: DisplayMode enum name. 43 | """ 44 | 45 | __slots__ = ("args", "env") 46 | 47 | def __init__(self) -> None: 48 | self.args: Sequence[str] = () 49 | self.env: Mapping[str, str] = MappingProxyType({}) 50 | 51 | def close(self) -> None: 52 | """Perform any required operations to shutdown and cleanup. 
53 | 54 | Args: 55 | None 56 | 57 | Returns: 58 | None 59 | """ 60 | 61 | 62 | class HeadlessDisplay(Display): 63 | """Headless display mode.""" 64 | 65 | def __init__(self) -> None: 66 | super().__init__() 67 | self.args = ("-headless",) 68 | 69 | 70 | class XvfbDisplay(Display): 71 | """Xvfb display mode.""" 72 | 73 | __slots__ = ("_xvfb",) 74 | 75 | def __init__(self) -> None: 76 | super().__init__() 77 | self.env = MappingProxyType({"MOZ_ENABLE_WAYLAND": "0"}) 78 | resolution = environ.get("XVFB_RESOLUTION") 79 | width = 1280 80 | height = 1024 81 | if resolution is not None: 82 | try: 83 | w_str, h_str = resolution.lower().split("x") 84 | width, height = int(w_str), int(h_str) 85 | except ValueError: 86 | LOG.warning("Invalid XVFB_RESOLUTION '%s'", resolution) 87 | LOG.debug("xvfb resolution: %dx%d", width, height) 88 | try: 89 | self._xvfb: Xvfb | None = Xvfb(width=width, height=height, timeout=60) 90 | except NameError: 91 | LOG.error("Missing xvfbwrapper") 92 | raise 93 | self._xvfb.start() 94 | 95 | def close(self) -> None: 96 | if self._xvfb is not None: 97 | try: 98 | self._xvfb.stop() 99 | except TimeoutExpired: 100 | if self._xvfb.proc is not None: 101 | self._xvfb.proc.kill() 102 | self._xvfb = None 103 | 104 | 105 | _displays: dict[DisplayMode, type[Display]] = { 106 | DisplayMode.DEFAULT: Display, 107 | DisplayMode.HEADLESS: HeadlessDisplay, 108 | } 109 | if system() == "Linux": 110 | _displays[DisplayMode.XVFB] = XvfbDisplay 111 | 112 | DISPLAYS = MappingProxyType(_displays) 113 | -------------------------------------------------------------------------------- /src/ffpuppet/job_object.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
"""Windows Job Object management"""

import ctypes
import ctypes.wintypes
import sys
from logging import getLogger
from subprocess import Handle  # type: ignore[attr-defined]

from psutil import Process

# this module uses the Win32 API via ctypes and is Windows only
assert sys.platform == "win32"

# JOBOBJECTINFOCLASS value passed to SetInformationJobObject()
JOB_OBJECT_EXTENDED_LIMIT_INFORMATION = 9
# limit_flags bits selecting job-wide / per-process memory limits
JOB_OBJECT_LIMIT_JOB_MEMORY = 0x200
JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x100

# OpenThread() access right required to call ResumeThread()
THREAD_SUSPEND_RESUME = 0x0002

__author__ = "Jesse Schwartzentruber"

LOG = getLogger(__name__)


class IOCounters(ctypes.Structure):
    """IOCounters"""

    # mirrors the Win32 IO_COUNTERS structure (field names snake_cased)
    _fields_ = (
        ("read_operation_count", ctypes.c_ulonglong),
        ("write_operation_count", ctypes.c_ulonglong),
        ("other_operation_count", ctypes.c_ulonglong),
        ("read_transfer_count", ctypes.c_ulonglong),
        ("write_transfer_count", ctypes.c_ulonglong),
        ("other_transfer_count", ctypes.c_ulonglong),
    )


class JobObjectBasicLimitInformation(ctypes.Structure):
    """JobObjectBasicLimitInformation"""

    # mirrors the Win32 JOBOBJECT_BASIC_LIMIT_INFORMATION structure
    _fields_ = (
        ("per_process_user_time_limit", ctypes.wintypes.LARGE_INTEGER),
        ("per_job_user_time_limit", ctypes.wintypes.LARGE_INTEGER),
        ("limit_flags", ctypes.wintypes.DWORD),
        ("minimum_working_set_size", ctypes.c_size_t),
        ("maximum_working_set_size", ctypes.c_size_t),
        ("active_process_limit", ctypes.wintypes.DWORD),
        ("affinity", ctypes.wintypes.PULONG),
        ("priority_class", ctypes.wintypes.DWORD),
        ("scheduling_class", ctypes.wintypes.DWORD),
    )


class JobObjectExtendedLimitInformation(ctypes.Structure):
    """JobObjectExtendedLimitInformation"""

    # mirrors the Win32 JOBOBJECT_EXTENDED_LIMIT_INFORMATION structure
    _fields_ = (
        ("basic_limit_information", JobObjectBasicLimitInformation),
        ("io_info", IOCounters),
        ("process_memory_limit", ctypes.c_size_t),
        ("job_memory_limit", ctypes.c_size_t),
        ("peak_process_memory_used", ctypes.c_size_t),
        ("peak_job_memory_used", ctypes.c_size_t),
    )


def config_job_object(handle: Handle, limit: int) -> None:
    """Configure Windows Job object.

    Args:
        handle: Process handle to assign to the job object.
        limit: Total memory limit for the job.

    Returns:
        None
    """
    assert limit > 0
    kernel32 = ctypes.windll.kernel32
    # NOTE(review): restype is not set on these kernel32 calls, so ctypes
    # defaults to c_int - presumably fine for these handle/BOOL values, but
    # confirm there is no 64-bit HANDLE truncation risk
    # create an anonymous job object with default security attributes
    job = Handle(kernel32.CreateJobObjectA(None, None))
    try:
        assert kernel32.AssignProcessToJobObject(job, handle)
        info = JobObjectExtendedLimitInformation()
        # enforce a single limit on total memory used by all processes in the job
        info.basic_limit_information.limit_flags = JOB_OBJECT_LIMIT_JOB_MEMORY
        # pylint: disable=attribute-defined-outside-init
        info.job_memory_limit = limit
        assert kernel32.SetInformationJobObject(
            job,
            JOB_OBJECT_EXTENDED_LIMIT_INFORMATION,
            ctypes.byref(info),
            ctypes.sizeof(info),
        )
    finally:
        # always release the job object handle
        job.Close()


def resume_suspended_process(pid: int) -> None:
    """Resume a possibly suspended Windows Process.

    Args:
        pid: Process ID.

    Returns:
        None
    """
    kernel32 = ctypes.windll.kernel32
    # resume every thread belonging to the process
    for tinfo in Process(pid).threads():
        thnd = Handle(kernel32.OpenThread(THREAD_SUSPEND_RESUME, False, tinfo.id))
        try:
            # ResumeThread() returns the thread's previous suspend count,
            # or -1 on failure
            result = kernel32.ResumeThread(thnd)
            LOG.debug("resuming thread %d returned %d", tinfo.id, result)
            assert result >= 0, f"ResumeThread for tid={tinfo.id} returned {result}"
        finally:
            thnd.Close()
--------------------------------------------------------------------------------
/src/ffpuppet/resources/testff.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""fake firefox"""

import sys
from argparse import ArgumentParser
from enum import IntEnum, auto, unique
from pathlib import Path
from time import sleep
from urllib.error import URLError
from urllib.request import urlopen

# how long the fake browser stays alive before exiting on its own (seconds)
EXIT_DELAY = 45


@unique
class Mode(IntEnum):
    """Available testing modes"""

    BIG_LOG = auto()
    EXIT_CODE = auto()
    INVALID_JS = auto()
    MEMORY = auto()
    NONE = auto()
    SOFT_ASSERT = auto()


def main() -> int:
    """Fake Firefox for testing"""
    parser = ArgumentParser(prog="testff", description="Fake Firefox for testing")
    parser.add_argument("url")
    parser.add_argument("-headless", action="store_true", help="ignored")
    parser.add_argument("-marionette", nargs="?", type=int, help="ignored")
    parser.add_argument("-new-instance", action="store_true", help="ignored")
    parser.add_argument("-no-deelevate", action="store_true", help="ignored")
    parser.add_argument("-wait-for-browser", action="store_true", help="ignored")
    parser.add_argument("-profile", type=Path, required=True)
    args = parser.parse_args()

    # read prefs to see how to run
    exit_code = 0
    mode = Mode.NONE
    with (args.profile / "prefs.js").open() as prefs_js:
        for line in prefs_js:
            if line.startswith("user_pref"):
                pass
            elif line.startswith("/"):
                # 'fftest_*' comment tokens in prefs.js select the test mode
                line = line.lstrip("/").strip()
                if line == "fftest_memory":
                    mode = Mode.MEMORY
                elif line == "fftest_soft_assert":
                    mode = Mode.SOFT_ASSERT
                elif line == "fftest_invalid_js":
                    mode = Mode.INVALID_JS
                elif line == "fftest_big_log":
                    mode = Mode.BIG_LOG
                elif line.startswith("fftest_exit_code_"):
                    mode = Mode.EXIT_CODE
                    exit_code = int(line.split("fftest_exit_code_")[-1])
                # don't worry about unknown values
            elif line.startswith("#"):
                pass  # skip comments
            elif line.strip():
                raise RuntimeError(f"unknown value in prefs.js: {line}")
    # sys.stdout.write(f'cmd: {cmd}\n')
    # sys.stdout.flush()

    if mode == Mode.INVALID_JS:
        # plant an invalid prefs file in the profile
        (args.profile / "Invalidprefs.js").write_text("bad!")

    target_url = None
    try:
        # pylint: disable=consider-using-with
        conn = urlopen(args.url)
    except URLError as req_err:
        # can't redirect to file:// from http://
        # pylint: disable=consider-using-with
        conn = urlopen(str(req_err.reason).split("'")[1])
    try:
        target_url = conn.geturl()
        if target_url == args.url:
            target_url = None
        sys.stdout.write(conn.read().decode())
        sys.stdout.write("\n")
        sys.stdout.flush()
    finally:
        conn.close()

    sys.stdout.write(f"url: {target_url!r}\n")
    sys.stdout.flush()

    if mode == Mode.MEMORY:
        sys.stdout.write("simulating high memory usage\n")
        sys.stdout.flush()
        # hold ~200MB of strings to trigger memory limit checks
        _ = ["A" * 1024 * 1024 for _ in range(200)]
    elif mode == Mode.SOFT_ASSERT:
        sys.stdout.write("simulating soft assertion\n")
        sys.stdout.flush()
        sys.stderr.write("A" * 512 * 1024)
        sys.stderr.write("\n###!!! ASSERTION: test\n\nblah...\n")
        sys.stderr.flush()
    elif mode == Mode.BIG_LOG:
        sys.stdout.write("simulating big logs\n")
        buf = "A" * (512 * 1024)  # 512KB
        for _ in range(25):
            sys.stdout.write(buf)
            sys.stderr.write(buf)
        sys.stdout.flush()
        sys.stderr.flush()
    elif mode == Mode.EXIT_CODE:
        sys.stdout.write(f"exit code test ({exit_code})\n")
        sys.stdout.flush()
        return exit_code

    sys.stdout.write(f"running... (sleep {EXIT_DELAY})\n")
    sys.stdout.flush()
    sleep(EXIT_DELAY)  # wait before closing (should be terminated before elapse)
    sys.stdout.write("exiting normally\n")
    sys.stdout.flush()
    return 0


if __name__ == "__main__":
    sys.exit(main())
--------------------------------------------------------------------------------
/src/ffpuppet/resources/tree.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""fake browser tree"""

from __future__ import annotations

# NOTE: this must only use the standard library
import signal
from argparse import ArgumentParser, Namespace
from logging import DEBUG, basicConfig, getLogger
from os import getpid
from pathlib import Path
from socket import AF_INET, SOCK_STREAM, socket
from subprocess import Popen
from sys import executable
from time import perf_counter, sleep
from typing import Any

LOG = getLogger(__name__)
# set by the signal handler to request shutdown
SHUTDOWN = False
SOCKET_TIMEOUT = 60


def handle_signal(signum: int, _frame: Any) -> None:
    """handle signal to allow manual shutdown"""
    # pylint: disable=global-statement
    global SHUTDOWN
    LOG.info("caught %r", signal.Signals(signum).name)
    SHUTDOWN = True


def main(args: Namespace) -> int:
    """Mock a Firefox browser process tree for testing purposes"""
    child_procs: tuple[Popen[bytes], ...] | None = None
    start = perf_counter()
    try:
        pid = getpid()
        signal.signal(signal.SIGINT, handle_signal)
        signal.signal(signal.SIGTERM, handle_signal)
        assert args.procs >= 1, f"procs must be >= 1 ({pid})"
        assert args.duration >= 1, f"duration must be >= 1 ({pid})"
        assert not args.sync.exists(), f"sync file should not exist ({pid})"

        # command used to spawn the next level of the process tree
        cmd = [
            executable,
            __file__,
            str(args.procs),
            str(args.sync),
            "--parent-pid",
            str(pid),
            "--duration",
            str(args.duration),
        ]
        if args.no_deelevate and not args.launcher_is_parent:
            assert not args.contentproc, f"-contentproc not expected! ({pid})"
            LOG.info("Launcher process")
            # the launcher spawns a single 'parent' process
            # pylint: disable=consider-using-with
            child_procs = (Popen(cmd),)
        elif args.contentproc:
            LOG.info("Content process (ppid: %r)", args.parent_pid)
            # report our pid to the parent process over the local socket
            with socket(AF_INET, SOCK_STREAM) as conn:
                conn.connect(("127.0.0.1", args.port))
                # don't hang forever
                conn.settimeout(SOCKET_TIMEOUT)
                conn.sendall(str(pid).encode())
        else:
            assert not args.no_deelevate or args.launcher_is_parent
            LOG.info("Parent process (ppid: %r)", args.parent_pid)
            with socket(AF_INET, SOCK_STREAM) as srv:
                srv.settimeout(SOCKET_TIMEOUT)
                srv.bind(("127.0.0.1", 0))
                srv.listen()
                cmd.append("--port")
                cmd.append(str(srv.getsockname()[1]))
                cmd.append("-contentproc")
                # pylint: disable=consider-using-with
                child_procs = tuple(Popen(cmd) for _ in range(args.procs))
                # wait for processes to launch
                for _ in range(args.procs):
                    conn, _ = srv.accept()
                    # don't hang forever
                    conn.settimeout(SOCKET_TIMEOUT)
                    with conn:
                        conn.recv(64)
                LOG.info("Tree running (%0.03f)", perf_counter() - start)
                # sync file indicates the tree is up and ready
                args.sync.touch()

        # wait loop
        while not SHUTDOWN and perf_counter() - start < args.duration:
            if child_procs and all(x.poll() is not None for x in child_procs):
                break
            sleep(0.1)

    except KeyboardInterrupt:
        pass

    finally:
        if not args.contentproc:
            args.sync.unlink(missing_ok=True)
        if child_procs:
            # terminate and reap any remaining children
            for proc in child_procs:
                if proc.poll() is None:
                    proc.terminate()
            for proc in child_procs:
                proc.wait(timeout=10)
        LOG.info("Exiting, runtime %0.3fs", perf_counter() - start)

    return 0


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("procs", type=int, help="number of content processes")
    parser.add_argument("sync", type=Path, help="used to indicate tree readiness")
    parser.add_argument("--duration", type=int, default=60)
    parser.add_argument("--launcher-is-parent", action="store_true")
    parser.add_argument("--parent-pid", type=int)
    parser.add_argument("--port", type=int)
    parser.add_argument("-contentproc", action="store_true", help="fake browser arg")
    parser.add_argument("-no-deelevate", action="store_true", help="fake browser arg")

    basicConfig(
        datefmt="%H:%M:%S",
        format="[%(asctime)s.%(msecs)03d][%(process)d] %(message)s",
        level=DEBUG,
    )

    raise SystemExit(main(parser.parse_args()))
--------------------------------------------------------------------------------
/src/ffpuppet/test_sanitizer_util.py:
--------------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""ffpuppet sanitizer_util tests"""

from subprocess import CalledProcessError, TimeoutExpired

from pytest import mark, raises

from .sanitizer_util import SanitizerOptions, symbolize_log


@mark.parametrize(
    "init, add, result, overwrite",
    [
        # do nothing
        ("", {}, [""], False),
        # add single option
        ("", {"a": "1"}, ["a=1"], False),
        # add multiple options
        ("", {"b": "2", "a": "1"}, ["a=1", "b=2"], False),
        # existing
        ("a=1", {}, ["a=1"], False),
        # add to existing
        ("a=1", {"b": "2"}, ["a=1", "b=2"], False),
        # no overwrite existing
        ("a=1", {"a": "2"}, ["a=1"], False),
        # overwrite existing
        ("a=1", {"a": "2"}, ["a=2"], True),
        # parse quoted (':' inside quoted values must not split)
        (
            "a='C:\\test\\':b=\"/dev/null\"",
            {},
            ["a='C:\\test\\'", 'b="/dev/null"'],
            False,
        ),
    ],
)
def test_sanitizer_options_parsing_adding(init, add, result, overwrite):
    """test SanitizerOptions() - parsing and adding"""
    opts = SanitizerOptions(init)
    for key, value in add.items():
        opts.add(key, value, overwrite=overwrite)
    # test __str__
    split_opts = SanitizerOptions.re_delim.split(str(opts))
    assert len(split_opts) == len(result)
    if opts:
        # test __len___
        assert len(opts) == len(result)
        for opt in split_opts:
            assert opt in result
        # test __iter__
        for opt, value in opts:
            assert f"{opt}={value}" in result
        # test __contains___
        for opt in result:
            assert opt.split("=")[0] in opts
    else:
        # empty options - expected result is a single empty entry
        assert not result[-1]


def test_sanitizer_load_options():
    """test SanitizerOptions.load_options -"""
    opts = SanitizerOptions()
    # empty
    assert not opts
    assert len(opts) == 0
    # single options
    opts.load_options("a=1")
    assert opts
    assert len(opts) == 1
    assert opts.pop("a") == "1"
    # multiple options
    opts.load_options("a=1:b=2")
    assert len(opts) == 2
    assert opts.pop("a") == "1"
    assert opts.pop("b") == "2"
    # malformed option
    opts.load_options("foo")
    assert len(opts) == 0
    # malformed option with valid option
    opts.load_options("a=1:foo")
    assert len(opts) == 1
    assert opts.pop("a") == "1"


@mark.parametrize(
    "flag, value, msg",
    [
        # empty flag name
        ("", "test", r"Flag name cannot be empty"),
        # missing quotes with ':'
        ("test", "a:b", r"'a:b' \(test\) must be quoted"),
        # missing quotes with ' '
        ("test", "a b", r"'a b' \(test\) must be quoted"),
    ],
)
def test_sanitizer_options_invalid_add(flag, value, msg):
    """test SanitizerOptions() -"""
    with raises(ValueError, match=msg):
        SanitizerOptions().add(flag, value)


def test_sanitizer_options_get_pop():
    """test SanitizerOptions() - get() and pop()"""
    opts = SanitizerOptions()
    assert opts.get("missing") is None
    assert opts.pop("missing") is None
    opts.add("exists", "1")
    assert opts.pop("exists") == "1"
    assert opts.get("exists") is None


def test_sanitizer_options_check_path(tmp_path):
    """test SanitizerOptions() - check_path()"""
    opts = SanitizerOptions()
    # test missing key
    assert opts.check_path("file")
    # test exists
    file = tmp_path / "file.bin"
    file.touch()
    opts.add("file", f"'{file}'")
    assert opts.check_path("file")
    # test missing file
    file.unlink()
    assert not opts.check_path("file")


def test_sanitizer_options_is_quoted():
    """test SanitizerOptions.is_quoted()"""
    assert SanitizerOptions.is_quoted("'quoted'")
    assert SanitizerOptions.is_quoted('"quoted"')
    assert not SanitizerOptions.is_quoted("not'quoted")
    assert not SanitizerOptions.is_quoted("'not'quoted")
    assert not SanitizerOptions.is_quoted("not'quoted'")
    # mismatched quote characters
    assert not SanitizerOptions.is_quoted("'test\"")
    assert not SanitizerOptions.is_quoted("'")


def test_symbolize_log(mocker, tmp_path):
    """test symbolize_log()"""
    fake_run = mocker.patch("ffpuppet.sanitizer_util.run", autospec=True)
    log = tmp_path / "foo.txt"
    log.write_text("foo")
    # default built in llvm-symbolizer
    assert symbolize_log(log)
    # specify llvm-symbolizer
    llvm_sym = tmp_path / "fake-llvm-symbolizer"
    llvm_sym.touch()
    assert symbolize_log(log, llvm_sym)
    # symbolizer tool failed
    fake_run.side_effect = CalledProcessError(1, mocker.Mock())
    assert not symbolize_log(log, llvm_sym)
    # symbolizer tool hung
    fake_run.side_effect = TimeoutExpired(1, mocker.Mock())
    assert not symbolize_log(log, llvm_sym)
    # symbolizer log is not available (or permission error)
    fake_run.reset_mock()
    log.unlink()
    assert not symbolize_log(log, llvm_sym)
--------------------------------------------------------------------------------
/src/ffpuppet/sanitizer_util.py:
--------------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""ffpuppet sanitizer utilities"""

from __future__ import annotations

from logging import getLogger
from os import environ
from os.path import exists
from pathlib import Path
from re import compile as re_compile
from shutil import copyfileobj
from subprocess import CalledProcessError, TimeoutExpired, run
from sys import executable
from tempfile import TemporaryFile
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from collections.abc import Generator, Sequence

# included from:
# https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/asan/scripts/
ASAN_SYMBOLIZE = str(Path(__file__).parent / "asan_symbolize.py")
LOG = getLogger(__name__)

__author__ = "Tyson Smith"


class SanitizerOptions:
    """Used to parse, load and manage sanitizer options."""

    # split entries on ':' unless it is followed by '\' or '/'
    # (keeps quoted Windows paths and URLs such as 'C:\...' intact)
    re_delim = re_compile(r":(?![\\|/])")

    __slots__ = ("_options",)

    def __init__(self, options: str | None = None) -> None:
        """
        Args:
            options: Sanitizer options string to load.
        """
        self._options: dict[str, str] = {}
        if options is not None:
            self.load_options(options)

    def __bool__(self) -> bool:
        # truthy when at least one option is set
        return bool(self._options)

    def __contains__(self, item: str) -> bool:
        return item in self._options

    def __iter__(self) -> Generator[tuple[str, str]]:
        yield from self._options.items()

    def __len__(self) -> int:
        return len(self._options)

    def __str__(self) -> str:
        # render in *SAN_OPTIONS format: 'flag=value' pairs joined by ':'
        return ":".join(f"{k}={v}" for k, v in self)

    def add(self, flag: str, value: str, overwrite: bool = False) -> None:
        """Add sanitizer option flag.

        Args:
            flag: Sanitizer option flag to set.
            value: Value to use. Values containing ':' or ' ' must be quoted.
            overwrite: Overwrite existing value.

        Returns:
            None

        Raises:
            ValueError: Flag name is empty or value requires quotes.
        """
        if not flag:
            raise ValueError("Flag name cannot be empty")
        if (":" in value or " " in value) and not self.is_quoted(value):
            raise ValueError(f"'{value}' ({flag}) must be quoted")
        if flag not in self._options or overwrite:
            self._options[flag] = value

    def check_path(self, flag: str) -> bool:
        """Check path exists on disk.
        Only indicate failure if flag exists and path does not.

        Args:
            flag: Flags to set.

        Returns:
            False if the flag exists and the path does not otherwise True.
        """
        if flag in self._options:
            value = self._options[flag]
            if self.is_quoted(value):
                # strip surrounding quotes before the filesystem check
                value = value[1:-1]
            return exists(value)
        return True

    def get(self, flag: str) -> str | None:
        """Get sanitizer flag.

        Args:
            flag: Flags to retrieve.

        Returns:
            Value of given flag or None
        """
        return self._options.get(flag)

    @staticmethod
    def is_quoted(token: str) -> bool:
        """Check if token is quoted.

        Args:
            token: Value to check.

        Returns:
            True if token is quoted otherwise False.
        """
        return len(token) > 1 and token[0] == token[-1] and token[0] in ('"', "'")

    def load_options(self, options: str | None) -> None:
        """Load flags from *SAN_OPTIONS in env.

        Args:
            options: Colon separated list of `flag=value` pairs.

        Returns:
            None
        """
        self._options.clear()
        if options:
            for option in self.re_delim.split(options):
                try:
                    self.add(*option.split("=", maxsplit=1))
                except (TypeError, ValueError):  # noqa: PERF203
                    # TypeError: entry has no '=' (missing value)
                    # ValueError: empty flag name or unquoted ':'/' ' in value
                    LOG.warning("Malformed sanitizer option %r", option)

    def pop(self, flag: str) -> str | None:
        """Pop sanitizer flag.

        Args:
            flag: Flags to retrieve.

        Returns:
            Value of given flag or None
        """
        return self._options.pop(flag, None)


def symbolize_log(
    log: Path,
    llvm_symbolizer: str | None = None,
    timeout: int = 120,
) -> bool:
    """Symbolize a sanitizer log.

    Args:
        log: Log file to symbolize.
        llvm_symbolizer: Binary to use. System installed binary is used by default.
        timeout: Time limit for symbolizer process.

    Returns:
        True if symbolizer process ran successfully otherwise False.
    """
    assert timeout > 0
    env = dict(environ)
    # don't override an explicitly configured symbolizer
    if "LLVM_SYMBOLIZER_PATH" not in environ and llvm_symbolizer is not None:
        env["LLVM_SYMBOLIZER_PATH"] = llvm_symbolizer
    # symbolize into a temporary file then copy back over the original log
    with TemporaryFile("wb+") as tmp_fp:
        try:
            with log.open("rb") as log_fp:
                run(
                    (executable, ASAN_SYMBOLIZE, "-d"),
                    env=env,
                    stdin=log_fp,
                    stdout=tmp_fp,
                    check=True,
                    timeout=timeout,
                )
        except CalledProcessError:
            return False
        except (FileNotFoundError, PermissionError) as exc:
            LOG.debug("failed to open sanitizer log: %s", exc)
            return False
        except TimeoutExpired:
            # this *should* never happen
            LOG.warning("Symbolizer process did not complete in %ds", timeout)
            return False
        tmp_fp.seek(0)
        with log.open("wb") as log_fp:
            copyfileobj(tmp_fp, log_fp)
    return True
# --------------------------------------------------------------------------------
# /src/ffpuppet/test_main.py:
# --------------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""ffpuppet main.py tests"""

from platform import system

from pytest import mark, raises

from .core import Reason
from .exceptions import BrowserExecutionError
from .main import dump_to_console, main, parse_args
from .profile import Profile


@mark.parametrize(
    "reason, launch, is_healthy, extra_args",
    [
        # browser exit
        (Reason.EXITED, None, (False,), ["-d", "--save-all"]),
        # browser exit - more flags
        (Reason.EXITED, None, (False,), ["-a", "token", "--log-level", "DEBUG"]),
        # cannot launch browser binary
        (Reason.CLOSED, (BrowserExecutionError(),), None, []),
        # browser crash
        (Reason.ALERT, None, (False,), []),
        # user exit
        (Reason.CLOSED, None, (True, KeyboardInterrupt()), []),
        # exception
        (None, None, (False,), []),
    ],
)
def test_main_01(mocker, tmp_path, reason, launch, is_healthy, extra_args):
    """test main()"""
    # avoid real delays in the main loop
    mocker.patch("ffpuppet.main.sleep", autospec=True)
    fake_ffp = mocker.patch("ffpuppet.main.FFPuppet", autospec=True)
    fake_ffp.return_value.get_pid.return_value = 12345
    fake_ffp.return_value.is_healthy.side_effect = is_healthy
    fake_ffp.return_value.launch.side_effect = launch
    fake_ffp.return_value.profile = mocker.Mock(spec_set=Profile, path=tmp_path)
    fake_ffp.return_value.reason = reason
    out_logs = tmp_path / "logs"
    out_logs.mkdir()
    prefs = tmp_path / "prefs.js"
    prefs.touch()
    fake_bin = tmp_path / "fake.bin"
    fake_bin.touch()
    args = [str(fake_bin), "-l", str(out_logs), "-p", str(prefs)]
    main(args + extra_args)
    if "-a" in extra_args:
        assert fake_ffp.return_value.add_abort_token.call_count == 1
    else:
        assert fake_ffp.return_value.add_abort_token.call_count == 0
    # cleanup must always run regardless of how the session ended
    assert fake_ffp.return_value.close.call_count == 1
    assert fake_ffp.return_value.save_logs.call_count == 1
    assert fake_ffp.return_value.clean_up.call_count == 1


def test_parse_args_01(capsys, mocker, tmp_path):
    """test parse_args()"""
    # stub file reads (e.g. perf_event_paranoid) to return b"99"
    mocker.patch("ffpuppet.main.Path.read_bytes", autospec=True, return_value=b"99")
    certutil_avail = mocker.patch("ffpuppet.main.certutil_available", autospec=True)
    fake_which = mocker.patch("ffpuppet.main.which", autospec=True)
    with raises(SystemExit):
        parse_args(["-h"])
    # invalid/missing binary
    with raises(SystemExit):
        parse_args(["missing_bin"])
    assert "error: Invalid browser binary 'missing_bin'" in capsys.readouterr()[-1]
    fake_bin = tmp_path / "fake.bin"
    fake_bin.touch()
    # invalid log-limit
    with raises(SystemExit):
        parse_args([str(fake_bin), "--log-limit", "-1"])
    assert "error: --log-limit must be >= 0" in capsys.readouterr()[-1]
    # invalid marionette port
    with raises(SystemExit):
        parse_args([str(fake_bin), "--marionette", "123"])
    assert (
        "error: --marionette must be 0 or > 1024 and < 65536" in capsys.readouterr()[-1]
    )
    # invalid memory limit
    with raises(SystemExit):
        parse_args([str(fake_bin), "--memory", "-1"])
    assert "error: --memory must be >= 0" in capsys.readouterr()[-1]
    # missing prefs
    with raises(SystemExit):
        parse_args([str(fake_bin), "-p", "missing_prefs"])
    assert "error: Invalid prefs.js file 'missing_prefs'" in capsys.readouterr()[-1]
    # missing extension
    with raises(SystemExit):
        parse_args([str(fake_bin), "-e", "missing_ext"])
    assert "error: Extension 'missing_ext' does not exist" in capsys.readouterr()[-1]
    # missing certificate
    certutil_avail.return_value = True
    with raises(SystemExit):
        parse_args([str(fake_bin), "--cert", "missing_cert"])
    assert "error: Invalid certificate file 'missing_cert'" in capsys.readouterr()[-1]
    # missing certutil
    certutil_avail.return_value = False
    with raises(SystemExit):
        parse_args([str(fake_bin), "--cert", str(fake_bin)])
    assert "error: '--certs' requires NSS certutil" in capsys.readouterr()[-1]
    # invalid log path
    (tmp_path / "junk.log").touch()
    missing = tmp_path / "missing"
    with raises(SystemExit):
        parse_args([str(fake_bin), "--logs", str(missing)])
    assert f"Log output directory is invalid '{missing}'" in capsys.readouterr()[-1]
    # rr is Linux only
    if system() == "Linux":
        # missing rr
        fake_which.return_value = None
        with raises(SystemExit):
            parse_args([str(fake_bin), "--rr"])
        assert "error: rr is not installed" in capsys.readouterr()[-1]
        # rr - perf_event_paranoid > 1
        fake_which.return_value = "rr"
        with raises(SystemExit):
            parse_args([str(fake_bin), "--rr"])
        assert "/proc/sys/kernel/perf_event_paranoid <= 1" in capsys.readouterr()[-1]
    # success
    assert parse_args([str(fake_bin)])


def test_dump_to_console_01(tmp_path):
    """test dump_to_console()"""
    # call with no logs
    assert not dump_to_console(tmp_path)
    # call with dummy logs
    (tmp_path / "log_stderr.txt").write_bytes(b"dummy-stderr")
    (tmp_path / "log_stdout.txt").write_bytes(b"dummy-stdout")
    output = dump_to_console(tmp_path)
    assert "Dumping 'log_stderr.txt'" in output
    assert "dummy-stderr" in output
    assert "Dumping 'log_stdout.txt'" in output
    assert "dummy-stdout" in output
    # truncate log - leading content should be dropped when over quota
    with (tmp_path / "log_stdout.txt").open("wb") as log_fp:
        log_fp.write(b"dummy-stdout")
        for _ in range(1024):
            log_fp.write(b"test")
    output = dump_to_console(tmp_path, log_quota=100)
    assert "Dumping 'log_stderr.txt'" in output
    assert "dummy-stderr" in output
    assert "Dumping 'log_stdout.txt'" in output
    assert "dummy-stdout" not in output
--------------------------------------------------------------------------------
# /src/ffpuppet/checks.py:
# --------------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""ffpuppet checks module"""

from __future__ import annotations

from abc import ABC, abstractmethod
from os import SEEK_SET, stat
from platform import system
from typing import IO, TYPE_CHECKING, Callable

from psutil import AccessDenied, NoSuchProcess, Process

if TYPE_CHECKING:
    from collections.abc import Iterable
    from re import Pattern

__author__ = "Tyson Smith"
__credits__ = ["Tyson Smith"]


class _LogContentsCheckState:
    """Per-file scan state used by CheckLogContents."""

    __slots__ = ("buffer", "fname", "offset")

    def __init__(self, fname: str) -> None:
        # path of the log file being scanned
        self.fname: str = fname
        # tail of previously scanned data, kept to catch tokens spanning reads
        self.buffer: bytes = b""
        # file offset where the next read should begin
        self.offset: int = 0


class Check(ABC):
    """
    Check base class
    """

    name: str

    __slots__ = ("message", "name")

    def __init__(self) -> None:
        # set by check() when the abort condition is met
        self.message: str | None = None

    @abstractmethod
    def check(self) -> bool:
        """
        Implement a check that returns True when the abort conditions are met.
        """

    def dump_log(self, dst_fp: IO[bytes]) -> None:
        """Write log contents to file.

        Args:
            dst_fp: Open file object to write logs to.

        Returns:
            None
        """
        if self.message is not None:
            dst_fp.write(self.message.encode(errors="ignore"))


class CheckLogContents(Check):
    """
    CheckLogContents will search through the browser logs for a token.
    """

    buf_limit = 1024  # 1KB
    chunk_size = 0x20000  # 128KB
    name = "log_contents"

    __slots__ = ("logs", "tokens")

    def __init__(
        self, log_files: Iterable[str], search_tokens: Iterable[Pattern[str]]
    ) -> None:
        assert log_files, "log_files is empty"
        assert search_tokens, "search_tokens is empty"
        super().__init__()
        self.logs: list[_LogContentsCheckState] = [
            _LogContentsCheckState(log_file) for log_file in log_files
        ]
        self.tokens = search_tokens

    def check(self) -> bool:
        """Collect log contents for tokens.

        Args:
            None

        Returns:
            True if a token is located otherwise False.
        """
        for log in self.logs:
            try:
                # check if file has new data
                if stat(log.fname).st_size <= log.offset:
                    continue
                with open(log.fname, "rb") as scan_fp:
                    # only collect new data
                    scan_fp.seek(log.offset, SEEK_SET)
                    # read and prepend chunk of previously read data
                    data = b"".join((log.buffer, scan_fp.read(self.chunk_size)))
                    log.offset = scan_fp.tell()
            except OSError:
                # log does not exist
                continue
            # decode once per chunk instead of once per token
            text = data.decode(errors="replace")
            for token in self.tokens:
                match = token.search(text)
                if match:
                    self.message = f"TOKEN_LOCATED: {match.group()}\n"
                    return True
            # keep a small tail to catch tokens straddling chunk boundaries
            log.buffer = data[-1 * self.buf_limit :]
        return False


class CheckLogSize(Check):
    """
    CheckLogSize will check the total file size of the browser logs.
    """

    name = "log_size"

    __slots__ = ("limit", "stderr_file", "stdout_file")

    def __init__(self, limit: int, stderr_file: str, stdout_file: str) -> None:
        super().__init__()
        # combined size limit in bytes
        self.limit = limit
        self.stderr_file = stderr_file
        self.stdout_file = stdout_file

    def check(self) -> bool:
        """Collect log disk usage info and compare with limit.

        Args:
            None

        Returns:
            True if the total usage is greater than self.limit otherwise False.
        """
        err_size = stat(self.stderr_file).st_size
        out_size = stat(self.stdout_file).st_size
        total_size = err_size + out_size
        if total_size > self.limit:
            self.message = (
                f"LOG_SIZE_LIMIT_EXCEEDED: {total_size:,}\n"
                f"Limit: {self.limit:,} ({self.limit / 1_048_576}MB)\n"
                f"stderr log: {err_size:,} ({err_size / 1_048_576}MB)\n"
                f"stdout log: {out_size:,} ({out_size / 1_048_576}MB)\n"
            )
        return self.message is not None


class CheckMemoryUsage(Check):
    """
    CheckMemoryUsage is used to check the amount of memory used by the browser
    process and its descendants against a defined limit.
    """

    name = "memory_usage"

    __slots__ = ("_get_procs", "_is_linux", "limit", "pid")

    def __init__(
        self, pid: int, limit: int, get_procs_cb: Callable[[], list[Process]]
    ) -> None:
        super().__init__()
        # callback providing the current list of processes to measure
        self._get_procs = get_procs_cb
        self._is_linux = system() == "Linux"
        # memory limit in bytes
        self.limit = limit
        self.pid = pid

    def check(self) -> bool:
        """Use psutil to collect memory usage info and compare with limit.

        Args:
            None

        Returns:
            True if the total usage is greater than or equal to
            self.limit otherwise False.
        """
        largest_shared = 0
        proc_info: list[tuple[int, int]] = []
        total_usage = 0
        for proc in self._get_procs():
            try:
                mem_info = proc.memory_info()
            except (AccessDenied, NoSuchProcess):  # pragma: no cover
                # process ended or is inaccessible - skip it
                continue
            cur_usage: int = mem_info.rss
            if self._is_linux:
                # on Linux use "rss - shared" as the current usage
                cur_usage -= mem_info.shared
                # track largest shared amount to be appended to the grand total
                # this is not perfect but it is close enough for this
                largest_shared = max(largest_shared, mem_info.shared)
            total_usage += cur_usage
            proc_info.append((proc.pid, cur_usage))
        total_usage += largest_shared
        if total_usage >= self.limit:
            msg = [
                f"MEMORY_LIMIT_EXCEEDED: {total_usage:,}\n",
                f"Limit: {self.limit:,} ({self.limit / 1_048_576}MB)\n",
                f"Parent PID: {self.pid}\n",
            ]
            for pid, usage in proc_info:
                msg.append(f"-> PID {pid: 6}: {usage: 14,}\n")
            self.message = "".join(msg)
        return self.message is not None
# --------------------------------------------------------------------------------
# /src/ffpuppet/lsof.py:
# --------------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
4 | """Windows utility to map all open files on the system to process""" 5 | 6 | from __future__ import annotations 7 | 8 | import ctypes 9 | import ctypes.wintypes 10 | import sys 11 | from pathlib import Path 12 | 13 | DUPLICATE_SAME_ACCESS = 2 14 | FILE_TYPE_DISK = 1 15 | FILE_TYPE_UNKNOWN = 0 16 | MAX_PATH = 260 17 | NO_ERROR = 0 18 | PROCESS_DUP_HANDLE = 0x40 19 | STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 20 | SYSTEM_EXTENDED_HANDLE_INFORMATION_CLASS = 0x40 21 | SYSTEM_HANDLE_INFORMATION_CLASS = 0x10 22 | 23 | __author__ = "Jesse Schwartzentruber" 24 | 25 | 26 | assert sys.platform == "win32" 27 | 28 | 29 | def nt_status(status: int) -> int: 30 | """Cast a signed integer to 32-bit unsigned. 31 | 32 | Args: 33 | status: an NTSTATUS result 34 | 35 | Returns: 36 | status cast to uint32 37 | """ 38 | return status & 0xFFFFFFFF 39 | 40 | 41 | def create_winerror(function: str) -> OSError: # pragma: no cover 42 | """Create a WinError exception. 43 | 44 | Args: 45 | function: Windows API function name that generated the error. 46 | 47 | Returns: 48 | OSError representing a windows error from fall to a given function. 49 | """ 50 | errno = ctypes.GetLastError() 51 | desc = f"{ctypes.FormatError()} ({function})" 52 | return OSError(errno, desc, None, errno) 53 | 54 | 55 | class SystemHandleTableEntryInfoEx(ctypes.Structure): 56 | """NT API Handle table entry structure""" 57 | 58 | _fields_ = ( 59 | ("Object", ctypes.c_void_p), 60 | ("UniqueProcessId", ctypes.wintypes.HANDLE), 61 | ("HandleValue", ctypes.wintypes.HANDLE), 62 | ("GrantedAccess", ctypes.c_ulong), 63 | ("CreatorBackTraceIndex", ctypes.c_ushort), 64 | ("ObjectTypeIndex", ctypes.c_ushort), 65 | ("HandleAttributes", ctypes.c_ulong), 66 | ("Reserved", ctypes.c_ulong), 67 | ) 68 | 69 | 70 | def nt_query_system_handle_information_ex() -> ctypes.Structure: 71 | """List all open handles in the system. 
72 | 73 | Args: 74 | None 75 | 76 | Returns: 77 | A ctypes Structure with fields: 78 | NumberOfHandles (int) 79 | Handles (list[SystemHandleTableEntryInfoEx]) 80 | """ 81 | buf_size = 64 * 1024 82 | buf = ctypes.create_string_buffer(buf_size) 83 | ntdll = ctypes.windll.ntdll 84 | while True: 85 | status = ntdll.NtQuerySystemInformation( 86 | SYSTEM_EXTENDED_HANDLE_INFORMATION_CLASS, 87 | buf, 88 | buf_size, 89 | None, 90 | ) 91 | if nt_status(status) != STATUS_INFO_LENGTH_MISMATCH: 92 | break 93 | buf_size *= 2 94 | buf = ctypes.create_string_buffer(buf_size) 95 | assert status >= 0, f"NtQuerySystemInformation returned 0x{nt_status(status):08X}" 96 | num_handles = ctypes.c_void_p.from_buffer(buf).value 97 | 98 | class SystemHandleInformationEx(ctypes.Structure): 99 | """NT API Handle table structure""" 100 | 101 | _fields_ = ( 102 | ("NumberOfHandles", ctypes.c_void_p), 103 | ("Reserved", ctypes.c_void_p), 104 | ("Handles", SystemHandleTableEntryInfoEx * (num_handles or 0)), 105 | ) 106 | 107 | return SystemHandleInformationEx.from_buffer(buf) 108 | 109 | 110 | def pid_handle_to_filename( 111 | pid: int, hnd: int, raise_for_error: bool = False 112 | ) -> Path | None: 113 | """Resolve a PID/Handle pair to a filesystem Path. 
114 | 115 | Args: 116 | pid: The Process ID the Handle belongs to 117 | hnd: The Handle belonging to the Process 118 | raise_for_error: if True, raise OSError when any error occurs 119 | 120 | Returns: 121 | Path the handle represents 122 | or None if error occurred and `raise_for_error` is False 123 | """ 124 | kernel32 = ctypes.windll.kernel32 125 | buf_size = MAX_PATH * 2 + 1 126 | buf = ctypes.create_string_buffer(buf_size) 127 | process_handle = kernel32.OpenProcess(PROCESS_DUP_HANDLE, False, pid) 128 | close_hnd = False 129 | try: 130 | if process_handle: 131 | handle_out = ctypes.wintypes.HANDLE() 132 | if kernel32.DuplicateHandle( 133 | process_handle, 134 | hnd, 135 | ctypes.wintypes.HANDLE(kernel32.GetCurrentProcess()), 136 | ctypes.byref(handle_out), 137 | 0, 138 | False, 139 | DUPLICATE_SAME_ACCESS, 140 | ): 141 | assert handle_out.value is not None 142 | hnd = int(handle_out.value) 143 | close_hnd = True 144 | else: 145 | if not raise_for_error: 146 | kernel32.SetLastError(0) 147 | return None 148 | raise create_winerror("DuplicateHandle") # pragma: no cover 149 | else: 150 | if not raise_for_error: 151 | kernel32.SetLastError(0) 152 | return None 153 | raise create_winerror("OpenProcess") # pragma: no cover 154 | ftype = kernel32.GetFileType(hnd) 155 | if ftype == FILE_TYPE_UNKNOWN: 156 | code = ctypes.GetLastError() 157 | if code != NO_ERROR: 158 | if not raise_for_error: 159 | kernel32.SetLastError(0) 160 | return None 161 | raise create_winerror("GetFileType") # pragma: no cover 162 | if ftype != FILE_TYPE_DISK: 163 | if not raise_for_error: 164 | return None 165 | raise OSError("Given handle is not a file") # pragma: no cover 166 | status = kernel32.GetFinalPathNameByHandleW(hnd, buf, buf_size, 0) 167 | finally: 168 | if process_handle: 169 | kernel32.CloseHandle(process_handle) 170 | if close_hnd: 171 | kernel32.CloseHandle(hnd) 172 | if not status: 173 | if not raise_for_error: 174 | kernel32.SetLastError(0) 175 | return None 176 | raise 
create_winerror("GetFinalPathnameByHandle") # pragma: no cover 177 | return Path(ctypes.wstring_at(buf)[4:]) # always prefixed with \\?\ 178 | 179 | 180 | def pids_by_file() -> dict[Path, set[int]]: 181 | """Create a mapping of open paths to the Processes that own the open file handles. 182 | 183 | Args: 184 | None 185 | 186 | Returns: 187 | dict mapping Path (the path of the open file) to a set of PIDs which have 188 | that path open. 189 | """ 190 | result: dict[Path, set[int]] = {} 191 | for hnd in nt_query_system_handle_information_ex().Handles: 192 | fname = pid_handle_to_filename(hnd.UniqueProcessId, hnd.HandleValue) 193 | if fname is not None: 194 | proc_pids = result.setdefault(fname, set()) 195 | proc_pids.add(hnd.UniqueProcessId) 196 | return result 197 | 198 | 199 | if __name__ == "__main__": # pragma: no cover 200 | 201 | def main() -> None: 202 | """test main""" 203 | printed = False 204 | for path, pids in sorted(pids_by_file().items()): 205 | print(f"{path}") 206 | for pid in sorted(pids): 207 | print(f"\t{pid}") 208 | printed = True 209 | if not printed: 210 | print("no open files?", file=sys.stderr) 211 | sys.exit(1) 212 | 213 | main() 214 | -------------------------------------------------------------------------------- /src/ffpuppet/bootstrapper.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | """ffpuppet bootstrapper module""" 5 | 6 | from __future__ import annotations 7 | 8 | from logging import getLogger 9 | from select import select 10 | from socket import SO_REUSEADDR, SOL_SOCKET, socket 11 | from time import perf_counter, sleep 12 | from typing import TYPE_CHECKING, Callable 13 | 14 | # as of python 3.10 socket.timeout was made an alias of TimeoutError 15 | # pylint: disable=ungrouped-imports,wrong-import-order 16 | from socket import timeout as socket_timeout # isort: skip 17 | 18 | from .exceptions import BrowserTerminatedError, BrowserTimeoutError, LaunchError 19 | 20 | if TYPE_CHECKING: 21 | from collections.abc import Iterable 22 | 23 | LOG = getLogger(__name__) 24 | 25 | __author__ = "Tyson Smith" 26 | 27 | 28 | class Bootstrapper: # pylint: disable=missing-docstring 29 | # see: searchfox.org/mozilla-central/source/netwerk/base/nsIOService.cpp 30 | # include ports above 1023 31 | BLOCKED_PORTS = frozenset( 32 | ( 33 | 1719, 34 | 1720, 35 | 1723, 36 | 2049, 37 | 3659, 38 | 4045, 39 | 5060, 40 | 5061, 41 | 6000, 42 | 6566, 43 | 6665, 44 | 6666, 45 | 6667, 46 | 6668, 47 | 6669, 48 | 6697, 49 | 10080, 50 | ) 51 | ) 52 | # receive buffer size 53 | BUF_SIZE = 4096 54 | # duration of initial blocking socket operations 55 | POLL_WAIT = 1.0 56 | 57 | __slots__ = ("_socket",) 58 | 59 | def __init__(self, sock: socket) -> None: 60 | self._socket = sock 61 | 62 | def __enter__(self) -> Bootstrapper: 63 | return self 64 | 65 | def __exit__(self, *exc: object) -> None: 66 | self.close() 67 | 68 | @classmethod 69 | def check_port(cls, value: int) -> bool: 70 | """Verify port value is in valid range. 71 | 72 | Args: 73 | None 74 | 75 | Returns: 76 | bool 77 | """ 78 | return value == 0 or 1024 <= value <= 65535 79 | 80 | def close(self) -> None: 81 | """Close listening socket. 
82 | 83 | Args: 84 | None 85 | 86 | Returns: 87 | None 88 | """ 89 | self._socket.close() 90 | 91 | @classmethod 92 | def create(cls, attempts: int = 50, port: int = 0) -> Bootstrapper: 93 | """Create a Bootstrapper. 94 | 95 | Args: 96 | attempts: Number of times to attempt to bind. 97 | port: Port to use. Use 0 for system select. 98 | 99 | Returns: 100 | Bootstrapper. 101 | """ 102 | sock = cls.create_socket(attempts=attempts, port=port) 103 | if sock is None: 104 | raise LaunchError("Could not find available port") 105 | return cls(sock) 106 | 107 | @classmethod 108 | def create_socket( 109 | cls, 110 | attempts: int = 50, 111 | blocked: Iterable[int] | None = BLOCKED_PORTS, 112 | port: int = 0, 113 | ) -> socket | None: 114 | """Create a listening socket. 115 | 116 | Args: 117 | attempts: Number of times to attempt to bind. 118 | blocked: Ports that cannot be used. 119 | port: Port to use. Use 0 for system select. 120 | 121 | Returns: 122 | A listening socket. 123 | """ 124 | assert attempts > 0 125 | if not cls.check_port(port): 126 | LOG.debug("requested invalid port: %d", port) 127 | return None 128 | if blocked and port in blocked: 129 | LOG.debug("requested blocked port: %d", port) 130 | return None 131 | for _ in range(attempts): 132 | sock = socket() 133 | sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) 134 | try: 135 | sock.bind(("127.0.0.1", port)) 136 | sock.listen() 137 | except (OSError, PermissionError) as exc: 138 | LOG.debug("%s: %s", type(exc).__name__, exc) 139 | sock.close() 140 | sleep(0.1) 141 | continue 142 | # avoid blocked ports 143 | if blocked and sock.getsockname()[1] in blocked: 144 | LOG.debug("bound to blocked port, retrying...") 145 | sock.close() 146 | continue 147 | break 148 | else: 149 | return None 150 | return sock 151 | 152 | @property 153 | def location(self) -> str: 154 | """Location in the format of 'http://127.0.0.1:#'. 155 | 156 | Args: 157 | None 158 | 159 | Returns: 160 | Location. 
161 | """ 162 | return f"http://127.0.0.1:{self.port}" 163 | 164 | @property 165 | def port(self) -> int: 166 | """Listening socket port number. 167 | 168 | Args: 169 | None 170 | 171 | Returns: 172 | Port number. 173 | """ 174 | return int(self._socket.getsockname()[1]) 175 | 176 | def wait( 177 | self, 178 | cb_continue: Callable[[], bool], 179 | timeout: float = 60, 180 | url: str | None = None, 181 | ) -> None: 182 | """Wait for browser connection, read request and send response. 183 | 184 | Args: 185 | cb_continue: Callback that communicates browser process health. 186 | timeout: Amount of time wait before raising BrowserTimeoutError. 187 | url: Location to redirect to. 188 | 189 | Returns: 190 | None 191 | """ 192 | assert timeout >= 0 193 | start_time = perf_counter() 194 | time_limit = start_time + timeout 195 | conn: socket | None = None 196 | try: 197 | LOG.debug("waiting for browser connection...") 198 | while conn is None: 199 | readable, _, _ = select([self._socket], (), (), self.POLL_WAIT) 200 | if self._socket not in readable: 201 | # no connections ready for reading 202 | if not cb_continue(): 203 | raise BrowserTerminatedError( 204 | "Failure waiting for browser connection" 205 | ) 206 | if perf_counter() >= time_limit: 207 | raise BrowserTimeoutError( 208 | "Timeout waiting for browser connection" 209 | ) 210 | continue 211 | conn, _ = self._socket.accept() 212 | conn.settimeout(1) 213 | count_recv = 0 214 | total_recv = 0 215 | LOG.debug("waiting for browser request...") 216 | while True: 217 | try: 218 | count_recv = len(conn.recv(self.BUF_SIZE)) 219 | total_recv += count_recv 220 | except socket_timeout: 221 | # use -1 to indicate timeout 222 | count_recv = -1 223 | if count_recv == self.BUF_SIZE: 224 | # check if there is more to read 225 | continue 226 | if total_recv: 227 | LOG.debug("request size: %d bytes(s)", total_recv) 228 | break 229 | if not cb_continue(): 230 | raise BrowserTerminatedError("Failure waiting for request") 231 | if 
perf_counter() >= time_limit: 232 | raise BrowserTimeoutError("Timeout waiting for request") 233 | if count_recv == 0: 234 | LOG.debug("connection failed, waiting for next connection...") 235 | conn.close() 236 | conn = None 237 | break 238 | 239 | # build response 240 | if url is None: 241 | resp = "HTTP/1.1 204 No Content\r\nConnection: close\r\n\r\n" 242 | else: 243 | resp = ( 244 | "HTTP/1.1 301 Moved Permanently\r\n" 245 | f"Location: {url}\r\n" 246 | "Connection: close\r\n\r\n" 247 | ) 248 | # set timeout to match remaining time 249 | conn.settimeout(max(time_limit - perf_counter(), 1)) 250 | LOG.debug("sending response (redirect: %s)", url) 251 | try: 252 | conn.sendall(resp.encode("ascii")) 253 | except socket_timeout: 254 | resp_timeout = True 255 | else: 256 | resp_timeout = False 257 | if not cb_continue(): 258 | raise BrowserTerminatedError("Failure during browser startup") 259 | if resp_timeout: 260 | raise BrowserTimeoutError("Timeout sending response") 261 | LOG.debug("bootstrap complete (%0.1fs)", perf_counter() - start_time) 262 | except OSError as exc: # pragma: no cover 263 | raise LaunchError(f"Error attempting to launch browser: {exc}") from exc 264 | finally: 265 | if conn is not None: 266 | conn.close() 267 | -------------------------------------------------------------------------------- /src/ffpuppet/test_bootstrapper.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
"""ffpuppet bootstrapper tests"""
# pylint: disable=protected-access

# as of python 3.10 socket.timeout was made an alias of TimeoutError
# pylint: disable=ungrouped-imports
from socket import timeout as socket_timeout  # isort: skip

from itertools import repeat
from socket import socket
from threading import Thread

from pytest import mark, raises

from .bootstrapper import Bootstrapper
from .exceptions import BrowserTerminatedError, BrowserTimeoutError, LaunchError


def test_bootstrapper_01():
    """test Bootstrapper.create()"""
    with Bootstrapper.create() as bts:
        assert bts._socket is not None
        assert bts.location.startswith("http://127.0.0.1:")
        assert int(bts.location.rsplit(":", maxsplit=1)[-1]) >= 1024
        assert bts.port >= 1024
        assert bts.port not in Bootstrapper.BLOCKED_PORTS
        bts.close()


@mark.parametrize(
    "exc, msg, continue_cb",
    [
        # test failure
        (
            BrowserTerminatedError,
            "Failure waiting for browser connection",
            lambda: False,
        ),
        # test timeout
        (
            BrowserTimeoutError,
            "Timeout waiting for browser connection",
            lambda: True,
        ),
    ],
)
def test_bootstrapper_02(mocker, exc, msg, continue_cb):
    """test Bootstrapper.wait() failure waiting for initial connection"""
    # select() reports no readable sockets so wait() never accepts a connection
    mocker.patch("ffpuppet.bootstrapper.select", return_value=([], None, None))
    # perf_counter values drive the timeout branch on the second loop pass
    mocker.patch("ffpuppet.bootstrapper.perf_counter", side_effect=(1, 1, 2, 3))
    fake_sock = mocker.MagicMock(spec_set=socket)
    with Bootstrapper(fake_sock) as bts:
        with raises(exc, match=msg):
            bts.wait(continue_cb, timeout=2)
        assert fake_sock.accept.call_count == 0


def test_bootstrapper_03(mocker):
    """test Bootstrapper.wait() failure waiting for request"""
    fake_sock = mocker.MagicMock(spec_set=socket)
    fake_conn = mocker.Mock(spec_set=socket)
    # every recv() call times out, so no request data ever arrives
    fake_conn.recv.side_effect = socket_timeout
    fake_sock.accept.return_value = (fake_conn, None)
    mocker.patch("ffpuppet.bootstrapper.select", return_value=([fake_sock], None, None))
    with Bootstrapper(fake_sock) as bts:
        # test failure
        with raises(BrowserTerminatedError, match="Failure waiting for request"):
            bts.wait(lambda: False)
        assert fake_conn.recv.call_count == 1
        assert fake_conn.close.call_count == 1
        fake_conn.reset_mock()
        # test timeout
        mocker.patch("ffpuppet.bootstrapper.perf_counter", side_effect=(1, 1, 1, 1, 2))
        with raises(BrowserTimeoutError, match="Timeout waiting for request"):
            bts.wait(lambda: True, timeout=0.1)
        # should call recv() at least 2x for positive and negative timeout check
        assert fake_conn.recv.call_count > 1
        assert fake_conn.close.call_count == 1


def test_bootstrapper_04(mocker):
    """test Bootstrapper.wait() failure sending response"""
    fake_sock = mocker.MagicMock(spec_set=socket)
    fake_conn = mocker.Mock(spec_set=socket)
    fake_conn.recv.return_value = "A"
    # sendall() always times out to exercise the response-timeout path
    fake_conn.sendall.side_effect = socket_timeout
    fake_sock.accept.return_value = (fake_conn, None)
    mocker.patch("ffpuppet.bootstrapper.select", return_value=([fake_sock], None, None))
    with Bootstrapper(fake_sock) as bts:
        # test timeout
        with raises(BrowserTimeoutError, match="Timeout sending response"):
            bts.wait(lambda: True)
        assert fake_conn.recv.call_count == 1
        assert fake_conn.sendall.call_count == 1
        assert fake_conn.close.call_count == 1
        fake_conn.reset_mock()
        # test failure
        with raises(BrowserTerminatedError, match="Failure during browser startup"):
            bts.wait(lambda: False)
        assert fake_conn.recv.call_count == 1
        assert fake_conn.sendall.call_count == 1
        assert fake_conn.close.call_count == 1


def test_bootstrapper_05(mocker):
    """test Bootstrapper.wait() target crashed"""
    fake_sock = mocker.MagicMock(spec_set=socket)
    fake_conn = mocker.Mock(spec_set=socket)
    fake_conn.recv.return_value = "foo"
    fake_sock.accept.return_value = (fake_conn, None)
    mocker.patch("ffpuppet.bootstrapper.select", return_value=([fake_sock], None, None))
    with (
        Bootstrapper(fake_sock) as bts,
        raises(BrowserTerminatedError, match="Failure during browser startup"),
    ):
        bts.wait(lambda: False)
    assert fake_conn.close.call_count == 1


@mark.parametrize(
    "redirect, recv, closed",
    [
        # normal startup
        (None, ("foo",), 1),
        # with a redirect url
        ("http://127.0.0.1:9999/test.html", ("foo",), 1),
        # request size matches buffer size
        (None, ("A" * Bootstrapper.BUF_SIZE, socket_timeout), 1),
        # large request
        (None, ("A" * Bootstrapper.BUF_SIZE, "foo"), 1),
        # slow startup
        (None, (socket_timeout, socket_timeout, "foo"), 1),
        # slow failed startup with retry
        (None, (socket_timeout, "", "foo"), 2),
    ],
)
def test_bootstrapper_06(mocker, redirect, recv, closed):
    """test Bootstrapper.wait()"""
    fake_sock = mocker.MagicMock(spec_set=socket)
    fake_conn = mocker.Mock(spec_set=socket)
    # recv side_effect sequence simulates the browser request arriving
    fake_conn.recv.side_effect = recv
    fake_sock.accept.return_value = (fake_conn, None)
    mocker.patch("ffpuppet.bootstrapper.select", return_value=([fake_sock], None, None))
    with Bootstrapper(fake_sock) as bts:
        bts.wait(lambda: True, url=redirect)
    assert fake_conn.close.call_count == closed
    assert fake_conn.recv.call_count == len(recv)
    assert fake_conn.sendall.call_count == 1


def test_bootstrapper_07():
    """test Bootstrapper.wait() with a fake browser"""

    def _fake_browser(port, payload_size=5120):
        # minimal stand-in for the browser side of the bootstrap handshake
        with socket() as conn:
            # 50 x 0.1 = 5 seconds
            conn.settimeout(0.1)
            # open connection
            for attempt in reversed(range(50)):
                try:
                    conn.connect(("127.0.0.1", port))
                    break
                except socket_timeout:
                    if not attempt:
                        raise
            # send request and receive response
            conn.settimeout(10)
            conn.sendall(b"A" * payload_size)
            conn.send(b"")
            conn.recv(8192)

    with Bootstrapper.create() as bts:
        browser_thread = Thread(target=_fake_browser, args=(bts.port,))
        try:
            browser_thread.start()
            bts.wait(lambda: True, timeout=10)
        finally:
            browser_thread.join()


@mark.parametrize(
    "bind, attempts",
    [
        # failed to bind (OSError)
        ((OSError(0, "foo1"),), 1),
        # failed to bind (PermissionError) - multiple attempts
        (repeat(PermissionError(10013, "foo2"), 4), 4),
    ],
)
def test_bootstrapper_08(mocker, bind, attempts):
    """test Bootstrapper.create_socket() - failures"""
    mocker.patch("ffpuppet.bootstrapper.sleep", autospec=True)
    fake_sock = mocker.MagicMock(spec_set=socket)
    fake_sock.bind.side_effect = bind
    mocker.patch("ffpuppet.bootstrapper.select", return_value=([fake_sock], None, None))
    mocker.patch("ffpuppet.bootstrapper.socket", return_value=fake_sock)
    assert Bootstrapper.create_socket(attempts=attempts) is None
    assert fake_sock.bind.call_count == attempts
    assert fake_sock.close.call_count == attempts


def test_bootstrapper_09(mocker):
    """test Bootstrapper() - blocked ports"""
    fake_sock = mocker.MagicMock(spec_set=socket)
    # first bind lands on a blocked port, second succeeds on 12345
    fake_sock.getsockname.side_effect = (
        (None, next(iter(Bootstrapper.BLOCKED_PORTS))),
        (None, 12345),
    )
    mocker.patch("ffpuppet.bootstrapper.socket", return_value=fake_sock)
    with Bootstrapper.create(attempts=2):
        pass
    assert fake_sock.close.call_count == 2


def test_bootstrapper_10(mocker):
    """test Bootstrapper.create() - failure"""
    mocker.patch("ffpuppet.bootstrapper.Bootstrapper.create_socket", return_value=None)
    with raises(LaunchError), Bootstrapper.create():
        pass


@mark.parametrize("value", [123, 5555])
def test_bootstrapper_11(value):
    """test Bootstrapper.create_socket() - unusable ports"""
    assert Bootstrapper.create_socket(blocked=[5555], port=value) is None


@mark.parametrize(
    "value, result",
    [
        (0, True),
        (1337, True),
        (32768, True),
        (-1, False),
        (1, False),
        (1023, False),
        (65536, False),
    ],
)
def test_bootstrapper_12(value, result):
    """test Bootstrapper.check_port()"""
    assert Bootstrapper.check_port(value) == result
--------------------------------------------------------------------------------
/src/ffpuppet/test_helpers.py:
--------------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""ffpuppet helpers tests"""

from os import getpid
from pathlib import Path
from subprocess import CalledProcessError

from pytest import mark, raises

from .helpers import (
    CERTUTIL,
    _configure_sanitizers,
    certutil_available,
    certutil_find,
    detect_sanitizer,
    files_in_use,
    prepare_environment,
    wait_on_files,
    warn_open,
)
from .sanitizer_util import SanitizerOptions


def test_helpers_01(tmp_path):
    """test _configure_sanitizers()"""
    # test with empty environment
    env = _configure_sanitizers({}, tmp_path)
    assert "ASAN_OPTIONS" in env
    opts = SanitizerOptions(env["ASAN_OPTIONS"])
    assert opts.get("external_symbolizer_path") is None
    assert opts.get("detect_leaks") == "false"
    assert opts.get("log_path") == f"'{tmp_path}'"
    assert "LSAN_OPTIONS" in env
    assert "UBSAN_OPTIONS" in env
    # test symbolize
    env = _configure_sanitizers({}, tmp_path, symbolize=False)
    assert "symbolize=0" in env["ASAN_OPTIONS"]
    assert "symbolize=0" in env["TSAN_OPTIONS"]
    env = _configure_sanitizers({}, tmp_path, symbolize=True)
    assert "symbolize=1" in env["ASAN_OPTIONS"]
    assert "symbolize=1" in env["TSAN_OPTIONS"]
    # test with presets environment
    env = _configure_sanitizers(
        {
            "ASAN_OPTIONS": "detect_leaks=true",
            "LSAN_OPTIONS": "a=1",
            "UBSAN_OPTIONS": "",
        },
        tmp_path,
    )
    assert "ASAN_OPTIONS" in env
    opts = SanitizerOptions(env["ASAN_OPTIONS"])
    # preset value must win over the default
    assert opts.get("detect_leaks") == "true"
    assert "LSAN_OPTIONS" in env
    assert "UBSAN_OPTIONS" in env
    opts = SanitizerOptions(env["UBSAN_OPTIONS"])
    assert opts.get("print_stacktrace") is not None
    # test suppression file
    sup = tmp_path / "test.sup"
    sup.touch()
    env = _configure_sanitizers({"ASAN_OPTIONS": f"suppressions='{sup}'"}, tmp_path)
    opts = SanitizerOptions(env["ASAN_OPTIONS"])
    assert opts.get("suppressions") is not None
    # test overwrite log_path
    env = _configure_sanitizers(
        {
            "ASAN_OPTIONS": "log_path='overwrite'",
            "TSAN_OPTIONS": "log_path='overwrite'",
            "UBSAN_OPTIONS": "log_path='overwrite'",
        },
        tmp_path,
    )
    assert "ASAN_OPTIONS" in env
    opts = SanitizerOptions(env["ASAN_OPTIONS"])
    # log_path is always forced to the provided path
    assert opts.get("log_path") == f"'{tmp_path}'"
    assert "UBSAN_OPTIONS" in env
    opts = SanitizerOptions(env["UBSAN_OPTIONS"])
    assert opts.get("log_path") == f"'{tmp_path}'"
    # test missing suppression file
    with raises(AssertionError, match="missing suppressions file"):
        _configure_sanitizers({"ASAN_OPTIONS": "suppressions=not_a_file"}, tmp_path)
    # unquoted path containing ':'
    with raises(ValueError, match=r"\(strip_path_prefix\) must be quoted"):
        _configure_sanitizers(
            {"ASAN_OPTIONS": "strip_path_prefix=x:\\foo\\bar"}, tmp_path
        )
    # multiple options
    options = (
        "opt1=1",
        "opt2=",
        "opt3=test",
        "opt4='x:\\foo'",
        'opt5="z:/bar"',
        "opt6=''",
        "opt7='/with space/'",
        "opt8='x:\\with a space\\or two'",
    )
    env = _configure_sanitizers({"ASAN_OPTIONS": ":".join(options)}, tmp_path)
    opts = SanitizerOptions(env["ASAN_OPTIONS"])
    for key, value in (x.split(sep="=", maxsplit=1) for x in options):
        assert opts.get(key) == value
    # test malformed option pair
    env = _configure_sanitizers({"ASAN_OPTIONS": "a=b=c:malformed"}, tmp_path)
    opts = SanitizerOptions(env["ASAN_OPTIONS"])
    assert opts.get("a") == "b=c"
    assert "malformed" not in str(opts)


def test_helpers_02(tmp_path):
    """test prepare_environment()"""
    env = prepare_environment(tmp_path)
    assert "ASAN_OPTIONS" in env
    assert "LSAN_OPTIONS" in env
    assert "UBSAN_OPTIONS" in env
    assert "RUST_BACKTRACE" in env
    assert "MOZ_CRASHREPORTER" in env


def test_helpers_03(mocker, tmp_path):
    """test prepare_environment() using some predefined environment variables"""
    mocker.patch.dict(
        "ffpuppet.helpers.environ",
        {
            "MOZ_SKIA_DISABLE_ASSERTS": "0",
            "TEST_EXISTING_OVERWRITE": "0",
            "TEST_EXISTING_REMOVE": "1",
            "TEST_SECRET_TO_REMOVE": "1",
        },
    )
    # entries set to None request removal of an existing/default variable
    pre = {
        "LSAN_OPTIONS": "lopt=newopt",
        "MOZ_GDB_SLEEP": "2",  # update default
        "MOZ_SKIA_DISABLE_ASSERTS": "1",  # existing optional
        "RUST_BACKTRACE": None,  # remove default
        "TEST_FAKE": None,  # remove non existing entry
        "TEST_VAR": "123",  # add non existing entry
        "TEST_EXISTING_OVERWRITE": "1",
        "TEST_EXISTING_REMOVE": None,
    }
    env = prepare_environment(tmp_path, pre)
    assert "ASAN_OPTIONS" in env
    assert "LSAN_OPTIONS" in env
    assert "lopt=newopt" in env["LSAN_OPTIONS"].split(":")
    assert "max_leaks=1" in env["LSAN_OPTIONS"].split(":")
    assert "UBSAN_OPTIONS" in env
    assert env["TEST_VAR"] == "123"
    assert "MOZ_CRASHREPORTER" in env
    assert env["MOZ_GDB_SLEEP"] == "2"
    assert "RUST_BACKTRACE" not in env
    assert "TEST_FAKE" not in env
    assert "TEST_EXISTING_REMOVE" not in env
    assert env["MOZ_SKIA_DISABLE_ASSERTS"] == "0"
    assert env["TEST_EXISTING_OVERWRITE"] == "1"
    assert "TEST_SECRET_TO_REMOVE" not in env
    # MOZ_CRASHREPORTER should not be added if MOZ_CRASHREPORTER_DISABLE is set
    pre = {"MOZ_CRASHREPORTER_DISABLE": "1"}
    env = prepare_environment(tmp_path, pre)
    assert "MOZ_CRASHREPORTER" not in env


def test_helpers_04(mocker, tmp_path):
    """test wait_on_files()"""
    fake_sleep = mocker.patch("ffpuppet.helpers.sleep", autospec=True)
    fake_time = mocker.patch("ffpuppet.helpers.perf_counter", autospec=True)
    t_file = tmp_path / "file.bin"
    t_file.touch()
    # test with open file (timeout)
    fake_time.side_effect = (1, 1, 2)
    with (tmp_path / "open.bin").open("w") as wait_fp:
        assert not wait_on_files([Path(wait_fp.name), t_file], timeout=0.1)
    assert fake_sleep.call_count == 1
    fake_sleep.reset_mock()
    # existing but closed file
    fake_time.side_effect = (1, 1)
    assert wait_on_files([t_file])
    assert fake_sleep.call_count == 0
    # file that does not exist
    fake_time.side_effect = (1, 1)
    assert wait_on_files([Path("missing")])
    assert fake_sleep.call_count == 0
    # empty file list
    fake_time.side_effect = (1, 1)
    assert wait_on_files([])
    assert fake_sleep.call_count == 0


def test_helpers_06(tmp_path):
    """test files_in_use()"""
    t_file = tmp_path / "file.bin"
    t_file.touch()
    # test with open file
    with (tmp_path / "file").open("w") as wait_fp:
        in_use = next(files_in_use([t_file, Path(wait_fp.name)]))
        assert in_use
        # result is a (path, pid, process name) tuple
        assert len(in_use) == 3
        assert Path(wait_fp.name).samefile(in_use[0])
        assert in_use[1] == getpid()
        assert isinstance(in_use[2], str)
    # existing but closed file
    assert not any(files_in_use([t_file]))
    # missing file
    assert not any(files_in_use([tmp_path / "missing_file"]))
    # no files
    assert not any(files_in_use([]))


def test_helpers_07(tmp_path):
    """test warn_open()"""
    with (tmp_path / "file.bin").open("w") as _:
        warn_open(tmp_path)


@mark.parametrize(
    "raised, result",
    [
        (None, False),
        (OSError("test"), False),
        (CalledProcessError(1, "test"), False),
        (
            CalledProcessError(
                1,
                "test",
                output=b"certutil - Utility to manipulate NSS certificate databases",
            ),
            True,
        ),
    ],
)
def test_certutil_available_01(mocker, raised, result):
    """test certutil_available()"""
    mocker.patch("ffpuppet.helpers.check_output", autospec=True, side_effect=raised)
    assert certutil_available(CERTUTIL) == result


def test_certutil_find_01(tmp_path):
    """test certutil_find()"""
    # default
    assert certutil_find() == CERTUTIL
    # missing bundled certutil
    browser_bin = tmp_path / "browser"
    browser_bin.touch()
    assert certutil_find(browser_bin) == CERTUTIL
    # found bundled certutil
    certutil_bin = tmp_path / "bin" / CERTUTIL
    certutil_bin.parent.mkdir()
    certutil_bin.touch()
    assert certutil_find(browser_bin) == str(certutil_bin)


@mark.parametrize(
    "bin_content, result",
    [
        (b"_foo", None),
        (b"foo __asan_foo", "asan"),
        (b"foo __tsan_foo", "tsan"),
        (b"foo __ubsan_foo", "ubsan"),
    ],
)
def test_detect_sanitizer_01(tmp_path, bin_content, result):
    """test detect_sanitizer()"""
    binary = tmp_path / "file.bin"
    binary.write_bytes(bin_content)
    assert detect_sanitizer(binary) == result
--------------------------------------------------------------------------------
/src/ffpuppet/test_puppet_logger.py:
--------------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
4 | """ffpuppet puppet logger tests""" 5 | # pylint: disable=protected-access 6 | 7 | from os import stat 8 | from os.path import isfile 9 | from tempfile import SpooledTemporaryFile 10 | from time import sleep 11 | 12 | from pytest import raises 13 | 14 | from .puppet_logger import PuppetLogger 15 | 16 | 17 | def test_puppet_logger_01(tmp_path): 18 | """test simple PuppetLogger()""" 19 | plog = PuppetLogger(base_path=str(tmp_path)) 20 | assert not plog.closed 21 | assert not plog._logs 22 | assert plog.path is not None 23 | assert plog.path.is_dir() 24 | assert plog._base == str(tmp_path) 25 | assert any(tmp_path.iterdir()) 26 | plog.close() 27 | assert any(tmp_path.iterdir()) 28 | assert plog.closed 29 | with raises(AssertionError): 30 | plog.add_log("test") 31 | assert plog.log_length("missing") is None 32 | 33 | 34 | def test_puppet_logger_02(tmp_path): 35 | """test PuppetLogger.add_log() and PuppetLogger.available_logs()""" 36 | with PuppetLogger(base_path=str(tmp_path)) as plog: 37 | assert not plog._logs 38 | assert not plog.available_logs() 39 | assert not any(plog.files) 40 | plog.add_log("test_new") # non-existing log 41 | assert "test_new" in plog.available_logs() 42 | plog_fp_test_new = plog.get_fp("test_new") 43 | assert plog_fp_test_new is not None 44 | assert isfile(plog_fp_test_new.name) 45 | with (tmp_path / "test_existing.txt").open("w+b") as in_fp: 46 | in_fp.write(b"blah") 47 | plog.add_log("test_existing", logfp=in_fp) 48 | assert len(plog.available_logs()) == 2 49 | assert len(tuple(plog.files)) == 2 50 | plog_fp_test_existing = plog.get_fp("test_existing") 51 | assert plog_fp_test_existing is not None 52 | assert isfile(plog_fp_test_existing.name) 53 | assert plog.log_length("test_new") == 0 54 | assert plog.log_length("test_existing") == 4 55 | 56 | 57 | def test_puppet_logger_03(tmp_path): 58 | """test PuppetLogger.clean_up()""" 59 | with PuppetLogger(base_path=str(tmp_path)) as plog: 60 | assert not plog.closed 61 | assert not plog._logs 
62 | assert plog.path is not None 63 | assert plog.path.is_dir() 64 | assert plog._base == str(tmp_path) 65 | assert any(tmp_path.iterdir()) 66 | plog.add_log("test_new") 67 | plog.clean_up() 68 | assert plog.closed 69 | assert not any(tmp_path.iterdir()) 70 | assert plog.path is None 71 | assert plog.closed 72 | assert not plog._logs 73 | 74 | 75 | def test_puppet_logger_04(tmp_path): 76 | """test PuppetLogger.reset()""" 77 | with PuppetLogger(base_path=str(tmp_path)) as plog: 78 | plog.add_log("test_new") 79 | plog.clean_up() 80 | plog.reset() 81 | assert not plog.closed 82 | assert not plog._logs 83 | assert plog.path is not None 84 | assert plog.path.is_dir() 85 | assert plog._base == str(tmp_path) 86 | assert len(tuple(tmp_path.iterdir())) == 1 87 | 88 | 89 | def test_puppet_logger_05(tmp_path): 90 | """test PuppetLogger.clone_log()""" 91 | with PuppetLogger(base_path=str(tmp_path)) as plog: 92 | plog.add_log("test_empty") 93 | plog.add_log("test_extra") 94 | plog_fp_test_extra = plog.get_fp("test_extra") 95 | assert plog_fp_test_extra is not None 96 | plog_fp_test_extra.write(b"stuff") 97 | plog_fp_test_extra.flush() 98 | # test clone 99 | plog.add_log("test_new") 100 | pl_fp = plog.get_fp("test_new") 101 | assert pl_fp is not None 102 | pl_fp.write(b"test1") 103 | cloned = plog.clone_log("test_new") 104 | assert cloned is not None 105 | assert cloned.is_file() 106 | assert cloned.read_bytes() == b"test1" 107 | cloned.unlink() 108 | # test target exists 109 | target = tmp_path / "target.txt" 110 | target.touch() 111 | pl_fp.write(b"test2") 112 | pl_fp.flush() 113 | cloned = plog.clone_log("test_new", target_file=str(target)) 114 | assert cloned is not None 115 | assert cloned.is_file() 116 | assert cloned.read_bytes() == b"test1test2" 117 | cloned.unlink() 118 | # test target does not exist with offset 119 | assert not target.is_file() 120 | pl_fp.write(b"test3") 121 | pl_fp.flush() 122 | cloned = plog.clone_log("test_new", target_file=str(target), offset=4) 
123 | assert cloned is not None 124 | assert cloned.is_file() 125 | assert cloned.read_bytes() == b"1test2test3" 126 | assert plog.log_length("test_new") == 15 127 | cloned.unlink() 128 | # test non existent log 129 | assert plog.clone_log("no_log") is None 130 | # test empty log 131 | assert plog.log_length("test_empty") == 0 132 | cloned = plog.clone_log("test_empty") 133 | assert cloned is not None 134 | assert cloned.is_file() 135 | assert not cloned.stat().st_size 136 | cloned.unlink() 137 | 138 | 139 | def test_puppet_logger_06(tmp_path): 140 | """test PuppetLogger.save_logs()""" 141 | with PuppetLogger(base_path=str(tmp_path)) as plog: 142 | plog.close() 143 | # save when there are no logs 144 | dest = tmp_path / "dest" 145 | plog.save_logs(dest) 146 | assert not any(dest.iterdir()) 147 | plog.reset() 148 | dest.rmdir() 149 | # add small log 150 | plog.add_log("test_1") 151 | plog_fp_test_1 = plog.get_fp("test_1") 152 | assert plog_fp_test_1 is not None 153 | plog_fp_test_1.write(b"test1\ntest1\n") 154 | # add binary data in log 155 | plog.add_log("test_2") 156 | plog_fp_test_2 = plog.get_fp("test_2") 157 | assert plog_fp_test_2 is not None 158 | plog_fp_test_2.write(b"\x00TEST\xff\xef") 159 | # add empty log 160 | plog.add_log("test_empty") 161 | # add larger log (not a power of 2 to help catch buffer issues) 162 | plog.add_log("test_3") 163 | data = b"A" * 1234 164 | plog_fp_test_3 = plog.get_fp("test_3") 165 | assert plog_fp_test_3 is not None 166 | for _ in range(500): 167 | plog_fp_test_3.write(data) 168 | # delay to check if creation time was copied when save_logs is called 169 | sleep(0.1) 170 | plog.close() 171 | dest.mkdir() 172 | plog.save_logs(dest) 173 | # check saved file count 174 | assert len(plog.available_logs()) == 4 175 | assert len(tuple(dest.iterdir())) == 4 176 | # verify all data was copied 177 | assert stat(plog_fp_test_1.name).st_size == 12 178 | assert stat(plog_fp_test_2.name).st_size == 7 179 | assert 
stat(plog_fp_test_3.name).st_size == 500 * 1234 180 | 181 | 182 | def test_puppet_logger_07(mocker, tmp_path): 183 | """test PuppetLogger.save_logs() rr trace directory""" 184 | fake_ck = mocker.patch("ffpuppet.puppet_logger.check_output", autospec=True) 185 | with PuppetLogger(base_path=str(tmp_path)) as plog: 186 | assert plog.path is not None 187 | # add log data to test rr backtrace detection 188 | with (tmp_path / "test_stderr.txt").open("w+b") as in_fp: 189 | in_fp.write(b"foo\n") 190 | in_fp.write(b"=== Start rr backtrace:\n") 191 | in_fp.write(b"foo\n") 192 | plog.add_log("stderr", logfp=in_fp) 193 | (plog.path / plog.PATH_RR / "latest-trace").mkdir(parents=True) 194 | plog.close() 195 | # test call to rr failing 196 | fake_ck.side_effect = OSError 197 | plog.save_logs(tmp_path / "dest1", rr_pack=True) 198 | assert fake_ck.call_count == 1 199 | assert not plog._rr_packed 200 | # test call to rr passing 201 | fake_ck.side_effect = None 202 | plog.save_logs(tmp_path / "dest2", rr_pack=True) 203 | assert fake_ck.call_count == 2 204 | assert plog._rr_packed 205 | # test 'taskcluster-build-task' copied 206 | bin_path = tmp_path / "bin_path" 207 | bin_path.mkdir() 208 | (bin_path / "taskcluster-build-task").write_text("task-info\n") 209 | plog.save_logs(tmp_path / "dest3", bin_path=bin_path) 210 | assert ( 211 | tmp_path 212 | / "dest3" 213 | / "rr-traces" 214 | / "latest-trace" 215 | / "files.mozilla" 216 | / "taskcluster-build-task" 217 | ).is_file() 218 | assert fake_ck.call_count == 2 219 | assert plog._rr_packed 220 | 221 | 222 | def test_puppet_logger_08(tmp_path): 223 | """test PuppetLogger.add_log() with file not on disk""" 224 | with ( 225 | PuppetLogger(base_path=str(tmp_path)) as plog, 226 | SpooledTemporaryFile(max_size=2048) as log_fp, 227 | ): 228 | plog.add_log("test", logfp=log_fp) 229 | with raises(FileNotFoundError, match="Log file not found: None"): 230 | plog.get_fp("test") 231 | 232 | 233 | def test_puppet_logger_09(mocker, tmp_path): 234 | 
"""test PuppetLogger.clean_up() with in-use file or inaccessible directory""" 235 | fake_rmtree = mocker.patch("ffpuppet.puppet_logger.rmtree", autospec=True) 236 | with PuppetLogger(base_path=str(tmp_path)) as plog: 237 | plog.add_log("test") 238 | path = plog.path 239 | # test with ignore_errors=False 240 | fake_rmtree.side_effect = OSError("test") 241 | with raises(OSError): 242 | plog.clean_up() 243 | assert fake_rmtree.call_count == 1 244 | fake_rmtree.assert_called_with(path, ignore_errors=False) 245 | assert plog.path is not None 246 | fake_rmtree.reset_mock() 247 | # test with ignore_errors=True 248 | fake_rmtree.side_effect = None 249 | plog.clean_up(ignore_errors=True) 250 | assert fake_rmtree.call_count == 1 251 | fake_rmtree.assert_called_with(path, ignore_errors=True) 252 | assert plog.path is None 253 | 254 | 255 | def test_puppet_logger_10(tmp_path): 256 | """test PuppetLogger.add_path()""" 257 | with PuppetLogger(base_path=str(tmp_path)) as plog: 258 | path = plog.add_path("test") 259 | assert path.is_dir() 260 | (path / "simple.txt").write_text("test") 261 | plog.close() 262 | dest = tmp_path / "dest" 263 | plog.save_logs(dest) 264 | assert (dest / "test").is_dir() 265 | assert (dest / "test" / "simple.txt").is_file() 266 | -------------------------------------------------------------------------------- /src/ffpuppet/puppet_logger.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | """browser and debugger log management""" 5 | 6 | from __future__ import annotations 7 | 8 | from contextlib import suppress 9 | from logging import getLogger 10 | from mmap import ACCESS_READ, mmap 11 | from os import getpid, stat 12 | from os.path import isfile 13 | from pathlib import Path 14 | from shutil import copy2, copyfileobj, copytree, rmtree 15 | from subprocess import STDOUT, CalledProcessError, check_output 16 | from tempfile import NamedTemporaryFile, mkdtemp 17 | from typing import IO, TYPE_CHECKING 18 | 19 | from .helpers import warn_open 20 | 21 | if TYPE_CHECKING: 22 | from collections.abc import Generator 23 | 24 | LOG = getLogger(__name__) 25 | 26 | __author__ = "Tyson Smith" 27 | __credits__ = ["Tyson Smith"] 28 | 29 | 30 | class PuppetLogger: # pylint: disable=missing-docstring 31 | BUF_SIZE = 0x10000 # buffer size used to copy logs 32 | PATH_RR = "rr-traces" 33 | PREFIX_SAN = f"ffp_asan_{getpid()}.log" 34 | PREFIX_VALGRIND = f"valgrind.{getpid()}" 35 | 36 | __slots__ = ("_base", "_logs", "_rr_packed", "closed", "path", "watching") 37 | 38 | def __init__(self, base_path: str | None = None) -> None: 39 | self._base = base_path 40 | self._logs: dict[str, IO[bytes]] = {} 41 | self._rr_packed = False 42 | self.closed = True 43 | self.path: Path | None = None 44 | self.watching: dict[str, int] = {} 45 | self.reset() 46 | 47 | def __enter__(self) -> PuppetLogger: 48 | return self 49 | 50 | def __exit__(self, *exc: object) -> None: 51 | self.clean_up() 52 | 53 | def add_log(self, log_id: str, logfp: IO[bytes] | None = None) -> IO[bytes]: 54 | """Add a log file to the log manager. 55 | 56 | Args: 57 | log_id: ID of the log to add. 58 | logfp: File object to use. If None a new log file will be created. 59 | 60 | Returns: 61 | Newly added log file. 
62 | """ 63 | assert log_id not in self._logs 64 | assert not self.closed 65 | if logfp is None: 66 | logfp = PuppetLogger.open_unique( 67 | base_dir=str(self.path) if self.path else None 68 | ) 69 | self._logs[log_id] = logfp 70 | return logfp 71 | 72 | def add_path(self, name: str) -> Path: 73 | """Add a directory that can be used as temporary storage for 74 | miscellaneous items such as additional debugger output. 75 | 76 | Args: 77 | name: Name of directory to create. 78 | 79 | Returns: 80 | Path of newly created directory. 81 | """ 82 | assert not self.closed 83 | assert self.path is not None 84 | path = self.path / name 85 | LOG.debug("adding path '%s' as '%s'", name, path) 86 | path.mkdir() 87 | return path 88 | 89 | def available_logs(self) -> frozenset[str]: 90 | """IDs for the available logs. 91 | 92 | Args: 93 | None 94 | 95 | Returns: 96 | All available log IDs. 97 | """ 98 | return frozenset(self._logs.keys()) 99 | 100 | def clean_up(self, ignore_errors: bool = False) -> None: 101 | """Remove log files from disk. 102 | 103 | Args: 104 | ignore_errors: Ignore errors triggered by removing files and directories. 105 | 106 | Returns: 107 | None 108 | """ 109 | if not self.closed: 110 | self.close() 111 | if self.path is not None: 112 | try: 113 | if self.path.exists(): 114 | rmtree(self.path, ignore_errors=ignore_errors) 115 | except OSError: 116 | warn_open(self.path) 117 | raise 118 | self._logs.clear() 119 | self.path = None 120 | 121 | def clone_log( 122 | self, 123 | log_id: str, 124 | offset: int = 0, 125 | target_file: str | None = None, 126 | ) -> Path | None: 127 | """Create a copy of the specified log. 128 | 129 | Args: 130 | log_id: ID of the log to clone. 131 | offset: Where to begin reading the log from. 132 | target_file: The log contents will be saved to target_file. 133 | 134 | Returns: 135 | Name of the file containing the cloned log or None on failure. 
136 | """ 137 | log_fp = self.get_fp(log_id) 138 | if log_fp is None: 139 | return None 140 | if not log_fp.closed: 141 | log_fp.flush() 142 | with open(log_fp.name, "rb") as in_fp: 143 | if offset: 144 | in_fp.seek(offset) 145 | if target_file is None: 146 | with PuppetLogger.open_unique(base_dir=self._base) as cpyfp: 147 | target_file = cpyfp.name 148 | with open(target_file, "wb") as cpyfp: 149 | copyfileobj(in_fp, cpyfp, self.BUF_SIZE) 150 | return Path(target_file) 151 | 152 | def close(self) -> None: 153 | """Close all open file objects. 154 | 155 | Args: 156 | None 157 | 158 | Returns: 159 | None 160 | """ 161 | for lfp in self._logs.values(): 162 | if not lfp.closed: 163 | lfp.close() 164 | self.closed = True 165 | 166 | @property 167 | def files(self) -> Generator[str]: 168 | """File names of log files. 169 | 170 | Args: 171 | None 172 | 173 | Yields: 174 | File names of log files. 175 | """ 176 | for lfp in self._logs.values(): 177 | if lfp.name is not None: 178 | yield lfp.name 179 | 180 | def get_fp(self, log_id: str) -> IO[bytes] | None: 181 | """Lookup log file object by ID. 182 | 183 | Args: 184 | log_id: ID of the log (stderr, stdout... etc). 185 | 186 | Returns: 187 | The file matching given ID otherwise None. 188 | """ 189 | try: 190 | log_fp = self._logs[log_id] 191 | except KeyError: 192 | LOG.warning("log_id '%s' does not exist", log_id) 193 | return None 194 | if log_fp.name is None or not isfile(log_fp.name): 195 | raise FileNotFoundError(f"Log file not found: {log_fp.name}") 196 | return log_fp 197 | 198 | def log_length(self, log_id: str) -> int | None: 199 | """Get the length of the specified log. 200 | 201 | Args: 202 | log_id: ID of the log to measure. 203 | 204 | Returns: 205 | Length of the specified log in bytes or None if the log does not exist. 
206 | """ 207 | log_fp = self.get_fp(log_id) 208 | if log_fp is None: 209 | return None 210 | if not log_fp.closed: 211 | log_fp.flush() 212 | return stat(log_fp.name).st_size 213 | 214 | @staticmethod 215 | def open_unique(base_dir: str | None = None, mode: str = "wb") -> IO[bytes]: 216 | """Create and open a unique file. 217 | 218 | Args: 219 | base_dir: This is where the file will be created. If None is 220 | passed the system default will be used. 221 | mode: File mode. See documentation for open(). 222 | 223 | Returns: 224 | An open file object. 225 | """ 226 | return NamedTemporaryFile( 227 | mode, delete=False, dir=base_dir, prefix="ffp_log_", suffix=".txt" 228 | ) 229 | 230 | def reset(self) -> None: 231 | """Reset logger for reuse. 232 | 233 | Args: 234 | None 235 | 236 | Returns: 237 | None 238 | """ 239 | self.clean_up() 240 | self.closed = False 241 | self._rr_packed = False 242 | self.path = Path(mkdtemp(prefix="ffplogs_", dir=self._base)) 243 | 244 | def save_logs( 245 | self, 246 | dest: Path, 247 | logs_only: bool = False, 248 | bin_path: Path | None = None, 249 | rr_pack: bool = True, 250 | ) -> None: 251 | """The browser logs will be saved to dest. This can only be called 252 | after close() has been called. 253 | 254 | Args: 255 | dest: Destination path for log data. Existing files will be overwritten. 256 | logs_only: Do not include other data, including debugger output files. 257 | bin_path: Firefox binary. 258 | rr_pack: Pack rr trace if required. 
259 | 260 | Returns: 261 | None 262 | """ 263 | assert self.closed, "save_logs() cannot be called before calling close()" 264 | assert self.path is not None 265 | 266 | # copy log to location specified by dest 267 | dest.mkdir(parents=True, exist_ok=True) 268 | 269 | for log_id, log_fp in self._logs.items(): 270 | copy2(log_fp.name, dest / f"log_{log_id}.txt") 271 | 272 | if not logs_only: 273 | rr_trace = self.path / self.PATH_RR / "latest-trace" 274 | if rr_trace.is_dir(): 275 | # check logs for rr related issues 276 | # OSError: in case the file does not exist 277 | # ValueError: cannot mmap an empty file on Windows 278 | with ( 279 | suppress(OSError, ValueError), 280 | (dest / "log_stderr.txt").open("rb") as lfp, 281 | mmap(lfp.fileno(), 0, access=ACCESS_READ) as lmm, 282 | ): 283 | if lmm.find(b"=== Start rr backtrace:") != -1: 284 | LOG.warning("rr traceback detected in stderr log") 285 | if rr_pack and not self._rr_packed: 286 | LOG.debug("packing rr trace") 287 | try: 288 | check_output(["rr", "pack", str(rr_trace)], stderr=STDOUT) 289 | self._rr_packed = True 290 | except (OSError, CalledProcessError): 291 | LOG.warning("Error calling 'rr pack %s'", rr_trace) 292 | # copy `taskcluster-build-task` for use with Pernosco if available 293 | if bin_path is not None: 294 | task_info = bin_path / "taskcluster-build-task" 295 | if task_info.is_file(): 296 | moz_rr = rr_trace / "files.mozilla" 297 | moz_rr.mkdir(parents=True, exist_ok=True) 298 | copy2(task_info, moz_rr) 299 | LOG.debug("Copied 'taskcluster-build-task' to trace") 300 | 301 | for entry in self.path.iterdir(): 302 | if entry.is_dir(): 303 | copytree(entry, dest / entry.name, symlinks=True) 304 | -------------------------------------------------------------------------------- /src/ffpuppet/test_profile.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. 
"""ffpuppet profile tests"""

from shutil import rmtree
from subprocess import CalledProcessError

from pytest import mark, raises

from .profile import Profile


def test_profile_basic(tmp_path):
    """test basic Profile"""
    with Profile(working_path=str(tmp_path)) as profile:
        assert profile
        assert str(profile)
        assert profile.path.parent == tmp_path
        # times.json is only created when a prefs.js is installed
        assert not (profile.path / "times.json").is_file()
        assert profile.invalid_prefs is None
        (profile.path / "Invalidprefs.js").touch()
        assert profile.invalid_prefs is not None
        profile.remove()
        assert profile.path is None


def test_profile_use_template(tmp_path):
    """test Profile with template"""
    template = tmp_path / "template"
    template.mkdir()
    (template / "a.txt").touch()
    (template / "Invalidprefs.js").touch()
    working = tmp_path / "working"
    working.mkdir()
    with Profile(template=template, working_path=str(working)) as profile:
        assert profile
        assert profile.path.parent == working
        # template contents are copied, stale Invalidprefs.js is dropped
        assert (profile.path / "a.txt").is_file()
        assert not (profile.path / "Invalidprefs.js").is_file()


@mark.parametrize(
    "existing, additional",
    [
        ({}, {}),
        ({"pre.existing": "1"}, {}),
        ({"pre.existing": "1"}, {"foo": "'a1b1c1'", "test.enabled": "true"}),
        ({}, {"foo": "'a1b1c1'", "test.enabled": "true"}),
    ],
)
def test_profile_prefs_js(tmp_path, existing, additional):
    """test Profile with prefs.js"""
    prefs = None
    if existing:
        prefs = tmp_path / "prefs.js"
        # write all entries in a single call - Path.write_text() truncates
        # the file, so calling it once per entry would keep only the last pref
        prefs.write_text(
            "".join(
                f"user_pref('{name}', {value});\n" for name, value in existing.items()
            )
        )
    working = tmp_path / "working"
    working.mkdir()
    with Profile(prefs_file=prefs, working_path=str(working)) as profile:
        assert profile
        assert profile.path.parent == working
        profile.add_prefs(additional)
        if additional or existing:
            assert (profile.path / "prefs.js").is_file()
            assert (profile.path / "times.json").is_file()
            data = (profile.path / "prefs.js").read_text()
            for name, value in existing.items():
                assert f"user_pref('{name}', {value});\n" in data
            for name, value in additional.items():
                assert f"user_pref('{name}', {value});\n" in data
            # no duplicated or dropped entries
            lines = [x for x in data.splitlines() if x.startswith("user_pref(")]
            assert len(lines) == len(existing) + len(additional)


def test_profile_extensions(mocker, tmp_path):
    """test create_profile() extension support"""
    mocker.patch(
        "ffpuppet.profile.mkdtemp", autospec=True, return_value=str(tmp_path / "dst")
    )
    # create a profile with a non-existent ext
    (tmp_path / "dst").mkdir()
    with raises(RuntimeError, match=r"Unknown extension: '.+?fake_ext'"):
        Profile(extensions=[tmp_path / "fake_ext"])
    # a failed init must remove the partially built profile
    assert not (tmp_path / "dst").is_dir()
    # create a profile with an xpi ext
    (tmp_path / "dst").mkdir()
    xpi = tmp_path / "xpi-ext.xpi"
    xpi.touch()
    with Profile(extensions=[xpi]) as prof:
        assert any(prof.path.glob("extensions"))
        assert (prof.path / "extensions" / "xpi-ext.xpi").is_file()
    rmtree(tmp_path / "dst")
    # create a profile with an unknown ext
    (tmp_path / "dst").mkdir()
    dummy_ext = tmp_path / "dummy_ext"
    dummy_ext.mkdir()
    with raises(
        RuntimeError, match=r"Failed to find extension id in manifest: '.+?dummy_ext'"
    ):
        Profile(extensions=[dummy_ext])
    assert not (tmp_path / "dst").is_dir()
    # create a profile with a bad legacy ext
    (tmp_path / "dst").mkdir()
    bad_legacy = tmp_path / "bad_legacy"
    bad_legacy.mkdir()
    (bad_legacy / "install.rdf").touch()
    with raises(
        RuntimeError, match=r"Failed to find extension id in manifest: '.+?bad_legacy'"
    ):
        Profile(extensions=[bad_legacy])
    assert not (tmp_path / "dst").is_dir()
    # create a profile with a good legacy ext
    (tmp_path / "dst").mkdir()
    good_legacy = tmp_path / "good_legacy"
    good_legacy.mkdir()
    # NOTE(review): this XML literal was reconstructed (markup was stripped in
    # transit); it matches the install.rdf parser in profile.py - root {RDF},
    # './x:Description/em:id' with the rdf-syntax-ns/em-rdf namespaces. Verify
    # against the original file.
    (good_legacy / "install.rdf").write_text(
        '<?xml version="1.0"?>'
        '<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"'
        ' xmlns:em="http://www.mozilla.org/2004/em-rdf#">\n'
        "  <Description>\n"
        "    <em:id>good-ext-id</em:id>\n"
        "  </Description>\n"
        "</RDF>"
    )
    (good_legacy / "example.js").touch()
    with Profile(extensions=[good_legacy]) as prof:
        assert any(prof.path.glob("extensions"))
        ext_path = prof.path / "extensions" / "good-ext-id"
        assert (ext_path / "install.rdf").is_file()
        assert (ext_path / "example.js").is_file()
    rmtree(tmp_path / "dst")
    # create a profile with a bad webext
    (tmp_path / "dst").mkdir()
    bad_webext = tmp_path / "bad_webext"
    bad_webext.mkdir()
    (bad_webext / "manifest.json").touch()
    with raises(
        RuntimeError, match=r"Failed to find extension id in manifest: '.+?bad_webext'"
    ):
        Profile(extensions=[bad_webext])
    assert not (tmp_path / "dst").is_dir()
    # create a profile with a good webext
    (tmp_path / "dst").mkdir()
    good_webext = tmp_path / "good_webext"
    good_webext.mkdir()
    (good_webext / "manifest.json").write_bytes(
        b"""{"applications": {"gecko": {"id": "good-webext-id"}}}"""
    )
    (good_webext / "example.js").touch()
    with Profile(extensions=[good_webext]) as prof:
        assert any(prof.path.glob("extensions"))
        ext_path = prof.path / "extensions" / "good-webext-id"
        assert ext_path.is_dir()
        assert (ext_path / "manifest.json").is_file()
        assert (ext_path / "example.js").is_file()
    rmtree(tmp_path / "dst")
    # create a profile with multiple extensions
    (tmp_path / "dst").mkdir()
    with Profile(extensions=[good_webext, good_legacy]) as prof:
        assert any(prof.path.glob("extensions"))
        ext_path = prof.path / "extensions"
        assert ext_path.is_dir()
        ext_path = prof.path / "extensions" / "good-webext-id"
        assert ext_path.is_dir()
        assert (ext_path / "manifest.json").is_file()
        assert (ext_path / "example.js").is_file()
        ext_path = prof.path / "extensions" / "good-ext-id"
        assert ext_path.is_dir()
        assert (ext_path / "install.rdf").is_file()
        assert (ext_path / "example.js").is_file()


def test_profile_check_prefs(tmp_path):
    """test check_prefs()"""
    dummy_prefs = tmp_path / "dummy.js"
    dummy_prefs.write_text(
        "// comment line\n"
        "# comment line\n"
        " \n\n"
        'user_pref("a.a", 0);\n'
        'user_pref("a.b", "test");\n'
        'user_pref("a.c", true);\n'
    )
    custom_prefs = tmp_path / "custom.js"
    custom_prefs.write_text(
        "// comment line\n"
        "# comment line\n"
        "/* comment block.\n"
        "*\n"
        " \n\n"
        'user_pref("a.a", 0); // test comment\n'
        'user_pref("a.c", true);\n'
    )
    # every custom pref is present in dummy_prefs
    assert Profile.check_prefs(dummy_prefs, custom_prefs)
    # test detecting missing prefs
    custom_prefs.write_text('user_pref("a.a", 0);\nuser_pref("b.a", false);\n')
    assert not Profile.check_prefs(dummy_prefs, custom_prefs)


def test_profile_remove(mocker, tmp_path):
    """test Profile.remove() fail to remove data directory"""
    # rmtree is patched out, so the directory is left behind on purpose
    mocker.patch("ffpuppet.profile.rmtree", autospec=True)
    with Profile(working_path=str(tmp_path)) as profile:
        path = profile.path
        profile.remove()
    assert profile.path is None
    assert path.exists()


def test_profile_install_certs(mocker, tmp_path):
    """test Profile with certs"""
    mocker.patch("ffpuppet.profile.certutil_available", autospec=True)
    fake_check = mocker.patch("ffpuppet.profile.check_output", autospec=True)
    working = tmp_path / "working"
    working.mkdir()
    cert = tmp_path / "cert"
    cert.touch()
    with Profile(cert_files=[cert], working_path=str(working)):
        # one call to init the cert db and one to install the cert
        assert fake_check.call_count == 2


def test_profile_certutil_missing(mocker, tmp_path):
    """test Profile missing certutil binary"""
    mocker.patch("ffpuppet.profile.certutil_available", return_value=False)
    mocker.patch("ffpuppet.profile.certutil_find", autospec=True)
    cert = tmp_path / "cert"
    cert.touch()
    with raises(OSError, match="certutil not found"):
        Profile(cert_files=[cert], working_path=str(tmp_path))


def test_profile_install_cert(mocker, tmp_path):
    """test Profile.install_cert() certutil"""
    mocker.patch("ffpuppet.profile.certutil_available", autospec=True)
    fake_check = mocker.patch("ffpuppet.profile.check_output", autospec=True)

    cert = tmp_path / "cert"
    cert.touch()

    Profile.install_cert(tmp_path, cert, "fake_certutil")
    assert fake_check.call_count == 1

    # certutil failures are surfaced as RuntimeError
    fake_check.side_effect = CalledProcessError(1, "test", output=b"error msg")
    with raises(RuntimeError, match="Install cert: certutil error"):
        Profile.install_cert(tmp_path, cert, "fake_certutil")


def test_profile_init_cert_db(mocker, tmp_path):
    """test Profile.init_cert_db() certutil"""
    mocker.patch("ffpuppet.profile.certutil_available", autospec=True)
    fake_check = mocker.patch("ffpuppet.profile.check_output", autospec=True)

    Profile.init_cert_db(tmp_path, "fake_certutil")
    assert fake_check.call_count == 1

    # certutil failures are surfaced as RuntimeError
    fake_check.side_effect = CalledProcessError(1, "test", output=b"error msg")
    with raises(RuntimeError, match="Init cert db: certutil error"):
        Profile.init_cert_db(tmp_path, "fake_certutil")
terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ffpuppet profile manager""" 5 | 6 | from __future__ import annotations 7 | 8 | from argparse import ArgumentParser 9 | from json import load as json_load 10 | from logging import DEBUG, INFO, basicConfig, getLogger 11 | from pathlib import Path 12 | from shutil import copyfile, copytree, rmtree 13 | from subprocess import STDOUT, CalledProcessError, TimeoutExpired, check_output 14 | from tempfile import mkdtemp 15 | from time import strftime, time 16 | from typing import TYPE_CHECKING 17 | from xml.etree import ElementTree 18 | 19 | from .helpers import certutil_available, certutil_find 20 | 21 | if TYPE_CHECKING: 22 | from collections.abc import Iterable 23 | 24 | LOG = getLogger(__name__) 25 | 26 | __author__ = "Tyson Smith" 27 | 28 | 29 | class Profile: 30 | """ 31 | Browser profile management object. 32 | """ 33 | 34 | __slots__ = ("path",) 35 | 36 | def __init__( 37 | self, 38 | browser_bin: Path | None = None, 39 | cert_files: Iterable[Path] | None = None, 40 | extensions: Iterable[Path] | None = None, 41 | prefs_file: Path | None = None, 42 | template: Path | None = None, 43 | working_path: str | None = None, 44 | ) -> None: 45 | if cert_files and not certutil_available(certutil_find(browser_bin)): 46 | raise OSError("NSS certutil not found") 47 | self.path: Path | None = Path( 48 | mkdtemp(dir=working_path, prefix=strftime("ffprofile_%Y%m%d-%H%M%S_")) 49 | ) 50 | try: 51 | if template is not None: 52 | self._copy_template(template) 53 | if prefs_file is not None: 54 | self._copy_prefs_file(prefs_file) 55 | if extensions is not None: 56 | self._copy_extensions(extensions) 57 | if cert_files: 58 | certutil_bin = certutil_find(browser_bin) 59 | self.init_cert_db(self.path, certutil_bin) 60 | for cert in cert_files: 61 | self.install_cert(self.path, cert, certutil_bin) 62 | except Exception: 
63 | if self.path.exists(): 64 | rmtree(self.path, ignore_errors=True) 65 | raise 66 | 67 | def __enter__(self) -> Profile: 68 | return self 69 | 70 | def __exit__(self, *exc: object) -> None: 71 | self.remove() 72 | 73 | def __str__(self) -> str: 74 | return str(self.path) 75 | 76 | def _add_times_json(self, overwrite: bool = True) -> None: 77 | assert self.path 78 | times_json = self.path / "times.json" 79 | if overwrite or not times_json.is_file(): 80 | # times.json only needs to be created when using a custom prefs.js 81 | times_json.write_text(f'{{"created":{int(time() * 1000)}}}') 82 | 83 | def _copy_extensions(self, extensions: Iterable[Path]) -> None: 84 | assert self.path 85 | ext_path = self.path / "extensions" 86 | ext_path.mkdir(exist_ok=True) 87 | for ext in extensions: 88 | if ext.is_file() and ext.name.endswith(".xpi"): 89 | copyfile(ext, ext_path / ext.name) 90 | elif ext.is_dir(): 91 | # read manifest to see what the folder should be named 92 | ext_name = None 93 | if (ext / "manifest.json").is_file(): 94 | try: 95 | with (ext / "manifest.json").open("r") as manifest: 96 | manifest_loaded_json = json_load(manifest) 97 | ext_name = manifest_loaded_json["applications"]["gecko"]["id"] 98 | except (OSError, KeyError, ValueError) as exc: 99 | LOG.debug("Failed to parse manifest.json: %s", exc) 100 | elif (ext / "install.rdf").is_file(): 101 | try: 102 | xmlns = { 103 | "x": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", 104 | "em": "http://www.mozilla.org/2004/em-rdf#", 105 | } 106 | tree = ElementTree.parse(str(ext / "install.rdf")) 107 | assert tree.getroot().tag == f"{{{xmlns['x']}}}RDF" 108 | ids = tree.findall("./x:Description/em:id", namespaces=xmlns) 109 | assert len(ids) == 1 110 | ext_name = ids[0].text 111 | except (AssertionError, OSError, ElementTree.ParseError) as exc: 112 | LOG.debug("Failed to parse install.rdf: %s", exc) 113 | if ext_name is None: 114 | raise RuntimeError( 115 | f"Failed to find extension id in manifest: '{ext}'" 116 | ) 
117 | copytree(ext, self.path / "extensions" / ext_name) 118 | else: 119 | raise RuntimeError(f"Unknown extension: '{ext}'") 120 | 121 | def _copy_prefs_file(self, prefs_file: Path) -> None: 122 | assert self.path 123 | LOG.debug("using prefs.js: '%s'", prefs_file) 124 | copyfile(prefs_file, self.path / "prefs.js") 125 | self._add_times_json() 126 | 127 | def _copy_template(self, template: Path) -> None: 128 | assert self.path 129 | LOG.debug("using profile template: '%s'", template) 130 | rmtree(self.path) 131 | copytree(template, self.path) 132 | invalid_prefs = self.path / "Invalidprefs.js" 133 | # if Invalidprefs.js was copied from the template profile remove it 134 | if invalid_prefs.is_file(): 135 | invalid_prefs.unlink() 136 | 137 | def add_prefs(self, prefs: dict[str, str]) -> None: 138 | """Write or append preferences from prefs to prefs.js file in profile_path. 139 | 140 | Args: 141 | prefs: preferences to add. 142 | 143 | Returns: 144 | None 145 | """ 146 | assert self.path 147 | self._add_times_json(overwrite=False) 148 | with (self.path / "prefs.js").open("a") as prefs_fp: 149 | # make sure there is a newline before appending to prefs.js 150 | prefs_fp.write("\n") 151 | for name, value in prefs.items(): 152 | prefs_fp.write(f"user_pref('{name}', {value});\n") 153 | 154 | @staticmethod 155 | def check_prefs(prof_prefs: Path, input_prefs: Path) -> bool: 156 | """Check that the given prefs.js file in use by the browser contains all 157 | the requested preferences. 158 | NOTE: There will be false positives if input_prefs does not adhere to the 159 | formatting that is used in prefs.js file generated by the browser. 160 | 161 | Args: 162 | prof_prefs: Profile prefs.js file. 163 | input_prefs: Prefs.js file that contains prefs that should be merged into 164 | the prefs.js file generated by the browser. 165 | 166 | Returns: 167 | True if all expected preferences are found otherwise False. 
168 | """ 169 | with prof_prefs.open() as p_fp, input_prefs.open() as i_fp: 170 | p_prefs = {p.split(",")[0] for p in p_fp if p.startswith("user_pref(")} 171 | i_prefs = {p.split(",")[0] for p in i_fp if p.startswith("user_pref(")} 172 | missing_prefs = i_prefs - p_prefs 173 | for missing in missing_prefs: 174 | LOG.debug("pref not set '%s'", missing) 175 | return not missing_prefs 176 | 177 | @staticmethod 178 | def init_cert_db(dst: Path, certutil: str) -> None: 179 | """Create required certificate database files. 180 | 181 | Args: 182 | dst: Path of directory to initialize. 183 | certutil: certutil binary. 184 | 185 | Returns: 186 | None 187 | """ 188 | # remove any existing db files to avoid any compatibility issues 189 | (dst / "cert9.db").unlink(missing_ok=True) 190 | (dst / "key4.db").unlink(missing_ok=True) 191 | (dst / "pkcs11.txt").unlink(missing_ok=True) 192 | try: 193 | check_output( 194 | (certutil, "-N", "-d", str(dst), "--empty-password"), 195 | stderr=STDOUT, 196 | timeout=60, 197 | ) 198 | except (CalledProcessError, TimeoutExpired) as exc: 199 | LOG.error(str(exc)) 200 | if exc.output: 201 | LOG.error(exc.output.decode().strip()) 202 | raise RuntimeError("Init cert db: certutil error") from None 203 | 204 | @staticmethod 205 | def install_cert(dst: Path, cert_file: Path, certutil: str) -> None: 206 | """Install certificate in the database. 207 | 208 | Args: 209 | dst: Directory containing database. 210 | cert_file: Certificate file to install. 211 | certutil: certutil binary. 
212 | 213 | Returns: 214 | None 215 | """ 216 | LOG.debug("installing certificate '%s' with '%s'", cert_file, certutil) 217 | try: 218 | check_output( 219 | ( 220 | certutil, 221 | "-A", 222 | "-d", 223 | str(dst), 224 | "-t", 225 | "CT,,", 226 | "-n", 227 | "test cert", 228 | "-i", 229 | str(cert_file), 230 | ), 231 | stderr=STDOUT, 232 | timeout=60, 233 | ) 234 | except (CalledProcessError, TimeoutExpired) as exc: 235 | LOG.error(str(exc)) 236 | if exc.output: 237 | LOG.error(exc.output.decode().strip()) 238 | raise RuntimeError("Install cert: certutil error") from None 239 | 240 | @property 241 | def invalid_prefs(self) -> Path | None: 242 | """Path to Invalidprefs.js if it exists. 243 | 244 | Args: 245 | None 246 | 247 | Returns: 248 | Invalidprefs.js or None if it does not exist. 249 | 250 | """ 251 | if self.path and (self.path / "Invalidprefs.js").is_file(): 252 | return self.path / "Invalidprefs.js" 253 | return None 254 | 255 | def remove(self) -> None: 256 | """Remove the profile from the filesystem. 257 | 258 | Args: 259 | None 260 | 261 | Returns: 262 | None 263 | """ 264 | if self.path is not None: 265 | LOG.debug("removing profile") 266 | rmtree(self.path, ignore_errors=True) 267 | if self.path.exists(): 268 | LOG.error("Failed to remove profile '%s'", self.path) 269 | self.path = None 270 | 271 | 272 | def create_profile() -> None: 273 | """Command line tool to create a Firefox profile. 274 | 275 | Args: 276 | None 277 | 278 | Returns: 279 | None 280 | """ 281 | parser = ArgumentParser(description="Create a Firefox Profile") 282 | parser.add_argument("--debug", action="store_true", help="Display debug output.") 283 | parser.add_argument( 284 | "-o", "--output", default=Path.cwd(), type=Path, help="Location to new profile." 
285 | ) 286 | parser.add_argument("-p", "--prefs", type=Path, help="Prefs.js file") 287 | args = parser.parse_args() 288 | 289 | # set output verbosity 290 | if args.debug: 291 | basicConfig(format="[%(levelname).1s] %(message)s", level=DEBUG) 292 | else: 293 | basicConfig(format="%(message)s", level=INFO) 294 | 295 | # create a profile 296 | profile = Profile(prefs_file=args.prefs, working_path=args.output) 297 | LOG.info("Created profile: %s", profile.path) 298 | -------------------------------------------------------------------------------- /src/ffpuppet/test_minidump_parser.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | # pylint: disable=protected-access 5 | """ffpuppet minidump parser tests""" 6 | 7 | from copy import deepcopy 8 | from json import dumps 9 | from pathlib import Path 10 | from subprocess import CompletedProcess 11 | from sys import executable 12 | 13 | from pytest import mark 14 | 15 | from .minidump_parser import MinidumpParser 16 | 17 | MD_BASE_AMD64_WIN = { 18 | "crash_info": { 19 | "address": "0x00007ffe4e09af8d", 20 | "type": "EXCEPTION_BREAKPOINT", 21 | }, 22 | "system_info": { 23 | "cpu_arch": "amd64", 24 | "cpu_count": 8, 25 | "cpu_info": "family 6 model 70 stepping 1", 26 | "os": "Windows NT", 27 | "os_ver": "10.0.19044", 28 | }, 29 | } 30 | 31 | MD_UNSYMBOLIZED_AMD64_WIN = deepcopy(MD_BASE_AMD64_WIN) 32 | MD_UNSYMBOLIZED_AMD64_WIN["crash_info"]["crashing_thread"] = 0 33 | MD_UNSYMBOLIZED_AMD64_WIN["crashing_thread"] = { 34 | "frame_count": 49, 35 | "frames": [ 36 | { 37 | "file": None, 38 | "frame": 0, 39 | "function": None, 40 | "function_offset": None, 41 | "line": None, 42 | "module": "xul.dll", 43 | "registers": {"r10": "0x0"}, 44 | }, 45 | ], 46 | } 47 | 48 | 
# sample un-symbolized minidump-stackwalk JSON output (macOS / arm64)
MD_UNSYMBOLIZED_ARM64_MAC = {
    "crash_info": {
        "address": "0x0000000000000000",
        "crashing_thread": 0,
        "type": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
    },
    "crashing_thread": {
        "frame_count": 32,
        "frames": [
            {
                "file": None,
                "frame": 0,
                "function": None,
                "function_offset": None,
                "line": None,
                "module": "XUL",
                "registers": {
                    "x1": "0x0000000000000001",
                    "x2": "0x0000000000000002",
                },
            },
        ],
    },
    "system_info": {
        "cpu_arch": "arm64",
        "cpu_count": 8,
        "cpu_info": None,
        "os": "Mac OS X",
        "os_ver": "13.0.1 22A400",
    },
}


@mark.parametrize(
    "symbols",
    [
        # use local path
        True,
        # use url
        False,
    ],
)
def test_minidump_parser_01(mocker, tmp_path, symbols):
    """test MinidumpParser._cmd()"""
    mocker.patch.object(MinidumpParser, "MDSW_BIN", "minidump-stackwalk")
    with MinidumpParser(symbols=tmp_path if symbols else None) as parser:
        assert parser
        cmd = parser._cmd(tmp_path)
        assert cmd
        assert "minidump-stackwalk" in cmd
        if symbols:
            assert "--symbols-path" in cmd
        else:
            assert "--symbols-url" in cmd


@mark.parametrize(
    "code, token, timeout",
    [
        # success
        (f"print('{dumps(MD_UNSYMBOLIZED_AMD64_WIN)}')", "xul.dll", 60),
        # mdsw failed
        ("exit(1)", "minidump-stackwalk failed", 60),
        # invalid json
        ("print('bad,json')", "json decode error", 60),
        # mdsw hang
        ("import time;time.sleep(10)", "minidump-stackwalk timeout", 0),
    ],
)
def test_minidump_parser_02(mocker, code, token, timeout):
    """test MinidumpParser.create_log()"""
    # replace minidump-stackwalk with a Python one-liner producing the scenario
    mocker.patch.object(MinidumpParser, "_cmd", return_value=[executable, "-c", code])
    with MinidumpParser() as parser:
        assert parser._storage.is_dir()
        output = parser.create_log(Path("foo.dmp"), "minidump_00.txt", timeout=timeout)
        assert output
        assert output.name == "minidump_00.txt"
        assert output.is_file()
        assert token in output.read_text()
    # leaving the context manager removes parser storage (and the log with it)
    assert not output.is_file()


@mark.parametrize(
    "data, reg, operating_system, cpu, crash, frame",
    [
        # Windows - x86_64 / AMD64
        (
            MD_UNSYMBOLIZED_AMD64_WIN,
            "r10 = 0x0",
            "OS|Windows NT|10.0.19044",
            "CPU|amd64|family 6 model 70 stepping 1|8",
            "Crash|EXCEPTION_BREAKPOINT|0x00007ffe4e09af8d|0",
            "0|0|xul.dll||||",
        ),
        # MacOS - ARM64
        (
            MD_UNSYMBOLIZED_ARM64_MAC,
            " x1 = 0x0000000000000001\t x2 = 0x0000000000000002",
            "OS|Mac OS X|13.0.1 22A400",
            "CPU|arm64||8",
            "Crash|EXC_BAD_ACCESS / KERN_INVALID_ADDRESS|0x0000000000000000|0",
            "0|0|XUL||||",
        ),
    ],
)
def test_minidump_parser_03(tmp_path, data, reg, operating_system, cpu, crash, frame):
    """test MinidumpParser._fmt_output() - un-symbolized"""
    with (tmp_path / "out.txt").open("w+b") as ofp:
        MinidumpParser._fmt_output(data, ofp, {}, limit=2)
        ofp.seek(0)
        formatted = ofp.read().rstrip().decode().split("\n")
        assert len(formatted) == 5
        assert formatted[0] == reg
        assert formatted[1] == operating_system
        assert formatted[2] == cpu
        assert formatted[3] == crash
        assert formatted[4] == frame


def test_minidump_parser_04(tmp_path):
    """test MinidumpParser._fmt_output() - symbolized"""
    data = deepcopy(MD_BASE_AMD64_WIN)
    data["crash_info"]["crashing_thread"] = 0
    data["crashing_thread"] = {
        "frames": [
            {
                "file": "file0.cpp",
                "frame": 0,
                "function": "function00()",
                "function_offset": "0x00000000000001ed",
                "line": 47,
                "module": "xul.dll",
                "registers": {
                    "r10": "0x12345678",
                    "r11": "0x0badf00d",
                    "r12": "0x00000000",
                    "r13": "0x000000dceebfc2e8",
                },
            },
            {
                "file": "file1.cpp",
                "frame": 1,
                "function": "function01()",
                "function_offset": "0x00000000000001bb",
                "line": 210,
                "module": "xul.dll",
            },
            {
                "file": "file2.cpp",
                "frame": 2,
                "function": "function02()",
                "function_offset": "0x0000000000000123",
                "line": 123,
                "module": "xul.dll",
            },
        ],
    }

    with (tmp_path / "out.txt").open("w+b") as ofp:
        # limit=2 -> only two frames emitted, third triggers the warning line
        MinidumpParser._fmt_output(data, ofp, {"metadata": "foo"}, limit=2)
        ofp.seek(0)
        formatted = ofp.read().rstrip().decode().split("\n")
        assert len(formatted) == 9
        assert formatted[0] == "r10 = 0x12345678\tr11 = 0x0badf00d\tr12 = 0x00000000"
        assert formatted[1] == "r13 = 0x000000dceebfc2e8"
        assert formatted[2] == "metadata|foo"
        assert formatted[3] == "OS|Windows NT|10.0.19044"
        assert formatted[4] == "CPU|amd64|family 6 model 70 stepping 1|8"
        assert formatted[5] == "Crash|EXCEPTION_BREAKPOINT|0x00007ffe4e09af8d|0"
        assert formatted[6] == "0|0|xul.dll|function00()|file0.cpp|47|0x1ed"
        assert formatted[7] == "0|1|xul.dll|function01()|file1.cpp|210|0x1bb"
        assert formatted[8] == "WARNING: Hit stack size output limit!"


@mark.parametrize(
    "call_result, mdsw_bin, result",
    [
        # minidump-stackwalk is available
        (
            (CompletedProcess([], 0, stdout=b"minidump-stackwalk 0.17.0\n"),),
            "minidump-stackwalk",
            True,
        ),
        # minidump-stackwalk is matches minimum version
        (
            (CompletedProcess([], 0, stdout=b"minidump-stackwalk 0.15.2\n"),),
            "minidump-stackwalk",
            True,
        ),
        # minidump-stackwalk is out-of-date
        (
            (CompletedProcess([], 0, stdout=b"minidump-stackwalk 0.10.0\n"),),
            "minidump-stackwalk",
            False,
        ),
        # minidump-stackwalk is out-of-date
        (
            (CompletedProcess([], 0, stdout=b"minidump-stackwalk 0.15.1\n"),),
            "minidump-stackwalk",
            False,
        ),
        # minidump-stackwalk is bad version
        (
            (CompletedProcess([], 0, stdout=b"minidump-stackwalk badversion\n"),),
            "minidump-stackwalk",
            False,
        ),
        # minidump-stackwalk is not available
        (OSError("test"), "minidump-stackwalk", False),
        # minidump-stackwalk not installed
        (None, None, False),
    ],
)
def test_minidump_parser_05(mocker, call_result, mdsw_bin, result):
    """test MinidumpParser.mdsw_available()"""
    mocker.patch("ffpuppet.minidump_parser.run", side_effect=call_result)
    mocker.patch.object(MinidumpParser, "MDSW_BIN", mdsw_bin)
    assert MinidumpParser.mdsw_available(min_version="0.15.2") == result


def test_minidump_parser_06(tmp_path):
    """test MinidumpParser.dmp_files()"""
    # empty minidump path
    assert not MinidumpParser.dmp_files(tmp_path)
    # find single dump file
    (tmp_path / "a.dmp").write_text("a")
    assert tmp_path / "a.dmp" in MinidumpParser.dmp_files(tmp_path)
    # find multiple dump files
    (tmp_path / "b.dmp").write_text("b")
    (tmp_path / "c.dmp").write_text("c")
    assert len(MinidumpParser.dmp_files(tmp_path)) == 3
    # add .extra file (MozCrashReason) to prioritize .dmp file
    (tmp_path / "b.extra").write_text('{"MozCrashReason":"foo"}')
    assert MinidumpParser.dmp_files(tmp_path)[0] == (tmp_path / "b.dmp")
    (tmp_path / "b.extra").unlink()
    # add .extra file (additional_minidumps) to prioritize .dmp file
    (tmp_path / "c-browser.dmp").write_text("c-browser")
    (tmp_path / "c.extra").write_text('{"additional_minidumps":"browser"}')
    assert MinidumpParser.dmp_files(tmp_path)[0] == (tmp_path / "c-browser.dmp")
    # corrupt (bad json) .extra file
    (tmp_path / "c.extra").write_text("!")
    assert MinidumpParser.dmp_files(tmp_path)


def test_minidump_parser_missing_crashing_thread(tmp_path):
    """test MinidumpParser._fmt_output() - missing crashing thread"""
    with (tmp_path / "out.txt").open("w+b") as ofp:
        MinidumpParser._fmt_output(MD_BASE_AMD64_WIN, ofp, {})
        ofp.seek(0)
        formatted = ofp.read().rstrip().decode().split("\n")
        assert len(formatted) == 3
        assert formatted[0] == "OS|Windows NT|10.0.19044"
        assert formatted[1] == "CPU|amd64|family 6 model 70 stepping 1|8"
        assert formatted[2] == "Crash|EXCEPTION_BREAKPOINT|0x00007ffe4e09af8d|?"


def test_minidump_parser_metadata(tmp_path):
    """test MinidumpParser._metadata()"""
    # collect metadata from .extra file
    (tmp_path / "out.extra").write_text(dumps({"a": "1", "b": "2", "c": "3"}))
    result = MinidumpParser._metadata(tmp_path / "out.dmp", ("a", "c"))
    assert "a" in result
    assert "b" not in result
    assert "c" in result
    # invalid .extra file
    (tmp_path / "out.extra").write_text("!")
    assert not MinidumpParser._metadata(tmp_path / "out.dmp", ("a", "c"))
--------------------------------------------------------------------------------
/src/ffpuppet/test_process_tree.py:
--------------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""process_tree.py tests"""

from collections import namedtuple
from itertools import chain, count, repeat
from pathlib import Path
from subprocess import Popen
from time import sleep
from unittest import mock

from psutil import STATUS_ZOMBIE, AccessDenied, NoSuchProcess, Process, TimeoutExpired
from pytest import mark, raises

from .exceptions import TerminateError
from .process_tree import (
    ProcessTree,
    _filter_zombies,
    _last_modified,
    _safe_wait_procs,
    _writing_coverage,
)

# helper script that spawns a small tree of child processes
TREE = Path(__file__).parent / "resources" / "tree.py"


@mark.parametrize(
    "enable_launcher, launcher_is_parent",
    [
        # no launcher
        (False, False),
        # use launcher
        (True, False),
        # launcher disabled (browser.launcherProcess.enabled=false)
        (True, True),
    ],
)
def test_process_tree_01(tmp_path, enable_launcher, launcher_is_parent):
    """test ProcessTree() with actual processes"""
    content_procs = 3
    flag = tmp_path / "running"
    # don't use sys.executable it is not always correct (incompatible with tox)
    cmd = [Process().exe(), str(TREE), str(content_procs), str(flag)]

    # parent + content + launcher
    expected_procs = 1 + content_procs
    if enable_launcher:
        if launcher_is_parent:
            cmd.append("--launcher-is-parent")
        else:
            expected_procs += 1
            cmd.append("-no-deelevate")
    else:
        # make sure the test is not broken
        assert not launcher_is_parent, "launcher_is_parent requires launcher!"

    # pylint: disable=consider-using-with
    proc = Popen(cmd)
    tree = None
    try:
        # wait (30 seconds) for tree to launch all processes
        for _ in range(300):
            if flag.exists():
                break
            assert proc.poll() is None
            sleep(0.1)
        else:
            raise AssertionError(f"Process tree ({expected_procs}) failed to launch")

        tree = ProcessTree(proc)
        # pylint: disable=protected-access
        tree._launcher_check = enable_launcher
        assert tree.parent
        if enable_launcher and not launcher_is_parent:
            # launcher mode: the spawned proc is the launcher, not the parent
            assert tree.launcher is not None
            assert tree.launcher.pid == proc.pid
        else:
            assert tree.launcher is None
            assert tree.parent.pid == proc.pid
        assert ProcessTree._poll(tree.parent) is None
        assert tree.is_running()
        assert len(tree.processes()) == expected_procs
        assert tree.wait_procs() == expected_procs
        usage = tuple(tree.cpu_usage())
        assert len(usage) == expected_procs
        tree.terminate()
    finally:
        # this should cause everything to close gracefully if it is still running
        flag.unlink(missing_ok=True)
        if tree and tree.parent.is_running():
            tree.parent.terminate()
        if proc.poll() is None:
            proc.terminate()
        proc.wait(timeout=30)
    assert not tree.is_running()
    assert not tree.processes()
    assert tree.wait() is not None
    assert tree.wait_procs() == 0


@mark.parametrize(
    "side_effect, expected_result",
    [
        # process exited
        ((0,), 0),
        # process exited - exit code not available
        ((None,), 0),
        # can't find process
        (NoSuchProcess(1), 0),
        # process is running
        (TimeoutExpired(1), None),
    ],
)
def test_process_tree_02(mocker, side_effect, expected_result):
    """test ProcessTree._poll()"""
    proc = mocker.Mock(spec_set=Process)
    proc.wait.side_effect = side_effect
    # pylint: disable=protected-access
    assert ProcessTree._poll(proc) == expected_result


def test_process_tree_03(mocker):
    """test ProcessTree.terminate()"""
    mocker.patch("ffpuppet.process_tree.Process", autospec=True)
    wait_procs = mocker.patch("ffpuppet.process_tree.wait_procs", autospec=True)

    # no processes to terminate
    mocker.patch.object(ProcessTree, "processes", side_effect=([],))
    tree = ProcessTree(mocker.Mock())
    tree.parent = mocker.Mock(spec_set=Process)
    tree.terminate()
    # pylint: disable=no-member
    assert tree.processes.call_count == 1
    assert tree.parent.wait.call_count == 0
    assert tree.parent.terminate.call_count == 0

    # this should be the "normal" code path
    proc = mocker.Mock(spec_set=Process, pid=1337)
    wait_procs.return_value = ([proc], [])
    proc.wait.side_effect = (TimeoutExpired(1), None)
    mocker.patch.object(ProcessTree, "processes", side_effect=([proc],))
    tree = ProcessTree(mocker.Mock())
    tree.parent = proc
    tree.terminate()
    # pylint: disable=no-member
    assert tree.processes.call_count == 1
    assert tree.parent.wait.call_count == 2
    assert tree.parent.terminate.call_count == 1
    assert wait_procs.call_count == 1
    wait_procs.reset_mock()

    # this is the stubborn code path that should not happen
    proc = mocker.Mock(spec_set=Process, pid=1337)
    wait_procs.return_value = ([], [proc])
    proc.wait.side_effect = (TimeoutExpired(1), None)
    mocker.patch.object(ProcessTree, "processes", side_effect=([proc],))
    tree = ProcessTree(mocker.Mock())
    tree.parent = proc
    with raises(TerminateError, match="Failed to terminate processes"):
        tree.terminate()
    # pylint: disable=no-member
    assert tree.processes.call_count == 1
    assert tree.parent.wait.call_count == 2
    assert tree.parent.terminate.call_count == 2
    assert tree.parent.kill.call_count == 1
    assert wait_procs.call_count == 3


def test_process_tree_04(mocker):
    """test ProcessTree.cpu_usage()"""
    mocker.patch("ffpuppet.process_tree.Process", autospec=True)
    proc = mocker.Mock(spec_set=Process, pid=1234)
    proc.cpu_percent.return_value = 2.3
    mocker.patch.object(ProcessTree, "processes", side_effect=([proc],))
    tree = ProcessTree(mocker.Mock())
    stats = tuple(tree.cpu_usage())
    assert stats
    assert stats[0][0] == 1234
    assert stats[0][1] == 2.3


@mark.parametrize(
    "procs, last_mod, writing, is_running, success",
    [
        # no processes
        (False, repeat(0), False, True, True),
        # data written successfully
        (True, chain([0], repeat(2)), False, True, True),
        # data not updated
        (True, repeat(0), False, True, False),
        # data write timeout
        (True, chain([0], repeat(2)), True, True, False),
        # process exits
        (True, repeat(0), False, False, True),
    ],
)
def test_process_tree_05(mocker, procs, last_mod, writing, is_running, success):
    """test ProcessTree.dump_coverage()"""
    mocker.patch("ffpuppet.process_tree.COVERAGE_SIGNAL", return_value="foo")
    mocker.patch("ffpuppet.process_tree.getenv", return_value="foo")
    # perf_counter ticks forward 0.25s per call so timeouts elapse deterministically
    mocker.patch("ffpuppet.process_tree.perf_counter", side_effect=count(step=0.25))
    mocker.patch("ffpuppet.process_tree.sleep", autospec=True)
    mocker.patch("ffpuppet.process_tree._last_modified", side_effect=last_mod)
    mocker.patch("ffpuppet.process_tree._writing_coverage", return_value=writing)

    # pylint: disable=missing-class-docstring,super-init-not-called
    class CovProcessTree(ProcessTree):
        def __init__(self):
            pass

        def is_running(self) -> bool:
            return is_running

        def processes(self, recursive=False):
            return [] if not procs else [mocker.Mock(spec_set=Process)]

    tree = CovProcessTree()
    assert tree.dump_coverage() == success


def test_last_modified_01(tmp_path):
    """test _last_modified()"""
    # scan missing path
    assert _last_modified(tmp_path / "missing") is None
    # scan empty path
    assert _last_modified(tmp_path) is None
    # scan path without gcda files
    (tmp_path / "somefile.txt").touch()
    assert _last_modified(tmp_path) is None
    # scan nested path with gcda files
    (tmp_path / "a").mkdir()
    (tmp_path / "a" / "file.gcda").touch()
    assert _last_modified(tmp_path) > 0


def test_writing_coverage_01(mocker):
    """test _writing_coverage()"""
    openfile = namedtuple("openfile", ["path", "fd"])
    # empty list
    assert not _writing_coverage([])
    # no open files
    proc = mocker.Mock(spec_set=Process, pid=1337)
    proc.open_files.return_value = ()
    assert not _writing_coverage([proc])
    assert proc.open_files.call_count == 1
    # open test
    proc.reset_mock()
    proc.open_files.return_value = (openfile("file.txt", None),)
    assert not _writing_coverage([proc])
    assert proc.open_files.call_count == 1
    # open gcda
    proc.reset_mock()
    proc.open_files.return_value = (openfile("file.gcda", None),)
    assert _writing_coverage([proc])
    assert proc.open_files.call_count == 1


@mark.parametrize(
    "wait_side_effect, procs, alive_count, gone_count",
    [
        # no processes - passthrough
        ((([], []),), [], 0, 0),
        # AccessDenied - no procs
        (AccessDenied(), [], 0, 0),
        # AccessDenied - alive (is_running check)
        (
            AccessDenied(),
            [mock.Mock(spec_set=Process, is_running=mock.Mock(return_value=True))],
            1,
            0,
        ),
        # AccessDenied - gone (is_running check)
        (
            AccessDenied(),
            [mock.Mock(spec_set=Process, is_running=mock.Mock(return_value=False))],
            0,
            1,
        ),
        # AccessDenied - alive
        (
            AccessDenied(),
            [
                mock.Mock(
                    spec_set=Process, is_running=mock.Mock(side_effect=AccessDenied())
                )
            ],
            1,
            0,
        ),
        # AccessDenied - gone
        (
            AccessDenied(),
            [
                mock.Mock(
                    spec_set=Process,
                    is_running=mock.Mock(side_effect=NoSuchProcess(pid=1)),
                )
            ],
            0,
            1,
        ),
    ],
)
def test_safe_wait_procs_01(mocker, wait_side_effect, procs, alive_count, gone_count):
    """test _safe_wait_procs()"""
    mocker.patch("ffpuppet.process_tree.perf_counter", side_effect=count(step=0.25))
    mocker.patch("ffpuppet.process_tree.sleep", autospec=True)
    mocker.patch("ffpuppet.process_tree.wait_procs", side_effect=wait_side_effect)

    result = _safe_wait_procs(procs, timeout=1)
    assert len(result[0]) == gone_count
    assert len(result[1]) == alive_count


def test_filter_zombies_01(mocker):
    """test _filter_zombies()"""
    zombie = mocker.Mock(spec_set=Process, pid=123)
    zombie.status.return_value = STATUS_ZOMBIE
    procs = tuple(_filter_zombies([zombie, mocker.Mock(spec_set=Process)]))
    assert len(procs) == 1
    assert not any(x for x in procs if x.status() == STATUS_ZOMBIE)
--------------------------------------------------------------------------------
/src/ffpuppet/main.py:
--------------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""ffpuppet main.py"""

from __future__ import annotations

from argparse import ArgumentParser, Namespace
from importlib.metadata import PackageNotFoundError, version
from logging import DEBUG, ERROR, INFO, WARNING, basicConfig, getLogger
from pathlib import Path
from platform import system
from shutil import rmtree, which
from tempfile import mkdtemp
from time import sleep, strftime

from .bootstrapper import Bootstrapper
from .core import Debugger, FFPuppet, Reason
from .display import DisplayMode
from .exceptions import LaunchError
from .helpers import certutil_available, certutil_find
from .profile import Profile

LOG = getLogger(__name__)

__author__ = "Tyson Smith"
try:
    __version__ = version("ffpuppet")
except PackageNotFoundError:  # pragma: no cover
    # package is not installed
    __version__ = "unknown"


def dump_to_console(log_dir: Path, log_quota: int = 0x8000) -> str:
    """Read and merge log files and format for output on the console.

    Args:
        log_dir: Directory to scan for logs.
        log_quota: Maximum number of bytes to read per log.

    Returns:
        Merged log data to be displayed on the console.
    """

    logs = [x for x in log_dir.iterdir() if x.is_file()]
    if not logs:
        return ""
    # display stdout and stderr last to avoid the need to scroll back
    # this assumes stderr contains the most relevant information
    for l_order in ("log_stdout", "log_stderr"):
        found = None
        for log in logs:
            if log.name.startswith(l_order):
                found = log
                break
        # move to the end of the print list
        if found and logs[-1] != found:
            logs.remove(found)
            logs.append(found)
    # merge logs
    lines = []
    for log in logs:
        fsize = log.stat().st_size
        lines.append("\n===\n")
        lines.append(f"=== Dumping {log.name!r} ({fsize / 1024.0:0.2f}KB)")
        with log.open("rb") as log_fp:
            # tail log if needed
            log_fp.seek(max(fsize - log_quota, 0))
            if log_fp.tell() > 0:
                lines.append(f" - tailed ({log_quota / 1024.0:0.2f}KB)")
            lines.append("\n===\n")
            lines.append(log_fp.read().decode("ascii", errors="ignore"))
    return "".join(lines)


def parse_args(argv: list[str] | None = None) -> Namespace:
    """Handle argument parsing.

    Args:
        argv: Arguments from the user.

    Returns:
        Parsed and sanitized arguments.
    """

    log_level_map = {"ERROR": ERROR, "WARN": WARNING, "INFO": INFO, "DEBUG": DEBUG}

    parser = ArgumentParser(
        prog="ffpuppet",
        description="FFPuppet - Firefox process launcher and log collector. "
        "Happy bug hunting!",
    )
    parser.add_argument("binary", type=Path, help="Firefox binary to launch")
    parser.add_argument(
        "-d",
        "--display-logs",
        action="store_true",
        help="Display summary of browser logs on process exit.",
    )
    parser.add_argument(
        "--log-level",
        choices=sorted(log_level_map),
        default="INFO",
        help="Configure console logging (default: %(default)s)",
    )
    parser.add_argument(
        "--version",
        "-V",
        action="version",
        version=f"%(prog)s {__version__}",
        help="Show version number",
    )

    cfg_group = parser.add_argument_group("Browser Configuration")
    cfg_group.add_argument(
        "--certs",
        nargs="+",
        type=Path,
        help="Install trusted certificates.",
    )
    cfg_group.add_argument(
        "--display",
        choices=sorted(x.name.lower() for x in DisplayMode),
        default=DisplayMode.DEFAULT.name,
        help="Display mode.",
    )
    cfg_group.add_argument(
        "-e",
        "--extension",
        action="append",
        type=Path,
        help="Install extensions. Specify the path to the xpi or the directory "
        "containing the unpacked extension.",
    )
    cfg_group.add_argument(
        "--marionette",
        const=0,
        default=None,
        nargs="?",
        type=int,
        help="Enable marionette. If a port is provided it is used otherwise "
        "a random port is selected. (default: disabled)",
    )
    cfg_group.add_argument(
        "-p",
        "--prefs",
        type=Path,
        help="Custom prefs.js file to use (default: profile default)",
    )
    cfg_group.add_argument(
        "-P",
        "--profile",
        type=Path,
        help="Profile to use. This is non-destructive. A copy of the target profile "
        "will be used. (default: temporary profile)",
    )
    cfg_group.add_argument(
        "-u", "--url", help="Server URL or path to local file to load."
    )

    report_group = parser.add_argument_group("Issue Detection & Reporting")
    report_group.add_argument(
        "-a",
        "--abort-token",
        action="append",
        default=[],
        help="Scan the browser logs for the given value and close browser if detected. "
        "For example '-a ###!!! ASSERTION:' would be used to detect soft assertions.",
    )
    report_group.add_argument(
        "--launch-timeout",
        type=int,
        default=300,
        help="Number of seconds to wait for the browser to become "
        "responsive after launching. (default: %(default)s)",
    )
    report_group.add_argument(
        "-l",
        "--logs",
        default=Path.cwd(),
        type=Path,
        help="Location to save browser logs. "
        "A sub-directory containing the browser logs will be created.",
    )
    report_group.add_argument(
        "--log-limit",
        type=int,
        default=0,
        help="Browser log file size limit in MBs (default: %(default)s, no limit)",
    )
    report_group.add_argument(
        "-m",
        "--memory",
        type=int,
        default=0,
        help="Browser memory limit in MBs (default: %(default)s, no limit)",
    )
    report_group.add_argument(
        "--poll-interval",
        type=float,
        default=0.5,
        help="Delay between checks for results (default: %(default)s)",
    )
    report_group.add_argument(
        "--save-all",
        action="store_true",
        help="Always save logs."
208 | " By default logs are saved only when an issue is detected.", 209 | ) 210 | 211 | parser.set_defaults(debugger=Debugger.NONE) 212 | if system() == "Linux": 213 | dbg_group = parser.add_argument_group("Available Debuggers") 214 | # Add the mutually exclusive group to a regular group 215 | # because mutually exclusive groups don't accept a title 216 | dbg_group = dbg_group.add_mutually_exclusive_group() 217 | dbg_group.add_argument( 218 | "--gdb", 219 | action="store_const", 220 | const=Debugger.GDB, 221 | dest="debugger", 222 | help="Use GDB.", 223 | ) 224 | dbg_group.add_argument( 225 | "--pernosco", 226 | action="store_const", 227 | const=Debugger.PERNOSCO, 228 | dest="debugger", 229 | help="Use rr. Trace intended to be submitted to Pernosco.", 230 | ) 231 | dbg_group.add_argument( 232 | "--rr", 233 | action="store_const", 234 | const=Debugger.RR, 235 | dest="debugger", 236 | help="Use rr.", 237 | ) 238 | dbg_group.add_argument( 239 | "--valgrind", 240 | action="store_const", 241 | const=Debugger.VALGRIND, 242 | dest="debugger", 243 | help="Use Valgrind.", 244 | ) 245 | 246 | args = parser.parse_args(argv) 247 | 248 | # sanity checks 249 | if not args.binary.is_file(): 250 | parser.error(f"Invalid browser binary '{args.binary}'") 251 | if args.certs: 252 | if not certutil_available(certutil_find(args.binary)): 253 | parser.error("'--certs' requires NSS certutil") 254 | for cert in args.certs: 255 | if not cert.is_file(): 256 | parser.error(f"Invalid certificate file '{cert}'") 257 | if args.extension: 258 | for ext in args.extension: 259 | if not ext.exists(): 260 | parser.error(f"Extension '{ext}' does not exist") 261 | if args.debugger in (Debugger.PERNOSCO, Debugger.RR): 262 | # rr is only supported on Linux 263 | if not which("rr"): 264 | parser.error("rr is not installed") 265 | settings = "/proc/sys/kernel/perf_event_paranoid" 266 | value = int(Path(settings).read_bytes()) 267 | if value > 1: 268 | parser.error(f"rr needs {settings} <= 1, but it is 
{value}") 269 | if args.marionette is not None and not Bootstrapper.check_port(args.marionette): 270 | parser.error("--marionette must be 0 or > 1024 and < 65536") 271 | if not args.logs.is_dir(): 272 | parser.error(f"Log output directory is invalid '{args.logs}'") 273 | args.log_level = log_level_map[args.log_level] 274 | if args.log_limit < 0: 275 | parser.error("--log-limit must be >= 0") 276 | args.log_limit *= 1_048_576 277 | if args.memory < 0: 278 | parser.error("--memory must be >= 0") 279 | args.memory *= 1_048_576 280 | if args.prefs is not None and not args.prefs.is_file(): 281 | parser.error(f"Invalid prefs.js file '{args.prefs}'") 282 | 283 | return args 284 | 285 | 286 | def main(argv: list[str] | None = None) -> None: 287 | """FFPuppet main entry point.""" 288 | args = parse_args(argv) 289 | # set output verbosity 290 | if args.log_level == DEBUG: 291 | date_fmt = None 292 | log_fmt = "%(asctime)s %(levelname).1s %(name)s | %(message)s" 293 | else: 294 | date_fmt = "%Y-%m-%d %H:%M:%S" 295 | log_fmt = "[%(asctime)s] %(message)s" 296 | basicConfig(format=log_fmt, datefmt=date_fmt, level=args.log_level) 297 | 298 | ffp = FFPuppet( 299 | debugger=args.debugger, 300 | display_mode=DisplayMode[args.display.upper()], 301 | use_profile=args.profile, 302 | ) 303 | for a_token in args.abort_token: 304 | ffp.add_abort_token(a_token) 305 | 306 | user_exit = False 307 | try: 308 | LOG.info("Launching Firefox...") 309 | ffp.launch( 310 | args.binary, 311 | location=args.url, 312 | launch_timeout=args.launch_timeout, 313 | log_limit=args.log_limit, 314 | marionette=args.marionette, 315 | memory_limit=args.memory, 316 | prefs_js=args.prefs, 317 | extension=args.extension, 318 | cert_files=args.certs, 319 | ) 320 | if args.prefs and args.prefs.is_file(): 321 | assert ffp.profile is not None 322 | assert ffp.profile.path is not None 323 | Profile.check_prefs(ffp.profile.path / "prefs.js", args.prefs) 324 | if ffp.marionette is not None: 325 | LOG.info("Marionette 
listening on port: %d", ffp.marionette) 326 | LOG.info("Running Firefox (pid: %d)...", ffp.get_pid()) 327 | while ffp.is_healthy(): 328 | sleep(args.poll_interval) 329 | except KeyboardInterrupt: 330 | user_exit = True 331 | LOG.info("Ctrl+C detected.") 332 | except LaunchError as exc: 333 | LOG.error("Launch failed: %s", exc) 334 | finally: 335 | LOG.info("Shutting down...") 336 | ffp.close() 337 | if ffp.reason is not None: 338 | LOG.info("Firefox process is closed. (Reason: %s)", ffp.reason.name) 339 | else: 340 | LOG.error("FFPuppet.close() failed") 341 | logs = Path(mkdtemp(prefix=strftime("%Y%m%d-%H%M%S_ffp_logs_"), dir=args.logs)) 342 | ffp.save_logs(logs, logs_only=user_exit) 343 | if args.display_logs: 344 | LOG.info("Displaying logs...%s", dump_to_console(logs)) 345 | if ffp.reason == Reason.ALERT or args.save_all: 346 | LOG.info("Browser logs available here '%s'", logs.resolve()) 347 | else: 348 | rmtree(logs, ignore_errors=True) 349 | ffp.clean_up() 350 | -------------------------------------------------------------------------------- /src/ffpuppet/minidump_parser.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ffpuppet minidump parsing module""" 5 | 6 | from __future__ import annotations 7 | 8 | from json import JSONDecodeError, load 9 | from logging import DEBUG, INFO, basicConfig, getLogger 10 | from pathlib import Path 11 | from shutil import rmtree, which 12 | from subprocess import CalledProcessError, TimeoutExpired, run 13 | from tempfile import TemporaryFile, mkdtemp 14 | from typing import IO, Any 15 | 16 | EXTRA_FIELDS = ( 17 | # android only 18 | "CrashType", 19 | # Reason why the cycle collector crashed. 
20 | "CycleCollector", 21 | # usually assertions message 22 | "MozCrashReason", 23 | # Set if the crash was the result of a hang, with a value which describes the 24 | # type of hang (e.g. "ui" or "shutdown"). 25 | "Hang", 26 | # Set before a content process crashes because of an IPC channel error, holds 27 | # a description of the error. 28 | "ipc_channel_error", 29 | "ShutdownReason", 30 | ) 31 | LOG = getLogger(__name__) 32 | MDSW_URL = "https://lib.rs/crates/minidump-stackwalk" 33 | SYMS_URL = "https://symbols.mozilla.org/" 34 | 35 | __author__ = "Tyson Smith" 36 | 37 | 38 | class MinidumpParser: 39 | """Parse minidump files via minidump-stackwalk. 40 | 41 | Attributes: 42 | symbols: Path containing debug symbols. 43 | """ 44 | 45 | MDSW_BIN = which("minidump-stackwalk") 46 | 47 | __slots__ = ("_storage", "_symbols") 48 | 49 | def __init__(self, symbols: Path | None = None) -> None: 50 | self._storage = Path(mkdtemp(prefix="md-parser-")) 51 | self._symbols = symbols 52 | 53 | def __enter__(self) -> MinidumpParser: 54 | return self 55 | 56 | def __exit__(self, *exc: object) -> None: 57 | self.close() 58 | 59 | def _cmd(self, src: Path) -> list[str]: 60 | """Generate minidump-stackwalk command line. 61 | 62 | Args: 63 | src: minidump to load. 64 | 65 | Returns: 66 | Command line. 67 | """ 68 | assert self.MDSW_BIN 69 | cmd = [self.MDSW_BIN, "--no-color", "--no-interactive", "--json"] 70 | if self._symbols: 71 | cmd.extend(["--symbols-path", str(self._symbols.resolve(strict=True))]) 72 | else: 73 | cmd.extend(["--symbols-url", SYMS_URL]) 74 | cmd.append(str(src.resolve(strict=True))) 75 | return cmd 76 | 77 | @staticmethod 78 | def _metadata(dmp: Path, fields: tuple[str, ...]) -> dict[str, str]: 79 | """Collect metadata from .extra file. 80 | 81 | Args: 82 | dmp: Matching minidump file. 83 | fields: Fields to collect if available. 84 | 85 | Returns: 86 | Metadata from .extra file. 
87 | """ 88 | extra = dmp.with_suffix(".extra") 89 | metadata: dict[str, str] = {} 90 | if extra.is_file(): 91 | with extra.open("r") as extra_fp: 92 | try: 93 | extra_data = load(extra_fp) 94 | except JSONDecodeError: 95 | LOG.warning("Invalid json in: %s", extra) 96 | return {} 97 | for entry in fields: 98 | if entry in extra_data: 99 | metadata[entry] = str(extra_data[entry]) 100 | return metadata 101 | 102 | @staticmethod 103 | def _fmt_output( 104 | data: dict[str, Any], 105 | out_fp: IO[bytes], 106 | metadata: dict[str, str], 107 | limit: int = 150, 108 | ) -> None: 109 | """Write summarized contents of a minidump to a file in a format that is 110 | consumable by FuzzManager. 111 | 112 | Args: 113 | data: Minidump contents. 114 | out_fp: Formatted content destination. 115 | metadata: Extra file contents. 116 | limit: Maximum number of stack frames to include. 117 | 118 | Returns: 119 | None 120 | """ 121 | assert limit > 0 122 | # generate register information lines 123 | try: 124 | frames = data["crashing_thread"]["frames"] 125 | except KeyError: 126 | LOG.warning("No frames available for 'crashing thread'") 127 | frames = [] 128 | if frames: 129 | reg_lines: list[str] = [] 130 | for reg, value in frames[0]["registers"].items(): 131 | # display three registers per line 132 | sep = "\t" if (len(reg_lines) + 1) % 3 else "\n" 133 | reg_lines.append(f"{reg:>3} = {value}{sep}") 134 | out_fp.write("".join(reg_lines).rstrip().encode()) 135 | out_fp.write(b"\n") 136 | 137 | # include metadata 138 | for entry in metadata.items(): 139 | out_fp.write("|".join(entry).encode()) 140 | out_fp.write(b"\n") 141 | 142 | # generate OS information line 143 | line = "|".join( 144 | ("OS", data["system_info"]["os"], data["system_info"]["os_ver"]) 145 | ) 146 | out_fp.write(line.encode()) 147 | out_fp.write(b"\n") 148 | 149 | # generate CPU information line 150 | line = "|".join( 151 | ( 152 | "CPU", 153 | data["system_info"]["cpu_arch"] or "unknown", 154 | 
data["system_info"]["cpu_info"] or "", 155 | str(data["system_info"]["cpu_count"]), 156 | ) 157 | ) 158 | out_fp.write(line.encode()) 159 | out_fp.write(b"\n") 160 | 161 | # generate Crash information line 162 | crashing_thread = str(data["crash_info"].get("crashing_thread", "?")) 163 | line = "|".join( 164 | ( 165 | "Crash", 166 | data["crash_info"]["type"], 167 | data["crash_info"]["address"], 168 | crashing_thread, 169 | ) 170 | ) 171 | out_fp.write(line.encode()) 172 | out_fp.write(b"\n") 173 | 174 | # generate Frame information lines 175 | for frame in frames[:limit]: 176 | if frame["function_offset"]: 177 | # remove the padding zeros 178 | func_offset = hex(int(frame["function_offset"], 16)) 179 | else: 180 | func_offset = "" 181 | line = "|".join( 182 | ( 183 | crashing_thread, 184 | str(frame["frame"]), 185 | frame["module"] or "", 186 | frame["function"] or "", 187 | frame["file"] or "", 188 | str(frame["line"] or ""), 189 | func_offset, 190 | ) 191 | ) 192 | out_fp.write(line.encode()) 193 | out_fp.write(b"\n") 194 | 195 | if limit < len(frames): 196 | out_fp.write(b"WARNING: Hit stack size output limit!\n") 197 | 198 | def close(self) -> None: 199 | """Remove working data. 200 | 201 | Args: 202 | None 203 | 204 | Returns: 205 | None 206 | """ 207 | if self._storage.is_dir(): 208 | rmtree(self._storage) 209 | 210 | def create_log(self, src: Path, filename: str, timeout: int = 300) -> Path: 211 | """Create a human readable log from a minidump file. 212 | 213 | Args: 214 | src: Minidump file. 215 | filename: Name to use for output file. 216 | timeout: Maximum runtime of minidump-stackwalk. NOTE: Symbols may be 217 | downloaded if not provided which can add overhead. 218 | 219 | Returns: 220 | Log file. 
221 | """ 222 | assert filename 223 | assert timeout >= 0 224 | 225 | # collect data from .extra file if it exists 226 | metadata = self._metadata(src, EXTRA_FIELDS) 227 | 228 | cmd = self._cmd(src) 229 | dst = self._storage / filename 230 | with ( 231 | TemporaryFile(dir=self._storage, prefix="mdsw_out_") as out_fp, 232 | TemporaryFile(dir=self._storage, prefix="mdsw_err_") as err_fp, 233 | ): 234 | LOG.debug("running '%s'", " ".join(cmd)) 235 | try: 236 | run(cmd, check=True, stderr=err_fp, stdout=out_fp, timeout=timeout) 237 | out_fp.seek(0) 238 | # load json, format data and write log 239 | with dst.open("wb") as log_fp: 240 | self._fmt_output(load(out_fp), log_fp, metadata) 241 | except (CalledProcessError, JSONDecodeError, TimeoutExpired) as exc: 242 | if isinstance(exc, CalledProcessError): 243 | msg = f"minidump-stackwalk failed ({exc.returncode})" 244 | elif isinstance(exc, JSONDecodeError): 245 | msg = "json decode error" 246 | else: 247 | msg = "minidump-stackwalk timeout" 248 | LOG.warning("Failed to parse minidump: %s", msg) 249 | err_fp.seek(0) 250 | out_fp.seek(0) 251 | # write log 252 | with dst.open("wb") as log_fp: 253 | log_fp.write(f"Failed to parse minidump: {msg}".encode()) 254 | log_fp.write(b"\n\nminidump-stackwalk stderr:\n") 255 | log_fp.write(err_fp.read()) 256 | log_fp.write(b"\n\nminidump-stackwalk stdout:\n") 257 | log_fp.write(out_fp.read()) 258 | return dst 259 | 260 | @staticmethod 261 | def dmp_files(src_dir: Path) -> list[Path]: 262 | """Scan a directory for minidump (.dmp) files. Prioritize files that also have 263 | a MozCrashReason entry in the supporting .extra file. 264 | 265 | Args: 266 | src_dir: Directory containing minidump files. 267 | 268 | Returns: 269 | Dump files. 
270 | """ 271 | prioritize: set[str] = set() 272 | for entry in sorted(src_dir.glob("*.extra")): 273 | with entry.open("r") as out_fp: 274 | try: 275 | extra_data = load(out_fp) 276 | except JSONDecodeError: 277 | extra_data = {} 278 | LOG.debug("invalid json in: %s", extra_data) 279 | if "additional_minidumps" in extra_data: 280 | for other in extra_data["additional_minidumps"].split(","): 281 | prioritize.add(f"{entry.stem}-{other}.dmp") 282 | elif "MozCrashReason" in extra_data: 283 | prioritize.add(f"{entry.stem}.dmp") 284 | 285 | dmps: list[Path] = [] 286 | for dmp in sorted(src_dir.glob("*.dmp"), key=lambda x: x.stat().st_mtime): 287 | if dmp.name in prioritize: 288 | dmps.insert(0, dmp) 289 | else: 290 | dmps.append(dmp) 291 | return dmps 292 | 293 | @classmethod 294 | def mdsw_available(cls, min_version: str = "0.15.2") -> bool: 295 | """Check if minidump-stackwalk binary is available. 296 | 297 | Args: 298 | min_version: Minimum supported minidump-stackwalk version. 299 | 300 | Returns: 301 | True if binary is available otherwise False. 
302 | """ 303 | assert min_version.count(".") == 2 304 | 305 | if not cls.MDSW_BIN: 306 | LOG.debug("minidump-stackwalk not found") 307 | return False 308 | try: 309 | result = run([cls.MDSW_BIN, "--version"], check=False, capture_output=True) 310 | except OSError: 311 | LOG.debug("minidump-stackwalk not available (%s)", cls.MDSW_BIN) 312 | return False 313 | LOG.debug("using minidump-stackwalk (%s)", cls.MDSW_BIN) 314 | # expected output is 'minidump-stackwalk #.#.#' 315 | current_version = result.stdout.strip().split()[-1].decode() 316 | if current_version.count(".") != 2: 317 | LOG.error( 318 | "Unknown minidump-stackwalk version: '%s'", 319 | result.stdout.decode(errors="ignore"), 320 | ) 321 | return False 322 | # version check 323 | for cver, mver in zip(current_version.split("."), min_version.split(".")): 324 | if int(cver) > int(mver): 325 | break 326 | if int(cver) < int(mver): 327 | LOG.error( 328 | "minidump-stackwalk '%s' is unsupported (minimum '%s')", 329 | current_version, 330 | min_version, 331 | ) 332 | return False 333 | LOG.debug("detected minidump-stackwalk version '%s'", current_version) 334 | return True 335 | 336 | 337 | if __name__ == "__main__": 338 | from argparse import ArgumentParser 339 | 340 | parser = ArgumentParser() 341 | parser.add_argument("minidump", type=Path, help="Minidump to process.") 342 | parser.add_argument("--debug", action="store_true", help="Display debug output.") 343 | parser.add_argument( 344 | "--symbols", 345 | type=Path, 346 | help="Local symbols directory. 
" 347 | f"If not provided attempt to download symbols from {SYMS_URL}", 348 | ) 349 | args = parser.parse_args() 350 | 351 | # set output verbosity 352 | if args.debug: 353 | basicConfig(format="[%(levelname).1s] %(message)s", level=DEBUG) 354 | else: 355 | basicConfig(format="%(message)s", level=INFO) 356 | 357 | if MinidumpParser.mdsw_available(): 358 | with MinidumpParser(symbols=args.symbols) as md_parser: 359 | log = md_parser.create_log(args.minidump, "minidump_tmp.txt") 360 | LOG.info("Parsed %s\n%s", args.minidump.resolve(), log.read_text()) 361 | else: 362 | LOG.error( 363 | "Unable to process minidump, minidump-stackwalk is required. %s", MDSW_URL 364 | ) 365 | -------------------------------------------------------------------------------- /src/ffpuppet/process_tree.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
"""ffpuppet process tree module"""

from __future__ import annotations

import sys
from contextlib import suppress
from logging import getLogger
from os import getenv
from pathlib import Path
from time import perf_counter, sleep
from typing import TYPE_CHECKING, Callable, cast

from psutil import (
    STATUS_ZOMBIE,
    AccessDenied,
    NoSuchProcess,
    Process,
    TimeoutExpired,
    wait_procs,
)

from .exceptions import TerminateError

if TYPE_CHECKING:
    from collections.abc import Generator, Iterable
    from subprocess import Popen

if sys.platform != "win32":
    from signal import SIGUSR1, Signals  # pylint: disable=no-name-in-module

    # SIGUSR1 triggers gcov flushing in coverage builds (non-Windows only)
    COVERAGE_SIGNAL: Signals | None = SIGUSR1
    IS_WINDOWS = False
else:
    COVERAGE_SIGNAL = None
    IS_WINDOWS = True


LOG = getLogger(__name__)


def _filter_zombies(procs: Iterable[Process]) -> Generator[Process]:
    """Filter out zombie processes from a collection of processes.

    Args:
        procs: Processes to check.

    Yields:
        Processes that are not zombies.
    """
    for proc in procs:
        with suppress(AccessDenied, NoSuchProcess):
            if proc.status() == STATUS_ZOMBIE:
                LOG.debug("filtering zombie: %d - %s", proc.pid, proc.name())
                continue
            yield proc


def _last_modified(scan_dir: Path) -> float | None:
    """Scan directory recursively and find the latest modified date of all .gcda files.

    Args:
        scan_dir: Directory to scan.

    Returns:
        Last modified date or None if no files are found.
    """
    # max() raises ValueError on an empty sequence (no .gcda files found)
    with suppress(ValueError):
        return max(x.stat().st_mtime for x in scan_dir.glob("**/*.gcda"))
    return None


def _safe_wait_procs(
    procs: Iterable[Process],
    timeout: float | None = 0,
    callback: Callable[[Process], object] | None = None,
) -> tuple[list[Process], list[Process]]:
    """Wrapper for psutil.wait_procs() to avoid AccessDenied.
    This can be an issue on Windows.

    Args:
        See psutil.wait_procs().

    Returns:
        See psutil.wait_procs().
    """
    assert timeout is None or timeout >= 0

    # retry wait_procs() until the deadline if it raises AccessDenied
    deadline = None if timeout is None else perf_counter() + timeout
    while True:
        remaining = None if deadline is None else max(deadline - perf_counter(), 0)
        with suppress(AccessDenied):
            return cast(
                "tuple[list[Process], list[Process]]",
                wait_procs(procs, timeout=remaining, callback=callback),
            )
        if deadline is not None and deadline <= perf_counter():
            break
        sleep(0.25)

    # manually check processes (fallback when wait_procs() kept failing)
    alive: list[Process] = []
    gone: list[Process] = []
    for proc in procs:
        try:
            if not proc.is_running():
                gone.append(proc)
            else:
                alive.append(proc)
        except AccessDenied:  # noqa: PERF203
            # cannot inspect it, conservatively treat it as still running
            alive.append(proc)
        except NoSuchProcess:
            gone.append(proc)
    return (gone, alive)


def _writing_coverage(procs: Iterable[Process]) -> bool:
    """Check if any processes have open .gcda files.

    Args:
        procs: Processes to check.

    Returns:
        True if processes with open .gcda files are found.
    """
    for proc in procs:
        with suppress(AccessDenied, NoSuchProcess):
            if any(x for x in proc.open_files() if x.path.endswith(".gcda")):
                return True
    return False


class ProcessTree:
    """Manage the Firefox process tree. The process tree layout depends on the platform.
    Windows:
        python -> firefox (launcher) -> firefox (parent) -> firefox (content procs)

    Linux and others:
        python -> firefox (parent) -> firefox (content procs)
    """

    __slots__ = ("_launcher", "_launcher_check", "_proc", "parent")

    def __init__(self, proc: Popen[bytes]) -> None:
        # launcher process, lazily detected by the `launcher` property
        self._launcher: Process | None = None
        # only perform the launcher check on Windows
        self._launcher_check = IS_WINDOWS
        self._proc = proc
        self.parent: Process = Process(proc.pid)

    def cpu_usage(self) -> Generator[tuple[int, float]]:
        """Collect percentage of CPU usage per process.

        Note: the returned value can be > 100.0 in case of a process running multiple
        threads on different CPU cores.
        See: https://psutil.readthedocs.io/en/latest/#psutil.Process.cpu_percent

        This value is not divided by CPU count because we are typically more concerned
        with the low end for detecting idle processes.

        Args:
            None

        Yields:
            PID and the CPU usage as a percentage.
        """
        procs = self.processes()
        # prime cpu_percent(); the first call always returns 0.0
        for proc in procs:
            with suppress(AccessDenied, NoSuchProcess):
                proc.cpu_percent()
        # psutil recommends at least '0.1'.
        sleep(0.1)
        for proc in procs:
            with suppress(AccessDenied, NoSuchProcess):
                yield proc.pid, proc.cpu_percent()

    def dump_coverage(self, timeout: int = 15, idle_wait: int = 2) -> bool:
        """Signal processes to write coverage data to disk. Running coverage builds in
        parallel that are writing to the same location on disk is not recommended.
        NOTE: Coverage data is also written when launching and closing the browser.

        Args:
            timeout: Number of seconds to wait for data to be written to disk.
            idle_wait: Number of seconds to wait to determine if update is complete.

        Returns:
            True if coverage is written to disk or processes exit otherwise False.
        """
        assert COVERAGE_SIGNAL is not None
        assert getenv("GCOV_PREFIX_STRIP"), "GCOV_PREFIX_STRIP not set"
        assert getenv("GCOV_PREFIX"), "GCOV_PREFIX not set"
        # coverage output can take a few seconds to start and complete
        assert timeout > 5
        cov_path = Path(getenv("GCOV_PREFIX", ""))
        last_mdate = _last_modified(cov_path) or 0
        signaled = 0
        # send COVERAGE_SIGNAL (SIGUSR1) to browser processes
        for proc in self.processes():
            with suppress(AccessDenied, NoSuchProcess):
                proc.send_signal(COVERAGE_SIGNAL)
                signaled += 1
        # no processes signaled
        if signaled == 0:
            LOG.debug("coverage signal not sent, no browser processes found")
            return True
        # wait for processes to write .gcda files (typically takes ~2 seconds)
        start_time = perf_counter()
        last_change = None
        while True:
            if not self.is_running():
                LOG.debug("not running waiting for coverage dump")
                return True
            # collect latest last modified dates
            mdate = _last_modified(cov_path) or 0
            # check if gcda files have been updated
            now = perf_counter()
            elapsed = now - start_time
            if mdate > last_mdate:
                last_change = now
                last_mdate = mdate
            # check if gcda write is complete (wait)
            # complete == files changed, then stayed quiet for idle_wait seconds
            # and no process still holds a .gcda file open
            if (
                last_change is not None
                and now - last_change > idle_wait
                and not _writing_coverage(self.processes())
            ):
                LOG.debug("coverage (gcda) dump took %0.2fs", elapsed)
                return True
            # check if max duration has been exceeded
            if elapsed >= timeout:
                if last_change is None:
                    LOG.warning("Coverage files not modified after %0.2fs", elapsed)
                else:
                    LOG.warning("Coverage file open after %0.2fs", elapsed)
                break
            sleep(0.25)
        return False

    def is_running(self) -> bool:
        """Check if parent process is running.

        Args:
            None

        Returns:
            True if the parent process is running otherwise False
        """
        return self._poll(self.parent) is None

    @property
    def launcher(self) -> Process | None:
        """Inspect process tree and identify the browser launcher and parent processes.

        Args:
            None

        Returns:
            Launcher process if one is detected otherwise None.
        """
        if self._launcher_check and self._launcher is None:
            try:
                cmd = self.parent.cmdline()
            except (AccessDenied, NoSuchProcess):  # pragma: no cover
                LOG.debug("call to self.parent.cmdline() failed")
                cmd = []
            # check if launcher process is in use
            if "-no-deelevate" in cmd:
                launcher_children = self.parent.children(recursive=False)
                # launcher should only have one child process
                if len(launcher_children) == 1:
                    LOG.debug("launcher process detected")
                    # promote the single child to parent, keep launcher separately
                    self._launcher = self.parent
                    self.parent = launcher_children[0]
                else:
                    # this is expected behaviour when setting:
                    # - `browser.launcherProcess.enabled=false`
                    # it can also happen for unknown reasons...
                    LOG.debug(
                        "using launcher as parent, %d child proc(s) detected",
                        len(launcher_children),
                    )
                self._launcher_check = False
        return self._launcher

    @staticmethod
    def _poll(proc: Process) -> int | None:
        """Poll a given process.

        Args:
            proc: Process to poll.

        Returns:
            None if the process is running otherwise the exit code is returned.
        """
        try:
            # `or 0` maps a None exit status (still reaped elsewhere) to 0
            return proc.wait(timeout=0) or 0
        except NoSuchProcess:
            LOG.debug("called poll() on process that does not exist")
            return 0
        except (AccessDenied, TimeoutExpired):
            return None

    def processes(self, recursive: bool = False) -> list[Process]:
        """Processes in the process tree.

        Args:
            recursive: If False only the parent and child processes are returned.

        Returns:
            Processes in the process tree.
        """
        procs: list[Process] = []
        if self.launcher is not None and self._poll(self.launcher) is None:
            procs.append(self.launcher)
        if self._poll(self.parent) is None:
            procs.append(self.parent)
        with suppress(AccessDenied, NoSuchProcess):
            procs.extend(self.parent.children(recursive=recursive))
        return procs

    def terminate(self) -> None:
        """Call terminate() on browser processes. If terminate() fails try kill().

        Args:
            None

        Returns:
            None
        """
        procs = self.processes(recursive=True)
        if not procs:
            LOG.debug("no processes to terminate")
            return

        # try terminating the parent process first, this should be all that is needed
        if self._poll(self.parent) is None:
            with suppress(AccessDenied, NoSuchProcess, TimeoutExpired):
                LOG.debug("attempting to terminate parent (%d)", self.parent.pid)
                self.parent.terminate()
                self.parent.wait(timeout=10)
            # remaining processes should exit if parent process is gone
            procs = list(_filter_zombies(_safe_wait_procs(procs, timeout=1)[1]))

        # escalate: one terminate() pass, then one kill() pass
        use_kill = False
        while procs:
            LOG.debug(
                "calling %s on %d running process(es)",
                "kill()" if use_kill else "terminate()",
                len(procs),
            )
            # iterate over processes and call terminate()/kill()
            for proc in procs:
                with suppress(AccessDenied, NoSuchProcess):
                    if use_kill:
                        proc.kill()
                    else:
                        proc.terminate()
            # wait for processes to terminate
            procs = list(_filter_zombies(_safe_wait_procs(procs, timeout=30)[1]))
            if use_kill:
                break
            use_kill = True

        if procs:
            LOG.warning("Processes still running: %d", len(procs))
            for proc in procs:
                with suppress(AccessDenied, NoSuchProcess):
                    LOG.warning("-> %d: %s (%s)", proc.pid, proc.name(), proc.status())
            raise TerminateError("Failed to terminate processes")

    def wait(self, timeout: int = 300) -> int:
        """Wait for parent process to exit.

        Args:
            timeout: Maximum time to wait before raising TimeoutExpired.

        Returns:
            Process exit code.
        """
        with suppress(AccessDenied, NoSuchProcess):
            return self.parent.wait(timeout=timeout) or 0
        return 0  # pragma: no cover

    def wait_procs(self, timeout: float | None = 0) -> int:
        """Wait for process tree to exit.

        Args:
            timeout: Maximum time to wait.

        Returns:
            Number of processes still alive.
        """
        return len(_safe_wait_procs(self.processes(), timeout=timeout)[1])
--------------------------------------------------------------------------------