├── test ├── __init__.py ├── format │ ├── __init__.py │ ├── conftest.py │ ├── test_markdown.py │ ├── test_columns.py │ ├── test_cyclonedx.py │ └── test_json.py ├── service │ ├── __init__.py │ ├── test_interface.py │ └── test_osv.py ├── dependency_source │ ├── __init__.py │ ├── test_interface.py │ ├── test_pylock.py │ ├── test_pyproject.py │ └── test_pip.py ├── assets │ ├── pylock.invalid.toml │ ├── pylock.invalid-version.toml │ ├── pylock.missing-packages.toml │ ├── pylock.package-missing-name.toml │ ├── pylock.skipped.toml │ ├── pylock.missing-version.toml │ └── pylock.basic.toml ├── test_version.py ├── test_subprocess.py ├── test_util.py ├── test_state.py ├── test_fix.py ├── test_virtual_env.py ├── test_cache.py ├── conftest.py ├── test_audit.py └── test_cli.py ├── pip_audit ├── __init__.py ├── __main__.py ├── _format │ ├── __init__.py │ ├── interface.py │ ├── cyclonedx.py │ ├── json.py │ ├── markdown.py │ └── columns.py ├── _service │ ├── __init__.py │ ├── pypi.py │ ├── esms.py │ ├── osv.py │ └── interface.py ├── _dependency_source │ ├── __init__.py │ ├── interface.py │ ├── pylock.py │ ├── pyproject.py │ └── pip.py ├── _util.py ├── _subprocess.py ├── _audit.py ├── _fix.py ├── _cache.py ├── _virtual_env.py └── _state.py ├── .gitignore ├── .editorconfig ├── .pre-commit-hooks.yaml ├── .github ├── dependabot.yml ├── workflows │ ├── zizmor.yml │ ├── release.yml │ ├── lint.yml │ ├── docs.yml │ ├── scorecards.yml │ └── ci.yml └── ISSUE_TEMPLATE │ ├── feature-request.yml │ └── bug-report.yml ├── .pre-commit-config.yaml ├── Makefile ├── pyproject.toml └── CONTRIBUTING.md /test/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/format/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/service/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/dependency_source/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/assets/pylock.invalid.toml: -------------------------------------------------------------------------------- 1 | this is not valid toml! 2 | -------------------------------------------------------------------------------- /test/assets/pylock.invalid-version.toml: -------------------------------------------------------------------------------- 1 | lock-version = '666' 2 | -------------------------------------------------------------------------------- /pip_audit/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The `pip_audit` APIs. 
3 | """ 4 | 5 | __version__ = "2.10.0" 6 | -------------------------------------------------------------------------------- /test/test_version.py: -------------------------------------------------------------------------------- 1 | import pip_audit 2 | 3 | 4 | def test_version(): 5 | assert isinstance(pip_audit.__version__, str) 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | env/ 2 | pip-wheel-metadata/ 3 | *.egg-info/ 4 | __pycache__/ 5 | .coverage* 6 | html/ 7 | dist/ 8 | .python-version 9 | /.pytest_cache/ 10 | -------------------------------------------------------------------------------- /pip_audit/__main__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The `python -m pip_audit` entrypoint. 3 | """ 4 | 5 | if __name__ == "__main__": # pragma: no cover 6 | from pip_audit._cli import audit 7 | 8 | audit() 9 | -------------------------------------------------------------------------------- /test/assets/pylock.missing-packages.toml: -------------------------------------------------------------------------------- 1 | lock-version = '1.0' 2 | environments = ["sys_platform == 'win32'", "sys_platform == 'linux'"] 3 | requires-python = '==3.12' 4 | created-by = 'mousebender' 5 | -------------------------------------------------------------------------------- /test/test_subprocess.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from pip_audit._subprocess import CalledProcessError, run 4 | 5 | 6 | def test_run_raises(): 7 | with pytest.raises(CalledProcessError): 8 | run(["false"]) 9 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | charset = utf-8 7 | 8 | [*.py] 9 | indent_style = space 10 | indent_size = 4 11 | 12 | [Makefile] 13 | indent_style = tab 14 | -------------------------------------------------------------------------------- /.pre-commit-hooks.yaml: -------------------------------------------------------------------------------- 1 | - id: pip-audit 2 | name: pip-audit 3 | description: "Audits Python environments and dependency trees for known vulnerabilities" 4 | entry: pip-audit 5 | pass_filenames: false 6 | language: python 7 | -------------------------------------------------------------------------------- /test/test_util.py: -------------------------------------------------------------------------------- 1 | from packaging.version import Version 2 | 3 | import pip_audit._util as util 4 | 5 | 6 | def test_python_version(): 7 | v = util.python_version() 8 | assert v is not None 9 | assert isinstance(v, Version) 10 | -------------------------------------------------------------------------------- /test/dependency_source/test_interface.py: -------------------------------------------------------------------------------- 1 | from pip_audit._service.interface import Dependency 2 | 3 | 4 | def test_dependency_source(dep_source): 5 | source = dep_source() 6 | 7 | for spec in source.collect(): 8 | assert isinstance(spec, Dependency) 9 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 
2 2 | 3 | updates: 4 | - package-ecosystem: pip 5 | directory: / 6 | schedule: 7 | interval: daily 8 | cooldown: 9 | default-days: 7 10 | 11 | - package-ecosystem: github-actions 12 | directory: / 13 | schedule: 14 | interval: daily 15 | cooldown: 16 | default-days: 7 17 | -------------------------------------------------------------------------------- /pip_audit/_format/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Output format interfaces and implementations for `pip-audit`. 3 | """ 4 | 5 | from .columns import ColumnsFormat 6 | from .cyclonedx import CycloneDxFormat 7 | from .interface import VulnerabilityFormat 8 | from .json import JsonFormat 9 | from .markdown import MarkdownFormat 10 | 11 | __all__ = [ 12 | "ColumnsFormat", 13 | "CycloneDxFormat", 14 | "VulnerabilityFormat", 15 | "JsonFormat", 16 | "MarkdownFormat", 17 | ] 18 | -------------------------------------------------------------------------------- /test/assets/pylock.package-missing-name.toml: -------------------------------------------------------------------------------- 1 | lock-version = '1.0' 2 | environments = ["sys_platform == 'win32'", "sys_platform == 'linux'"] 3 | requires-python = '==3.12' 4 | created-by = 'mousebender' 5 | 6 | [[packages]] 7 | version = '25.1.0' 8 | requires-python = '>=3.8' 9 | wheels = [ 10 | { name = 'attrs-25.1.0-py3-none-any.whl', upload-time = 2025-01-25T11:30:10.164985+00:00, url = 'https://files.pythonhosted.org/packages/fc/30/d4986a882011f9df997a55e6becd864812ccfcd821d64aac8570ee39f719/attrs-25.1.0-py3-none-any.whl', size = 63152, hashes = { sha256 = 'c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a' } }, 11 | ] 12 | -------------------------------------------------------------------------------- /.github/workflows/zizmor.yml: -------------------------------------------------------------------------------- 1 | name: GitHub Actions Security Analysis with zizmor 🌈 2 | 3 | on: 4 | push: 5 | branches: ["main"] 6 | pull_request: 7 | branches: ["**"] 8 | 9 | permissions: {} 10 | 11 | jobs: 12 | zizmor: 13 | name: Run zizmor 🌈 14 | runs-on: ubuntu-latest 15 | permissions: 16 | security-events: write 17 | steps: 18 | - name: Checkout repository 19 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 20 | with: 21 | persist-credentials: false 22 | 23 | - name: Run zizmor 🌈 24 | uses: zizmorcore/zizmor-action@e639db99335bc9038abc0e066dfcd72e23d26fb4 # v0.3.0 25 | -------------------------------------------------------------------------------- /test/test_state.py: -------------------------------------------------------------------------------- 1 | import pretend # type: ignore 2 | 3 | from pip_audit import _state as state 4 | 5 | 6 | def test_auditstate(): 7 | class DummyActor(state._StateActor): 8 | update_state = pretend.call_recorder(lambda self, message, logs: None) 9 | initialize = pretend.call_recorder(lambda self: None) 10 | finalize = pretend.call_recorder(lambda self: None) 11 | 12 | actor = DummyActor() 13 | with state.AuditState(members=[actor]) as s: 14 | s.update_state("hello") 15 | 16 | assert DummyActor.update_state.calls == [pretend.call(actor, "hello", None)] 17 | assert DummyActor.initialize.calls == [pretend.call(actor)] 18 | assert DummyActor.finalize.calls == [pretend.call(actor)] 19 | -------------------------------------------------------------------------------- /pip_audit/_service/__init__.py: -------------------------------------------------------------------------------- 1 
| """ 2 | Vulnerability service interfaces and implementations for `pip-audit`. 3 | """ 4 | 5 | from .esms import EcosystemsService 6 | from .interface import ( 7 | ConnectionError, 8 | Dependency, 9 | ResolvedDependency, 10 | ServiceError, 11 | SkippedDependency, 12 | VulnerabilityResult, 13 | VulnerabilityService, 14 | ) 15 | from .osv import OsvService 16 | from .pypi import PyPIService 17 | 18 | __all__ = [ 19 | "EcosystemsService", 20 | "ConnectionError", 21 | "Dependency", 22 | "ResolvedDependency", 23 | "ServiceError", 24 | "SkippedDependency", 25 | "VulnerabilityResult", 26 | "VulnerabilityService", 27 | "OsvService", 28 | "PyPIService", 29 | ] 30 | -------------------------------------------------------------------------------- /pip_audit/_dependency_source/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Dependency source interfaces and implementations for `pip-audit`. 3 | """ 4 | 5 | from .interface import ( 6 | PYPI_URL, 7 | DependencyFixError, 8 | DependencySource, 9 | DependencySourceError, 10 | InvalidRequirementSpecifier, 11 | ) 12 | from .pip import PipSource, PipSourceError 13 | from .pylock import PyLockSource 14 | from .pyproject import PyProjectSource 15 | from .requirement import RequirementSource 16 | 17 | __all__ = [ 18 | "PYPI_URL", 19 | "DependencyFixError", 20 | "DependencySource", 21 | "DependencySourceError", 22 | "InvalidRequirementSpecifier", 23 | "PipSource", 24 | "PipSourceError", 25 | "PyLockSource", 26 | "PyProjectSource", 27 | "RequirementSource", 28 | ] 29 | -------------------------------------------------------------------------------- /pip_audit/_util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions for `pip-audit`. 3 | """ 4 | 5 | import sys 6 | from typing import NoReturn # pragma: no cover 7 | 8 | from packaging.version import Version 9 | 10 | 11 | def assert_never(x: NoReturn) -> NoReturn: # pragma: no cover 12 | """ 13 | A hint to the typechecker that a branch can never occur. 14 | """ 15 | assert False, f"unhandled type: {type(x).__name__}" 16 | 17 | 18 | def python_version() -> Version: 19 | """ 20 | Return a PEP-440-style version for the current Python interpreter. 21 | 22 | This is more rigorous than `platform.python_version`, which can include 23 | non-PEP-440-compatible data. 
24 | """ 25 | info = sys.version_info 26 | return Version(f"{info.major}.{info.minor}.{info.micro}") 27 | -------------------------------------------------------------------------------- /test/assets/pylock.skipped.toml: -------------------------------------------------------------------------------- 1 | lock-version = '1.0' 2 | environments = ["sys_platform == 'win32'", "sys_platform == 'linux'"] 3 | requires-python = '==3.12' 4 | created-by = 'mousebender' 5 | 6 | [[packages]] 7 | name = 'attrs' 8 | requires-python = '>=3.8' 9 | wheels = [ 10 | { name = 'attrs-25.1.0-py3-none-any.whl', upload-time = 2025-01-25T11:30:10.164985+00:00, url = 'https://files.pythonhosted.org/packages/fc/30/d4986a882011f9df997a55e6becd864812ccfcd821d64aac8570ee39f719/attrs-25.1.0-py3-none-any.whl', size = 63152, hashes = { sha256 = 'c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a' } }, 11 | ] 12 | [[packages.attestation-identities]] 13 | environment = 'release-pypi' 14 | kind = 'GitHub' 15 | repository = 'python-attrs/attrs' 16 | workflow = 'pypi-package.yml' 17 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | on: 2 | release: 3 | types: 4 | - published 5 | 6 | name: release 7 | 8 | jobs: 9 | pypi: 10 | name: upload release to PyPI 11 | runs-on: ubuntu-latest 12 | environment: release 13 | 14 | permissions: 15 | # Used to authenticate to PyPI via OIDC. 16 | id-token: write 17 | 18 | # Used to attach signing artifacts to the published release. 19 | contents: write 20 | 21 | steps: 22 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 23 | with: 24 | persist-credentials: false 25 | 26 | - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 27 | with: 28 | python-version-file: pyproject.toml 29 | 30 | - name: deps 31 | run: python -m pip install -U build 32 | 33 | - name: build 34 | run: python -m build 35 | 36 | - name: publish 37 | uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 38 | 39 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.2.0 4 | hooks: 5 | - id: check-executables-have-shebangs 6 | - id: check-merge-conflict 7 | - id: check-shebang-scripts-are-executable 8 | - id: check-toml 9 | - id: check-yaml 10 | - id: end-of-file-fixer 11 | - id: fix-byte-order-marker 12 | - id: mixed-line-ending 13 | - id: trailing-whitespace 14 | args: [--markdown-linebreak-ext=md] 15 | - id: no-commit-to-branch 16 | args: ["-b", "main"] 17 | - repo: https://github.com/psf/black 18 | rev: 22.3.0 19 | hooks: 20 | - id: black 21 | - repo: https://github.com/pycqa/flake8 22 | rev: 4.0.1 23 | hooks: 24 | - id: flake8 25 | - repo: https://github.com/pycqa/isort 26 | rev: 5.10.1 27 | hooks: 28 | - id: isort 29 | - repo: https://github.com/pypa/pip-audit 30 | rev: v2.10.0 31 | hooks: 32 | - id: pip-audit 33 | - repo: https://github.com/rhysd/actionlint 34 | rev: v1.6.12 35 | hooks: 36 | - id: actionlint-docker 37 | -------------------------------------------------------------------------------- /pip_audit/_format/interface.py: -------------------------------------------------------------------------------- 1 | """ 2 | Interfaces for formatting vulnerability 
results into a string representation. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | from abc import ABC, abstractmethod 8 | 9 | import pip_audit._fix as fix 10 | import pip_audit._service as service 11 | 12 | 13 | class VulnerabilityFormat(ABC): 14 | """ 15 | Represents an abstract string representation for vulnerability results. 16 | """ 17 | 18 | @property 19 | @abstractmethod 20 | def is_manifest(self) -> bool: # pragma: no cover 21 | """ 22 | Is this format a "manifest" format, i.e. one that prints a summary 23 | of all results? 24 | 25 | Manifest formats are always emitted, even 26 | if the audit results contain nothing out of the ordinary 27 | (no vulnerabilities, skips, or fixes). 28 | """ 29 | raise NotImplementedError 30 | 31 | @abstractmethod 32 | def format( 33 | self, 34 | result: dict[service.Dependency, list[service.VulnerabilityResult]], 35 | fixes: list[fix.FixVersion], 36 | ) -> str: # pragma: no cover 37 | """ 38 | Convert a mapping of dependencies to vulnerabilities into a string. 39 | """ 40 | raise NotImplementedError 41 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | permissions: {} 10 | 11 | jobs: 12 | lint: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 16 | with: 17 | persist-credentials: false 18 | 19 | - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 20 | with: 21 | python-version: "3.10" 22 | cache: "pip" 23 | cache-dependency-path: pyproject.toml 24 | 25 | - name: lint 26 | run: make lint PIP_AUDIT_EXTRA=lint 27 | 28 | check-readme: 29 | runs-on: ubuntu-latest 30 | steps: 31 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 32 | with: 33 | persist-credentials: false 34 | 35 | - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 36 | # NOTE(ww): Important: use pip-audit's minimum supported Python version 37 | # in this check, since Python can change the `--help` rendering in 38 | # `argparse` between major versions.
39 | with: 40 | python-version: "3.10" 41 | cache: "pip" 42 | cache-dependency-path: pyproject.toml 43 | 44 | - name: deps 45 | run: make dev 46 | 47 | - name: check-readme 48 | run: make check-readme 49 | -------------------------------------------------------------------------------- /test/dependency_source/test_pylock.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from packaging.version import Version 3 | 4 | from pip_audit._dependency_source import DependencySourceError, PyLockSource 5 | from pip_audit._service import ResolvedDependency, SkippedDependency 6 | 7 | 8 | class TestPyLockSource: 9 | def test_basic(self, asset): 10 | pylock = asset("pylock.basic.toml") 11 | source = PyLockSource([pylock]) 12 | 13 | deps = list(source.collect()) 14 | assert deps == [ 15 | ResolvedDependency(name="attrs", version=Version("25.1.0")), 16 | ResolvedDependency(name="cattrs", version=Version("24.1.2")), 17 | ResolvedDependency(name="numpy", version=Version("2.2.3")), 18 | ] 19 | 20 | def test_skipped(self, asset): 21 | pylock = asset("pylock.skipped.toml") 22 | source = PyLockSource([pylock]) 23 | 24 | deps = list(source.collect()) 25 | assert deps == [SkippedDependency(name="attrs", skip_reason="no version specified")] 26 | 27 | @pytest.mark.parametrize( 28 | ("name", "error"), 29 | [ 30 | ("pylock.invalid.toml", "invalid TOML in lockfile"), 31 | ("pylock.missing-version.toml", "missing lock-version in lockfile"), 32 | ("pylock.invalid-version.toml", "lockfile version 666 is not supported"), 33 | ("pylock.missing-packages.toml", "missing packages in lockfile"), 34 | ("pylock.package-missing-name.toml", "invalid package #0: no name"), 35 | ], 36 | ) 37 | def test_invalid_pylock(self, asset, name, error): 38 | pylock = asset(name) 39 | source = PyLockSource([pylock]) 40 | 41 | with pytest.raises(DependencySourceError, match=error): 42 | list(source.collect()) 43 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Documentation 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | permissions: {} 9 | 10 | jobs: 11 | build: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 15 | with: 16 | persist-credentials: false 17 | 18 | - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 19 | with: 20 | # NOTE: We use 3.10+ typing syntax via future, which pdoc only 21 | # understands if it's actually run with Python 3.10 or newer. 22 | python-version: ">= 3.10" 23 | cache: "pip" 24 | cache-dependency-path: pyproject.toml 25 | 26 | - name: setup 27 | run: | 28 | make dev PIP_AUDIT_EXTRA=doc 29 | - name: build docs 30 | run: | 31 | make doc 32 | - name: upload docs artifact 33 | uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # v4.0.0 34 | with: 35 | path: ./html/ 36 | 37 | # This is copied from the official `pdoc` example: 38 | # https://github.com/mitmproxy/pdoc/blob/main/.github/workflows/docs.yml 39 | # 40 | # Deploy the artifact to GitHub pages. 41 | # This is a separate job so that only actions/deploy-pages has the necessary permissions. 42 | deploy: 43 | needs: build 44 | runs-on: ubuntu-latest 45 | permissions: 46 | # NOTE: Needed to push to the repository. 
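# (These two permission scopes are what `actions/deploy-pages` requires in order to publish the Pages deployment.)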
47 | pages: write 48 | id-token: write 49 | environment: 50 | name: github-pages 51 | url: ${{ steps.deployment.outputs.page_url }} 52 | steps: 53 | - id: deployment 54 | uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.0.5 55 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.yml: -------------------------------------------------------------------------------- 1 | name: Feature request 2 | description: Suggest an idea or enhancement for pip-audit 3 | title: "Feature: " 4 | labels: 5 | - enhancement 6 | body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thank you for making a `pip-audit` feature request! 11 | 12 | Please read the following parts of this form carefully. 13 | Invalid or incomplete submissions will be given a lower priority or 14 | closed outright. 15 | 16 | - type: checkboxes 17 | attributes: 18 | label: Pre-submission checks 19 | description: | 20 | By submitting this issue, you affirm that you've satisfied the following conditions. 21 | options: 22 | - label: >- 23 | I am **not** reporting a new vulnerability or requesting a new vulnerability identifier. 24 | These **must** be reported or managed via upstream dependency sources or services, 25 | not this repository. 26 | required: true 27 | - label: >- 28 | I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/). 29 | required: true 30 | - label: >- 31 | I have looked through the open issues for a duplicate request. 32 | required: true 33 | 34 | - type: textarea 35 | attributes: 36 | label: What's the problem this feature will solve? 37 | description: | 38 | A clear and concise description of the problem. 39 | placeholder: | 40 | I'm always frustrated when ... 41 | validations: 42 | required: true 43 | 44 | - type: textarea 45 | attributes: 46 | label: Describe the solution you'd like 47 | description: A clear and concise description of what you want to happen. 48 | validations: 49 | required: true 50 | 51 | - type: textarea 52 | attributes: 53 | label: Additional context 54 | description: | 55 | Any additional context, screenshots, or other material about the feature request. 56 | -------------------------------------------------------------------------------- /pip_audit/_dependency_source/interface.py: -------------------------------------------------------------------------------- 1 | """ 2 | Interfaces for interacting with "dependency sources", i.e. sources 3 | of fully resolved Python dependency trees. 4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | from abc import ABC, abstractmethod 9 | from collections.abc import Iterator 10 | 11 | from pip_audit._fix import ResolvedFixVersion 12 | from pip_audit._service import Dependency 13 | 14 | PYPI_URL = "https://pypi.org/simple/" 15 | 16 | 17 | class DependencySource(ABC): 18 | """ 19 | Represents an abstract source of fully-resolved Python dependencies. 20 | 21 | Individual concrete dependency sources (e.g. `pip list`) are expected 22 | to subclass `DependencySource` and implement it in their terms. 23 | """ 24 | 25 | @abstractmethod 26 | def collect(self) -> Iterator[Dependency]: # pragma: no cover 27 | """ 28 | Yield the dependencies in this source. 29 | """ 30 | raise NotImplementedError 31 | 32 | @abstractmethod 33 | def fix(self, fix_version: ResolvedFixVersion) -> None: # pragma: no cover 34 | """ 35 | Upgrade a dependency to the given fix version.
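Implementations that cannot perform the upgrade are expected to signal this by raising `DependencyFixError` (defined below).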
36 | """ 37 | raise NotImplementedError 38 | 39 | 40 | class DependencySourceError(Exception): 41 | """ 42 | Raised when a `DependencySource` fails to provide its dependencies. 43 | 44 | Concrete implementations are expected to subclass this exception to 45 | provide more context. 46 | """ 47 | 48 | pass 49 | 50 | 51 | class DependencyFixError(Exception): 52 | """ 53 | Raised when a `DependencySource` fails to perform a "fix" operation, i.e. 54 | fails to upgrade a package to a different version. 55 | 56 | Concrete implementations are expected to subclass this exception to provide 57 | more context. 58 | """ 59 | 60 | pass 61 | 62 | 63 | class InvalidRequirementSpecifier(DependencySourceError): 64 | """ 65 | A `DependencySourceError` specialized for the case of a non-PEP 440 requirements 66 | specifier. 67 | """ 68 | 69 | pass 70 | -------------------------------------------------------------------------------- /.github/workflows/scorecards.yml: -------------------------------------------------------------------------------- 1 | name: Scorecards supply-chain security 2 | on: 3 | # Only the default branch is supported. 4 | branch_protection_rule: 5 | schedule: 6 | - cron: '19 4 * * 0' 7 | push: 8 | branches: [ "main" ] 9 | 10 | # No permissions needed at top-level. 11 | permissions: {} 12 | 13 | jobs: 14 | analysis: 15 | name: Scorecards analysis 16 | runs-on: ubuntu-latest 17 | permissions: 18 | # Needed to upload the results to code-scanning dashboard. 19 | security-events: write 20 | # Used to receive a badge. (Upcoming feature) 21 | id-token: write 22 | 23 | steps: 24 | - name: "Checkout code" 25 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 26 | with: 27 | persist-credentials: false 28 | 29 | - name: "Run analysis" 30 | uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 31 | with: 32 | results_file: results.sarif 33 | results_format: sarif 34 | # Publish the results for public repositories to enable scorecard badges. For more details, see 35 | # https://github.com/ossf/scorecard-action#publishing-results. 36 | # For private repositories, `publish_results` will automatically be set to `false`, regardless 37 | # of the value entered here. 38 | publish_results: true 39 | 40 | # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF 41 | # format to the repository Actions tab. 42 | - name: "Upload artifact" 43 | uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 44 | with: 45 | name: SARIF file 46 | path: results.sarif 47 | retention-days: 5 48 | 49 | # Upload the results to GitHub's code scanning dashboard. 
50 | - name: "Upload to code-scanning" 51 | uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 52 | with: 53 | sarif_file: results.sarif 54 | -------------------------------------------------------------------------------- /test/assets/pylock.missing-version.toml: -------------------------------------------------------------------------------- 1 | environments = ["sys_platform == 'win32'", "sys_platform == 'linux'"] 2 | requires-python = '==3.12' 3 | created-by = 'mousebender' 4 | 5 | [[packages]] 6 | name = 'attrs' 7 | version = '25.1.0' 8 | requires-python = '>=3.8' 9 | wheels = [ 10 | { name = 'attrs-25.1.0-py3-none-any.whl', upload-time = 2025-01-25T11:30:10.164985+00:00, url = 'https://files.pythonhosted.org/packages/fc/30/d4986a882011f9df997a55e6becd864812ccfcd821d64aac8570ee39f719/attrs-25.1.0-py3-none-any.whl', size = 63152, hashes = { sha256 = 'c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a' } }, 11 | ] 12 | [[packages.attestation-identities]] 13 | environment = 'release-pypi' 14 | kind = 'GitHub' 15 | repository = 'python-attrs/attrs' 16 | workflow = 'pypi-package.yml' 17 | 18 | [[packages]] 19 | name = 'cattrs' 20 | version = '24.1.2' 21 | requires-python = '>=3.8' 22 | dependencies = [{ name = 'attrs' }] 23 | wheels = [ 24 | { name = 'cattrs-24.1.2-py3-none-any.whl', upload-time = 2024-09-22T14:58:34.812643+00:00, url = 'https://files.pythonhosted.org/packages/c8/d5/867e75361fc45f6de75fe277dd085627a9db5ebb511a87f27dc1396b5351/cattrs-24.1.2-py3-none-any.whl', size = 66446, hashes = { sha256 = '67c7495b760168d931a10233f979b28dc04daf853b30752246f4f8471c6d68d0' } }, 25 | ] 26 | 27 | [[packages]] 28 | name = 'numpy' 29 | version = '2.2.3' 30 | requires-python = '>=3.10' 31 | wheels = [ 32 | { name = 'numpy-2.2.3-cp312-cp312-win_amd64.whl', upload-time = 2025-02-13T16:51:21.821880+00:00, url = 'https://files.pythonhosted.org/packages/42/6e/55580a538116d16ae7c9aa17d4edd56e83f42126cb1dfe7a684da7925d2c/numpy-2.2.3-cp312-cp312-win_amd64.whl', size = 12626357, hashes = { sha256 = '83807d445817326b4bcdaaaf8e8e9f1753da04341eceec705c001ff342002e5d' } }, 33 | { name = 'numpy-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl', upload-time = 2025-02-13T16:50:00.079662+00:00, url = 'https://files.pythonhosted.org/packages/39/04/78d2e7402fb479d893953fb78fa7045f7deb635ec095b6b4f0260223091a/numpy-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl', size = 16116679, hashes = { sha256 = '3b787adbf04b0db1967798dba8da1af07e387908ed1553a0d6e74c084d1ceafe' } }, 34 | ] 35 | 36 | [tool.mousebender] 37 | command = [ 38 | '.', 39 | 'lock', 40 | '--platform', 41 | 'cpython3.12-windows-x64', 42 | '--platform', 43 | 'cpython3.12-manylinux2014-x64', 44 | 'cattrs', 45 | 'numpy', 46 | ] 47 | run-on = 2025-03-06T12:28:57.760769 48 | -------------------------------------------------------------------------------- /test/assets/pylock.basic.toml: -------------------------------------------------------------------------------- 1 | lock-version = '1.0' 2 | environments = ["sys_platform == 'win32'", "sys_platform == 'linux'"] 3 | requires-python = '==3.12' 4 | created-by = 'mousebender' 5 | 6 | [[packages]] 7 | name = 'attrs' 8 | version = '25.1.0' 9 | requires-python = '>=3.8' 10 | wheels = [ 11 | { name = 'attrs-25.1.0-py3-none-any.whl', upload-time = 2025-01-25T11:30:10.164985+00:00, url = 'https://files.pythonhosted.org/packages/fc/30/d4986a882011f9df997a55e6becd864812ccfcd821d64aac8570ee39f719/attrs-25.1.0-py3-none-any.whl', size 
= 63152, hashes = { sha256 = 'c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a' } }, 12 | ] 13 | [[packages.attestation-identities]] 14 | environment = 'release-pypi' 15 | kind = 'GitHub' 16 | repository = 'python-attrs/attrs' 17 | workflow = 'pypi-package.yml' 18 | 19 | [[packages]] 20 | name = 'cattrs' 21 | version = '24.1.2' 22 | requires-python = '>=3.8' 23 | dependencies = [{ name = 'attrs' }] 24 | wheels = [ 25 | { name = 'cattrs-24.1.2-py3-none-any.whl', upload-time = 2024-09-22T14:58:34.812643+00:00, url = 'https://files.pythonhosted.org/packages/c8/d5/867e75361fc45f6de75fe277dd085627a9db5ebb511a87f27dc1396b5351/cattrs-24.1.2-py3-none-any.whl', size = 66446, hashes = { sha256 = '67c7495b760168d931a10233f979b28dc04daf853b30752246f4f8471c6d68d0' } }, 26 | ] 27 | 28 | [[packages]] 29 | name = 'numpy' 30 | version = '2.2.3' 31 | requires-python = '>=3.10' 32 | wheels = [ 33 | { name = 'numpy-2.2.3-cp312-cp312-win_amd64.whl', upload-time = 2025-02-13T16:51:21.821880+00:00, url = 'https://files.pythonhosted.org/packages/42/6e/55580a538116d16ae7c9aa17d4edd56e83f42126cb1dfe7a684da7925d2c/numpy-2.2.3-cp312-cp312-win_amd64.whl', size = 12626357, hashes = { sha256 = '83807d445817326b4bcdaaaf8e8e9f1753da04341eceec705c001ff342002e5d' } }, 34 | { name = 'numpy-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl', upload-time = 2025-02-13T16:50:00.079662+00:00, url = 'https://files.pythonhosted.org/packages/39/04/78d2e7402fb479d893953fb78fa7045f7deb635ec095b6b4f0260223091a/numpy-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl', size = 16116679, hashes = { sha256 = '3b787adbf04b0db1967798dba8da1af07e387908ed1553a0d6e74c084d1ceafe' } }, 35 | ] 36 | 37 | [tool.mousebender] 38 | command = [ 39 | '.', 40 | 'lock', 41 | '--platform', 42 | 'cpython3.12-windows-x64', 43 | '--platform', 44 | 'cpython3.12-manylinux2014-x64', 45 | 'cattrs', 46 | 'numpy', 47 | ] 48 | run-on = 2025-03-06T12:28:57.760769 49 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /bin/bash 2 | 3 | PY_MODULE := pip_audit 4 | 5 | ALL_PY_SRCS := $(shell find $(PY_MODULE) -name '*.py') \ 6 | $(shell find test -name '*.py') 7 | 8 | # Optionally overriden by the user, if they're using a virtual environment manager. 9 | VENV ?= env 10 | 11 | # On Windows, venv scripts/shims are under `Scripts` instead of `bin`. 12 | VENV_BIN := $(VENV)/bin 13 | ifeq ($(OS),Windows_NT) 14 | VENV_BIN := $(VENV)/Scripts 15 | endif 16 | 17 | # Optionally overridden by the user in the `test` target. 18 | TESTS := 19 | 20 | # Optionally overridden by the user/CI, to limit the installation to a specific 21 | # subset of development dependencies. 22 | PIP_AUDIT_EXTRA := dev 23 | 24 | # If the user selects a specific test pattern to run, set `pytest` to fail fast 25 | # and only run tests that match the pattern. 26 | ifneq ($(TESTS),) 27 | TEST_ARGS := -x -k $(TESTS) 28 | else 29 | TEST_ARGS := 30 | endif 31 | 32 | .PHONY: all 33 | all: 34 | @echo "Run my targets individually!" 35 | 36 | .PHONY: dev 37 | dev: $(VENV)/pyvenv.cfg 38 | 39 | .PHONY: run 40 | run: $(VENV)/pyvenv.cfg 41 | @. 
$(VENV_BIN)/activate && pip-audit $(ARGS) 42 | 43 | $(VENV)/pyvenv.cfg: pyproject.toml 44 | # Create our Python 3 virtual environment 45 | python3 -m venv env 46 | $(VENV_BIN)/python -m pip install --upgrade pip 47 | $(VENV_BIN)/python -m pip install --upgrade -e .[$(PIP_AUDIT_EXTRA)] 48 | 49 | .PHONY: lint 50 | lint: $(VENV)/pyvenv.cfg 51 | . $(VENV_BIN)/activate && \ 52 | ruff format --check $(ALL_PY_SRCS) && \ 53 | ruff check $(ALL_PY_SRCS) && \ 54 | mypy $(PY_MODULE) && \ 55 | interrogate -c pyproject.toml . 56 | 57 | .PHONY: reformat 58 | reformat: 59 | . $(VENV_BIN)/activate && \ 60 | ruff check --fix $(ALL_PY_SRCS) && \ 61 | ruff format $(ALL_PY_SRCS) 62 | 63 | .PHONY: test tests 64 | test tests: $(VENV)/pyvenv.cfg 65 | . $(VENV_BIN)/activate && \ 66 | coverage run -m pytest $(T) $(TEST_ARGS) 67 | 68 | .PHONY: doc 69 | doc: $(VENV)/pyvenv.cfg 70 | . $(VENV_BIN)/activate && \ 71 | PDOC_ALLOW_EXEC=1 pdoc -o html $(PY_MODULE) 72 | 73 | .PHONY: package 74 | package: $(VENV)/pyvenv.cfg 75 | . $(VENV_BIN)/activate && \ 76 | python3 -m build 77 | 78 | .PHONY: check-readme 79 | check-readme: dev 80 | # pip-audit --help 81 | @diff \ 82 | <( \ 83 | awk '/@begin-pip-audit-help@/{f=1;next} /@end-pip-audit-help@/{f=0} f' \ 84 | < README.md | sed '1d;$$d' \ 85 | ) \ 86 | <( \ 87 | $(MAKE) -s run ARGS="--help" \ 88 | ) 89 | 90 | 91 | .PHONY: edit 92 | edit: 93 | $(EDITOR) $(ALL_PY_SRCS) 94 | -------------------------------------------------------------------------------- /pip_audit/_subprocess.py: -------------------------------------------------------------------------------- 1 | """ 2 | A thin `subprocess` wrapper for making long-running subprocesses more 3 | responsive from the `pip-audit` CLI. 4 | """ 5 | 6 | import os.path 7 | import subprocess 8 | from collections.abc import Sequence 9 | from subprocess import Popen 10 | 11 | from ._state import AuditState 12 | 13 | 14 | class CalledProcessError(Exception): 15 | """ 16 | Raised if the underlying subprocess created by `run` exits with a nonzero code. 17 | """ 18 | 19 | def __init__(self, msg: str, *, stderr: str) -> None: 20 | """ 21 | Create a new `CalledProcessError`. 22 | """ 23 | super().__init__(msg) 24 | self.stderr = stderr 25 | 26 | 27 | def run(args: Sequence[str], *, log_stdout: bool = False, state: AuditState = AuditState()) -> str: 28 | """ 29 | Execute the given arguments. 30 | 31 | Uses `state` to provide feedback on the subprocess's status. 32 | 33 | Raises a `CalledProcessError` if the subprocess fails. Otherwise, returns 34 | the process's `stdout` stream as a string. 35 | """ 36 | 37 | # NOTE(ww): We frequently run commands inside of ephemeral virtual environments, 38 | # which have long absolute paths on some platforms. These make for confusing 39 | # state updates, so we trim the first argument down to its basename. 40 | pretty_args = " ".join([os.path.basename(args[0]), *args[1:]]) 41 | 42 | terminated = False 43 | stdout = b"" 44 | stderr = b"" 45 | 46 | # Run the process with unbuffered I/O, to make the poll-and-read loop below 47 | # more responsive. 48 | with Popen(args, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as process: 49 | # NOTE: We use `poll()` to control this loop instead of the `read()` call 50 | # to prevent deadlocks. Similarly, `read(size)` will return an empty bytes 51 | # once `stdout` hits EOF, so we don't have to worry about that blocking. 
52 | while not terminated: 53 | terminated = process.poll() is not None 54 | stdout += process.stdout.read() # type: ignore 55 | stderr += process.stderr.read() # type: ignore 56 | state.update_state( 57 | f"Running {pretty_args}", 58 | stdout.decode(errors="replace") if log_stdout else None, 59 | ) 60 | 61 | if process.returncode != 0: 62 | raise CalledProcessError( 63 | f"{pretty_args} exited with {process.returncode}", 64 | stderr=stderr.decode(errors="replace"), 65 | ) 66 | 67 | return stdout.decode("utf-8", errors="replace") 68 | -------------------------------------------------------------------------------- /test/test_fix.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from packaging.version import Version 4 | 5 | from pip_audit._fix import ResolvedFixVersion, SkippedFixVersion, resolve_fix_versions 6 | from pip_audit._service import ( 7 | Dependency, 8 | ResolvedDependency, 9 | SkippedDependency, 10 | VulnerabilityResult, 11 | ) 12 | 13 | 14 | def test_fix(vuln_service): 15 | dep = ResolvedDependency(name="foo", version=Version("0.5.0")) 16 | result: dict[Dependency, list[VulnerabilityResult]] = { 17 | dep: [ 18 | VulnerabilityResult( 19 | id="fake-id", 20 | description="this is not a real result", 21 | fix_versions=[Version("1.0.0")], 22 | aliases=set(), 23 | ) 24 | ] 25 | } 26 | fix_versions = list(resolve_fix_versions(vuln_service(), result)) 27 | assert len(fix_versions) == 1 28 | assert fix_versions[0] == ResolvedFixVersion(dep=dep, version=Version("1.1.0")) 29 | assert not fix_versions[0].is_skipped() 30 | 31 | 32 | def test_fix_skipped_deps(vuln_service): 33 | dep = SkippedDependency(name="foo", skip_reason="skip-reason") 34 | result: dict[Dependency, list[VulnerabilityResult]] = { 35 | dep: [ 36 | VulnerabilityResult( 37 | id="fake-id", 38 | description="this is not a real result", 39 | fix_versions=[Version("1.0.0")], 40 | aliases=set(), 41 | ) 42 | ] 43 | } 44 | fix_versions = list(resolve_fix_versions(vuln_service(), result)) 45 | assert not fix_versions 46 | 47 | 48 | def test_fix_no_vulns(vuln_service): 49 | dep = ResolvedDependency(name="foo", version=Version("0.5.0")) 50 | result: dict[Dependency, list[VulnerabilityResult]] = {dep: list()} 51 | fix_versions = list(resolve_fix_versions(vuln_service(), result)) 52 | assert not fix_versions 53 | 54 | 55 | def test_fix_resolution_impossible(vuln_service): 56 | dep = ResolvedDependency(name="foo", version=Version("0.5.0")) 57 | result: dict[Dependency, list[VulnerabilityResult]] = { 58 | dep: [ 59 | VulnerabilityResult( 60 | id="fake-id", 61 | description="this is not a real result", 62 | fix_versions=list(), 63 | aliases=set(), 64 | ) 65 | ] 66 | } 67 | fix_versions = list(resolve_fix_versions(vuln_service(), result)) 68 | assert len(fix_versions) == 1 69 | assert fix_versions[0] == SkippedFixVersion( 70 | dep=dep, 71 | skip_reason="failed to fix dependency foo (0.5.0), unable to find fix version for " 72 | "vulnerability fake-id", 73 | ) 74 | assert fix_versions[0].is_skipped() 75 | -------------------------------------------------------------------------------- /test/test_virtual_env.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | from tempfile import TemporaryDirectory 3 | 4 | import pretend 5 | import pytest 6 | from packaging.version import Version 7 | 8 | import pip_audit._virtual_env as _virtual_env 9 | from pip_audit import _subprocess 10 | from pip_audit._virtual_env 
import VirtualEnv, VirtualEnvError 11 | 12 | 13 | @pytest.mark.online 14 | def test_virtual_env(): 15 | with TemporaryDirectory() as ve_dir: 16 | ve = VirtualEnv(["flask==2.0.1"]) 17 | ve.create(ve_dir) 18 | packages = list(ve.installed_packages) 19 | assert ("Flask", Version("2.0.1")) in packages 20 | 21 | 22 | def test_virtual_env_incorrect_usage(): 23 | ve = VirtualEnv(["flask==2.0.1"]) 24 | 25 | with pytest.raises(VirtualEnvError): 26 | list(ve.installed_packages) 27 | 28 | 29 | def test_virtual_env_failed_package_installation(monkeypatch): 30 | original_run = _subprocess.run 31 | 32 | def run_mock(args, **kwargs): 33 | if "flask==2.0.1" in args: 34 | raise _subprocess.CalledProcessError("barf", stderr="") 35 | # If it's not the package installation command, then call the original run 36 | return original_run(args, **kwargs) 37 | 38 | monkeypatch.setattr(_virtual_env, "run", run_mock) 39 | 40 | with TemporaryDirectory() as ve_dir: 41 | ve = VirtualEnv(["flask==2.0.1"]) 42 | with pytest.raises(VirtualEnvError): 43 | ve.create(ve_dir) 44 | 45 | 46 | def test_virtual_env_failed_pip_upgrade(monkeypatch): 47 | original_run = _subprocess.run 48 | 49 | def run_mock(args, **kwargs): 50 | # We have to be a bit more specific than usual here because the `EnvBuilder` invokes 51 | # `ensurepip` with similar looking arguments and we DON'T want to mock that call. 52 | if set(["install", "--upgrade", "pip"]).issubset(set(args)): 53 | raise _subprocess.CalledProcessError("barf", stderr="") 54 | # If it's not a call to upgrade pip, then call the original run 55 | return original_run(args, **kwargs) 56 | 57 | monkeypatch.setattr(_virtual_env, "run", run_mock) 58 | 59 | with TemporaryDirectory() as ve_dir: 60 | ve = VirtualEnv(["flask==2.0.1"]) 61 | with pytest.raises(VirtualEnvError): 62 | ve.create(ve_dir) 63 | 64 | 65 | def test_virtual_env_failed_permission_error(monkeypatch): 66 | """ 67 | This is a mocked test for GH#732, which is really caused by a user's 68 | default `$TMPDIR` having the `noexec` flag set. We have no easy way 69 | to unit test this, so we hopefully replicate its effect with a monkeypatch. 70 | """ 71 | 72 | monkeypatch.setattr(subprocess, "run", pretend.raiser(PermissionError)) 73 | with TemporaryDirectory() as ve_dir: 74 | ve = VirtualEnv(["flask==2.0.1"]) 75 | with pytest.raises(VirtualEnvError, match=r"^Couldn't execute in a temporary directory .+"): 76 | ve.create(ve_dir) 77 | -------------------------------------------------------------------------------- /test/test_cache.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pretend # type: ignore 4 | from packaging.version import Version 5 | from platformdirs import user_cache_path 6 | 7 | import pip_audit._cache as cache 8 | from pip_audit._cache import _get_cache_dir, _get_pip_cache 9 | 10 | 11 | def test_get_cache_dir(monkeypatch): 12 | # When we supply a cache directory, always use that 13 | cache_dir = Path("/tmp/foo/cache_dir") 14 | assert _get_cache_dir(cache_dir) == cache_dir 15 | 16 | cache_dir = Path("/fake/pip/cache/dir") 17 | get_pip_cache = pretend.call_recorder(lambda: cache_dir) 18 | monkeypatch.setattr(cache, "_get_pip_cache", get_pip_cache) 19 | 20 | # When `pip cache dir` works, we use it. In this case, it's mocked. 
21 | assert _get_cache_dir(None, use_pip=True) == cache_dir 22 | 23 | 24 | def test_get_pip_cache(): 25 | # Actually running `pip cache dir` gets us some path that ends with "http" 26 | cache_dir = _get_pip_cache() 27 | assert cache_dir.stem == "http" 28 | 29 | 30 | def test_get_cache_dir_do_not_use_pip(): 31 | expected = user_cache_path("pip-audit", appauthor=False) 32 | 33 | # Even with None, we never use the pip cache if we're told not to. 34 | assert _get_cache_dir(None, use_pip=False) == expected 35 | 36 | 37 | def test_get_cache_dir_pip_disabled_in_environment(monkeypatch): 38 | monkeypatch.setenv("PIP_NO_CACHE_DIR", "1") 39 | 40 | expected = user_cache_path("pip-audit", appauthor=False) 41 | 42 | # Even with use_pip=True, we avoid pip's cache if the environment tells us to. 43 | assert _get_cache_dir(None, use_pip=True) == expected 44 | 45 | 46 | def test_get_cache_dir_old_pip(monkeypatch): 47 | # Check the case where we have an old `pip` 48 | monkeypatch.setattr(cache, "_PIP_VERSION", Version("1.0.0")) 49 | 50 | # In this case, we can't query `pip` to figure out where its HTTP cache is 51 | # Instead, we use `~/.pip-audit-cache` 52 | cache_dir = _get_cache_dir(None) 53 | expected = user_cache_path("pip-audit", appauthor=False) 54 | assert cache_dir == expected 55 | 56 | 57 | def test_cache_warns_about_old_pip(monkeypatch, cache_dir): 58 | monkeypatch.setattr(cache, "_PIP_VERSION", Version("1.0.0")) 59 | logger = pretend.stub(warning=pretend.call_recorder(lambda s: None)) 60 | monkeypatch.setattr(cache, "logger", logger) 61 | 62 | # If we supply a cache directory, we're not relying on finding the `pip` cache so no need to log 63 | # a warning 64 | _get_cache_dir(cache_dir) 65 | assert len(logger.warning.calls) == 0 66 | 67 | # However, if we're not specifying a cache directory, we'll try to call `pip cache dir`. 
If we 68 | # have an old `pip`, then we should expect a warning to be logged 69 | _get_cache_dir(None) 70 | assert len(logger.warning.calls) == 1 71 | 72 | 73 | def test_delete_legacy_cache_dir(monkeypatch, tmp_path): 74 | legacy = tmp_path / "pip-audit-cache" 75 | legacy.mkdir() 76 | assert legacy.exists() 77 | monkeypatch.setattr(cache, "_PIP_AUDIT_LEGACY_INTERNAL_CACHE", legacy) 78 | 79 | _get_cache_dir(None, use_pip=False) 80 | assert not legacy.exists() 81 | -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | from pathlib import Path 3 | 4 | import pytest 5 | from packaging.version import Version 6 | 7 | from pip_audit._dependency_source.interface import DependencySource 8 | from pip_audit._service.interface import ( 9 | ResolvedDependency, 10 | VulnerabilityResult, 11 | VulnerabilityService, 12 | ) 13 | 14 | _ASSETS = Path(__file__).parent / "assets" 15 | assert _ASSETS.is_dir(), f"assets directory not found: {_ASSETS}" 16 | 17 | 18 | def pytest_addoption(parser): 19 | parser.addoption( 20 | "--skip-online", 21 | action="store_true", 22 | help="skip tests that require network connectivity", 23 | ) 24 | 25 | 26 | def pytest_runtest_setup(item): 27 | if "online" in item.keywords and item.config.getoption("--skip-online"): 28 | pytest.skip("skipping test that requires network connectivity due to `--skip-online` flag") 29 | 30 | 31 | def pytest_configure(config): 32 | config.addinivalue_line("markers", "online: mark test as requiring network connectivity") 33 | 34 | 35 | @pytest.fixture(autouse=True) 36 | def spec(): 37 | def _spec(version): 38 | return ResolvedDependency(name="foo", version=Version(version)) 39 | 40 | return _spec 41 | 42 | 43 | @pytest.fixture(autouse=True) 44 | def vuln_service(): 45 | # A dummy service that only returns results for the "foo" package 46 | # between [1.0.0, 1.1.0). 
47 | class Service(VulnerabilityService): 48 | def query(self, spec): 49 | introduced = Version("1.0.0") 50 | fixed = Version("1.1.0") 51 | 52 | if spec.name == "foo" and (introduced <= spec.version < fixed): 53 | return spec, [ 54 | VulnerabilityResult( 55 | id="fake-id", 56 | description="this is not a real result", 57 | aliases=set(), 58 | fix_versions=[fixed], 59 | ) 60 | ] 61 | 62 | return spec, [] 63 | 64 | return Service 65 | 66 | 67 | @pytest.fixture(autouse=True) 68 | def dep_source(spec): 69 | class Source(DependencySource): 70 | def collect(self): 71 | yield spec("1.0.1") 72 | 73 | def fix(self, _) -> None: 74 | raise NotImplementedError 75 | 76 | return Source 77 | 78 | 79 | @pytest.fixture(scope="session") 80 | def cache_dir(): 81 | cache = tempfile.TemporaryDirectory() 82 | yield Path(cache.name) 83 | cache.cleanup() 84 | 85 | 86 | @pytest.fixture 87 | def req_file(): 88 | def _req_file(): 89 | req_file = tempfile.NamedTemporaryFile() 90 | req_file.close() 91 | 92 | req_path = Path(req_file.name) 93 | assert not req_path.exists() 94 | return req_path 95 | 96 | return _req_file 97 | 98 | 99 | @pytest.fixture 100 | def asset(): 101 | def _asset(name: str): 102 | asset_path = _ASSETS / name 103 | assert asset_path.exists(), f"Asset not found: {asset_path}" 104 | return asset_path 105 | 106 | return _asset 107 | -------------------------------------------------------------------------------- /test/format/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pytest 4 | from packaging.version import Version 5 | 6 | import pip_audit._fix as fix 7 | import pip_audit._service as service 8 | 9 | _RESOLVED_DEP_FOO = service.ResolvedDependency(name="foo", version=Version("1.0")) 10 | _RESOLVED_DEP_BAR = service.ResolvedDependency(name="bar", version=Version("0.1")) 11 | _SKIPPED_DEP = service.SkippedDependency(name="bar", skip_reason="skip-reason") 12 | 13 | _TEST_VULN_DATA: dict[service.Dependency, list[service.VulnerabilityResult]] = { 14 | _RESOLVED_DEP_FOO: [ 15 | service.VulnerabilityResult( 16 | id="VULN-0", 17 | description="The first vulnerability", 18 | fix_versions=[ 19 | Version("1.1"), 20 | Version("1.4"), 21 | ], 22 | aliases={"CVE-0000-00000"}, 23 | ), 24 | service.VulnerabilityResult( 25 | id="VULN-1", 26 | description="The second vulnerability", 27 | fix_versions=[Version("1.0")], 28 | aliases={"CVE-0000-00001"}, 29 | ), 30 | ], 31 | _RESOLVED_DEP_BAR: [ 32 | service.VulnerabilityResult( 33 | id="VULN-2", 34 | description="The third vulnerability", 35 | fix_versions=[], 36 | aliases={"CVE-0000-00002"}, 37 | ) 38 | ], 39 | } 40 | 41 | _TEST_VULN_DATA_SKIPPED_DEP: dict[service.Dependency, list[service.VulnerabilityResult]] = { 42 | _RESOLVED_DEP_FOO: [ 43 | service.VulnerabilityResult( 44 | id="VULN-0", 45 | description="The first vulnerability", 46 | fix_versions=[ 47 | Version("1.1"), 48 | Version("1.4"), 49 | ], 50 | aliases={"CVE-0000-00000"}, 51 | ), 52 | ], 53 | _SKIPPED_DEP: [], 54 | } 55 | 56 | _TEST_NO_VULN_DATA: dict[service.Dependency, list[service.VulnerabilityResult]] = { 57 | _RESOLVED_DEP_FOO: [], 58 | _RESOLVED_DEP_BAR: [], 59 | } 60 | 61 | _TEST_NO_VULN_DATA_SKIPPED_DEP: dict[service.Dependency, list[service.VulnerabilityResult]] = { 62 | _RESOLVED_DEP_FOO: [], 63 | _RESOLVED_DEP_BAR: [], 64 | _SKIPPED_DEP: [], 65 | } 66 | 67 | _TEST_FIX_DATA: list[fix.FixVersion] = [ 68 | fix.ResolvedFixVersion(dep=_RESOLVED_DEP_FOO, version=Version("1.8")), 69 | 
fix.ResolvedFixVersion(dep=_RESOLVED_DEP_BAR, version=Version("0.3")), 70 | ] 71 | 72 | _TEST_SKIPPED_FIX_DATA: list[fix.FixVersion] = [ 73 | fix.ResolvedFixVersion(dep=_RESOLVED_DEP_FOO, version=Version("1.8")), 74 | fix.SkippedFixVersion(dep=_RESOLVED_DEP_BAR, skip_reason="skip-reason"), 75 | ] 76 | 77 | 78 | @pytest.fixture(autouse=True) 79 | def vuln_data(): 80 | return _TEST_VULN_DATA 81 | 82 | 83 | @pytest.fixture(autouse=True) 84 | def vuln_data_skipped_dep(): 85 | return _TEST_VULN_DATA_SKIPPED_DEP 86 | 87 | 88 | @pytest.fixture(autouse=True) 89 | def no_vuln_data(): 90 | return _TEST_NO_VULN_DATA 91 | 92 | 93 | @pytest.fixture(autouse=True) 94 | def no_vuln_data_skipped_dep(): 95 | return _TEST_NO_VULN_DATA_SKIPPED_DEP 96 | 97 | 98 | @pytest.fixture(autouse=True) 99 | def fix_data(): 100 | return _TEST_FIX_DATA 101 | 102 | 103 | @pytest.fixture(autouse=True) 104 | def skipped_fix_data(): 105 | return _TEST_SKIPPED_FIX_DATA 106 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-report.yml: -------------------------------------------------------------------------------- 1 | name: Bug report 2 | description: File a bug report 3 | title: "Bug: " 4 | labels: 5 | - bug-candidate 6 | body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thank you for taking the time to report a potential bug in `pip-audit`! 11 | 12 | Please read the following parts of this form carefully. 13 | Invalid or incomplete submissions will be given a lower priority or 14 | closed outright. 15 | 16 | - type: checkboxes 17 | attributes: 18 | label: Pre-submission checks 19 | description: | 20 | By submitting this issue, you affirm that you've satisfied the following conditions. 21 | options: 22 | - label: >- 23 | I am **not** filing an auditing error (false positive or negative). 24 | These **must** be reported to 25 | [pypa/advisory-database](https://github.com/pypa/advisory-database/issues/new) instead. 26 | required: true 27 | - label: >- 28 | I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/). 29 | required: true 30 | - label: >- 31 | I have looked through the open issues for a duplicate report. 32 | required: true 33 | 34 | - type: textarea 35 | attributes: 36 | label: Expected behavior 37 | description: A clear and concise description of what you expected to happen. 38 | placeholder: | 39 | I expected `pip-audit ...` to do X, Y, and Z. 40 | validations: 41 | required: true 42 | 43 | - type: textarea 44 | attributes: 45 | label: Actual behavior 46 | description: A clear and concise description of what actually happened. 47 | placeholder: | 48 | Instead of doing X, Y, and Z, `pip-audit ...` produced the following error: ... 49 | validations: 50 | required: true 51 | 52 | - type: textarea 53 | attributes: 54 | label: Reproduction steps 55 | description: A step-by-step list of actions that we can take to reproduce the actual behavior. 56 | placeholder: | 57 | 1. Do this 58 | 2. Do that 59 | 3. Do another thing 60 | validations: 61 | required: true 62 | 63 | - type: textarea 64 | attributes: 65 | label: Logs 66 | description: | 67 | If applicable, please paste any logs or console errors here. 68 | 69 | If you can re-run the command that produced the error, run it with 70 | `--verbose` and paste the full verbose logs here. 71 | render: plain text 72 | 73 | - type: textarea 74 | attributes: 75 | label: Additional context 76 | description: Add any other additional context about the problem here.
77 | 78 | - type: input 79 | attributes: 80 | label: OS name, version, and architecture 81 | placeholder: Mac OS X 10.4.11 on PowerPC 82 | 83 | - type: input 84 | attributes: 85 | label: pip-audit version 86 | description: | 87 | `pip-audit -V` 88 | validations: 89 | required: true 90 | 91 | - type: input 92 | attributes: 93 | label: pip version 94 | description: | 95 | `pip -V` or `pip3 -V` 96 | validations: 97 | required: true 98 | 99 | - type: input 100 | attributes: 101 | label: Python version 102 | description: | 103 | `python -V` or `python3 -V` 104 | validations: 105 | required: true 106 | -------------------------------------------------------------------------------- /pip_audit/_audit.py: -------------------------------------------------------------------------------- 1 | """ 2 | Core auditing APIs. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import logging 8 | from collections.abc import Iterator 9 | from dataclasses import dataclass 10 | 11 | from pip_audit._dependency_source import DependencySource 12 | from pip_audit._service import Dependency, VulnerabilityResult, VulnerabilityService 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | 17 | @dataclass(frozen=True) 18 | class AuditOptions: 19 | """ 20 | Settings that control the behavior of an `Auditor` instance. 21 | """ 22 | 23 | dry_run: bool = False 24 | 25 | 26 | class Auditor: 27 | """ 28 | The core class of the `pip-audit` API. 29 | 30 | For a given dependency source and vulnerability service, supply a mapping of dependencies to 31 | known vulnerabilities. 32 | """ 33 | 34 | def __init__( 35 | self, 36 | service: VulnerabilityService, 37 | options: AuditOptions = AuditOptions(), 38 | ): 39 | """ 40 | Create a new auditor. Auditors start with no dependencies to audit; 41 | each `audit` step is fed a `DependencySource`. 42 | 43 | The behavior of the auditor can be optionally tweaked with the `options` 44 | parameter. 45 | """ 46 | self._service = service 47 | self._options = options 48 | 49 | def audit( 50 | self, source: DependencySource 51 | ) -> Iterator[tuple[Dependency, list[VulnerabilityResult]]]: 52 | """ 53 | Perform the auditing step, collecting dependencies from `source`. 54 | 55 | Individual vulnerability results are uniqued based on their `aliases` sets: 56 | any two results for the same dependency that share an alias are collapsed 57 | into a single result with a union of all aliases. 58 | 59 | `PYSEC`-identified results are given priority over other results. 60 | """ 61 | specs = source.collect() 62 | 63 | if self._options.dry_run: 64 | # Drain the iterator in dry-run mode. 65 | logger.info(f"Dry run: would have audited {len(list(specs))} packages") 66 | yield from () 67 | else: 68 | for dep, vulns in self._service.query_all(specs): 69 | unique_vulns: list[VulnerabilityResult] = [] 70 | seen_aliases: set[str] = set() 71 | 72 | # First pass: add all PYSEC vulnerabilities and track their 73 | # alias sets. 74 | for v in vulns: 75 | if not v.id.startswith("PYSEC"): 76 | continue 77 | 78 | seen_aliases.update(v.aliases | {v.id}) 79 | unique_vulns.append(v) 80 | 81 | # Second pass: add any non-PYSEC vulnerabilities. 82 | for v in vulns: 83 | # If we've already seen this vulnerability by another name, 84 | # don't add it. Instead, find the previous result and update 85 | # its alias set.
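# (For example: a GHSA-identified result that shares a CVE alias with a PYSEC result recorded in the first pass is merged into that PYSEC entry rather than reported separately.)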
86 | if seen_aliases.intersection(v.aliases | {v.id}): 87 | idx, previous = next( 88 | (i, p) for (i, p) in enumerate(unique_vulns) if p.alias_of(v) 89 | ) 90 | unique_vulns[idx] = previous.merge_aliases(v) 91 | continue 92 | 93 | seen_aliases.update(v.aliases | {v.id}) 94 | unique_vulns.append(v) 95 | 96 | yield (dep, unique_vulns) 97 | -------------------------------------------------------------------------------- /pip_audit/_format/cyclonedx.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functionality for formatting vulnerability results using the CycloneDX SBOM format. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import enum 8 | import logging 9 | from typing import cast 10 | 11 | from cyclonedx import output 12 | from cyclonedx.model.bom import Bom 13 | from cyclonedx.model.component import Component 14 | from cyclonedx.model.vulnerability import BomTarget, Vulnerability 15 | 16 | import pip_audit._fix as fix 17 | import pip_audit._service as service 18 | 19 | from .interface import VulnerabilityFormat 20 | 21 | logger = logging.getLogger(__name__) 22 | 23 | 24 | def _pip_audit_result_to_bom( 25 | result: dict[service.Dependency, list[service.VulnerabilityResult]], 26 | ) -> Bom: 27 | vulnerabilities = [] 28 | components = [] 29 | 30 | for dep, vulns in result.items(): 31 | # TODO(alex): Is there anything interesting we can do with skipped dependencies in 32 | # the CycloneDX format? 33 | if dep.is_skipped(): 34 | continue 35 | dep = cast(service.ResolvedDependency, dep) 36 | 37 | c = Component(name=dep.name, version=str(dep.version)) 38 | for vuln in vulns: 39 | vulnerabilities.append( 40 | Vulnerability( 41 | id=vuln.id, 42 | description=vuln.description, 43 | recommendation="Upgrade", 44 | # BomTarget expects str in type hints, but accepts BomRef at runtime 45 | affects=[BomTarget(ref=c.bom_ref)], # type: ignore[arg-type] 46 | ) 47 | ) 48 | 49 | components.append(c) 50 | 51 | return Bom(components=components, vulnerabilities=vulnerabilities) 52 | 53 | 54 | class CycloneDxFormat(VulnerabilityFormat): 55 | """ 56 | An implementation of `VulnerabilityFormat` that formats vulnerability results using CycloneDX. 57 | The container format used by CycloneDX can be additionally configured. 58 | """ 59 | 60 | @enum.unique 61 | class InnerFormat(enum.Enum): 62 | """ 63 | Valid container formats for CycloneDX. 64 | """ 65 | 66 | Json = output.OutputFormat.JSON 67 | Xml = output.OutputFormat.XML 68 | 69 | def __init__(self, inner_format: CycloneDxFormat.InnerFormat): 70 | """ 71 | Create a new `CycloneDxFormat`. 72 | 73 | `inner_format` determines the container format used by CycloneDX. 74 | """ 75 | 76 | self._inner_format = inner_format 77 | 78 | @property 79 | def is_manifest(self) -> bool: 80 | """ 81 | See `VulnerabilityFormat.is_manifest`. 82 | """ 83 | return True 84 | 85 | def format( 86 | self, 87 | result: dict[service.Dependency, list[service.VulnerabilityResult]], 88 | fixes: list[fix.FixVersion], 89 | ) -> str: 90 | """ 91 | Returns a CycloneDX formatted string for a given mapping of dependencies to vulnerability 92 | results. 93 | 94 | See `VulnerabilityFormat.format`. 
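
        A minimal usage sketch (illustrative; assumes an audit `result` mapping
        and no resolved fixes):

            fmt = CycloneDxFormat(inner_format=CycloneDxFormat.InnerFormat.Json)
            sbom_json = fmt.format(result, [])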
95 | """ 96 | if fixes: 97 | logger.warning("--fix output is unsupported by CycloneDX formats") 98 | 99 | bom = _pip_audit_result_to_bom(result) 100 | formatter = output.make_outputter( 101 | bom=bom, 102 | output_format=self._inner_format.value, 103 | schema_version=output.SchemaVersion.V1_4, 104 | ) 105 | 106 | return formatter.output_as_string() 107 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["flit_core >=3.2,<4"] 3 | build-backend = "flit_core.buildapi" 4 | 5 | [project] 6 | name = "pip_audit" 7 | dynamic = ["version"] 8 | description = "A tool for scanning Python environments for known vulnerabilities" 9 | readme = "README.md" 10 | license = { file = "LICENSE" } 11 | authors = [ 12 | { name = "Alex Cameron" }, 13 | { name = "Dustin Ingram", email = "di@python.org" }, 14 | { name = "William Woodruff", email = "william@trailofbits.com" }, 15 | ] 16 | classifiers = [ 17 | "Development Status :: 5 - Production/Stable", 18 | "Intended Audience :: Developers", 19 | "License :: OSI Approved :: Apache Software License", 20 | "Programming Language :: Python :: 3 :: Only", 21 | "Programming Language :: Python :: 3", 22 | "Programming Language :: Python :: 3.10", 23 | "Programming Language :: Python :: 3.11", 24 | "Programming Language :: Python :: 3.12", 25 | "Programming Language :: Python :: 3.13", 26 | "Programming Language :: Python :: 3.14", 27 | "Topic :: Security", 28 | ] 29 | dependencies = [ 30 | "CacheControl[filecache] >= 0.13.0", 31 | "cyclonedx-python-lib >= 5,< 12", 32 | "packaging>=23.0.0", # https://github.com/pypa/pip-audit/issues/464 33 | "pip-api>=0.0.28", 34 | "pip-requirements-parser>=32.0.0", 35 | "requests >= 2.31.0", 36 | "rich >= 12.4", 37 | "tomli >= 2.2.1", 38 | "tomli-w >= 1.2.0", 39 | "platformdirs >= 4.2.0", 40 | ] 41 | requires-python = ">=3.10" 42 | 43 | [project.optional-dependencies] 44 | cov = [ 45 | "coverage[toml] ~= 7.0, != 7.3.3", # https://github.com/nedbat/coveragepy/issues/1713 46 | ] 47 | test = ["pretend", "pytest", "pip-audit[cov]"] 48 | lint = [ 49 | "ruff >= 0.11", 50 | "interrogate ~= 1.6", 51 | "mypy", 52 | "types-requests", 53 | "types-toml", 54 | ] 55 | doc = ["pdoc"] 56 | dev = ["build", "pip-audit[doc,test,lint]"] 57 | 58 | [project.scripts] 59 | pip-audit = "pip_audit._cli:audit" 60 | 61 | [project.urls] 62 | Homepage = "https://pypi.org/project/pip-audit/" 63 | Issues = "https://github.com/pypa/pip-audit/issues" 64 | Source = "https://github.com/pypa/pip-audit" 65 | 66 | [tool.coverage.paths] 67 | # This is used for path mapping when combining coverage data 68 | # from multiple machines. The first entry is the local path, 69 | # and subsequent entries are the remote paths that get remapped 70 | # to the local path. 71 | # See: https://coverage.readthedocs.io/en/latest/config.html#paths 72 | source = ["pip_audit", "*/pip_audit", "*\\pip_audit"] 73 | 74 | [tool.coverage.run] 75 | source = ["pip_audit"] 76 | parallel = true 77 | relative_files = true 78 | 79 | [tool.interrogate] 80 | # don't enforce documentation coverage for packaging, testing, the virtual 81 | # environment, or the CLI (which is documented separately). 
82 | exclude = ["env", "test", "pip_audit/_cli.py"] 83 | ignore-semiprivate = true 84 | fail-under = 100 85 | 86 | [tool.mypy] 87 | allow_redefinition = true 88 | check_untyped_defs = true 89 | disallow_incomplete_defs = true 90 | disallow_untyped_defs = true 91 | ignore_missing_imports = true 92 | no_implicit_optional = true 93 | show_error_codes = true 94 | sqlite_cache = true 95 | strict_equality = true 96 | warn_no_return = true 97 | warn_redundant_casts = true 98 | warn_return_any = true 99 | warn_unreachable = true 100 | warn_unused_configs = true 101 | warn_unused_ignores = true 102 | 103 | [tool.bump] 104 | input = "pip_audit/__init__.py" 105 | reset = true 106 | 107 | [tool.ruff] 108 | line-length = 100 109 | 110 | [tool.ruff.lint] 111 | # Never enforce `E501` (line length violations). 112 | ignore = ["E501"] 113 | select = ["E", "F", "I", "W", "UP"] 114 | -------------------------------------------------------------------------------- /test/service/test_interface.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import random 3 | from typing import cast 4 | 5 | import pytest 6 | from packaging.version import Version 7 | 8 | from pip_audit._service.interface import ( 9 | Dependency, 10 | ResolvedDependency, 11 | SkippedDependency, 12 | VulnerabilityID, 13 | VulnerabilityResult, 14 | VulnerabilityService, 15 | ) 16 | 17 | 18 | def test_dependency_typing(): 19 | # there are only two subclasses of Dependency 20 | assert set(Dependency.__subclasses__()) == {ResolvedDependency, SkippedDependency} 21 | 22 | # Dependency itself cannot be initialized 23 | with pytest.raises(NotImplementedError): 24 | Dependency(name="explodes") 25 | 26 | r = ResolvedDependency(name="foo", version=Version("1.0.0")) 27 | assert r.name == "foo" 28 | assert r.canonical_name == "foo" 29 | assert not r.is_skipped() 30 | 31 | s = SkippedDependency(name="bar", skip_reason="unit test") 32 | assert s.name == "bar" 33 | assert s.canonical_name == "bar" 34 | assert s.is_skipped() 35 | 36 | 37 | def test_vulnerability_service(vuln_service, spec): 38 | service = vuln_service() 39 | spec = spec("1.0.1") 40 | 41 | _, vulns = service.query(spec) 42 | assert len(vulns) == 1 43 | 44 | all_ = dict(service.query_all([spec])) 45 | assert len(all_) == 1 46 | assert len(all_[spec]) == 1 47 | 48 | 49 | def test_vulnerability_service_no_results(vuln_service, spec): 50 | service = vuln_service() 51 | spec = spec("1.1.1") 52 | 53 | _, vulns = service.query(spec) 54 | assert len(vulns) == 0 55 | 56 | 57 | def test_vulnerability_result_update_aliases(): 58 | result1 = VulnerabilityResult( 59 | id="FOO", 60 | description="stub", 61 | fix_versions=[Version("1.0.0")], 62 | aliases={"BAR", "BAZ", "ZAP"}, 63 | ) 64 | result2 = VulnerabilityResult( 65 | id="BAR", 66 | description="stub", 67 | fix_versions=[Version("1.0.0")], 68 | aliases={"FOO", "BAZ", "QUUX"}, 69 | ) 70 | 71 | merged = result1.merge_aliases(result2) 72 | assert merged.id == "FOO" 73 | assert merged.aliases == {"BAR", "BAZ", "ZAP", "QUUX"} 74 | 75 | 76 | def test_vulnerability_result_has_any_id(): 77 | result = VulnerabilityResult( 78 | id="FOO", 79 | description="bar", 80 | fix_versions=[Version("1.0.0")], 81 | aliases={"BAR", "BAZ", "QUUX"}, 82 | ) 83 | 84 | assert result.has_any_id({"FOO"}) 85 | assert result.has_any_id({"ham", "eggs", "BAZ"}) 86 | assert not result.has_any_id({"zilch"}) 87 | assert not result.has_any_id(set()) 88 | 89 | 90 | @pytest.mark.parametrize("_n", range(10)) 91 | def 
test_vulnerability_result_create(_n): 92 | ids = cast( 93 | list[VulnerabilityID], 94 | ["testid1", "testid2", "GHSA-XXXX-XXXXX", "CVE-XXXX-XXXXX", "PYSEC-XXXX-XXXXX"], 95 | ) 96 | random.shuffle(ids) 97 | 98 | result = VulnerabilityResult.create(ids, "foo", [], None) 99 | 100 | assert result.id == "PYSEC-XXXX-XXXXX" 101 | ids.remove(VulnerabilityID("PYSEC-XXXX-XXXXX")) 102 | assert result.aliases == set(ids) 103 | 104 | 105 | class TestVulnerabilityService: 106 | @pytest.mark.parametrize( 107 | ["timestamp", "result"], 108 | [ 109 | (None, None), 110 | ("2019-08-24T14:15:22Z", datetime.datetime(2019, 8, 24, 14, 15, 22)), 111 | ( 112 | "2022-10-22T00:00:27.668938Z", 113 | datetime.datetime(2022, 10, 22, 0, 0, 27, 668938), 114 | ), 115 | ], 116 | ) 117 | def test_parse_rfc3339(self, timestamp, result): 118 | assert VulnerabilityService._parse_rfc3339(timestamp) == result 119 | -------------------------------------------------------------------------------- /test/format/test_markdown.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import pip_audit._format as format 4 | 5 | 6 | @pytest.mark.parametrize("output_desc, output_aliases", ([True, False], [True, False])) 7 | def test_columns_not_manifest(output_desc, output_aliases): 8 | fmt = format.MarkdownFormat(output_desc, output_aliases) 9 | assert not fmt.is_manifest 10 | 11 | 12 | def test_markdown(vuln_data): 13 | markdown_format = format.MarkdownFormat(True, True) 14 | expected_markdown = """ 15 | Name | Version | ID | Fix Versions | Aliases | Description 16 | --- | --- | --- | --- | --- | --- 17 | foo | 1.0 | VULN-0 | 1.1,1.4 | CVE-0000-00000 | The first vulnerability 18 | foo | 1.0 | VULN-1 | 1.0 | CVE-0000-00001 | The second vulnerability 19 | bar | 0.1 | VULN-2 | | CVE-0000-00002 | The third vulnerability""" 20 | assert markdown_format.format(vuln_data, list()) == expected_markdown 21 | 22 | 23 | def test_markdown_no_desc(vuln_data): 24 | markdown_format = format.MarkdownFormat(False, True) 25 | expected_markdown = """ 26 | Name | Version | ID | Fix Versions | Aliases 27 | --- | --- | --- | --- | --- 28 | foo | 1.0 | VULN-0 | 1.1,1.4 | CVE-0000-00000 29 | foo | 1.0 | VULN-1 | 1.0 | CVE-0000-00001 30 | bar | 0.1 | VULN-2 | | CVE-0000-00002""" 31 | assert markdown_format.format(vuln_data, list()) == expected_markdown 32 | 33 | 34 | def test_markdown_no_desc_no_aliases(vuln_data): 35 | markdown_format = format.MarkdownFormat(False, False) 36 | expected_markdown = """ 37 | Name | Version | ID | Fix Versions 38 | --- | --- | --- | --- 39 | foo | 1.0 | VULN-0 | 1.1,1.4 40 | foo | 1.0 | VULN-1 | 1.0 41 | bar | 0.1 | VULN-2 | """ 42 | assert markdown_format.format(vuln_data, list()) == expected_markdown 43 | 44 | 45 | def test_markdown_skipped_dep(vuln_data_skipped_dep): 46 | markdown_format = format.MarkdownFormat(False, True) 47 | expected_markdown = """ 48 | Name | Version | ID | Fix Versions | Aliases 49 | --- | --- | --- | --- | --- 50 | foo | 1.0 | VULN-0 | 1.1,1.4 | CVE-0000-00000 51 | 52 | Name | Skip Reason 53 | --- | --- 54 | bar | skip-reason""" 55 | assert markdown_format.format(vuln_data_skipped_dep, list()) == expected_markdown 56 | 57 | 58 | def test_markdown_no_vuln_data(no_vuln_data): 59 | markdown_format = format.MarkdownFormat(False, True) 60 | expected_markdown = "" 61 | assert markdown_format.format(no_vuln_data, list()) == expected_markdown 62 | 63 | 64 | def test_markdown_no_vuln_data_skipped_dep(no_vuln_data_skipped_dep): 65 | markdown_format = 
format.MarkdownFormat(False, True) 66 | expected_markdown = """ 67 | Name | Skip Reason 68 | --- | --- 69 | bar | skip-reason""" 70 | assert markdown_format.format(no_vuln_data_skipped_dep, list()) == expected_markdown 71 | 72 | 73 | def test_markdown_fix(vuln_data, fix_data): 74 | markdown_format = format.MarkdownFormat(False, True) 75 | expected_markdown = """ 76 | Name | Version | ID | Fix Versions | Applied Fix | Aliases 77 | --- | --- | --- | --- | --- | --- 78 | foo | 1.0 | VULN-0 | 1.1,1.4 | Successfully upgraded foo (1.0 => 1.8) | CVE-0000-00000 79 | foo | 1.0 | VULN-1 | 1.0 | Successfully upgraded foo (1.0 => 1.8) | CVE-0000-00001 80 | bar | 0.1 | VULN-2 | | Successfully upgraded bar (0.1 => 0.3) | CVE-0000-00002""" 81 | assert markdown_format.format(vuln_data, fix_data) == expected_markdown 82 | 83 | 84 | def test_markdown_skipped_fix(vuln_data, skipped_fix_data): 85 | markdown_format = format.MarkdownFormat(False, True) 86 | expected_markdown = """ 87 | Name | Version | ID | Fix Versions | Applied Fix | Aliases 88 | --- | --- | --- | --- | --- | --- 89 | foo | 1.0 | VULN-0 | 1.1,1.4 | Successfully upgraded foo (1.0 => 1.8) | CVE-0000-00000 90 | foo | 1.0 | VULN-1 | 1.0 | Successfully upgraded foo (1.0 => 1.8) | CVE-0000-00001 91 | bar | 0.1 | VULN-2 | | Failed to fix bar (0.1): skip-reason | CVE-0000-00002""" 92 | assert markdown_format.format(vuln_data, skipped_fix_data) == expected_markdown 93 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | schedule: 9 | - cron: "0 12 * * *" 10 | 11 | permissions: {} 12 | 13 | jobs: 14 | test: 15 | strategy: 16 | matrix: 17 | python: 18 | - "3.10" 19 | - "3.11" 20 | - "3.12" 21 | - "3.13" 22 | - "3.14" 23 | runs-on: ubuntu-latest 24 | steps: 25 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 26 | with: 27 | persist-credentials: false 28 | 29 | - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 30 | with: 31 | python-version: ${{ matrix.python }} 32 | cache: "pip" 33 | cache-dependency-path: pyproject.toml 34 | 35 | - name: test 36 | run: make test PIP_AUDIT_EXTRA=test 37 | 38 | - name: Upload coverage data 39 | uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 40 | with: 41 | name: coverage-data-${{ matrix.python }} 42 | path: .coverage.* 43 | include-hidden-files: true 44 | if-no-files-found: ignore 45 | 46 | test-windows: 47 | runs-on: windows-latest 48 | steps: 49 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 50 | with: 51 | persist-credentials: false 52 | 53 | - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 54 | with: 55 | # Always test with latest Python on Windows. 
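          # ("3.x" resolves to the newest stable CPython 3 release available to setup-python.)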
56 | python-version: "3.x" 57 | cache: "pip" 58 | cache-dependency-path: pyproject.toml 59 | 60 | - name: test 61 | run: make test PIP_AUDIT_EXTRA=test 62 | 63 | - name: Upload coverage data 64 | uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 65 | with: 66 | name: coverage-data-windows 67 | path: .coverage.* 68 | include-hidden-files: true 69 | if-no-files-found: ignore 70 | 71 | coverage: 72 | name: Combine & check coverage 73 | if: always() 74 | needs: [test, test-windows] 75 | runs-on: ubuntu-latest 76 | 77 | steps: 78 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 79 | with: 80 | persist-credentials: false 81 | 82 | - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 83 | with: 84 | python-version: "3.x" 85 | 86 | - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 87 | with: 88 | pattern: coverage-data-* 89 | merge-multiple: true 90 | 91 | - name: Combine coverage & fail if it's <100% 92 | run: | 93 | make dev PIP_AUDIT_EXTRA=cov 94 | 95 | ./env/bin/python -Im coverage combine 96 | ./env/bin/python -Im coverage html --skip-covered --skip-empty 97 | 98 | # Report and write to summary. 99 | ./env/bin/python -Im coverage report --format=markdown >> "${GITHUB_STEP_SUMMARY}" 100 | 101 | # Report again and fail if under 100%. 102 | ./env/bin/python -Im coverage report --fail-under=100 103 | 104 | - name: Upload HTML report if check failed 105 | uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 106 | with: 107 | name: html-report 108 | path: htmlcov 109 | if: ${{ failure() }} 110 | 111 | all-tests-pass: 112 | if: always() 113 | 114 | needs: [coverage] 115 | 116 | runs-on: ubuntu-latest 117 | 118 | steps: 119 | - name: check test jobs 120 | uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe # v1.2.2 121 | with: 122 | jobs: ${{ toJSON(needs) }} 123 | -------------------------------------------------------------------------------- /pip_audit/_format/json.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functionality for formatting vulnerability results as an array of JSON objects. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import json 8 | from typing import Any, cast 9 | 10 | import pip_audit._fix as fix 11 | import pip_audit._service as service 12 | 13 | from .interface import VulnerabilityFormat 14 | 15 | 16 | class JsonFormat(VulnerabilityFormat): 17 | """ 18 | An implementation of `VulnerabilityFormat` that formats vulnerability results as an array of 19 | JSON objects. 20 | """ 21 | 22 | def __init__(self, output_desc: bool, output_aliases: bool): 23 | """ 24 | Create a new `JsonFormat`. 25 | 26 | `output_desc` is a flag to determine whether descriptions for each vulnerability should be 27 | included in the output as they can be quite long and make the output difficult to read. 28 | 29 | `output_aliases` is a flag to determine whether aliases (such as CVEs) for each 30 | vulnerability should be included in the output. 31 | """ 32 | self.output_desc = output_desc 33 | self.output_aliases = output_aliases 34 | 35 | @property 36 | def is_manifest(self) -> bool: 37 | """ 38 | See `VulnerabilityFormat.is_manifest`. 
39 | """ 40 | return True 41 | 42 | def format( 43 | self, 44 | result: dict[service.Dependency, list[service.VulnerabilityResult]], 45 | fixes: list[fix.FixVersion], 46 | ) -> str: 47 | """ 48 | Returns a JSON formatted string for a given mapping of dependencies to vulnerability 49 | results. 50 | 51 | See `VulnerabilityFormat.format`. 52 | """ 53 | output_json = {} 54 | dep_json = [] 55 | for dep, vulns in result.items(): 56 | dep_json.append(self._format_dep(dep, vulns)) 57 | output_json["dependencies"] = dep_json 58 | fix_json = [] 59 | for f in fixes: 60 | fix_json.append(self._format_fix(f)) 61 | output_json["fixes"] = fix_json 62 | return json.dumps(output_json) 63 | 64 | def _format_dep( 65 | self, dep: service.Dependency, vulns: list[service.VulnerabilityResult] 66 | ) -> dict[str, Any]: 67 | if dep.is_skipped(): 68 | dep = cast(service.SkippedDependency, dep) 69 | return { 70 | "name": dep.canonical_name, 71 | "skip_reason": dep.skip_reason, 72 | } 73 | 74 | dep = cast(service.ResolvedDependency, dep) 75 | return { 76 | "name": dep.canonical_name, 77 | "version": str(dep.version), 78 | "vulns": [self._format_vuln(vuln) for vuln in vulns], 79 | } 80 | 81 | def _format_vuln(self, vuln: service.VulnerabilityResult) -> dict[str, Any]: 82 | vuln_json = { 83 | "id": vuln.id, 84 | "fix_versions": [str(version) for version in vuln.fix_versions], 85 | } 86 | if self.output_aliases: 87 | vuln_json["aliases"] = list(vuln.aliases) 88 | if self.output_desc: 89 | vuln_json["description"] = vuln.description 90 | return vuln_json 91 | 92 | def _format_fix(self, fix_version: fix.FixVersion) -> dict[str, Any]: 93 | if fix_version.is_skipped(): 94 | fix_version = cast(fix.SkippedFixVersion, fix_version) 95 | return { 96 | "name": fix_version.dep.canonical_name, 97 | "version": str(fix_version.dep.version), 98 | "skip_reason": fix_version.skip_reason, 99 | } 100 | fix_version = cast(fix.ResolvedFixVersion, fix_version) 101 | return { 102 | "name": fix_version.dep.canonical_name, 103 | "old_version": str(fix_version.dep.version), 104 | "new_version": str(fix_version.version), 105 | } 106 | -------------------------------------------------------------------------------- /test/format/test_columns.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import pip_audit._format as format 4 | 5 | 6 | @pytest.mark.parametrize("output_desc, output_aliases", ([True, False], [True, False])) 7 | def test_columns_not_manifest(output_desc, output_aliases): 8 | fmt = format.ColumnsFormat(output_desc, output_aliases) 9 | assert not fmt.is_manifest 10 | 11 | 12 | def test_columns(vuln_data): 13 | columns_format = format.ColumnsFormat(True, True) 14 | expected_columns = """Name Version ID Fix Versions Aliases Description 15 | ---- ------- ------ ------------ -------------- ------------------------ 16 | foo 1.0 VULN-0 1.1,1.4 CVE-0000-00000 The first vulnerability 17 | foo 1.0 VULN-1 1.0 CVE-0000-00001 The second vulnerability 18 | bar 0.1 VULN-2 CVE-0000-00002 The third vulnerability""" 19 | assert columns_format.format(vuln_data, list()) == expected_columns 20 | 21 | 22 | def test_columns_no_desc(vuln_data): 23 | columns_format = format.ColumnsFormat(False, True) 24 | expected_columns = """Name Version ID Fix Versions Aliases 25 | ---- ------- ------ ------------ -------------- 26 | foo 1.0 VULN-0 1.1,1.4 CVE-0000-00000 27 | foo 1.0 VULN-1 1.0 CVE-0000-00001 28 | bar 0.1 VULN-2 CVE-0000-00002""" 29 | assert columns_format.format(vuln_data, list()) == 
expected_columns 30 | 31 | 32 | def test_columns_no_desc_no_aliases(vuln_data): 33 | columns_format = format.ColumnsFormat(False, False) 34 | expected_columns = """Name Version ID Fix Versions 35 | ---- ------- ------ ------------ 36 | foo 1.0 VULN-0 1.1,1.4 37 | foo 1.0 VULN-1 1.0 38 | bar 0.1 VULN-2""" 39 | assert columns_format.format(vuln_data, list()) == expected_columns 40 | 41 | 42 | def test_columns_skipped_dep(vuln_data_skipped_dep): 43 | columns_format = format.ColumnsFormat(False, True) 44 | expected_columns = """Name Version ID Fix Versions Aliases 45 | ---- ------- ------ ------------ -------------- 46 | foo 1.0 VULN-0 1.1,1.4 CVE-0000-00000 47 | Name Skip Reason 48 | ---- ----------- 49 | bar skip-reason""" 50 | assert columns_format.format(vuln_data_skipped_dep, list()) == expected_columns 51 | 52 | 53 | def test_columns_no_vuln_data(no_vuln_data): 54 | columns_format = format.ColumnsFormat(False, True) 55 | expected_columns = "" 56 | assert columns_format.format(no_vuln_data, list()) == expected_columns 57 | 58 | 59 | def test_column_no_vuln_data_skipped_dep(no_vuln_data_skipped_dep): 60 | columns_format = format.ColumnsFormat(False, True) 61 | expected_columns = """Name Skip Reason 62 | ---- ----------- 63 | bar skip-reason""" 64 | assert columns_format.format(no_vuln_data_skipped_dep, list()) == expected_columns 65 | 66 | 67 | def test_columns_fix(vuln_data, fix_data): 68 | columns_format = format.ColumnsFormat(False, True) 69 | expected_columns = """Name Version ID Fix Versions Applied Fix Aliases 70 | ---- ------- ------ ------------ -------------------------------------- -------------- 71 | foo 1.0 VULN-0 1.1,1.4 Successfully upgraded foo (1.0 => 1.8) CVE-0000-00000 72 | foo 1.0 VULN-1 1.0 Successfully upgraded foo (1.0 => 1.8) CVE-0000-00001 73 | bar 0.1 VULN-2 Successfully upgraded bar (0.1 => 0.3) CVE-0000-00002""" 74 | assert columns_format.format(vuln_data, fix_data) == expected_columns 75 | 76 | 77 | def test_columns_skipped_fix(vuln_data, skipped_fix_data): 78 | columns_format = format.ColumnsFormat(False, True) 79 | expected_columns = """Name Version ID Fix Versions Applied Fix Aliases 80 | ---- ------- ------ ------------ -------------------------------------- -------------- 81 | foo 1.0 VULN-0 1.1,1.4 Successfully upgraded foo (1.0 => 1.8) CVE-0000-00000 82 | foo 1.0 VULN-1 1.0 Successfully upgraded foo (1.0 => 1.8) CVE-0000-00001 83 | bar 0.1 VULN-2 Failed to fix bar (0.1): skip-reason CVE-0000-00002""" 84 | assert columns_format.format(vuln_data, skipped_fix_data) == expected_columns 85 | -------------------------------------------------------------------------------- /pip_audit/_dependency_source/pylock.py: -------------------------------------------------------------------------------- 1 | """ 2 | Collect dependencies from `pylock.toml` files. 3 | """ 4 | 5 | import logging 6 | from collections.abc import Iterator 7 | from pathlib import Path 8 | 9 | import tomli 10 | from packaging.version import Version 11 | 12 | from pip_audit._dependency_source import DependencyFixError, DependencySource, DependencySourceError 13 | from pip_audit._fix import ResolvedFixVersion 14 | from pip_audit._service import Dependency, ResolvedDependency 15 | from pip_audit._service.interface import SkippedDependency 16 | 17 | logger = logging.getLogger(__name__) 18 | 19 | 20 | class PyLockSource(DependencySource): 21 | """ 22 | Wraps `pylock.*.toml` dependency collection as a dependency source. 
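
    A minimal usage sketch (illustrative; assumes a PEP 751 lockfile named
    `pylock.toml` in the current directory):

        source = PyLockSource([Path("pylock.toml")])
        dependencies = list(source.collect())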
23 | """ 24 | 25 | def __init__(self, filenames: list[Path]) -> None: 26 | """ 27 | Create a new `PyLockSource`. 28 | 29 | `filenames` provides a list of `pylock.*.toml` files to parse. 30 | """ 31 | 32 | self._filenames = filenames 33 | 34 | def collect(self) -> Iterator[Dependency]: 35 | """ 36 | Collect all of the dependencies discovered by this `PyLockSource`. 37 | 38 | Raises a `PyLockSourceError` on any errors. 39 | """ 40 | for filename in self._filenames: 41 | yield from self._collect_from_file(filename) 42 | 43 | def _collect_from_file(self, filename: Path) -> Iterator[Dependency]: 44 | """ 45 | Collect dependencies from a single `pylock.*.toml` file. 46 | 47 | Raises a `PyLockSourceError` on any errors. 48 | """ 49 | try: 50 | with filename.open(mode="rb") as f: 51 | pylock = tomli.load(f) 52 | except tomli.TOMLDecodeError as e: 53 | raise PyLockSourceError(f"{filename}: invalid TOML in lockfile") from e 54 | 55 | lock_version = pylock.get("lock-version") 56 | if not lock_version: 57 | raise PyLockSourceError(f"{filename}: missing lock-version in lockfile") 58 | 59 | lock_version = Version(lock_version) 60 | if lock_version.major != 1: 61 | raise PyLockSourceError(f"{filename}: lockfile version {lock_version} is not supported") 62 | 63 | packages = pylock.get("packages") 64 | if not packages: 65 | raise PyLockSourceError(f"{filename}: missing packages in lockfile") 66 | 67 | try: 68 | yield from self._collect_from_packages(packages) 69 | except PyLockSourceError as e: 70 | raise PyLockSourceError(f"{filename}: {e}") from e 71 | 72 | def _collect_from_packages(self, packages: list[dict]) -> Iterator[Dependency]: 73 | """ 74 | Collect dependencies from a list of packages. 75 | 76 | Raises a `PyLockSourceError` on any errors. 77 | """ 78 | for idx, package in enumerate(packages): 79 | name = package.get("name") 80 | if not name: 81 | raise PyLockSourceError(f"invalid package #{idx}: no name") 82 | 83 | version = package.get("version") 84 | if version: 85 | yield ResolvedDependency(name, Version(version)) 86 | else: 87 | # Versions are optional in PEP 751, e.g. for source tree specifiers. 88 | # We mark these as skipped. 89 | yield SkippedDependency(name, "no version specified") 90 | 91 | def fix(self, fix_version: ResolvedFixVersion) -> None: # pragma: no cover 92 | """ 93 | Raises `NotImplementedError` if called. 94 | 95 | We don't support fixing dependencies in lockfiles, since 96 | lockfiles should be managed/updated by their packaging tool. 97 | """ 98 | 99 | raise NotImplementedError( 100 | "lockfiles cannot be fixed directly; use your packaging tool to perform upgrades" 101 | ) 102 | 103 | 104 | class PyLockSourceError(DependencySourceError): 105 | """A pylock-parsing specific `DependencySourceError`.""" 106 | 107 | pass 108 | 109 | 110 | class PyLockFixError(DependencyFixError): 111 | """A pylock-fizing specific `DependencyFixError`.""" 112 | 113 | pass 114 | -------------------------------------------------------------------------------- /pip_audit/_fix.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functionality for resolving fixed versions of dependencies. 
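
A minimal usage sketch (illustrative; assumes an existing vulnerability `service`
and an audit `result` mapping of dependencies to their vulnerabilities):

    for fix_version in resolve_fix_versions(service, result):
        if fix_version.is_skipped():
            print(f"cannot fix {fix_version.dep.name}: {fix_version.skip_reason}")
        else:
            print(f"{fix_version.dep.name}: upgrade to {fix_version.version}")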
3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import logging 8 | from collections.abc import Iterator 9 | from dataclasses import dataclass 10 | from typing import Any, cast 11 | 12 | from packaging.version import Version 13 | 14 | from pip_audit._service import ( 15 | Dependency, 16 | ResolvedDependency, 17 | VulnerabilityResult, 18 | VulnerabilityService, 19 | ) 20 | from pip_audit._state import AuditState 21 | 22 | logger = logging.getLogger(__name__) 23 | 24 | 25 | @dataclass(frozen=True) 26 | class FixVersion: 27 | """ 28 | Represents an abstract dependency fix version. 29 | 30 | This class cannot be constructed directly. 31 | """ 32 | 33 | dep: ResolvedDependency 34 | 35 | def __init__(self, *_args: Any, **_kwargs: Any) -> None: # pragma: no cover 36 | """ 37 | A stub constructor that always fails. 38 | """ 39 | raise NotImplementedError 40 | 41 | def is_skipped(self) -> bool: 42 | """ 43 | Check whether the `FixVersion` was unable to be resolved. 44 | """ 45 | return self.__class__ is SkippedFixVersion 46 | 47 | 48 | @dataclass(frozen=True) 49 | class ResolvedFixVersion(FixVersion): 50 | """ 51 | Represents a resolved fix version. 52 | """ 53 | 54 | version: Version 55 | 56 | 57 | @dataclass(frozen=True) 58 | class SkippedFixVersion(FixVersion): 59 | """ 60 | Represents a fix version that was unable to be resolved and therefore, skipped. 61 | """ 62 | 63 | skip_reason: str 64 | 65 | 66 | def resolve_fix_versions( 67 | service: VulnerabilityService, 68 | result: dict[Dependency, list[VulnerabilityResult]], 69 | state: AuditState = AuditState(), 70 | ) -> Iterator[FixVersion]: 71 | """ 72 | Resolves a mapping of dependencies to known vulnerabilities to a series of fix versions without 73 | known vulnerabilities. 74 | """ 75 | for dep, vulns in result.items(): 76 | if dep.is_skipped(): 77 | continue 78 | if not vulns: 79 | continue 80 | dep = cast(ResolvedDependency, dep) 81 | try: 82 | version = _resolve_fix_version(service, dep, vulns, state) 83 | yield ResolvedFixVersion(dep, version) 84 | except FixResolutionImpossible as fri: 85 | skip_reason = str(fri) 86 | logger.debug(skip_reason) 87 | yield SkippedFixVersion(dep, skip_reason) 88 | 89 | 90 | def _resolve_fix_version( 91 | service: VulnerabilityService, 92 | dep: ResolvedDependency, 93 | vulns: list[VulnerabilityResult], 94 | state: AuditState, 95 | ) -> Version: 96 | # We need to upgrade to a fix version that satisfies all vulnerability results 97 | # 98 | # However, whenever we upgrade a dependency, we run the risk of introducing new vulnerabilities 99 | # so we need to run this in a loop and continue polling the vulnerability service on each 100 | # prospective resolved fix version 101 | current_version = dep.version 102 | current_vulns = vulns 103 | while current_vulns: 104 | state.update_state(f"Resolving fix version for {dep.name}, checking {current_version}") 105 | 106 | def get_earliest_fix_version(d: ResolvedDependency, v: VulnerabilityResult) -> Version: 107 | for fix_version in v.fix_versions: 108 | if fix_version > current_version: 109 | return fix_version 110 | raise FixResolutionImpossible( 111 | f"failed to fix dependency {dep.name} ({dep.version}), unable to find fix version " 112 | f"for vulnerability {v.id}" 113 | ) 114 | 115 | # We want to retrieve a version that potentially fixes all vulnerabilities 116 | current_version = max([get_earliest_fix_version(dep, v) for v in current_vulns]) 117 | _, current_vulns = service.query(ResolvedDependency(dep.name, current_version)) 118 | return 
current_version 119 | 120 | 121 | class FixResolutionImpossible(Exception): 122 | """ 123 | Raised when `resolve_fix_versions` fails to find a fix version without known vulnerabilities 124 | """ 125 | 126 | pass 127 | -------------------------------------------------------------------------------- /test/test_audit.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | 3 | import pretend # type: ignore 4 | import pytest 5 | from packaging.version import Version 6 | 7 | from pip_audit import _audit as audit 8 | from pip_audit._audit import AuditOptions, Auditor 9 | from pip_audit._service.interface import VulnerabilityResult, VulnerabilityService 10 | 11 | 12 | def test_audit(vuln_service, dep_source): 13 | service = vuln_service() 14 | source = dep_source() 15 | 16 | auditor = Auditor(service) 17 | results = auditor.audit(source) 18 | 19 | assert next(results) == ( 20 | next(source.collect()), 21 | [ 22 | VulnerabilityResult( 23 | id="fake-id", 24 | description="this is not a real result", 25 | fix_versions=[Version("1.1.0")], 26 | aliases=set(), 27 | ) 28 | ], 29 | ) 30 | 31 | with pytest.raises(StopIteration): 32 | next(results) 33 | 34 | 35 | def test_audit_dry_run(monkeypatch, vuln_service, dep_source): 36 | service = vuln_service() 37 | source = dep_source() 38 | 39 | auditor = Auditor(service, options=AuditOptions(dry_run=True)) 40 | service = pretend.stub(query_all=pretend.call_recorder(lambda s: None)) 41 | logger = pretend.stub(info=pretend.call_recorder(lambda s: None)) 42 | monkeypatch.setattr(auditor, "_service", service) 43 | monkeypatch.setattr(audit, "logger", logger) 44 | 45 | # dict-construct here to consume the iterator, causing the effects below. 46 | _ = dict(auditor.audit(source)) 47 | 48 | # In dry-run mode, no calls should be made the the vuln service, 49 | # but an appropriate number of logging calls should be made. 50 | assert service.query_all.calls == [] 51 | assert len(logger.info.calls) == len(list(source.collect())) 52 | 53 | 54 | @pytest.mark.parametrize( 55 | "vulns", 56 | itertools.permutations( 57 | [ 58 | VulnerabilityResult( 59 | id="PYSEC-0", 60 | description="fake", 61 | fix_versions=[Version("1.1.0")], 62 | aliases={"alias-1"}, 63 | ), 64 | VulnerabilityResult( 65 | id="FAKE-1", 66 | description="fake", 67 | fix_versions=[Version("1.1.0")], 68 | aliases={"alias-1", "alias-2"}, 69 | ), 70 | ] 71 | ), 72 | ) 73 | def test_audit_dedupes_aliases(dep_source, vulns): 74 | class Service(VulnerabilityService): 75 | def query(self, spec): 76 | return spec, vulns 77 | 78 | service = Service() 79 | source = dep_source() 80 | 81 | auditor = Auditor(service) 82 | results = list(auditor.audit(source)) 83 | 84 | # One dependency, one unique vulnerability result for that dependency. 
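    # (The PYSEC-identified result should win regardless of the order in which the
    # service returned the vulnerabilities, hence the permutations above.)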
85 | assert len(results) == 1 86 | assert len(results[0][1]) == 1 87 | assert results[0][1][0].id == "PYSEC-0" 88 | 89 | 90 | @pytest.mark.parametrize( 91 | "vulns", 92 | itertools.permutations( 93 | [ 94 | VulnerabilityResult( 95 | id="PYSEC-0", 96 | description="fake", 97 | fix_versions=[Version("1.1.0")], 98 | aliases={"CVE-XXXX-YYYYY"}, 99 | ), 100 | VulnerabilityResult( 101 | id="FAKE-1", 102 | description="fake", 103 | fix_versions=[Version("1.1.0")], 104 | aliases={"CVE-XXXX-YYYYY"}, 105 | ), 106 | VulnerabilityResult( 107 | id="CVE-XXXX-YYYYY", 108 | description="fake", 109 | fix_versions=[Version("1.1.0")], 110 | aliases={"FAKE-1"}, 111 | ), 112 | ] 113 | ), 114 | ) 115 | def test_audit_dedupes_aliases_by_id(dep_source, vulns): 116 | class Service(VulnerabilityService): 117 | def query(self, spec): 118 | return spec, vulns 119 | 120 | service = Service() 121 | source = dep_source() 122 | 123 | auditor = Auditor(service) 124 | results = list(auditor.audit(source)) 125 | 126 | # One dependency, one unique vulnerability result for that dependency. 127 | assert len(results) == 1 128 | assert len(results[0][1]) == 1 129 | assert results[0][1][0].id == "PYSEC-0" 130 | 131 | # The result contains the merged alias set for all aliases. 132 | assert results[0][1][0].aliases == {"FAKE-1", "CVE-XXXX-YYYYY"} 133 | -------------------------------------------------------------------------------- /test/format/test_cyclonedx.py: -------------------------------------------------------------------------------- 1 | import json 2 | import xml.etree.ElementTree as ET 3 | 4 | import pretend # type: ignore 5 | import pytest 6 | 7 | from pip_audit._format import CycloneDxFormat 8 | 9 | 10 | @pytest.mark.parametrize( 11 | "inner", [CycloneDxFormat.InnerFormat.Xml, CycloneDxFormat.InnerFormat.Json] 12 | ) 13 | def test_cyclonedx_manifest(inner): 14 | fmt = CycloneDxFormat(inner_format=inner) 15 | assert fmt.is_manifest 16 | 17 | 18 | def test_cyclonedx_inner_json(vuln_data): 19 | formatter = CycloneDxFormat(inner_format=CycloneDxFormat.InnerFormat.Json) 20 | 21 | # We don't test CycloneDX's formatting/layout decisions, only that 22 | # the formatter emits correct JSON when initialized in JSON mode. 23 | assert json.loads(formatter.format(vuln_data, list())) is not None 24 | 25 | 26 | def test_cyclonedx_inner_xml(vuln_data): 27 | formatter = CycloneDxFormat(inner_format=CycloneDxFormat.InnerFormat.Xml) 28 | 29 | # We don't test CycloneDX's formatting/layout decisions, only that 30 | # the formatter emits correct XML when initialized in XML mode. 
31 | assert ET.fromstring(formatter.format(vuln_data, list())) is not None 32 | 33 | 34 | def test_cyclonedx_skipped_dep(vuln_data_skipped_dep): 35 | formatter = CycloneDxFormat(inner_format=CycloneDxFormat.InnerFormat.Json) 36 | 37 | # Just test that a skipped dependency doesn't cause the formatter to blow up 38 | assert json.loads(formatter.format(vuln_data_skipped_dep, list())) is not None 39 | 40 | 41 | def test_cyclonedx_fix(monkeypatch, vuln_data, fix_data): 42 | import pip_audit._format.cyclonedx as cyclonedx 43 | 44 | logger = pretend.stub(warning=pretend.call_recorder(lambda s: None)) 45 | monkeypatch.setattr(cyclonedx, "logger", logger) 46 | 47 | formatter = CycloneDxFormat(inner_format=CycloneDxFormat.InnerFormat.Json) 48 | assert json.loads(formatter.format(vuln_data, fix_data)) is not None 49 | 50 | # The CycloneDX format doesn't support fixes so we expect to log a warning 51 | assert len(logger.warning.calls) == 1 52 | 53 | 54 | def test_cyclonedx_vulnerabilities_linked_to_components(vuln_data): 55 | """ 56 | Backstop test to ensure vulnerabilities are correctly linked to their components. 57 | 58 | This test verifies that the CycloneDX output properly links vulnerabilities 59 | to components via the 'affects' field. If the cyclonedx-python-lib library 60 | changes its API in a future version, this test will fail and provide advance 61 | warning of breaking changes. 62 | """ 63 | formatter = CycloneDxFormat(inner_format=CycloneDxFormat.InnerFormat.Json) 64 | output = formatter.format(vuln_data, list()) 65 | data = json.loads(output) 66 | 67 | # Build a mapping of component names to their bom-refs 68 | component_bomrefs = {comp["name"]: comp["bom-ref"] for comp in data.get("components", [])} 69 | 70 | # Verify we have components and vulnerabilities in the output 71 | assert len(component_bomrefs) > 0, "Should have components in CycloneDX output" 72 | assert len(data.get("vulnerabilities", [])) > 0, ( 73 | "Should have vulnerabilities in CycloneDX output" 74 | ) 75 | 76 | # Track which components have vulnerabilities linked to them 77 | components_with_vulns = set() 78 | 79 | # Check each vulnerability has proper 'affects' linking 80 | for vuln in data["vulnerabilities"]: 81 | affects = vuln.get("affects", []) 82 | 83 | # Each vulnerability should have at least one affected component 84 | assert len(affects) > 0, f"Vulnerability {vuln.get('id')} should have 'affects' field" 85 | 86 | for affected in affects: 87 | # Each affected entry should have a 'ref' field 88 | assert "ref" in affected, f"Vulnerability {vuln.get('id')} affects entry missing 'ref'" 89 | 90 | vuln_ref = affected["ref"] 91 | 92 | # The ref should not be empty 93 | assert vuln_ref, f"Vulnerability {vuln.get('id')} has empty 'ref' in affects" 94 | 95 | # The ref should match one of the component bom-refs 96 | assert vuln_ref in component_bomrefs.values(), ( 97 | f"Vulnerability {vuln.get('id')} references unknown bom-ref: {vuln_ref}" 98 | ) 99 | 100 | # Track that this component has a vulnerability linked to it 101 | for comp_name, comp_ref in component_bomrefs.items(): 102 | if comp_ref == vuln_ref: 103 | components_with_vulns.add(comp_name) 104 | 105 | # Verify that components with vulnerabilities in vuln_data have them linked in output 106 | # (This ensures the linking is actually working, not just present but wrong) 107 | for dep, vulns in vuln_data.items(): 108 | if vulns: # If this dependency has vulnerabilities 109 | assert dep.name in components_with_vulns, ( 110 | f"Component {dep.name} has 
vulnerabilities but they're not properly linked" 111 | ) 112 | -------------------------------------------------------------------------------- /test/dependency_source/test_pyproject.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from pathlib import Path 4 | 5 | import pretend # type: ignore 6 | import pytest 7 | import tomli 8 | from packaging.version import Version 9 | 10 | from pip_audit._dependency_source import ( 11 | DependencyFixError, 12 | DependencySourceError, 13 | pyproject, 14 | ) 15 | from pip_audit._fix import ResolvedFixVersion 16 | from pip_audit._service import ResolvedDependency 17 | from pip_audit._state import AuditState 18 | from pip_audit._virtual_env import VirtualEnvError 19 | 20 | 21 | def _init_pyproject(filename: Path, contents: str) -> pyproject.PyProjectSource: 22 | with open(filename, mode="w") as f: 23 | f.write(contents) 24 | return pyproject.PyProjectSource(filename) 25 | 26 | 27 | def _check_file(filename: Path, expected_contents: dict) -> None: 28 | with open(filename, "rb") as f: 29 | assert tomli.load(f) == expected_contents 30 | 31 | 32 | @pytest.mark.online 33 | def test_pyproject_source(req_file): 34 | source = _init_pyproject( 35 | req_file(), 36 | """ 37 | [project] 38 | dependencies = [ 39 | "flask==2.0.1" 40 | ] 41 | """, 42 | ) 43 | specs = list(source.collect()) 44 | assert ResolvedDependency("Flask", Version("2.0.1")) in specs 45 | 46 | 47 | def test_pyproject_source_no_project_section(req_file): 48 | source = _init_pyproject( 49 | req_file(), 50 | """ 51 | [some_other_section] 52 | dependencies = [ 53 | "flask==2.0.1" 54 | ] 55 | """, 56 | ) 57 | with pytest.raises(DependencySourceError): 58 | list(source.collect()) 59 | 60 | 61 | def test_pyproject_source_no_deps(monkeypatch, req_file): 62 | logger = pretend.stub(warning=pretend.call_recorder(lambda s: None)) 63 | monkeypatch.setattr(pyproject, "logger", logger) 64 | 65 | source = _init_pyproject( 66 | req_file(), 67 | """ 68 | [project] 69 | """, 70 | ) 71 | specs = list(source.collect()) 72 | assert not specs 73 | 74 | # We log a warning when we find a `pyproject.toml` file with no dependencies 75 | assert len(logger.warning.calls) == 1 76 | 77 | 78 | @pytest.mark.online 79 | def test_pyproject_source_duplicate_deps(req_file): 80 | # Click is a dependency of Flask. We should check that the dependencies of Click aren't returned 81 | # twice. 
82 | source = _init_pyproject( 83 | req_file(), 84 | """ 85 | [project] 86 | dependencies = [ 87 | "flask", 88 | "click", 89 | ] 90 | """, 91 | ) 92 | specs = list(source.collect()) 93 | 94 | # Check that the list of dependencies is already deduplicated 95 | assert len(specs) == len(set(specs)) 96 | 97 | 98 | def test_pyproject_source_virtualenv_error(monkeypatch, req_file): 99 | class MockVirtualEnv: 100 | def __init__(self, install_args: list[str], state: AuditState) -> None: 101 | pass 102 | 103 | def create(self, dir: Path) -> None: 104 | raise VirtualEnvError 105 | 106 | source = _init_pyproject( 107 | req_file(), 108 | """ 109 | [project] 110 | dependencies = [ 111 | "flask==2.0.1" 112 | ] 113 | """, 114 | ) 115 | monkeypatch.setattr(pyproject, "VirtualEnv", MockVirtualEnv) 116 | with pytest.raises(DependencySourceError): 117 | list(source.collect()) 118 | 119 | 120 | @pytest.mark.online 121 | def test_pyproject_source_fix(req_file): 122 | source = _init_pyproject( 123 | req_file(), 124 | """ 125 | [project] 126 | dependencies = [ 127 | "flask==0.5" 128 | ] 129 | """, 130 | ) 131 | fix = ResolvedFixVersion( 132 | dep=ResolvedDependency(name="flask", version=Version("0.5")), 133 | version=Version("1.0"), 134 | ) 135 | source.fix(fix) 136 | _check_file(source.filename, {"project": {"dependencies": ["flask==1.0"]}}) 137 | 138 | 139 | def test_pyproject_source_fix_no_project_section(req_file): 140 | source = _init_pyproject( 141 | req_file(), 142 | """ 143 | [some_other_section] 144 | dependencies = [ 145 | "flask==2.0.1" 146 | ] 147 | """, 148 | ) 149 | fix = ResolvedFixVersion( 150 | dep=ResolvedDependency(name="flask", version=Version("0.5")), 151 | version=Version("1.0"), 152 | ) 153 | with pytest.raises(DependencyFixError): 154 | source.fix(fix) 155 | 156 | 157 | def test_pyproject_source_fix_no_deps(monkeypatch, req_file): 158 | logger = pretend.stub(warning=pretend.call_recorder(lambda s: None)) 159 | monkeypatch.setattr(pyproject, "logger", logger) 160 | 161 | source = _init_pyproject( 162 | req_file(), 163 | """ 164 | [project] 165 | """, 166 | ) 167 | fix = ResolvedFixVersion( 168 | dep=ResolvedDependency(name="flask", version=Version("0.5")), 169 | version=Version("1.0"), 170 | ) 171 | source.fix(fix) 172 | 173 | # We log a warning when we find a `pyproject.toml` file with no dependencies 174 | assert len(logger.warning.calls) == 1 175 | _check_file(source.filename, {"project": {}}) 176 | -------------------------------------------------------------------------------- /pip_audit/_service/pypi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functionality for using the [PyPI](https://warehouse.pypa.io/api-reference/json.html) 3 | API as a `VulnerabilityService`. 4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | import logging 9 | from pathlib import Path 10 | from typing import cast 11 | 12 | import requests 13 | from packaging.version import InvalidVersion, Version 14 | 15 | from pip_audit._cache import caching_session 16 | from pip_audit._service.interface import ( 17 | ConnectionError, 18 | Dependency, 19 | ResolvedDependency, 20 | ServiceError, 21 | SkippedDependency, 22 | VulnerabilityResult, 23 | VulnerabilityService, 24 | ) 25 | 26 | logger = logging.getLogger(__name__) 27 | 28 | 29 | class PyPIService(VulnerabilityService): 30 | """ 31 | An implementation of `VulnerabilityService` that uses PyPI to provide Python 32 | package vulnerability information. 
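
    A minimal usage sketch (illustrative; performs a live query against PyPI):

        service = PyPIService()
        dep, vulns = service.query(ResolvedDependency("flask", Version("2.0.1")))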
33 | """ 34 | 35 | def __init__( 36 | self, cache_dir: Path | None = None, timeout: int | None = None, **kwargs: dict 37 | ) -> None: 38 | """ 39 | Create a new `PyPIService`. 40 | 41 | `cache_dir` is an optional cache directory to use, for caching and reusing PyPI API 42 | requests. If `None`, `pip-audit` will attempt to use `pip`'s cache directory before falling 43 | back on its own default cache directory. 44 | 45 | `timeout` is an optional argument to control how many seconds the component should wait for 46 | responses to network requests. 47 | """ 48 | self.session = caching_session(cache_dir) 49 | self.timeout = timeout 50 | 51 | def query(self, spec: Dependency) -> tuple[Dependency, list[VulnerabilityResult]]: 52 | """ 53 | Queries PyPI for the given `Dependency` specification. 54 | 55 | See `VulnerabilityService.query`. 56 | """ 57 | if spec.is_skipped(): 58 | return spec, [] 59 | spec = cast(ResolvedDependency, spec) 60 | 61 | url = f"https://pypi.org/pypi/{spec.canonical_name}/{str(spec.version)}/json" 62 | 63 | try: 64 | response: requests.Response = self.session.get(url=url, timeout=self.timeout) 65 | response.raise_for_status() 66 | except requests.TooManyRedirects: 67 | # This should never happen with a healthy PyPI instance, but might 68 | # happen during an outage or network event. 69 | # Ref 2022-06-10: https://status.python.org/incidents/lgpr13fy71bk 70 | raise ConnectionError("PyPI is not redirecting properly") 71 | except requests.ConnectTimeout: 72 | # Apart from a normal network outage, this can happen for two main 73 | # reasons: 74 | # 1. PyPI's APIs are offline 75 | # 2. The user is behind a firewall or corporate network that blocks 76 | # PyPI (and they're probably using custom indices) 77 | raise ConnectionError("Could not connect to PyPI's vulnerability feed") 78 | except requests.HTTPError as http_error: 79 | if response.status_code == 404: 80 | skip_reason = ( 81 | "Dependency not found on PyPI and could not be audited: " 82 | f"{spec.canonical_name} ({spec.version})" 83 | ) 84 | logger.debug(skip_reason) 85 | return SkippedDependency(name=spec.name, skip_reason=skip_reason), [] 86 | raise ServiceError from http_error 87 | 88 | response_json = response.json() 89 | results: list[VulnerabilityResult] = [] 90 | vulns = response_json.get("vulnerabilities") 91 | 92 | # No `vulnerabilities` key means that there are no vulnerabilities for any version 93 | if vulns is None: 94 | return spec, results 95 | 96 | for v in vulns: 97 | id = v["id"] 98 | 99 | # If the vulnerability has been withdrawn, we skip it entirely. 100 | withdrawn_at = v.get("withdrawn") 101 | if withdrawn_at is not None: 102 | logger.debug(f"PyPI vuln entry '{id}' marked as withdrawn at {withdrawn_at}") 103 | continue 104 | 105 | # Put together the fix versions list 106 | try: 107 | fix_versions = [Version(fixed_in) for fixed_in in v["fixed_in"]] 108 | except InvalidVersion as iv: 109 | raise ServiceError(f"Received malformed version from PyPI: {v['fixed_in']}") from iv 110 | 111 | # The ranges aren't guaranteed to come in chronological order 112 | fix_versions.sort() 113 | 114 | description = v.get("summary") 115 | if description is None: 116 | description = v.get("details") 117 | 118 | if description is None: 119 | description = "N/A" 120 | 121 | # The "summary" field should be a single line, but "details" might 122 | # be multiple (Markdown-formatted) lines. So, we normalize our 123 | # description into a single line (and potentially break the Markdown 124 | # formatting in the process). 
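            # (Illustrative: "Fixes a crash.\nSee the upstream advisory." becomes
            # "Fixes a crash. See the upstream advisory.")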
125 | description = description.replace("\n", " ") 126 | 127 | results.append( 128 | VulnerabilityResult.create( 129 | ids=[id, *v["aliases"]], 130 | description=description, 131 | fix_versions=fix_versions, 132 | published=self._parse_rfc3339(v.get("published")), 133 | ) 134 | ) 135 | 136 | return spec, results 137 | -------------------------------------------------------------------------------- /pip_audit/_format/markdown.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functionality for formatting vulnerability results as a Markdown table. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | from textwrap import dedent 8 | from typing import cast 9 | 10 | from packaging.version import Version 11 | 12 | import pip_audit._fix as fix 13 | import pip_audit._service as service 14 | 15 | from .interface import VulnerabilityFormat 16 | 17 | 18 | class MarkdownFormat(VulnerabilityFormat): 19 | """ 20 | An implementation of `VulnerabilityFormat` that formats vulnerability results as a set of 21 | Markdown tables. 22 | """ 23 | 24 | def __init__(self, output_desc: bool, output_aliases: bool) -> None: 25 | """ 26 | Create a new `MarkdownFormat`. 27 | 28 | `output_desc` is a flag to determine whether descriptions for each vulnerability should be 29 | included in the output as they can be quite long and make the output difficult to read. 30 | 31 | `output_aliases` is a flag to determine whether aliases (such as CVEs) for each 32 | vulnerability should be included in the output. 33 | """ 34 | self.output_desc = output_desc 35 | self.output_aliases = output_aliases 36 | 37 | @property 38 | def is_manifest(self) -> bool: 39 | """ 40 | See `VulnerabilityFormat.is_manifest`. 41 | """ 42 | return False 43 | 44 | def format( 45 | self, 46 | result: dict[service.Dependency, list[service.VulnerabilityResult]], 47 | fixes: list[fix.FixVersion], 48 | ) -> str: 49 | """ 50 | Returns a Markdown formatted string representing a set of vulnerability results and applied 51 | fixes. 52 | """ 53 | output = self._format_vuln_results(result, fixes) 54 | skipped_deps_output = self._format_skipped_deps(result) 55 | if skipped_deps_output: 56 | # If we wrote the results table already, we need to add some line breaks to ensure that 57 | # the skipped dependency table renders correctly. 
58 | if output: 59 | output += "\n" 60 | output += skipped_deps_output 61 | return output 62 | 63 | def _format_vuln_results( 64 | self, 65 | result: dict[service.Dependency, list[service.VulnerabilityResult]], 66 | fixes: list[fix.FixVersion], 67 | ) -> str: 68 | header = "Name | Version | ID | Fix Versions" 69 | border = "--- | --- | --- | ---" 70 | if fixes: 71 | header += " | Applied Fix" 72 | border += " | ---" 73 | if self.output_aliases: 74 | header += " | Aliases" 75 | border += " | ---" 76 | if self.output_desc: 77 | header += " | Description" 78 | border += " | ---" 79 | 80 | vuln_rows: list[str] = [] 81 | for dep, vulns in result.items(): 82 | if dep.is_skipped(): 83 | continue 84 | dep = cast(service.ResolvedDependency, dep) 85 | applied_fix = next((f for f in fixes if f.dep == dep), None) 86 | for vuln in vulns: 87 | vuln_rows.append(self._format_vuln(dep, vuln, applied_fix)) 88 | 89 | if not vuln_rows: 90 | return "" 91 | 92 | return dedent( 93 | f""" 94 | {header} 95 | {border} 96 | """ 97 | ) + "\n".join(vuln_rows) 98 | 99 | def _format_vuln( 100 | self, 101 | dep: service.ResolvedDependency, 102 | vuln: service.VulnerabilityResult, 103 | applied_fix: fix.FixVersion | None, 104 | ) -> str: 105 | vuln_text = ( 106 | f"{dep.canonical_name} | {dep.version} | {vuln.id} | " 107 | f"{self._format_fix_versions(vuln.fix_versions)}" 108 | ) 109 | if applied_fix is not None: 110 | vuln_text += f" | {self._format_applied_fix(applied_fix)}" 111 | if self.output_aliases: 112 | vuln_text += f" | {', '.join(vuln.aliases)}" 113 | if self.output_desc: 114 | vuln_text += f" | {vuln.description}" 115 | return vuln_text 116 | 117 | def _format_fix_versions(self, fix_versions: list[Version]) -> str: 118 | return ",".join([str(version) for version in fix_versions]) 119 | 120 | def _format_applied_fix(self, applied_fix: fix.FixVersion) -> str: 121 | if applied_fix.is_skipped(): 122 | applied_fix = cast(fix.SkippedFixVersion, applied_fix) 123 | return ( 124 | f"Failed to fix {applied_fix.dep.canonical_name} ({applied_fix.dep.version}): " 125 | f"{applied_fix.skip_reason}" 126 | ) 127 | applied_fix = cast(fix.ResolvedFixVersion, applied_fix) 128 | return ( 129 | f"Successfully upgraded {applied_fix.dep.canonical_name} ({applied_fix.dep.version} " 130 | f"=> {applied_fix.version})" 131 | ) 132 | 133 | def _format_skipped_deps( 134 | self, result: dict[service.Dependency, list[service.VulnerabilityResult]] 135 | ) -> str: 136 | header = "Name | Skip Reason" 137 | border = "--- | ---" 138 | 139 | skipped_dep_rows: list[str] = [] 140 | for dep, _ in result.items(): 141 | if dep.is_skipped(): 142 | dep = cast(service.SkippedDependency, dep) 143 | skipped_dep_rows.append(self._format_skipped_dep(dep)) 144 | 145 | if not skipped_dep_rows: 146 | return "" 147 | 148 | return dedent( 149 | f""" 150 | {header} 151 | {border} 152 | """ 153 | ) + "\n".join(skipped_dep_rows) 154 | 155 | def _format_skipped_dep(self, dep: service.SkippedDependency) -> str: 156 | return f"{dep.name} | {dep.skip_reason}" 157 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Contributing to pip-audit 2 | ========================= 3 | 4 | Thank you for your interest in contributing to `pip-audit`! 5 | 6 | The information below will help you set up a local development environment, 7 | as well as performing common development tasks. 
8 | 
9 | ## Requirements
10 | 
11 | `pip-audit`'s only development environment requirement *should* be Python 3.9
12 | or newer. Development and testing are actively performed on macOS and Linux,
13 | but Windows and other platforms supported by Python
14 | should also work.
15 | 
16 | If you're on a system that has GNU Make, you can use the convenience targets
17 | included in the `pip-audit` repository's `Makefile`, detailed
18 | below. But this isn't required; all steps can be done without Make.
19 | 
20 | ## Development steps
21 | 
22 | First, clone this repository:
23 | 
24 | ```bash
25 | git clone https://github.com/pypa/pip-audit
26 | cd pip-audit
27 | ```
28 | 
29 | Then, use one of the `Makefile` targets to run a task. The first time a target is
30 | run, it will also set up the local development virtual environment and
31 | install `pip-audit` as an editable package into that environment.
32 | 
33 | Any changes you make to the `pip_audit` source tree will take effect
34 | immediately in the virtual environment.
35 | 
36 | ### Linting
37 | 
38 | You can lint locally with:
39 | 
40 | ```bash
41 | make lint
42 | ```
43 | 
44 | `pip-audit` is automatically linted and formatted with a collection of tools:
45 | 
46 | * [`ruff`](https://github.com/charliermarsh/ruff): Formatting, PEP-8 linting, style enforcement
47 | * [`mypy`](https://mypy.readthedocs.io/en/stable/): Static type checking
48 | * [`interrogate`](https://interrogate.readthedocs.io/en/latest/): Documentation coverage
49 | 
50 | To automatically apply any lint-suggested changes, you can run:
51 | 
52 | ```bash
53 | make reformat
54 | ```
55 | 
56 | 
57 | ### Testing
58 | 
59 | You can run the tests locally with:
60 | 
61 | ```bash
62 | make test
63 | ```
64 | 
65 | You can also filter by a pattern (uses `pytest -k`):
66 | 
67 | ```bash
68 | make test TESTS=test_audit_dry_run
69 | ```
70 | 
71 | To test a specific file:
72 | 
73 | ```bash
74 | make test T=path/to/file.py
75 | ```
76 | 
77 | `pip-audit` has a [`pytest`](https://docs.pytest.org/)-based unit test suite,
78 | including code coverage with [`coverage.py`](https://coverage.readthedocs.io/).
79 | 
80 | ### Documentation
81 | 
82 | You can run the documentation build locally:
83 | 
84 | ```bash
85 | make doc
86 | ```
87 | 
88 | `pip-audit` uses [`pdoc3`](https://github.com/pdoc3/pdoc) to generate HTML documentation for
89 | the public Python APIs.
90 | 
91 | Live documentation for the `main` branch is hosted
92 | [here](https://pypa.github.io/pip-audit/). Only the public APIs are
93 | documented; all undocumented APIs are **intentionally private and unstable.**
94 | 
95 | ### Releasing
96 | 
97 | **NOTE**: If you're a non-maintaining contributor, you don't need the steps
98 | here! They're documented for completeness and for onboarding future maintainers.
99 | 
100 | Releases of `pip-audit` are managed with [`bump`](https://github.com/di/bump)
101 | and GitHub Actions.
102 | 
103 | The following manual steps are required:
104 | 
105 | 1. Create a new development branch for the release. For example:
106 | 
107 | ```console
108 | $ git checkout -b prepare-1.0.0
109 | ```
110 | 
111 | 1. Update `pip-audit`'s `__version__` attribute. It can be found under `pip_audit/__init__.py`.
112 | 
113 | **Note**: You can do this automatically with `bump`:
114 | 
115 | ```console
116 | # See bump --help for all options
117 | $ bump --major
118 | ```
119 | 
120 | 1. Commit your changes to the branch and create a new Pull Request.
121 | 
122 | 1. Tag another maintainer for review.
Once approved, you may merge your PR. 123 | 124 | 1. Create a new tag corresponding to the merged version change. For example: 125 | 126 | ```console 127 | # IMPORTANT: don't forget the `v` prefix! 128 | $ git tag v1.0.0 129 | ``` 130 | 131 | 1. Push the new tag: 132 | 133 | ```console 134 | $ git push origin v1.0.0 135 | ``` 136 | 137 | 1. Use the [releases page](https://github.com/pypa/pip-audit/releases) to 138 | create a new release, marking it as a "pre-release" if appropriate. 139 | 140 | 1. Copy the relevant 141 | [CHANGELOG](https://github.com/pypa/pip-audit/blob/main/CHANGELOG.md) 142 | entries into the release notes. 143 | 144 | 1. Save and publish the release. The CI will take care of all other tasks. 145 | 146 | 147 | 148 | ## Development practices 149 | 150 | Here are some guidelines to follow if you're working on a new feature or changes to 151 | `pip-audit`'s internal APIs: 152 | 153 | * *Keep the `pip-audit` APIs as private as possible*. Nearly all of `pip-audit`'s 154 | APIs should be private and treated as unstable and unsuitable for public use. 155 | If you're adding a new module to the source tree, prefix the filename with an underscore to 156 | emphasize that it's an internal (e.g., `pip_audit/_foo.py` instead of `pip_audit/foo.py`). 157 | 158 | * *Keep the CLI consistent with `pip`*. `pip-audit`'s CLI should *roughly* mirror that 159 | of `pip`. If you're adding a new flag or option to the CLI, check whether `pip` already 160 | has the same functionality (e.g., HTTP timeout control) and use the same short and long mnemonics. 161 | 162 | * *Perform judicious debug logging.* `pip-audit` uses the standard Python 163 | [`logging`](https://docs.python.org/3/library/logging.html) module. Use 164 | `logger.debug` early and often -- users who experience errors can submit better 165 | bug reports when their debug logs include helpful context! 166 | 167 | * *Update the [CHANGELOG](./CHANGELOG.md)*. If your changes are public or result 168 | in changes to `pip-audit`'s CLI, please record them under the "Unreleased" section, 169 | with an entry in an appropriate subsection ("Added", "Changed", "Removed", or "Fixed"). 170 | -------------------------------------------------------------------------------- /pip_audit/_service/esms.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functionality for using the [Ecosyste.ms](https://ecosyste.ms/) API as a `VulnerabilityService`. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import logging 8 | import re 9 | from pathlib import Path 10 | from typing import Any, cast 11 | from urllib.parse import urlencode 12 | 13 | import requests 14 | from packaging.specifiers import SpecifierSet 15 | from packaging.version import Version 16 | 17 | from pip_audit._cache import caching_session 18 | from pip_audit._service.interface import ( 19 | ConnectionError, 20 | Dependency, 21 | ResolvedDependency, 22 | ServiceError, 23 | VulnerabilityID, 24 | VulnerabilityResult, 25 | VulnerabilityService, 26 | ) 27 | 28 | logger = logging.getLogger(__name__) 29 | 30 | 31 | class EcosystemsService(VulnerabilityService): 32 | """ 33 | An implementation of `VulnerabilityService` that uses Ecosyste.ms to provide Python 34 | package vulnerability information. 35 | """ 36 | 37 | def __init__( 38 | self, 39 | cache_dir: Path | None = None, 40 | timeout: int | None = None, 41 | ): 42 | """ 43 | Create a new `EcosystemsService`. 
44 | 45 | `cache_dir` is an optional cache directory to use, for caching and reusing OSV API 46 | requests. If `None`, `pip-audit` will use its own internal caching directory. 47 | 48 | `timeout` is an optional argument to control how many seconds the component should wait for 49 | responses to network requests. 50 | """ 51 | self.session = caching_session(cache_dir, use_pip=False) 52 | self.timeout = timeout 53 | 54 | def query(self, spec: Dependency) -> tuple[Dependency, list[VulnerabilityResult]]: 55 | """ 56 | Queries Ecosyste.ms for the given `Dependency` specification. 57 | 58 | See `VulnerabilityService.query`. 59 | """ 60 | url = "https://advisories.ecosyste.ms/api/v1/advisories" 61 | 62 | if spec.is_skipped(): 63 | return spec, [] 64 | spec = cast(ResolvedDependency, spec) 65 | 66 | query = { 67 | "ecosystem": "pypi", 68 | "package_name": spec.canonical_name, 69 | } 70 | 71 | try: 72 | response: requests.Response = self.session.get( 73 | f"{url}?{urlencode(query)}", 74 | timeout=self.timeout, 75 | ) 76 | response.raise_for_status() 77 | except requests.ConnectTimeout: 78 | raise ConnectionError("Could not connect to ESMS' vulnerability feed") 79 | except requests.HTTPError as http_error: 80 | raise ServiceError from http_error 81 | 82 | # If the response is empty, that means that the package/version pair doesn't have any 83 | # associated vulnerabilities 84 | # 85 | # In that case, return an empty list 86 | results: list[VulnerabilityResult] = [] 87 | response_json = response.json() 88 | if not response_json: 89 | return spec, results 90 | 91 | vuln: dict[str, Any] 92 | for vuln in response_json: 93 | # Get the IDs, prioritising PYSEC and CVE. 94 | ids: list[VulnerabilityID] = vuln["identifiers"] 95 | 96 | # If the vulnerability has been withdrawn, we skip it entirely. 97 | withdrawn_at = vuln["withdrawn_at"] 98 | if withdrawn_at is not None: 99 | logger.debug(f"ESMS vuln entry '{ids[0]}' marked as withdrawn at {withdrawn_at}") 100 | continue 101 | 102 | # The title is intended to be shorter, so we prefer it over 103 | # description, if present. The Ecosyste.ms advisory metadata states that 104 | # these fields *should* always be of type `str`; we are being defensive 105 | # here and checking if the strings are empty. 106 | description = vuln["title"] 107 | if not description: 108 | description = vuln["description"] 109 | if not description: 110 | description = "N/A" 111 | 112 | # The "title" field should be a single line, but "description" might 113 | # be multiple (Markdown-formatted) lines. So, we normalize our 114 | # description into a single line (and potentially break the Markdown 115 | # formatting in the process). 116 | description = description.replace("\n", " ") 117 | 118 | seen_vulnerable = False 119 | fix_versions: set[Version] = set() 120 | for affected in vuln["packages"]: 121 | # We only care about PyPI versions. 122 | if ( 123 | affected["package_name"] != spec.canonical_name 124 | or affected["ecosystem"] != "pypi" 125 | ): 126 | continue 127 | 128 | for record in affected["versions"]: 129 | # Very silly: OSV version specs use single `=` for exact matches, while PEP 440 130 | # requires double `==`. All OSV operators have equivalent semantics to their 131 | # PEP 440 counterparts, so we do some gross regex munging here to accommodate for 132 | # the syntactical difference. 
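# Illustrative examples (added commentary, not part of the upstream source) of the
# rewrite performed by the `re.sub` call below:
#
#   "= 1.2.3"         -> "== 1.2.3"
#   "< 2.0, = 2.0.1"  -> "< 2.0, == 2.0.1"
#   ">= 1.0, < 2.0"   -> ">= 1.0, < 2.0"   (unchanged: `>=`/`<=` are already valid PEP 440)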
133 | osv_spec: str = record["vulnerable_version_range"] 134 | vulnerable = SpecifierSet(re.sub(r"(^|(, ))=", r"\1==", osv_spec)) 135 | if not vulnerable.contains(spec.version): 136 | continue 137 | 138 | seen_vulnerable = True 139 | if (patched := record.get("first_patched_version")) is not None: 140 | fix_versions.add(Version(patched)) 141 | break 142 | 143 | if not seen_vulnerable: 144 | continue 145 | 146 | results.append( 147 | VulnerabilityResult.create( 148 | ids=ids, 149 | description=description, 150 | fix_versions=sorted(fix_versions), 151 | published=self._parse_rfc3339(vuln.get("published")), 152 | ) 153 | ) 154 | 155 | return spec, results 156 | -------------------------------------------------------------------------------- /pip_audit/_dependency_source/pyproject.py: -------------------------------------------------------------------------------- 1 | """ 2 | Collect dependencies from `pyproject.toml` files. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import logging 8 | import os 9 | from collections.abc import Iterator 10 | from pathlib import Path 11 | from tempfile import NamedTemporaryFile, TemporaryDirectory 12 | 13 | import tomli 14 | import tomli_w 15 | from packaging.requirements import Requirement 16 | from packaging.specifiers import SpecifierSet 17 | 18 | from pip_audit._dependency_source import ( 19 | DependencyFixError, 20 | DependencySource, 21 | DependencySourceError, 22 | ) 23 | from pip_audit._fix import ResolvedFixVersion 24 | from pip_audit._service import Dependency, ResolvedDependency 25 | from pip_audit._state import AuditState 26 | from pip_audit._virtual_env import VirtualEnv, VirtualEnvError 27 | 28 | logger = logging.getLogger(__name__) 29 | 30 | 31 | class PyProjectSource(DependencySource): 32 | """ 33 | Wraps `pyproject.toml` dependency resolution as a dependency source. 34 | """ 35 | 36 | def __init__( 37 | self, 38 | filename: Path, 39 | index_url: str | None = None, 40 | extra_index_urls: list[str] = [], 41 | state: AuditState = AuditState(), 42 | ) -> None: 43 | """ 44 | Create a new `PyProjectSource`. 45 | 46 | `filename` provides a path to a `pyproject.toml` file 47 | 48 | `index_url` is the base URL of the package index. 49 | 50 | `extra_index_urls` are the extra URLs of package indexes. 51 | 52 | `state` is an `AuditState` to use for state callbacks. 53 | """ 54 | self.filename = filename 55 | self.state = state 56 | 57 | def collect(self) -> Iterator[Dependency]: 58 | """ 59 | Collect all of the dependencies discovered by this `PyProjectSource`. 60 | 61 | Raises a `PyProjectSourceError` on any errors. 62 | """ 63 | 64 | with self.filename.open("rb") as f: 65 | pyproject_data = tomli.load(f) 66 | 67 | project = pyproject_data.get("project") 68 | if project is None: 69 | raise PyProjectSourceError( 70 | f"pyproject file {self.filename} does not contain `project` section" 71 | ) 72 | 73 | deps = project.get("dependencies") 74 | if deps is None: 75 | # Projects without dependencies aren't an error case 76 | logger.warning( 77 | f"pyproject file {self.filename} does not contain `dependencies` list" 78 | ) 79 | return 80 | 81 | # NOTE(alex): This is probably due for a redesign. Since we're leaning on `pip` for 82 | # dependency resolution now, we can think about doing `pip install ` 83 | # regardless of whether the project has a `pyproject.toml` or not. And if it doesn't 84 | # have a `pyproject.toml`, we can raise an error if the user provides `--fix`. 
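# Added commentary (not part of the upstream source): the block below performs the
# actual resolution. Given a `pyproject.toml` containing something like
#
#   [project]
#   dependencies = ["requests>=2.0", "packaging"]
#
# (hypothetical entries, for illustration only), the declared requirements are written
# to a temporary requirements file, installed into a throwaway virtual environment,
# and the fully resolved (name, version) pairs reported by that environment are
# yielded as `ResolvedDependency` objects.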
85 | with ( 86 | TemporaryDirectory() as ve_dir, 87 | NamedTemporaryFile(dir=ve_dir, delete=False) as req_file, 88 | ): 89 | # We use delete=False in creating the tempfile to allow it to be 90 | # closed and opened multiple times within the context scope on 91 | # windows, see GitHub issue #646. 92 | 93 | # Write the dependencies to a temporary requirements file. 94 | req_file.write(os.linesep.join(deps).encode()) 95 | req_file.flush() 96 | 97 | # Try to install the generated requirements file. 98 | ve = VirtualEnv(install_args=["-r", req_file.name], state=self.state) 99 | try: 100 | ve.create(ve_dir) 101 | except VirtualEnvError as exc: 102 | raise PyProjectSourceError(str(exc)) from exc 103 | 104 | # Now query the installed packages. 105 | for name, version in ve.installed_packages: 106 | yield ResolvedDependency(name=name, version=version) 107 | 108 | def fix(self, fix_version: ResolvedFixVersion) -> None: 109 | """ 110 | Fixes a dependency version for this `PyProjectSource`. 111 | """ 112 | 113 | with self.filename.open("rb+") as f, NamedTemporaryFile(mode="rb+", delete=False) as tmp: 114 | pyproject_data = tomli.load(f) 115 | 116 | project = pyproject_data.get("project") 117 | if project is None: 118 | raise PyProjectFixError( 119 | f"pyproject file {self.filename} does not contain `project` section" 120 | ) 121 | 122 | deps = project.get("dependencies") 123 | if deps is None: 124 | # Projects without dependencies aren't an error case 125 | logger.warning( 126 | f"pyproject file {self.filename} does not contain `dependencies` list" 127 | ) 128 | return 129 | 130 | reqs = [Requirement(dep) for dep in deps] 131 | for i in range(len(reqs)): 132 | # When we find a requirement that matches the provided fix version, we need to edit 133 | # the requirement's specifier and then write it back to the underlying TOML data. 134 | req = reqs[i] 135 | if ( 136 | req.name == fix_version.dep.name 137 | and req.specifier.contains(fix_version.dep.version) 138 | and not req.specifier.contains(fix_version.version) 139 | ): 140 | req.specifier = SpecifierSet(f"=={fix_version.version}") 141 | deps[i] = str(req) 142 | assert req.marker is None or req.marker.evaluate() 143 | 144 | # Now dump the new edited TOML to the temporary file. 145 | tomli_w.dump(pyproject_data, tmp) 146 | 147 | # And replace the original `pyproject.toml` file. 148 | os.replace(tmp.name, self.filename) 149 | 150 | 151 | class PyProjectSourceError(DependencySourceError): 152 | """A `pyproject.toml` specific `DependencySourceError`.""" 153 | 154 | pass 155 | 156 | 157 | class PyProjectFixError(DependencyFixError): 158 | """A `pyproject.toml` specific `DependencyFixError`.""" 159 | 160 | pass 161 | -------------------------------------------------------------------------------- /pip_audit/_format/columns.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functionality for formatting vulnerability results as a set of human-readable columns. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | from collections.abc import Iterable 8 | from itertools import zip_longest 9 | from typing import Any, cast 10 | 11 | from packaging.version import Version 12 | 13 | import pip_audit._fix as fix 14 | import pip_audit._service as service 15 | 16 | from .interface import VulnerabilityFormat 17 | 18 | 19 | def tabulate(rows: Iterable[Iterable[Any]]) -> tuple[list[str], list[int]]: 20 | """Return a list of formatted rows and a list of column sizes. 
21 | For example:: 22 | >>> tabulate([['foobar', 2000], [0xdeadbeef]]) 23 | (['foobar 2000', '3735928559'], [10, 4]) 24 | """ 25 | rows = [tuple(map(str, row)) for row in rows] 26 | sizes = [max(map(len, col)) for col in zip_longest(*rows, fillvalue="")] 27 | table = [" ".join(map(str.ljust, row, sizes)).rstrip() for row in rows] 28 | return table, sizes 29 | 30 | 31 | class ColumnsFormat(VulnerabilityFormat): 32 | """ 33 | An implementation of `VulnerabilityFormat` that formats vulnerability results as a set of 34 | columns. 35 | """ 36 | 37 | def __init__(self, output_desc: bool, output_aliases: bool): 38 | """ 39 | Create a new `ColumnFormat`. 40 | 41 | `output_desc` is a flag to determine whether descriptions for each vulnerability should be 42 | included in the output as they can be quite long and make the output difficult to read. 43 | 44 | `output_aliases` is a flag to determine whether aliases (such as CVEs) for each 45 | vulnerability should be included in the output. 46 | """ 47 | self.output_desc = output_desc 48 | self.output_aliases = output_aliases 49 | 50 | @property 51 | def is_manifest(self) -> bool: 52 | """ 53 | See `VulnerabilityFormat.is_manifest`. 54 | """ 55 | return False 56 | 57 | def format( 58 | self, 59 | result: dict[service.Dependency, list[service.VulnerabilityResult]], 60 | fixes: list[fix.FixVersion], 61 | ) -> str: 62 | """ 63 | Returns a column formatted string for a given mapping of dependencies to vulnerability 64 | results. 65 | 66 | See `VulnerabilityFormat.format`. 67 | """ 68 | vuln_data: list[list[Any]] = [] 69 | header = ["Name", "Version", "ID", "Fix Versions"] 70 | if fixes: 71 | header.append("Applied Fix") 72 | if self.output_aliases: 73 | header.append("Aliases") 74 | if self.output_desc: 75 | header.append("Description") 76 | vuln_data.append(header) 77 | for dep, vulns in result.items(): 78 | if dep.is_skipped(): 79 | continue 80 | dep = cast(service.ResolvedDependency, dep) 81 | applied_fix = next((f for f in fixes if f.dep == dep), None) 82 | for vuln in vulns: 83 | vuln_data.append(self._format_vuln(dep, vuln, applied_fix)) 84 | 85 | columns_string = "" 86 | 87 | # If it's just a header, don't bother adding it to the output 88 | if len(vuln_data) > 1: 89 | vuln_strings, sizes = tabulate(vuln_data) 90 | 91 | # Create and add a separator. 
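# Added note (not part of the upstream source): the separator row mirrors the
# computed column widths, e.g. with `sizes == [10, 4]` (as in the `tabulate`
# docstring above) the inserted row is "---------- ----".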
92 | if len(vuln_data) > 0: 93 | vuln_strings.insert(1, " ".join(map(lambda x: "-" * x, sizes))) 94 | 95 | for row in vuln_strings: 96 | if columns_string: 97 | columns_string += "\n" 98 | columns_string += row 99 | 100 | # Now display the skipped dependencies 101 | skip_data: list[list[Any]] = [] 102 | skip_header = ["Name", "Skip Reason"] 103 | 104 | skip_data.append(skip_header) 105 | for dep, _ in result.items(): 106 | if dep.is_skipped(): 107 | dep = cast(service.SkippedDependency, dep) 108 | skip_data.append(self._format_skipped_dep(dep)) 109 | 110 | # If we only have the header, that means that we haven't skipped any dependencies 111 | # In that case, don't bother printing the header 112 | if len(skip_data) <= 1: 113 | return columns_string 114 | 115 | skip_strings, sizes = tabulate(skip_data) 116 | 117 | # Create separator for skipped dependencies columns 118 | skip_strings.insert(1, " ".join(map(lambda x: "-" * x, sizes))) 119 | 120 | for row in skip_strings: 121 | if columns_string: 122 | columns_string += "\n" 123 | columns_string += row 124 | 125 | return columns_string 126 | 127 | def _format_vuln( 128 | self, 129 | dep: service.ResolvedDependency, 130 | vuln: service.VulnerabilityResult, 131 | applied_fix: fix.FixVersion | None, 132 | ) -> list[Any]: 133 | vuln_data = [ 134 | dep.canonical_name, 135 | dep.version, 136 | vuln.id, 137 | self._format_fix_versions(vuln.fix_versions), 138 | ] 139 | if applied_fix is not None: 140 | vuln_data.append(self._format_applied_fix(applied_fix)) 141 | if self.output_aliases: 142 | vuln_data.append(", ".join(vuln.aliases)) 143 | if self.output_desc: 144 | vuln_data.append(vuln.description) 145 | return vuln_data 146 | 147 | def _format_fix_versions(self, fix_versions: list[Version]) -> str: 148 | return ",".join([str(version) for version in fix_versions]) 149 | 150 | def _format_skipped_dep(self, dep: service.SkippedDependency) -> list[Any]: 151 | return [ 152 | dep.canonical_name, 153 | dep.skip_reason, 154 | ] 155 | 156 | def _format_applied_fix(self, applied_fix: fix.FixVersion) -> str: 157 | if applied_fix.is_skipped(): 158 | applied_fix = cast(fix.SkippedFixVersion, applied_fix) 159 | return ( 160 | f"Failed to fix {applied_fix.dep.canonical_name} ({applied_fix.dep.version}): " 161 | f"{applied_fix.skip_reason}" 162 | ) 163 | applied_fix = cast(fix.ResolvedFixVersion, applied_fix) 164 | return ( 165 | f"Successfully upgraded {applied_fix.dep.canonical_name} ({applied_fix.dep.version} " 166 | f"=> {applied_fix.version})" 167 | ) 168 | -------------------------------------------------------------------------------- /pip_audit/_service/osv.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functionality for using the [OSV](https://osv.dev/) API as a `VulnerabilityService`. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import json 8 | import logging 9 | from pathlib import Path 10 | from typing import Any, cast 11 | 12 | import requests 13 | from packaging.version import Version 14 | 15 | from pip_audit._cache import caching_session 16 | from pip_audit._service.interface import ( 17 | ConnectionError, 18 | Dependency, 19 | ResolvedDependency, 20 | ServiceError, 21 | VulnerabilityResult, 22 | VulnerabilityService, 23 | ) 24 | 25 | logger = logging.getLogger(__name__) 26 | 27 | 28 | class OsvService(VulnerabilityService): 29 | """ 30 | An implementation of `VulnerabilityService` that uses OSV to provide Python 31 | package vulnerability information. 
32 | """ 33 | 34 | DEFAULT_OSV_URL = "https://api.osv.dev/v1/query" 35 | 36 | def __init__( 37 | self, 38 | cache_dir: Path | None = None, 39 | timeout: int | None = None, 40 | osv_url: str = DEFAULT_OSV_URL, 41 | ): 42 | """ 43 | Create a new `OsvService`. 44 | 45 | `cache_dir` is an optional cache directory to use, for caching and reusing OSV API 46 | requests. If `None`, `pip-audit` will use its own internal caching directory. 47 | 48 | `timeout` is an optional argument to control how many seconds the component should wait for 49 | responses to network requests. 50 | """ 51 | self.session = caching_session(cache_dir, use_pip=False) 52 | self.timeout = timeout 53 | self.osv_url = osv_url 54 | 55 | def query(self, spec: Dependency) -> tuple[Dependency, list[VulnerabilityResult]]: 56 | """ 57 | Queries OSV for the given `Dependency` specification. 58 | 59 | See `VulnerabilityService.query`. 60 | """ 61 | if spec.is_skipped(): 62 | return spec, [] 63 | spec = cast(ResolvedDependency, spec) 64 | 65 | query = { 66 | "package": {"name": spec.canonical_name, "ecosystem": "PyPI"}, 67 | "version": str(spec.version), 68 | } 69 | try: 70 | response: requests.Response = self.session.post( 71 | url=self.osv_url, 72 | data=json.dumps(query), 73 | timeout=self.timeout, 74 | ) 75 | response.raise_for_status() 76 | except requests.ConnectTimeout: 77 | raise ConnectionError("Could not connect to OSV's vulnerability feed") 78 | except requests.HTTPError as http_error: 79 | raise ServiceError from http_error 80 | 81 | # If the response is empty, that means that the package/version pair doesn't have any 82 | # associated vulnerabilities 83 | # 84 | # In that case, return an empty list 85 | results: list[VulnerabilityResult] = [] 86 | response_json = response.json() 87 | if not response_json: 88 | return spec, results 89 | 90 | vuln: dict[str, Any] 91 | for vuln in response_json["vulns"]: 92 | # Sanity check: only the v1 schema is specified at the moment, 93 | # and the code below probably won't work with future incompatible 94 | # schemas without additional changes. 95 | # The absence of a schema is treated as 1.0.0, per the OSV spec. 96 | schema_version = Version(vuln.get("schema_version", "1.0.0")) 97 | if schema_version.major != 1: 98 | logger.warning(f"Unsupported OSV schema version: {schema_version}") 99 | continue 100 | 101 | id = vuln["id"] 102 | 103 | # If the vulnerability has been withdrawn, we skip it entirely. 104 | withdrawn_at = vuln.get("withdrawn") 105 | if withdrawn_at is not None: 106 | logger.debug(f"OSV vuln entry '{id}' marked as withdrawn at {withdrawn_at}") 107 | continue 108 | 109 | # The summary is intended to be shorter, so we prefer it over 110 | # details, if present. However, neither is required. 111 | description = vuln.get("summary") 112 | if description is None: 113 | description = vuln.get("details") 114 | if description is None: 115 | description = "N/A" 116 | 117 | # The "summary" field should be a single line, but "details" might 118 | # be multiple (Markdown-formatted) lines. So, we normalize our 119 | # description into a single line (and potentially break the Markdown 120 | # formatting in the process). 121 | description = description.replace("\n", " ") 122 | 123 | # OSV doesn't mandate this field either. There's very little we 124 | # can do without it, so we skip any results that are missing it. 
125 | affecteds = vuln.get("affected") 126 | if affecteds is None: 127 | logger.warning(f"OSV vuln entry '{id}' is missing 'affected' list") 128 | continue 129 | 130 | fix_versions: list[Version] = [] 131 | for affected in affecteds: 132 | pkg = affected["package"] 133 | # We only care about PyPI versions 134 | if pkg["name"] == spec.canonical_name and pkg["ecosystem"] == "PyPI": 135 | for ranges in affected["ranges"]: 136 | if ranges["type"] == "ECOSYSTEM": 137 | # Filter out non-fix versions 138 | fix_version_strs = [ 139 | version["fixed"] 140 | for version in ranges["events"] 141 | if "fixed" in version 142 | ] 143 | # Convert them to version objects 144 | fix_versions = [ 145 | Version(version_str) for version_str in fix_version_strs 146 | ] 147 | break 148 | 149 | # The ranges aren't guaranteed to come in chronological order 150 | fix_versions.sort() 151 | 152 | results.append( 153 | VulnerabilityResult.create( 154 | ids=[id, *vuln.get("aliases", [])], 155 | description=description, 156 | fix_versions=fix_versions, 157 | published=self._parse_rfc3339(vuln.get("published")), 158 | ) 159 | ) 160 | 161 | return spec, results 162 | -------------------------------------------------------------------------------- /test/dependency_source/test_pip.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import os 4 | import subprocess 5 | import sys 6 | from dataclasses import dataclass 7 | 8 | import pip_api 9 | import pretend # type: ignore 10 | import pytest 11 | from packaging.version import Version 12 | 13 | from pip_audit._dependency_source import pip 14 | from pip_audit._fix import ResolvedFixVersion 15 | from pip_audit._service.interface import ResolvedDependency, SkippedDependency 16 | 17 | 18 | def test_pip_source(): 19 | source = pip.PipSource() 20 | 21 | # We're running under pytest, so we can safely assume that pytest is in 22 | # our execution environment. 23 | pytest_spec = ResolvedDependency(name="pytest", version=Version(pytest.__version__)) 24 | 25 | specs = list(source.collect()) 26 | assert pytest_spec in specs 27 | 28 | 29 | def test_pip_source_warns_about_confused_python(monkeypatch): 30 | monkeypatch.setenv("PIPAPI_PYTHON_LOCATION", "/definitely/fake/path/python") 31 | monkeypatch.setenv("VIRTUAL_ENV", "/definitely/fake/env") 32 | logger = pretend.stub(warning=pretend.call_recorder(lambda s: None)) 33 | monkeypatch.setattr(pip, "logger", logger) 34 | 35 | pip.PipSource() 36 | 37 | assert logger.warning.calls == [ 38 | pretend.call( 39 | "pip-audit will run pip against /definitely/fake/path/python, but you have " 40 | "a virtual environment loaded at /definitely/fake/env. " 41 | "This may result in unintuitive audits, since your local environment will not " 42 | "be audited. You can forcefully override this behavior by setting " 43 | "PIPAPI_PYTHON_LOCATION to the location of your virtual environment's Python " 44 | "interpreter." 45 | ) 46 | ] 47 | 48 | 49 | def test_pip_source_warns_about_old_pip(monkeypatch): 50 | # Rather than hack around with virtualenvs and install a very old pip, 51 | # simply lie about how old ours is. 52 | monkeypatch.setattr(pip, "_PIP_VERSION", Version("1.0.0")) 53 | logger = pretend.stub(warning=pretend.call_recorder(lambda s: None)) 54 | monkeypatch.setattr(pip, "logger", logger) 55 | 56 | pip.PipSource() 57 | assert ( 58 | pretend.call( 59 | "pip 1.0.0 is very old, and may not provide reliable dependency information! 
" 60 | "You are STRONGLY encouraged to upgrade to a newer version of pip." 61 | ) 62 | in logger.warning.calls 63 | ) 64 | 65 | 66 | def test_pip_source_pip_api_failure(monkeypatch): 67 | source = pip.PipSource() 68 | 69 | def explode(): 70 | raise ValueError 71 | 72 | monkeypatch.setattr(pip_api, "installed_distributions", explode) 73 | 74 | with pytest.raises(pip.PipSourceError): 75 | list(source.collect()) 76 | 77 | 78 | def test_pip_source_invalid_version(monkeypatch): 79 | logger = pretend.stub( 80 | debug=pretend.call_recorder(lambda s: None), warning=pretend.call_recorder(lambda s: None) 81 | ) 82 | monkeypatch.setattr(pip, "logger", logger) 83 | 84 | source = pip.PipSource() 85 | 86 | @dataclass(frozen=True) 87 | class MockDistribution: 88 | name: str 89 | version: str 90 | editable: bool = False 91 | 92 | # Return a distribution with a version that doesn't conform to PEP 440. 93 | # We should log a debug message and skip it. 94 | def mock_installed_distributions( 95 | local: bool, paths: list[os.PathLike] 96 | ) -> dict[str, MockDistribution]: 97 | return { 98 | "pytest": MockDistribution("pytest", "0.1"), 99 | "pip-audit": MockDistribution("pip-audit", "1.0-ubuntu0.21.04.1"), 100 | "pip-api": MockDistribution("pip-api", "1.0"), 101 | } 102 | 103 | monkeypatch.setattr(pip_api, "installed_distributions", mock_installed_distributions) 104 | 105 | specs = list(source.collect()) 106 | assert len(logger.debug.calls) == 1 107 | assert len(specs) == 3 108 | assert ResolvedDependency(name="pytest", version=Version("0.1")) in specs 109 | assert ( 110 | SkippedDependency( 111 | name="pip-audit", 112 | skip_reason="Package has invalid version and could not be audited: " 113 | "pip-audit (1.0-ubuntu0.21.04.1)", 114 | ) 115 | in specs 116 | ) 117 | assert ResolvedDependency(name="pip-api", version=Version("1.0")) in specs 118 | 119 | 120 | def test_pip_source_skips_editable(monkeypatch): 121 | source = pip.PipSource(skip_editable=True) 122 | 123 | @dataclass(frozen=True) 124 | class MockDistribution: 125 | name: str 126 | version: str 127 | editable: bool = False 128 | 129 | # Return a distribution with a version that doesn't conform to PEP 440. 130 | # We should log a debug message and skip it. 
131 | def mock_installed_distributions( 132 | local: bool, paths: list[os.PathLike] 133 | ) -> dict[str, MockDistribution]: 134 | return { 135 | "pytest": MockDistribution("pytest", "0.1"), 136 | "pip-audit": MockDistribution("pip-audit", "2.0.0", True), 137 | "pip-api": MockDistribution("pip-api", "1.0"), 138 | } 139 | 140 | monkeypatch.setattr(pip_api, "installed_distributions", mock_installed_distributions) 141 | 142 | specs = list(source.collect()) 143 | assert ResolvedDependency(name="pytest", version=Version("0.1")) in specs 144 | assert ( 145 | SkippedDependency( 146 | name="pip-audit", 147 | skip_reason="distribution marked as editable", 148 | ) 149 | in specs 150 | ) 151 | assert ResolvedDependency(name="pip-api", version=Version("1.0")) in specs 152 | 153 | 154 | def test_pip_source_fix(monkeypatch): 155 | source = pip.PipSource() 156 | 157 | fix_version = ResolvedFixVersion( 158 | dep=ResolvedDependency(name="pip-api", version=Version("1.0")), 159 | version=Version("1.5"), 160 | ) 161 | 162 | def run_mock(args, **kwargs): 163 | assert " ".join(args) == f"{sys.executable} -m pip install pip-api==1.5" 164 | 165 | monkeypatch.setattr(subprocess, "run", run_mock) 166 | 167 | source.fix(fix_version) 168 | 169 | 170 | def test_pip_source_fix_failure(monkeypatch): 171 | source = pip.PipSource() 172 | 173 | fix_version = ResolvedFixVersion( 174 | dep=ResolvedDependency(name="pip-api", version=Version("1.0")), 175 | version=Version("1.5"), 176 | ) 177 | 178 | def run_mock(args, **kwargs): 179 | assert " ".join(args) == f"{sys.executable} -m pip install pip-api==1.5" 180 | raise subprocess.CalledProcessError(-1, "") 181 | 182 | monkeypatch.setattr(subprocess, "run", run_mock) 183 | 184 | with pytest.raises(pip.PipFixError): 185 | source.fix(fix_version) 186 | -------------------------------------------------------------------------------- /pip_audit/_service/interface.py: -------------------------------------------------------------------------------- 1 | """ 2 | Interfaces for interacting with vulnerability services, i.e. sources 3 | of vulnerability information for fully resolved Python packages. 4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | from abc import ABC, abstractmethod 9 | from collections.abc import Iterator 10 | from dataclasses import dataclass, replace 11 | from datetime import datetime 12 | from typing import Any, NewType 13 | 14 | from packaging.utils import canonicalize_name 15 | from packaging.version import Version 16 | 17 | VulnerabilityID = NewType("VulnerabilityID", str) 18 | 19 | 20 | def _id_comparison_key(id: str) -> int: 21 | if id.startswith("PYSEC"): 22 | return 1 23 | elif id.startswith("CVE"): 24 | return 2 25 | return 3 26 | 27 | 28 | @dataclass(frozen=True) 29 | class Dependency: 30 | """ 31 | Represents an abstract Python package. 32 | 33 | This class cannot be constructed directly. 34 | """ 35 | 36 | name: str 37 | """ 38 | The package's **uncanonicalized** name. 39 | 40 | Use the `canonicalized_name` property when a canonicalized form is necessary. 41 | """ 42 | 43 | def __init__(self, *_args: Any, **_kwargs: Any) -> None: 44 | """ 45 | A stub constructor that always fails. 46 | """ 47 | raise NotImplementedError 48 | 49 | # TODO(ww): Use functools.cached_property when supported Python is 3.8+. 50 | @property 51 | def canonical_name(self) -> str: 52 | """ 53 | The `Dependency`'s PEP-503 canonicalized name. 
54 | """ 55 | return canonicalize_name(self.name) 56 | 57 | def is_skipped(self) -> bool: 58 | """ 59 | Check whether the `Dependency` was skipped by the audit. 60 | """ 61 | return self.__class__ is SkippedDependency 62 | 63 | 64 | @dataclass(frozen=True) 65 | class ResolvedDependency(Dependency): 66 | """ 67 | Represents a fully resolved Python package. 68 | """ 69 | 70 | version: Version 71 | 72 | 73 | @dataclass(frozen=True) 74 | class SkippedDependency(Dependency): 75 | """ 76 | Represents a Python package that was unable to be audited and therefore, skipped. 77 | """ 78 | 79 | skip_reason: str 80 | 81 | 82 | @dataclass(frozen=True) 83 | class VulnerabilityResult: 84 | """ 85 | Represents a "result" from a vulnerability service, indicating a vulnerability 86 | in some Python package. 87 | """ 88 | 89 | id: VulnerabilityID 90 | """ 91 | A service-provided identifier for the vulnerability. 92 | """ 93 | 94 | description: str 95 | """ 96 | A human-readable description of the vulnerability. 97 | """ 98 | 99 | fix_versions: list[Version] 100 | """ 101 | A list of versions that can be upgraded to that resolve the vulnerability. 102 | """ 103 | 104 | aliases: set[VulnerabilityID] 105 | """ 106 | A set of aliases (alternative identifiers) for this result. 107 | """ 108 | 109 | published: datetime | None = None 110 | """ 111 | When the vulnerability was first published. 112 | """ 113 | 114 | @classmethod 115 | def create( 116 | cls, 117 | ids: list[VulnerabilityID], 118 | description: str, 119 | fix_versions: list[Version], 120 | published: datetime | None, 121 | ) -> VulnerabilityResult: 122 | """ 123 | Instantiates a `VulnerabilityResult` with the given data, prioritizing 124 | PYSEC and CVE vulnerability IDs for the primary identifier. 125 | """ 126 | 127 | ids.sort(key=_id_comparison_key) 128 | return cls(ids[0], description, fix_versions, set(ids[1:]), published) 129 | 130 | def alias_of(self, other: VulnerabilityResult) -> bool: 131 | """ 132 | Returns whether this result is an "alias" of another result. 133 | 134 | Two results are said to be aliases if their respective sets of 135 | `{id, *aliases}` intersect at all. A result is therefore its own alias. 136 | """ 137 | return bool((self.aliases | {self.id}).intersection(other.aliases | {other.id})) 138 | 139 | def merge_aliases(self, other: VulnerabilityResult) -> VulnerabilityResult: 140 | """ 141 | Merge `other`'s aliases into this result, returning a new result. 142 | """ 143 | 144 | # Our own ID should never occur in the alias set. 145 | aliases = self.aliases | other.aliases - {self.id} 146 | return replace(self, aliases=aliases) 147 | 148 | def has_any_id(self, ids: set[str]) -> bool: 149 | """ 150 | Returns whether ids intersects with {id} | aliases. 151 | """ 152 | return bool(ids & (self.aliases | {self.id})) 153 | 154 | 155 | class VulnerabilityService(ABC): 156 | """ 157 | Represents an abstract provider of Python package vulnerability information. 158 | """ 159 | 160 | @abstractmethod 161 | def query( 162 | self, spec: Dependency 163 | ) -> tuple[Dependency, list[VulnerabilityResult]]: # pragma: no cover 164 | """ 165 | Query the `VulnerabilityService` for information about the given `Dependency`, 166 | returning a list of `VulnerabilityResult`. 167 | """ 168 | raise NotImplementedError 169 | 170 | def query_all( 171 | self, specs: Iterator[Dependency] 172 | ) -> Iterator[tuple[Dependency, list[VulnerabilityResult]]]: 173 | """ 174 | Query the vulnerability service for information on multiple dependencies. 
175 | 176 | `VulnerabilityService` implementations can override this implementation with 177 | a more optimized one, if they support batched or bulk requests. 178 | """ 179 | for spec in specs: 180 | yield self.query(spec) 181 | 182 | @staticmethod 183 | def _parse_rfc3339(dt: str | None) -> datetime | None: 184 | if dt is None: 185 | return None 186 | 187 | # NOTE: OSV's schema says timestamps are RFC3339 but strptime 188 | # has no way to indicate an optional field (like `%f`), so 189 | # we have to try-and-retry with the two different expected formats. 190 | # See: https://github.com/google/osv.dev/issues/857 191 | try: 192 | return datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%fZ") 193 | except ValueError: 194 | return datetime.strptime(dt, "%Y-%m-%dT%H:%M:%SZ") 195 | 196 | 197 | class ServiceError(Exception): 198 | """ 199 | Raised when a `VulnerabilityService` fails, for any reason. 200 | 201 | Concrete implementations of `VulnerabilityService` are expected to subclass 202 | this exception to provide more context. 203 | """ 204 | 205 | pass 206 | 207 | 208 | class ConnectionError(ServiceError): 209 | """ 210 | A specialization of `ServiceError` specifically for cases where the 211 | vulnerability service is unreachable or offline. 212 | """ 213 | 214 | pass 215 | -------------------------------------------------------------------------------- /pip_audit/_cache.py: -------------------------------------------------------------------------------- 1 | """ 2 | Caching middleware for `pip-audit`. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import logging 8 | import os 9 | import shutil 10 | import subprocess 11 | import sys 12 | from pathlib import Path 13 | from tempfile import NamedTemporaryFile 14 | from typing import Any 15 | 16 | import pip_api 17 | import requests 18 | from cachecontrol import CacheControl 19 | from cachecontrol.caches import FileCache 20 | from packaging.version import Version 21 | from platformdirs import user_cache_path 22 | 23 | from pip_audit._service.interface import ServiceError 24 | 25 | logger = logging.getLogger(__name__) 26 | 27 | # The `cache dir` command was added to `pip` as of 20.1 so we should check before trying to use it 28 | # to discover the `pip` HTTP cache 29 | _MINIMUM_PIP_VERSION = Version("20.1") 30 | 31 | _PIP_VERSION = Version(str(pip_api.PIP_VERSION)) 32 | 33 | _PIP_AUDIT_LEGACY_INTERNAL_CACHE = Path.home() / ".pip-audit-cache" 34 | 35 | 36 | def _get_pip_cache() -> Path: 37 | # Unless the cache directory is specifically set by the `--cache-dir` option, we try to share 38 | # the `pip` HTTP cache 39 | cmd = [sys.executable, "-m", "pip", "cache", "dir"] 40 | try: 41 | process = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) 42 | except subprocess.CalledProcessError as cpe: # pragma: no cover 43 | # NOTE: This should only happen if pip's cache has been explicitly disabled, 44 | # which we check for in the caller (via `PIP_NO_CACHE_DIR`). 45 | raise ServiceError(f"Failed to query the `pip` HTTP cache directory: {cmd}") from cpe 46 | cache_dir = process.stdout.decode("utf-8").strip("\n") 47 | http_cache_dir = Path(cache_dir) / "http" 48 | return http_cache_dir 49 | 50 | 51 | def _get_cache_dir(custom_cache_dir: Path | None, *, use_pip: bool = True) -> Path: 52 | """ 53 | Returns a directory path suitable for HTTP caching. 54 | 55 | The directory is **not** guaranteed to exist. 
56 | 57 | `use_pip` tells the function to prefer `pip`'s pre-existing cache, 58 | **unless** `PIP_NO_CACHE_DIR` is present in the environment. 59 | """ 60 | 61 | # If the user has explicitly requested a directory, pass it through unscathed. 62 | if custom_cache_dir is not None: 63 | return custom_cache_dir 64 | 65 | # Retrieve pip-audit's default internal cache using `platformdirs`. 66 | pip_audit_cache_dir = user_cache_path("pip-audit", appauthor=False, ensure_exists=True) 67 | 68 | # If the retrieved cache isn't the legacy one, try to delete the old cache if it exists. 69 | if ( 70 | _PIP_AUDIT_LEGACY_INTERNAL_CACHE.exists() 71 | and pip_audit_cache_dir != _PIP_AUDIT_LEGACY_INTERNAL_CACHE 72 | ): 73 | shutil.rmtree(_PIP_AUDIT_LEGACY_INTERNAL_CACHE) 74 | 75 | # Respect pip's PIP_NO_CACHE_DIR environment setting. 76 | if use_pip and not os.getenv("PIP_NO_CACHE_DIR"): 77 | pip_cache_dir = _get_pip_cache() if _PIP_VERSION >= _MINIMUM_PIP_VERSION else None 78 | if pip_cache_dir is not None: 79 | return pip_cache_dir 80 | else: 81 | logger.warning( 82 | f"pip {_PIP_VERSION} doesn't support the `cache dir` subcommand, " 83 | f"using {pip_audit_cache_dir} instead" 84 | ) 85 | return pip_audit_cache_dir 86 | else: 87 | return pip_audit_cache_dir 88 | 89 | 90 | class _SafeFileCache(FileCache): 91 | """ 92 | A rough mirror of `pip`'s `SafeFileCache` that *should* be runtime-compatible 93 | with `pip` (i.e., does not interfere with `pip` when it shares the same 94 | caching directory as a running `pip` process). 95 | """ 96 | 97 | def __init__(self, directory: Path): 98 | self._logged_warning = False 99 | super().__init__(str(directory)) 100 | 101 | def get(self, key: str) -> Any | None: 102 | try: 103 | return super().get(key) 104 | except Exception as e: # pragma: no cover 105 | if not self._logged_warning: 106 | logger.warning( 107 | f"Failed to read from cache directory, performance may be degraded: {e}" 108 | ) 109 | self._logged_warning = True 110 | return None 111 | 112 | def set(self, key: str, value: bytes, expires: Any | None = None) -> None: 113 | try: 114 | self._set_impl(key, value) 115 | except Exception as e: # pragma: no cover 116 | if not self._logged_warning: 117 | logger.warning( 118 | f"Failed to write to cache directory, performance may be degraded: {e}" 119 | ) 120 | self._logged_warning = True 121 | 122 | def _set_impl(self, key: str, value: bytes) -> None: 123 | name: str = super()._fn(key) 124 | 125 | # Make sure the directory exists 126 | try: 127 | os.makedirs(os.path.dirname(name), self.dirmode) 128 | except OSError: # pragma: no cover 129 | pass 130 | 131 | # We don't want to use lock files since `pip` isn't going to recognise those. We should 132 | # write to the cache in a similar way to how `pip` does it. We create a temporary file, 133 | # then atomically replace the actual cache key's filename with it. This ensures 134 | # that other concurrent `pip` or `pip-audit` instances don't read partial data. 135 | with NamedTemporaryFile(delete=False, dir=os.path.dirname(name)) as io: 136 | io.write(value) 137 | 138 | # NOTE(ww): Similar to what `pip` does in `adjacent_tmp_file`. 139 | io.flush() 140 | os.fsync(io.fileno()) 141 | 142 | # NOTE(ww): Windows won't let us rename the temporary file until it's closed, 143 | # which is why we call `os.replace()` here rather than in the `with` block above. 
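# Added note (not part of the upstream source): `os.replace` (rather than `os.rename`)
# is used because it atomically overwrites an existing destination on both POSIX and
# Windows, whereas `os.rename` fails on Windows when the destination already exists.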
144 | os.replace(io.name, name) 145 | 146 | def delete(self, key: str) -> None: # pragma: no cover 147 | try: 148 | super().delete(key) 149 | except Exception as e: 150 | if not self._logged_warning: 151 | logger.warning( 152 | f"Failed to delete file from cache directory, performance may be degraded: {e}" 153 | ) 154 | self._logged_warning = True 155 | 156 | 157 | def caching_session(cache_dir: Path | None, *, use_pip: bool = False) -> requests.Session: 158 | """ 159 | Return a `requests` style session, with suitable caching middleware. 160 | 161 | Uses the given `cache_dir` for the HTTP cache. 162 | 163 | `use_pip` determines how the fallback cache directory is determined, if `cache_dir` is None. 164 | When `use_pip` is `False`, `caching_session` will use a `pip-audit` internal cache directory. 165 | When `use_pip` is `True`, `caching_session` will attempt to discover `pip`'s cache 166 | directory, falling back on the internal `pip-audit` cache directory if the user's 167 | version of `pip` is too old. 168 | """ 169 | 170 | # We limit the number of redirects to 5, since the services we connect to 171 | # should really never redirect more than once or twice. 172 | inner_session = requests.Session() 173 | inner_session.max_redirects = 5 174 | 175 | return CacheControl( 176 | inner_session, 177 | cache=_SafeFileCache(_get_cache_dir(cache_dir, use_pip=use_pip)), 178 | ) 179 | -------------------------------------------------------------------------------- /pip_audit/_dependency_source/pip.py: -------------------------------------------------------------------------------- 1 | """ 2 | Collect the local environment's active dependencies via `pip list`, wrapped 3 | by `pip-api`. 4 | """ 5 | 6 | import logging 7 | import os 8 | import subprocess 9 | import sys 10 | from collections.abc import Iterator, Sequence 11 | from pathlib import Path 12 | 13 | import pip_api 14 | from packaging.version import InvalidVersion, Version 15 | 16 | from pip_audit._dependency_source import ( 17 | DependencyFixError, 18 | DependencySource, 19 | DependencySourceError, 20 | ) 21 | from pip_audit._fix import ResolvedFixVersion 22 | from pip_audit._service import Dependency, ResolvedDependency, SkippedDependency 23 | from pip_audit._state import AuditState 24 | 25 | logger = logging.getLogger(__name__) 26 | 27 | # Versions of `pip` prior to this version don't support `pip list -v --format=json`, 28 | # which is our baseline for reliable output. We'll attempt to use versions before 29 | # this one, but not before complaining about it. 30 | _MINIMUM_RELIABLE_PIP_VERSION = Version("10.0.0b0") 31 | 32 | # NOTE(ww): The round-trip assignment here is due to type confusion: `pip_api.PIP_VERSION` 33 | # is a `Version` object, but it's a `pip_api._vendor.packaging.version.Version` instead 34 | # of a `packaging.version.Version`. Recreating the version with the correct type 35 | # ensures that our comparison operators work as expected. 36 | _PIP_VERSION = Version(str(pip_api.PIP_VERSION)) 37 | 38 | 39 | class PipSource(DependencySource): 40 | """ 41 | Wraps `pip` (specifically `pip list`) as a dependency source. 42 | """ 43 | 44 | def __init__( 45 | self, 46 | *, 47 | local: bool = False, 48 | paths: Sequence[Path] = [], 49 | skip_editable: bool = False, 50 | state: AuditState = AuditState(), 51 | ) -> None: 52 | """ 53 | Create a new `PipSource`. 54 | 55 | `local` determines whether to do a "local-only" list. If `True`, the 56 | `DependencySource` does not expose globally installed packages. 
57 | 
58 | `paths` is a list of locations to look for installed packages. If the
59 | list is empty, the `DependencySource` will query the current Python
60 | environment.
61 | 
62 | `skip_editable` controls whether dependencies marked as "editable" are skipped.
63 | By default, editable dependencies are not skipped.
64 | 
65 | `state` is an `AuditState` to use for state callbacks.
66 | """
67 | self._local = local
68 | self._paths = paths
69 | self._skip_editable = skip_editable
70 | self.state = state
71 | 
72 | # NOTE: By default `pip_api` invokes `pip` through `sys.executable`, like so:
73 | #
74 | # {sys.executable} -m pip [args ...]
75 | #
76 | # This is the right decision 99% of the time, but it can result in unintuitive audits
77 | # for users who have installed `pip-audit` globally but are trying to audit
78 | # a loaded virtual environment, since `pip-audit`'s `sys.executable` will be the global
79 | # Python and not the virtual environment's Python.
80 | #
81 | # To check for this, we check whether the Python that `pip_api` plans to use
82 | # matches the active virtual environment's prefix. We do this instead of comparing
83 | # against the $PATH-prioritized Python because that might be the same "effective"
84 | # Python but with a different symlink (e.g. `/python{,3,3.7}`). We *could*
85 | # handle that case by resolving the symlinks, but that would then pierce the
86 | # virtual environment that we're attempting to detect.
87 | effective_python = os.environ.get("PIPAPI_PYTHON_LOCATION", sys.executable)
88 | venv_prefix = os.getenv("VIRTUAL_ENV")
89 | if venv_prefix is not None and not effective_python.startswith(venv_prefix):
90 | logger.warning(
91 | f"pip-audit will run pip against {effective_python}, but you have "
92 | f"a virtual environment loaded at {venv_prefix}. This may result in "
93 | "unintuitive audits, since your local environment will not be audited. "
94 | "You can forcefully override this behavior by setting PIPAPI_PYTHON_LOCATION "
95 | "to the location of your virtual environment's Python interpreter."
96 | )
97 | 
98 | if _PIP_VERSION < _MINIMUM_RELIABLE_PIP_VERSION:
99 | logger.warning(
100 | f"pip {_PIP_VERSION} is very old, and may not provide reliable "
101 | "dependency information! You are STRONGLY encouraged to upgrade to a "
102 | "newer version of pip."
103 | )
104 | 
105 | def collect(self) -> Iterator[Dependency]:
106 | """
107 | Collect all of the dependencies discovered by this `PipSource`.
108 | 
109 | Raises a `PipSourceError` on any errors.
110 | """
111 | 
112 | # The `pip list` call that underlies `pip_api` could fail for myriad reasons.
113 | # We collect them all into a single well-defined error.
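# Added summary (not part of the upstream source): each installed distribution is
# mapped to exactly one of three outcomes below: a `SkippedDependency` when it is
# editable and `skip_editable` is set, a `SkippedDependency` when its version is not
# PEP 440-compliant, or a `ResolvedDependency` otherwise.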
114 | try: 115 | for _, dist in pip_api.installed_distributions( 116 | local=self._local, paths=list(self._paths) 117 | ).items(): 118 | dep: Dependency 119 | if dist.editable and self._skip_editable: 120 | dep = SkippedDependency( 121 | name=dist.name, skip_reason="distribution marked as editable" 122 | ) 123 | else: 124 | try: 125 | dep = ResolvedDependency(name=dist.name, version=Version(str(dist.version))) 126 | self.state.update_state(f"Collecting {dep.name} ({dep.version})") 127 | except InvalidVersion: 128 | skip_reason = ( 129 | "Package has invalid version and could not be audited: " 130 | f"{dist.name} ({dist.version})" 131 | ) 132 | logger.debug(skip_reason) 133 | dep = SkippedDependency(name=dist.name, skip_reason=skip_reason) 134 | yield dep 135 | except Exception as e: 136 | raise PipSourceError("failed to list installed distributions") from e 137 | 138 | def fix(self, fix_version: ResolvedFixVersion) -> None: 139 | """ 140 | Fixes a dependency version in this `PipSource`. 141 | """ 142 | self.state.update_state( 143 | f"Fixing {fix_version.dep.name} ({fix_version.dep.version} => {fix_version.version})" 144 | ) 145 | fix_cmd = [ 146 | sys.executable, 147 | "-m", 148 | "pip", 149 | "install", 150 | f"{fix_version.dep.canonical_name}=={fix_version.version}", 151 | ] 152 | try: 153 | subprocess.run( 154 | fix_cmd, 155 | check=True, 156 | stdout=subprocess.DEVNULL, 157 | stderr=subprocess.DEVNULL, 158 | ) 159 | except subprocess.CalledProcessError as cpe: 160 | raise PipFixError( 161 | f"failed to upgrade dependency {fix_version.dep.name} to fix version " 162 | f"{fix_version.version}" 163 | ) from cpe 164 | 165 | 166 | class PipSourceError(DependencySourceError): 167 | """A `pip` specific `DependencySourceError`.""" 168 | 169 | pass 170 | 171 | 172 | class PipFixError(DependencyFixError): 173 | """A `pip` specific `DependencyFixError`.""" 174 | 175 | pass 176 | -------------------------------------------------------------------------------- /test/test_cli.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pretend # type: ignore 4 | import pytest 5 | 6 | import pip_audit._cli 7 | from pip_audit._cli import ( 8 | OutputFormatChoice, 9 | ProgressSpinnerChoice, 10 | VulnerabilityAliasChoice, 11 | VulnerabilityDescriptionChoice, 12 | VulnerabilityServiceChoice, 13 | ) 14 | 15 | 16 | class TestOutputFormatChoice: 17 | def test_to_format_is_exhaustive(self): 18 | for choice in OutputFormatChoice: 19 | assert choice.to_format(False, False) is not None 20 | assert choice.to_format(True, True) is not None 21 | assert choice.to_format(False, True) is not None 22 | assert choice.to_format(True, False) is not None 23 | 24 | def test_str(self): 25 | for choice in OutputFormatChoice: 26 | assert str(choice) == choice.value 27 | 28 | 29 | class TestVulnerabilityServiceChoice: 30 | def test_str(self): 31 | for choice in VulnerabilityServiceChoice: 32 | assert str(choice) == choice.value 33 | 34 | 35 | class TestVulnerabilityDescriptionChoice: 36 | def test_to_bool_is_exhaustive(self): 37 | for choice in VulnerabilityDescriptionChoice: 38 | assert choice.to_bool(OutputFormatChoice.Json) in {True, False} 39 | 40 | def test_auto_to_bool_for_json(self): 41 | assert VulnerabilityDescriptionChoice.Auto.to_bool(OutputFormatChoice.Json) is True 42 | 43 | def test_str(self): 44 | for choice in VulnerabilityDescriptionChoice: 45 | assert str(choice) == choice.value 46 | 47 | 48 | class TestVulnerabilityAliasChoice: 49 | def 
test_to_bool_is_exhaustive(self): 50 | for choice in VulnerabilityAliasChoice: 51 | assert choice.to_bool(OutputFormatChoice.Json) in {True, False} 52 | assert choice.to_bool(OutputFormatChoice.Markdown) in {True, False} 53 | assert choice.to_bool(OutputFormatChoice.Columns) in {True, False} 54 | assert choice.to_bool(OutputFormatChoice.CycloneDxJson) in {True, False} 55 | assert choice.to_bool(OutputFormatChoice.CycloneDxXml) in {True, False} 56 | 57 | def test_auto_to_bool_for_json(self): 58 | assert VulnerabilityAliasChoice.Auto.to_bool(OutputFormatChoice.Json) is True 59 | 60 | def test_str(self): 61 | for choice in VulnerabilityAliasChoice: 62 | assert str(choice) == choice.value 63 | 64 | 65 | class TestProgressSpinnerChoice: 66 | def test_bool(self): 67 | assert bool(ProgressSpinnerChoice.On) 68 | assert not bool(ProgressSpinnerChoice.Off) 69 | 70 | def test_str(self): 71 | for choice in ProgressSpinnerChoice: 72 | assert str(choice) == choice.value 73 | 74 | 75 | @pytest.mark.parametrize( 76 | "args, vuln_count, pkg_count, expected", 77 | [ 78 | ([], 1, 1, "Found 1 known vulnerability in 1 package"), 79 | ([], 2, 1, "Found 2 known vulnerabilities in 1 package"), 80 | ([], 2, 2, "Found 2 known vulnerabilities in 2 packages"), 81 | ( 82 | ["--ignore-vuln", "bar"], 83 | 2, 84 | 2, 85 | "Found 2 known vulnerabilities, ignored 1 in 2 packages", 86 | ), 87 | (["--fix"], 1, 1, "fixed 1 vulnerability in 1 package"), 88 | (["--fix"], 2, 1, "fixed 2 vulnerabilities in 1 package"), 89 | (["--fix"], 2, 2, "fixed 2 vulnerabilities in 2 packages"), 90 | ([], 0, 0, "No known vulnerabilities found"), 91 | (["--ignore-vuln", "bar"], 0, 1, "No known vulnerabilities found, 1 ignored"), 92 | ], 93 | ) 94 | def test_plurals(capsys, monkeypatch, args, vuln_count, pkg_count, expected): 95 | dummysource = pretend.stub(fix=lambda a: None) 96 | monkeypatch.setattr(pip_audit._cli, "PipSource", lambda *a, **kw: dummysource) 97 | 98 | parser = pip_audit._cli._parser() 99 | monkeypatch.setattr(pip_audit._cli, "_parse_args", lambda *a: parser.parse_args(args)) 100 | 101 | result = [ 102 | ( 103 | pretend.stub( 104 | is_skipped=lambda: False, 105 | name="something" + str(i), 106 | canonical_name="something" + str(i), 107 | version=1, 108 | ), 109 | [ 110 | pretend.stub( 111 | fix_versions=[2], 112 | id="foo", 113 | aliases=set(), 114 | has_any_id=lambda x: False, 115 | ) 116 | ] 117 | * (vuln_count // pkg_count), 118 | ) 119 | for i in range(pkg_count) 120 | ] 121 | 122 | if "--ignore-vuln" in args: 123 | result[0][1].append(pretend.stub(id="bar", aliases=set(), has_any_id=lambda x: True)) 124 | 125 | auditor = pretend.stub(audit=lambda a: result) 126 | monkeypatch.setattr(pip_audit._cli, "Auditor", lambda *a, **kw: auditor) 127 | 128 | resolve_fix_versions = [ 129 | pretend.stub(is_skipped=lambda: False, dep=spec, version=2) for spec, _ in result 130 | ] 131 | monkeypatch.setattr(pip_audit._cli, "resolve_fix_versions", lambda *a: resolve_fix_versions) 132 | 133 | try: 134 | pip_audit._cli.audit() 135 | except SystemExit: 136 | pass 137 | 138 | captured = capsys.readouterr() 139 | assert expected in captured.err 140 | 141 | 142 | @pytest.mark.parametrize( 143 | "vuln_count, pkg_count, skip_count, print_format", 144 | [ 145 | (1, 1, 0, True), 146 | (2, 1, 0, True), 147 | (2, 2, 0, True), 148 | (0, 0, 0, False), 149 | (0, 1, 0, False), 150 | # If there are no vulnerabilities but a dependency has been skipped, we 151 | # should print the formatted result 152 | (0, 0, 1, True), 153 | ], 154 | ) 155 | def 
test_print_format(monkeypatch, vuln_count, pkg_count, skip_count, print_format): 156 | dummysource = pretend.stub(fix=lambda a: None) 157 | monkeypatch.setattr(pip_audit._cli, "PipSource", lambda *a, **kw: dummysource) 158 | 159 | dummyformat = pretend.stub( 160 | format=pretend.call_recorder(lambda _result, _fixes: None), 161 | is_manifest=False, 162 | ) 163 | monkeypatch.setattr(pip_audit._cli, "ColumnsFormat", lambda *a, **kw: dummyformat) 164 | 165 | parser = pip_audit._cli._parser() 166 | monkeypatch.setattr(pip_audit._cli, "_parse_args", lambda *a: parser.parse_args([])) 167 | 168 | result = [ 169 | ( 170 | pretend.stub( 171 | is_skipped=lambda: False, 172 | name="something" + str(i), 173 | canonical_name="something" + str(i), 174 | version=1, 175 | ), 176 | [ 177 | pretend.stub( 178 | fix_versions=[2], 179 | id="foo", 180 | aliases=set(), 181 | has_any_id=lambda x: False, 182 | ) 183 | ] 184 | * (vuln_count // pkg_count), 185 | ) 186 | for i in range(pkg_count) 187 | ] 188 | result.extend( 189 | ( 190 | pretend.stub( 191 | is_skipped=lambda: True, 192 | name="skipped " + str(i), 193 | canonical_name="skipped " + str(i), 194 | version=1, 195 | skip_reason="reason " + str(i), 196 | ), 197 | [], 198 | ) 199 | for i in range(skip_count) 200 | ) 201 | 202 | auditor = pretend.stub(audit=lambda a: result) 203 | monkeypatch.setattr(pip_audit._cli, "Auditor", lambda *a, **kw: auditor) 204 | 205 | resolve_fix_versions = [ 206 | pretend.stub(is_skipped=lambda: False, dep=spec, version=2) for spec, _ in result 207 | ] 208 | monkeypatch.setattr(pip_audit._cli, "resolve_fix_versions", lambda *a: resolve_fix_versions) 209 | 210 | try: 211 | pip_audit._cli.audit() 212 | except SystemExit: 213 | pass 214 | 215 | assert bool(dummyformat.format.calls) == print_format 216 | 217 | 218 | def test_environment_variable(monkeypatch): 219 | """Environment variables set before execution change CLI option default.""" 220 | monkeypatch.setenv("PIP_AUDIT_DESC", "off") 221 | monkeypatch.setenv("PIP_AUDIT_FORMAT", "markdown") 222 | monkeypatch.setenv("PIP_AUDIT_OUTPUT", "/tmp/fake") 223 | monkeypatch.setenv("PIP_AUDIT_PROGRESS_SPINNER", "off") 224 | monkeypatch.setenv("PIP_AUDIT_VULNERABILITY_SERVICE", "osv") 225 | 226 | parser = pip_audit._cli._parser() 227 | monkeypatch.setattr(pip_audit._cli, "_parse_args", lambda *a: parser.parse_args([])) 228 | args = pip_audit._cli._parse_args(parser, []) 229 | 230 | assert args.desc == VulnerabilityDescriptionChoice.Off 231 | assert args.format == OutputFormatChoice.Markdown 232 | assert args.output == Path("/tmp/fake") 233 | assert not args.progress_spinner 234 | assert args.vulnerability_service == VulnerabilityServiceChoice.Osv 235 | -------------------------------------------------------------------------------- /test/format/test_json.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import pytest 4 | 5 | import pip_audit._format as format 6 | 7 | 8 | @pytest.mark.parametrize("output_desc, output_aliases", ([True, False], [True, False])) 9 | def test_json_manifest(output_desc, output_aliases): 10 | fmt = format.JsonFormat(output_desc, output_aliases) 11 | 12 | assert fmt.is_manifest 13 | 14 | 15 | def test_json(vuln_data): 16 | json_format = format.JsonFormat(True, True) 17 | expected_json = { 18 | "dependencies": [ 19 | { 20 | "name": "foo", 21 | "version": "1.0", 22 | "vulns": [ 23 | { 24 | "id": "VULN-0", 25 | "fix_versions": [ 26 | "1.1", 27 | "1.4", 28 | ], 29 | "aliases": ["CVE-0000-00000"], 30 | 
"description": "The first vulnerability", 31 | }, 32 | { 33 | "id": "VULN-1", 34 | "fix_versions": ["1.0"], 35 | "aliases": ["CVE-0000-00001"], 36 | "description": "The second vulnerability", 37 | }, 38 | ], 39 | }, 40 | { 41 | "name": "bar", 42 | "version": "0.1", 43 | "vulns": [ 44 | { 45 | "id": "VULN-2", 46 | "fix_versions": [], 47 | "aliases": ["CVE-0000-00002"], 48 | "description": "The third vulnerability", 49 | } 50 | ], 51 | }, 52 | ], 53 | "fixes": [], 54 | } 55 | assert json_format.format(vuln_data, list()) == json.dumps(expected_json) 56 | 57 | 58 | def test_json_no_desc(vuln_data): 59 | json_format = format.JsonFormat(False, True) 60 | expected_json = { 61 | "dependencies": [ 62 | { 63 | "name": "foo", 64 | "version": "1.0", 65 | "vulns": [ 66 | { 67 | "id": "VULN-0", 68 | "fix_versions": [ 69 | "1.1", 70 | "1.4", 71 | ], 72 | "aliases": ["CVE-0000-00000"], 73 | }, 74 | { 75 | "id": "VULN-1", 76 | "fix_versions": ["1.0"], 77 | "aliases": ["CVE-0000-00001"], 78 | }, 79 | ], 80 | }, 81 | { 82 | "name": "bar", 83 | "version": "0.1", 84 | "vulns": [ 85 | { 86 | "id": "VULN-2", 87 | "fix_versions": [], 88 | "aliases": ["CVE-0000-00002"], 89 | } 90 | ], 91 | }, 92 | ], 93 | "fixes": [], 94 | } 95 | assert json_format.format(vuln_data, list()) == json.dumps(expected_json) 96 | 97 | 98 | def test_json_no_desc_no_aliases(vuln_data): 99 | json_format = format.JsonFormat(False, False) 100 | expected_json = { 101 | "dependencies": [ 102 | { 103 | "name": "foo", 104 | "version": "1.0", 105 | "vulns": [ 106 | { 107 | "id": "VULN-0", 108 | "fix_versions": [ 109 | "1.1", 110 | "1.4", 111 | ], 112 | }, 113 | { 114 | "id": "VULN-1", 115 | "fix_versions": ["1.0"], 116 | }, 117 | ], 118 | }, 119 | { 120 | "name": "bar", 121 | "version": "0.1", 122 | "vulns": [ 123 | { 124 | "id": "VULN-2", 125 | "fix_versions": [], 126 | } 127 | ], 128 | }, 129 | ], 130 | "fixes": [], 131 | } 132 | assert json_format.format(vuln_data, list()) == json.dumps(expected_json) 133 | 134 | 135 | def test_json_skipped_dep(vuln_data_skipped_dep): 136 | json_format = format.JsonFormat(False, True) 137 | expected_json = { 138 | "dependencies": [ 139 | { 140 | "name": "foo", 141 | "version": "1.0", 142 | "vulns": [ 143 | { 144 | "id": "VULN-0", 145 | "fix_versions": [ 146 | "1.1", 147 | "1.4", 148 | ], 149 | "aliases": ["CVE-0000-00000"], 150 | }, 151 | ], 152 | }, 153 | { 154 | "name": "bar", 155 | "skip_reason": "skip-reason", 156 | }, 157 | ], 158 | "fixes": [], 159 | } 160 | assert json_format.format(vuln_data_skipped_dep, list()) == json.dumps(expected_json) 161 | 162 | 163 | def test_json_fix(vuln_data, fix_data): 164 | json_format = format.JsonFormat(True, True) 165 | expected_json = { 166 | "dependencies": [ 167 | { 168 | "name": "foo", 169 | "version": "1.0", 170 | "vulns": [ 171 | { 172 | "id": "VULN-0", 173 | "fix_versions": [ 174 | "1.1", 175 | "1.4", 176 | ], 177 | "aliases": ["CVE-0000-00000"], 178 | "description": "The first vulnerability", 179 | }, 180 | { 181 | "id": "VULN-1", 182 | "fix_versions": ["1.0"], 183 | "aliases": ["CVE-0000-00001"], 184 | "description": "The second vulnerability", 185 | }, 186 | ], 187 | }, 188 | { 189 | "name": "bar", 190 | "version": "0.1", 191 | "vulns": [ 192 | { 193 | "id": "VULN-2", 194 | "fix_versions": [], 195 | "aliases": ["CVE-0000-00002"], 196 | "description": "The third vulnerability", 197 | } 198 | ], 199 | }, 200 | ], 201 | "fixes": [ 202 | { 203 | "name": "foo", 204 | "old_version": "1.0", 205 | "new_version": "1.8", 206 | }, 207 | { 208 | "name": "bar", 209 | 
"old_version": "0.1", 210 | "new_version": "0.3", 211 | }, 212 | ], 213 | } 214 | assert json_format.format(vuln_data, fix_data) == json.dumps(expected_json) 215 | 216 | 217 | def test_json_skipped_fix(vuln_data, skipped_fix_data): 218 | json_format = format.JsonFormat(True, True) 219 | expected_json = { 220 | "dependencies": [ 221 | { 222 | "name": "foo", 223 | "version": "1.0", 224 | "vulns": [ 225 | { 226 | "id": "VULN-0", 227 | "fix_versions": [ 228 | "1.1", 229 | "1.4", 230 | ], 231 | "aliases": ["CVE-0000-00000"], 232 | "description": "The first vulnerability", 233 | }, 234 | { 235 | "id": "VULN-1", 236 | "fix_versions": ["1.0"], 237 | "aliases": ["CVE-0000-00001"], 238 | "description": "The second vulnerability", 239 | }, 240 | ], 241 | }, 242 | { 243 | "name": "bar", 244 | "version": "0.1", 245 | "vulns": [ 246 | { 247 | "id": "VULN-2", 248 | "fix_versions": [], 249 | "aliases": ["CVE-0000-00002"], 250 | "description": "The third vulnerability", 251 | } 252 | ], 253 | }, 254 | ], 255 | "fixes": [ 256 | { 257 | "name": "foo", 258 | "old_version": "1.0", 259 | "new_version": "1.8", 260 | }, 261 | {"name": "bar", "version": "0.1", "skip_reason": "skip-reason"}, 262 | ], 263 | } 264 | assert json_format.format(vuln_data, skipped_fix_data) == json.dumps(expected_json) 265 | -------------------------------------------------------------------------------- /test/service/test_osv.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pretend # type: ignore 4 | import pytest 5 | from packaging.version import Version 6 | from requests.exceptions import ConnectTimeout, HTTPError 7 | 8 | import pip_audit._service as service 9 | 10 | 11 | def get_mock_session(func): 12 | class MockSession: 13 | def __init__(self, create_response): 14 | self.create_response = create_response 15 | 16 | def post(self, url, **kwargs): 17 | return self.create_response() 18 | 19 | return MockSession(func) 20 | 21 | 22 | @pytest.mark.online 23 | def test_osv(): 24 | osv = service.OsvService() 25 | dep = service.ResolvedDependency("jinja2", Version("2.4.1")) 26 | results: dict[service.Dependency, list[service.VulnerabilityResult]] = dict( 27 | osv.query_all(iter([dep])) 28 | ) 29 | 30 | assert len(results) == 1 31 | assert dep in results 32 | 33 | vulns = results[dep] 34 | assert len(vulns) > 0 35 | 36 | 37 | @pytest.mark.online 38 | def test_osv_uses_canonical_package_name(): 39 | # OSV's API only recognizes canonicalized package names, so make sure 40 | # that our adapter is canonicalizing any dependencies passed into it. 
41 | osv = service.OsvService() 42 | dep = service.ResolvedDependency("PyYAML", Version("5.3")) 43 | _, results = osv.query(dep) 44 | 45 | assert len(results) > 0 46 | 47 | 48 | @pytest.mark.online 49 | def test_osv_version_ranges(): 50 | # Try a package with vulnerabilities that have an explicitly stated introduced and fixed 51 | # version 52 | osv = service.OsvService() 53 | dep = service.ResolvedDependency("ansible", Version("2.8.0")) 54 | results: dict[service.Dependency, list[service.VulnerabilityResult]] = dict( 55 | osv.query_all(iter([dep])) 56 | ) 57 | 58 | assert len(results) == 1 59 | assert dep in results 60 | 61 | vulns = results[dep] 62 | assert len(vulns) > 0 63 | 64 | 65 | @pytest.mark.online 66 | def test_osv_multiple_pkg(): 67 | osv = service.OsvService() 68 | deps: list[service.Dependency] = [ 69 | service.ResolvedDependency("jinja2", Version("2.4.1")), 70 | service.ResolvedDependency("flask", Version("0.5")), 71 | ] 72 | results: dict[service.Dependency, list[service.VulnerabilityResult]] = dict( 73 | osv.query_all(iter(deps)) 74 | ) 75 | 76 | assert len(results) == 2 77 | assert deps[0] in results and deps[1] in results 78 | 79 | assert len(results[deps[0]]) > 0 80 | assert len(results[deps[1]]) > 0 81 | 82 | 83 | @pytest.mark.online 84 | def test_osv_no_vuln(): 85 | osv = service.OsvService() 86 | dep = service.ResolvedDependency("foo", Version("1.0.0")) 87 | results: dict[service.Dependency, list[service.VulnerabilityResult]] = dict( 88 | osv.query_all(iter([dep])) 89 | ) 90 | 91 | assert len(results) == 1 92 | assert dep in results 93 | 94 | vulns = results[dep] 95 | assert len(vulns) == 0 96 | 97 | 98 | def test_osv_connection_error(monkeypatch): 99 | osv = service.OsvService() 100 | monkeypatch.setattr(osv.session, "post", pretend.raiser(ConnectTimeout)) 101 | 102 | dep = service.ResolvedDependency("jinja2", Version("2.4.1")) 103 | with pytest.raises( 104 | service.ConnectionError, match="Could not connect to OSV's vulnerability feed" 105 | ): 106 | dict(osv.query_all(iter([dep]))) 107 | 108 | 109 | def test_osv_error_response(monkeypatch): 110 | def raise_for_status(): 111 | raise HTTPError 112 | 113 | response = pretend.stub(raise_for_status=pretend.call_recorder(raise_for_status)) 114 | post = pretend.call_recorder(lambda *a, **kw: response) 115 | 116 | osv = service.OsvService() 117 | monkeypatch.setattr(osv.session, "post", post) 118 | 119 | dep = service.ResolvedDependency("jinja2", Version("2.4.1")) 120 | with pytest.raises(service.ServiceError): 121 | dict(osv.query_all(iter([dep]))) 122 | 123 | assert len(post.calls) == 1 124 | assert len(response.raise_for_status.calls) == 1 125 | 126 | 127 | def test_osv_skipped_dep(): 128 | osv = service.OsvService() 129 | dep = service.SkippedDependency(name="foo", skip_reason="skip-reason") 130 | results: dict[service.Dependency, list[service.VulnerabilityResult]] = dict( 131 | osv.query_all(iter([dep])) 132 | ) 133 | 134 | assert len(results) == 1 135 | assert dep in results 136 | 137 | vulns = results[dep] 138 | assert len(vulns) == 0 139 | 140 | 141 | @pytest.mark.parametrize("version", ["0.0.0", "2.0.0", "2.3.4"]) 142 | def test_osv_unsupported_schema_version(monkeypatch, version): 143 | logger = pretend.stub(warning=pretend.call_recorder(lambda s: None)) 144 | monkeypatch.setattr(service.osv, "logger", logger) 145 | 146 | payload = { 147 | "vulns": [ 148 | {"schema_version": version}, 149 | ] 150 | } 151 | 152 | response = pretend.stub(raise_for_status=lambda: None, json=lambda: payload) 153 | post = 
pretend.call_recorder(lambda *a, **kw: response) 154 | 155 | osv = service.OsvService() 156 | monkeypatch.setattr(osv.session, "post", post) 157 | 158 | dep = service.ResolvedDependency("foo", Version("1.0.0")) 159 | results = dict(osv.query_all(iter([dep]))) 160 | 161 | assert logger.warning.calls == [pretend.call(f"Unsupported OSV schema version: {version}")] 162 | 163 | assert len(results) == 1 164 | assert dep in results 165 | 166 | vulns = results[dep] 167 | assert len(vulns) == 0 168 | 169 | 170 | @pytest.mark.parametrize( 171 | ["summary", "details", "description"], 172 | [ 173 | ("fakesummary", "fakedetails", "fakesummary"), 174 | ("fakesummary\nanother line", "fakedetails", "fakesummary another line"), 175 | (None, "fakedetails", "fakedetails"), 176 | (None, "fakedetails\nanother line", "fakedetails another line"), 177 | (None, None, "N/A"), 178 | ], 179 | ) 180 | def test_osv_vuln_description_fallbacks(monkeypatch, summary, details, description): 181 | payload = { 182 | "vulns": [ 183 | { 184 | "id": "fakeid", 185 | "summary": summary, 186 | "details": details, 187 | "affected": [ 188 | { 189 | "package": {"name": "foo", "ecosystem": "PyPI"}, 190 | "ranges": [{"type": "ECOSYSTEM", "events": [{"fixed": "1.0.1"}]}], 191 | } 192 | ], 193 | } 194 | ], 195 | } 196 | 197 | response = pretend.stub(raise_for_status=lambda: None, json=lambda: payload) 198 | post = pretend.call_recorder(lambda *a, **kw: response) 199 | 200 | osv = service.OsvService() 201 | monkeypatch.setattr(osv.session, "post", post) 202 | 203 | dep = service.ResolvedDependency("foo", Version("1.0.0")) 204 | results = dict(osv.query_all(iter([dep]))) 205 | 206 | assert len(results) == 1 207 | assert dep in results 208 | 209 | vulns = results[dep] 210 | assert len(vulns) == 1 211 | 212 | assert vulns[0].description == description 213 | 214 | 215 | def test_osv_vuln_affected_missing(monkeypatch): 216 | logger = pretend.stub(warning=pretend.call_recorder(lambda s: None)) 217 | monkeypatch.setattr(service.osv, "logger", logger) 218 | 219 | payload = { 220 | "vulns": [ 221 | { 222 | "id": "fakeid", 223 | "summary": "fakesummary", 224 | "details": "fakedetails", 225 | } 226 | ], 227 | } 228 | 229 | response = pretend.stub(raise_for_status=lambda: None, json=lambda: payload) 230 | post = pretend.call_recorder(lambda *a, **kw: response) 231 | 232 | osv = service.OsvService() 233 | monkeypatch.setattr(osv.session, "post", post) 234 | 235 | dep = service.ResolvedDependency("foo", Version("1.0.0")) 236 | results = dict(osv.query_all(iter([dep]))) 237 | 238 | assert len(results) == 1 239 | assert dep in results 240 | 241 | vulns = results[dep] 242 | assert len(vulns) == 0 243 | 244 | assert logger.warning.calls == [ 245 | pretend.call("OSV vuln entry 'fakeid' is missing 'affected' list") 246 | ] 247 | 248 | 249 | def test_osv_vuln_withdrawn(monkeypatch): 250 | logger = pretend.stub(debug=pretend.call_recorder(lambda s: None)) 251 | monkeypatch.setattr(service.osv, "logger", logger) 252 | 253 | payload = { 254 | "vulns": [ 255 | { 256 | "id": "fakeid", 257 | "withdrawn": "some-datetime", 258 | } 259 | ], 260 | } 261 | 262 | response = pretend.stub(raise_for_status=lambda: None, json=lambda: payload) 263 | post = pretend.call_recorder(lambda *a, **kw: response) 264 | 265 | osv = service.OsvService() 266 | monkeypatch.setattr(osv.session, "post", post) 267 | 268 | dep = service.ResolvedDependency("foo", Version("1.0.0")) 269 | results = dict(osv.query_all(iter([dep]))) 270 | 271 | assert len(results) == 1 272 | assert dep in results 
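    # (Hedged note, not from the original source: in the OSV schema, `withdrawn`
    # carries an RFC 3339 timestamp marking the record as retracted, which is why
    # the entry is expected to be dropped rather than reported below.)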
273 | 274 | vulns = results[dep] 275 | assert len(vulns) == 0 276 | 277 | assert logger.debug.calls == [ 278 | pretend.call("OSV vuln entry 'fakeid' marked as withdrawn at some-datetime") 279 | ] 280 | -------------------------------------------------------------------------------- /pip_audit/_virtual_env.py: -------------------------------------------------------------------------------- 1 | """ 2 | Create virtual environments with a custom set of packages and inspect their dependencies. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import json 8 | import logging 9 | import venv 10 | from collections.abc import Iterator 11 | from os import PathLike 12 | from tempfile import NamedTemporaryFile, TemporaryDirectory, gettempdir 13 | from types import SimpleNamespace 14 | 15 | from packaging.version import Version 16 | 17 | from ._state import AuditState 18 | from ._subprocess import CalledProcessError, run 19 | 20 | logger = logging.getLogger(__name__) 21 | 22 | 23 | class VirtualEnv(venv.EnvBuilder): 24 | """ 25 | A wrapper around `EnvBuilder` that allows a custom `pip install` command to be executed, and its 26 | resulting dependencies inspected. 27 | 28 | The `pip-audit` API uses this functionality internally to deduce what the dependencies are for a 29 | given requirements file since this can't be determined statically. 30 | 31 | The `create` method MUST be called before inspecting the `installed_packages` property otherwise 32 | a `VirtualEnvError` will be raised. 33 | 34 | The expected usage is: 35 | ``` 36 | # Create a virtual environment and install the `pip-api` package. 37 | ve = VirtualEnv(["pip-api"]) 38 | ve.create(".venv/") 39 | for (name, version) in ve.installed_packages: 40 | print(f"Installed package {name} ({version})") 41 | ``` 42 | """ 43 | 44 | def __init__( 45 | self, 46 | install_args: list[str], 47 | index_url: str | None = None, 48 | extra_index_urls: list[str] = [], 49 | state: AuditState = AuditState(), 50 | ): 51 | """ 52 | Create a new `VirtualEnv`. 53 | 54 | `install_args` is the list of arguments that would be used the custom install command. For 55 | example, if you wanted to execute `pip install -e /tmp/my_pkg`, you would create the 56 | `VirtualEnv` like so: 57 | ``` 58 | ve = VirtualEnv(["-e", "/tmp/my_pkg"]) 59 | ``` 60 | 61 | `index_url` is the base URL of the package index. 62 | 63 | `extra_index_urls` are the extra URLs of package indexes. 64 | 65 | `state` is an `AuditState` to use for state callbacks. 66 | """ 67 | super().__init__(with_pip=True) 68 | self._install_args = install_args 69 | self._index_url = index_url 70 | self._extra_index_urls = extra_index_urls 71 | self._packages: list[tuple[str, Version]] | None = None 72 | self._state = state 73 | 74 | def create(self, env_dir: str | bytes | PathLike[str] | PathLike[bytes]) -> None: 75 | """ 76 | Creates the virtual environment. 77 | """ 78 | 79 | try: 80 | return super().create(env_dir) 81 | except PermissionError: 82 | # `venv` uses a subprocess internally to bootstrap pip, but 83 | # some Linux distributions choose to mark the system temporary 84 | # directory as `noexec`. Apart from having only nominal security 85 | # benefits, this completely breaks our ability to execute from 86 | # within the temporary virtualenv. 87 | # 88 | # We may be able to hack around this in the future, but doing so 89 | # isn't straightforward or reliable. So we bail for now. 
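            # (Illustrative workaround sketch, not part of the original comment:
            # `gettempdir()` below honors TMPDIR, so an affected user can point
            # pip-audit at an executable-friendly location, e.g.
            #     TMPDIR="$HOME/tmp" pip-audit -r requirements.txt
            # as the error message constructed below also suggests.)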
90 | # 91 | # See: https://github.com/pypa/pip-audit/issues/732 92 | base_tmpdir = gettempdir() 93 | raise VirtualEnvError( 94 | f"Couldn't execute in a temporary directory under {base_tmpdir}. " 95 | "This is sometimes caused by a noexec mount flag or other setting. " 96 | "Consider changing this setting or explicitly specifying a different " 97 | "temporary directory via the TMPDIR environment variable." 98 | ) 99 | 100 | def post_setup(self, context: SimpleNamespace) -> None: 101 | """ 102 | Install the custom package and populate the list of installed packages. 103 | 104 | This method is overridden from `EnvBuilder` to execute immediately after the virtual 105 | environment has been created and should not be called directly. 106 | 107 | We do a few things in our custom post-setup: 108 | - Upgrade the `pip` version. We'll be using `pip list` with the `--format json` option which 109 | requires a non-ancient version for `pip`. 110 | - Install `wheel`. When our packages install their own dependencies, they might be able 111 | to do so through wheels, which are much faster and don't require us to run 112 | setup scripts. 113 | - Execute the custom install command. 114 | - Call `pip list`, and parse the output into a list of packages to be returned from when the 115 | `installed_packages` property is queried. 116 | """ 117 | self._state.update_state("Updating pip installation in isolated environment") 118 | 119 | # Firstly, upgrade our `pip` versions since `ensurepip` can leave us with an old version 120 | # and install `wheel` in case our package dependencies are offered as wheels 121 | # TODO: This is probably replaceable with the `upgrade_deps` option on `EnvBuilder` 122 | # itself, starting with Python 3.9. 123 | pip_upgrade_cmd = [ 124 | context.env_exe, 125 | "-m", 126 | "pip", 127 | "install", 128 | "--upgrade", 129 | "pip", 130 | "wheel", 131 | "setuptools", 132 | ] 133 | try: 134 | run(pip_upgrade_cmd, state=self._state) 135 | except CalledProcessError as cpe: 136 | raise VirtualEnvError(f"Failed to upgrade `pip`: {pip_upgrade_cmd}") from cpe 137 | 138 | self._state.update_state("Installing package in isolated environment") 139 | 140 | with TemporaryDirectory() as ve_dir, NamedTemporaryFile(dir=ve_dir, delete=False) as tmp: 141 | # We use delete=False in creating the tempfile to allow it to be 142 | # closed and opened multiple times within the context scope on 143 | # windows, see GitHub issue #646. 144 | 145 | # Install our packages 146 | # NOTE(ww): We pass `--no-input` to prevent `pip` from indefinitely 147 | # blocking on user input for repository credentials, and 148 | # `--keyring-provider=subprocess` to allow `pip` to access the `keyring` 149 | # program on the `$PATH` for index credentials, if necessary. The latter flag 150 | # is required beginning with pip 23.1, since `--no-input` disables the default 151 | # keyring behavior. 152 | package_install_cmd = [ 153 | context.env_exe, 154 | "-m", 155 | "pip", 156 | "install", 157 | "--no-input", 158 | "--keyring-provider=subprocess", 159 | *self._index_url_args, 160 | "--dry-run", 161 | "--report", 162 | tmp.name, 163 | *self._install_args, 164 | ] 165 | try: 166 | run(package_install_cmd, log_stdout=True, state=self._state) 167 | except CalledProcessError as cpe: 168 | # TODO: Propagate the subprocess's error output better here. 
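                # (Context sketch, abridged and inferred from the code below rather
                # than stated in the original: the `--report` JSON that pip writes
                # to `tmp` above has the rough shape
                #     {"install": [{"metadata": {"name": "...", "version": "..."}}, ...]}
                # and the success path consumes it via install_report["install"]
                # and each package's "metadata" entry.)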
169 | logger.error(f"internal pip failure: {cpe.stderr}") 170 | raise VirtualEnvError(f"Failed to install packages: {package_install_cmd}") from cpe 171 | 172 | self._state.update_state("Processing package list from isolated environment") 173 | 174 | install_report = json.load(tmp) 175 | package_list = install_report["install"] 176 | 177 | # Convert into a series of name, version pairs 178 | self._packages = [] 179 | for package in package_list: 180 | package_metadata = package["metadata"] 181 | self._packages.append( 182 | (package_metadata["name"], Version(package_metadata["version"])) 183 | ) 184 | 185 | @property 186 | def installed_packages(self) -> Iterator[tuple[str, Version]]: 187 | """ 188 | A property to inspect the list of packages installed in the virtual environment. 189 | 190 | This method can only be called after the `create` method has been called. 191 | """ 192 | if self._packages is None: 193 | raise VirtualEnvError( 194 | "Invalid usage of wrapper." 195 | "The `create` method must be called before inspecting `installed_packages`." 196 | ) 197 | 198 | yield from self._packages 199 | 200 | @property 201 | def _index_url_args(self) -> list[str]: 202 | args = [] 203 | if self._index_url: 204 | args.extend(["--index-url", self._index_url]) 205 | for index_url in self._extra_index_urls: 206 | args.extend(["--extra-index-url", index_url]) 207 | return args 208 | 209 | 210 | class VirtualEnvError(Exception): 211 | """ 212 | Raised when `VirtualEnv` fails to build or inspect dependencies, for any reason. 213 | """ 214 | 215 | pass 216 | -------------------------------------------------------------------------------- /pip_audit/_state.py: -------------------------------------------------------------------------------- 1 | """ 2 | Interfaces for for propagating feedback from the API to provide responsive progress indicators as 3 | well as a progress spinner implementation for use with CLI applications. 4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | import logging 9 | from abc import ABC, abstractmethod 10 | from collections.abc import Sequence 11 | from logging.handlers import MemoryHandler 12 | from typing import Any 13 | 14 | from rich.align import StyleType 15 | from rich.console import Console, Group, RenderableType 16 | from rich.live import Live 17 | from rich.panel import Panel 18 | from rich.status import Spinner 19 | 20 | 21 | class AuditState: 22 | """ 23 | An object that handles abstract "updates" to `pip-audit`'s state. 24 | 25 | Non-UI consumers of `pip-audit` (via `pip_audit`) should have no need for 26 | this class, and can leave it as a default construction in whatever signatures 27 | it appears in. Its primary use is internal and UI-specific: it exists solely 28 | to give the CLI enough state for a responsive progress indicator during 29 | user requests. 30 | """ 31 | 32 | def __init__(self, *, members: Sequence[_StateActor] = []): 33 | """ 34 | Create a new `AuditState` with the given member list. 35 | """ 36 | 37 | self._members = members 38 | 39 | def update_state(self, message: str, logs: str | None = None) -> None: 40 | """ 41 | Called whenever `pip_audit`'s internal state changes in a way that's meaningful to 42 | expose to a user. 43 | 44 | `message` is the message to present to the user. 45 | """ 46 | 47 | for member in self._members: 48 | member.update_state(message, logs) 49 | 50 | def initialize(self) -> None: 51 | """ 52 | Called when `pip-audit`'s state is initializing. 
53 | """ 54 | 55 | for member in self._members: 56 | member.initialize() 57 | 58 | def finalize(self) -> None: 59 | """ 60 | Called when `pip_audit`'s state is "done" changing. 61 | """ 62 | for member in self._members: 63 | member.finalize() 64 | 65 | def __enter__(self) -> AuditState: # pragma: no cover 66 | """ 67 | Create an instance of the `pip-audit` state for usage within a `with` statement. 68 | """ 69 | 70 | self.initialize() 71 | return self 72 | 73 | def __exit__( 74 | self, _exc_type: Any, _exc_value: Any, _exc_traceback: Any 75 | ) -> None: # pragma: no cover 76 | """ 77 | Helper to ensure `finalize` gets called when the `pip-audit` state falls out of scope of a 78 | `with` statement. 79 | """ 80 | self.finalize() 81 | 82 | 83 | class _StateActor(ABC): 84 | @abstractmethod 85 | def update_state(self, message: str, logs: str | None = None) -> None: 86 | raise NotImplementedError # pragma: no cover 87 | 88 | @abstractmethod 89 | def initialize(self) -> None: 90 | """ 91 | Called when `pip-audit`'s state is initializing. Implementors should 92 | override this to do nothing if their state management requires no 93 | initialization step. 94 | """ 95 | raise NotImplementedError # pragma: no cover 96 | 97 | @abstractmethod 98 | def finalize(self) -> None: 99 | """ 100 | Called when the overlaying `AuditState` is "done," i.e. `pip-audit`'s 101 | state is done changing. Implementors should override this to do nothing 102 | if their state management requires no finalization step. 103 | """ 104 | raise NotImplementedError # pragma: no cover 105 | 106 | 107 | class StatusLog: # pragma: no cover 108 | """ 109 | Displays a status indicator with an optional log panel to display logs 110 | for external processes. 111 | 112 | This code is based off of Rich's `Status` component: 113 | https://github.com/Textualize/rich/blob/master/rich/status.py 114 | """ 115 | 116 | # NOTE(alex): We limit the panel to 10 characters high and display the last 10 log lines. 117 | # However, the panel won't display all 10 of those lines if some of the lines are long enough 118 | # to wrap in the panel. 119 | LOG_PANEL_HEIGHT = 10 120 | 121 | def __init__( 122 | self, 123 | status: str, 124 | *, 125 | console: Console | None = None, 126 | spinner: str = "dots", 127 | spinner_style: StyleType = "status.spinner", 128 | speed: float = 1.0, 129 | refresh_per_second: float = 12.5, 130 | ): 131 | """ 132 | Construct a new `StatusLog`. 133 | 134 | `status` is the status message to display next to the spinner. 135 | `console` is the Rich console to display the log status in. 136 | `spinner` is the name of the spinner animation (see python -m rich.spinner). Defaults to `dots`. 137 | `spinner_style` is the style of the spinner. Defaults to `status.spinner`. 138 | `speed` is the speed factor for the spinner animation. Defaults to 1.0. 139 | `refresh_per_second` is the number of refreshes per second. Defaults to 12.5. 140 | """ 141 | 142 | self._spinner = Spinner(spinner, text=status, style=spinner_style, speed=speed) 143 | self._log_panel = Panel("", height=self.LOG_PANEL_HEIGHT) 144 | self._live = Live( 145 | self.renderable, 146 | console=console, 147 | refresh_per_second=refresh_per_second, 148 | transient=True, 149 | ) 150 | 151 | @property 152 | def renderable(self) -> RenderableType: 153 | """ 154 | Create a Rich renderable type for the log panel. 155 | 156 | If the log panel contains text, we should create a group and place the 157 | log panel underneath the spinner. 
158 | """ 159 | 160 | if self._log_panel.renderable: 161 | return Group(self._spinner, self._log_panel) 162 | return self._spinner 163 | 164 | def update( 165 | self, 166 | status: str, 167 | logs: str | None, 168 | ) -> None: 169 | """ 170 | Update status and logs. 171 | """ 172 | 173 | if logs is None: 174 | logs = "" 175 | else: 176 | # Limit the logging output to the 10 most recent lines. 177 | logs = "\n".join(logs.splitlines()[-self.LOG_PANEL_HEIGHT :]) 178 | self._spinner.update(text=status) 179 | self._log_panel.renderable = logs 180 | self._live.update(self.renderable, refresh=True) 181 | 182 | def start(self) -> None: 183 | """ 184 | Start the status animation. 185 | """ 186 | 187 | self._live.start() 188 | 189 | def stop(self) -> None: 190 | """ 191 | Stop the spinner animation. 192 | """ 193 | 194 | self._live.stop() 195 | 196 | def __rich__(self) -> RenderableType: 197 | """ 198 | Convert to a Rich renderable type. 199 | """ 200 | 201 | return self.renderable 202 | 203 | 204 | class AuditSpinner(_StateActor): # pragma: no cover 205 | """ 206 | A progress spinner for `pip-audit`, using `rich.status`'s spinner support 207 | under the hood. 208 | """ 209 | 210 | def __init__(self, message: str = "") -> None: 211 | """ 212 | Initialize the `AuditSpinner`. 213 | """ 214 | 215 | self._console = Console() 216 | # NOTE: audits can be quite fast, so we need a pretty high refresh rate here. 217 | self._spinner = StatusLog( 218 | message, console=self._console, spinner="line", refresh_per_second=30 219 | ) 220 | 221 | # Keep the target set to `None` to ensure that the logs don't get written until the spinner 222 | # has finished writing output, regardless of the capacity argument 223 | self.log_handler = MemoryHandler( 224 | 0, flushLevel=logging.ERROR, target=None, flushOnClose=False 225 | ) 226 | self.prev_handlers: list[logging.Handler] = [] 227 | 228 | def update_state(self, message: str, logs: str | None = None) -> None: 229 | """ 230 | Update the spinner's state. 231 | """ 232 | 233 | self._spinner.update(message, logs) 234 | 235 | def initialize(self) -> None: 236 | """ 237 | Redirect logging to an in-memory log handler so that it doesn't get mixed in with the 238 | spinner output. 239 | """ 240 | 241 | # Remove all existing log handlers 242 | # 243 | # We're recording them here since we'll want to restore them once the spinner falls out of 244 | # scope 245 | root_logger = logging.root 246 | for handler in root_logger.handlers: 247 | self.prev_handlers.append(handler) 248 | for handler in self.prev_handlers: 249 | root_logger.removeHandler(handler) 250 | 251 | # Redirect logging to our in-memory handler that will buffer the log lines 252 | root_logger.addHandler(self.log_handler) 253 | 254 | self._spinner.start() 255 | 256 | def finalize(self) -> None: 257 | """ 258 | Cleanup the spinner output so it doesn't get combined with subsequent `stderr` output and 259 | flush any logs that were recorded while the spinner was active. 
260 | """ 261 | 262 | self._spinner.stop() 263 | 264 | # Now that the spinner is complete, flush the logs 265 | root_logger = logging.root 266 | stream_handler = logging.StreamHandler() 267 | stream_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT)) 268 | self.log_handler.setTarget(stream_handler) 269 | self.log_handler.flush() 270 | 271 | # Restore the original log handlers 272 | root_logger.removeHandler(self.log_handler) 273 | for handler in self.prev_handlers: 274 | root_logger.addHandler(handler) 275 | --------------------------------------------------------------------------------
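A minimal usage sketch for the state machinery shown in `_state.py` above, tying `AuditState`, `AuditSpinner`, and the context-manager protocol together. It is illustrative only and not part of the repository: the constructor and method signatures it uses (`AuditState(members=...)`, `AuditSpinner(message)`, `update_state(message, logs)`, the `with` support) all appear in the source above, while the placeholder work inside the block is hypothetical.

```
from pip_audit._state import AuditSpinner, AuditState

# Compose a spinner-backed state object; every member receives each update.
state = AuditState(members=[AuditSpinner("Starting audit")])

# Entering the `with` block calls initialize() (spinner starts, logging is
# buffered); leaving it calls finalize() (spinner stops, buffered logs flush).
with state:
    state.update_state("Collecting dependencies")
    # ... hypothetical long-running work would go here ...
    state.update_state("Querying vulnerability service", logs="last lines of output")
```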