├── .circleci └── config.yml ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── .gitlab-ci.yml ├── .pre-commit-config.yaml ├── .readthedocs.yml ├── AUTHORS ├── CONTRIBUTING.rst ├── LICENSE ├── MANIFEST.in ├── README.rst ├── docs ├── env.yml ├── requirements.txt └── sources │ ├── Makefile │ ├── _static │ ├── db_relationship.png │ ├── logo.png │ ├── pytestmonitor.png │ ├── pytestmonitor_alpha.png │ └── pytestmonitor_readme.png │ ├── changelog.rst │ ├── conf.py │ ├── configuration.rst │ ├── contributing.rst │ ├── index.rst │ ├── installation.rst │ ├── introduction.rst │ ├── make.bat │ ├── operating.rst │ ├── remote.rst │ └── run.rst ├── examples ├── pkg1 │ ├── __init__.py │ ├── test_mod1.py │ └── test_mod2.py ├── pkg2 │ ├── __init__.py │ └── test_mod_a.py ├── pkg3 │ ├── __init__.py │ └── test_mod_cl.py ├── pkg4 │ ├── __init__.py │ └── test_mod_a.py └── pkg5 │ ├── __init__.py │ ├── doctest.py │ └── test_special_pytest.py ├── pyproject.toml ├── pytest_monitor ├── __init__.py ├── handler.py ├── pytest_monitor.py ├── session.py └── sys_utils.py ├── requirements.dev.txt ├── requirements.txt ├── tests ├── conftest.py ├── test_monitor.py ├── test_monitor_component.py ├── test_monitor_context.py └── test_monitor_in_ci.py └── tox.ini /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | aliases: 4 | docker-image: &image 5 | - image: mambaorg/micromamba 6 | filter-pr-only: &PR-only 7 | branches: 8 | ignore: 9 | - master 10 | tags: 11 | ignore: 12 | - /.*/ 13 | filter-master-only: &master-only 14 | branches: 15 | only: 16 | - master 17 | filter-tags-only: &official-tag 18 | branches: 19 | ignore: 20 | - /.*/ 21 | tags: 22 | only: 23 | - /^pytest-monitor-.*/ 24 | matrix: &build-matrix 25 | parameters: 26 | python: [ "3.8", "3.9", "3.10", "3.11" ] 27 | pytest: [ "6.1", "7" ] 28 | exclude: 29 | - pytest: "6.1" 30 | python: "3.11" 31 | - pytest: "6.1" 32 | python: "3.9" 33 | - pytest: "6.1" 34 | python: "3.10" 35 | 36 | commands: 37 | make-env: 38 | description: "Create a brand new environment" 39 | parameters: 40 | python: 41 | type: string 42 | default: "3" 43 | description: "Python version to use for building" 44 | pytest: 45 | type: string 46 | default: "7" 47 | description: "Pytest version to use for testing" 48 | use_specific_requirements_file: 49 | type: string 50 | default: "requirements.txt" 51 | description: "Add specific requirements listed in a file to the environment. 
" 52 | extra_deps: 53 | type: string 54 | default: "" 55 | description: "Extra dependencies to install (given as a space separated string)" 56 | channels: 57 | type: string 58 | default: "https://conda.anaconda.org/conda-forge" 59 | description: "List of channels for fetching packages" 60 | publish_mode: 61 | type: boolean 62 | default: false 63 | description: "If true, does not pin versions in requirements.txt" 64 | steps: 65 | - when: 66 | condition: 67 | not: << parameters.publish_mode >> 68 | steps: 69 | - checkout 70 | - run: 71 | name: "Apply dependency constraints" 72 | command: | 73 | if [ "<< parameters.pytest >>" != "" ]; then 74 | sed -i 's/^pytest/pytest=<< parameters.pytest >>/g' << parameters.use_specific_requirements_file >> 75 | fi 76 | echo "" >> << parameters.use_specific_requirements_file >> 77 | if [ "<< parameters.extra_deps >>" != "" ]; then 78 | for dep in << parameters.extra_deps >> 79 | do 80 | echo $dep >> << parameters.use_specific_requirements_file >> 81 | done 82 | fi 83 | - run: 84 | name: "Create environment" 85 | command: | 86 | micromamba create -n project 87 | channels=$(echo << parameters.channels >> | sed "s/ / -c /g") 88 | requirements=$(cat << parameters.use_specific_requirements_file >> | tr '\n' ' ') 89 | micromamba install -n project -y python=<< parameters.python >> pip $requirements -c $channels 90 | - run: 91 | name: "Install project in environment" 92 | command: | 93 | eval "$(micromamba shell hook --shell=bash)" 94 | micromamba activate project 95 | python -m pip install -e . 96 | - run: 97 | name: "Dumping env" 98 | command: | 99 | micromamba env export --name project --explicit > manifest.txt 100 | - store_artifacts: 101 | path: manifest.txt 102 | - when: 103 | condition: << parameters.publish_mode >> 104 | steps: 105 | - checkout 106 | - run: 107 | name: "Create environment" 108 | command: | 109 | micromamba create -n project 110 | channels=$(echo << parameters.channels >> | sed "s/ / -c /g") 111 | requirements=$(cat requirements.txt | tr '\n' ' ') 112 | micromamba install -n project -y python=<< parameters.python >> $requirements -c $channels 113 | micromamba install -n project -y << parameters.extra_deps >> -c $channels 114 | 115 | lint-project: 116 | description: "Check code style" 117 | steps: 118 | - run: 119 | name: "Check formatting (black)" 120 | command: | 121 | eval "$(micromamba shell hook --shell=bash)" 122 | micromamba activate project 123 | black . 124 | - run: 125 | name: "Check code style (flake8)" 126 | command: | 127 | eval "$(micromamba shell hook --shell=bash)" 128 | micromamba activate project 129 | flake8 . 130 | - run: 131 | name: "Check import order (isort)" 132 | command: | 133 | eval "$(micromamba shell hook --shell=bash)" 134 | micromamba activate project 135 | isort . 
136 | test-project: 137 | description: "Run all the tests and store the results" 138 | parameters: 139 | runner: 140 | type: string 141 | default: "pytest" 142 | description: "Test executor" 143 | params: 144 | type: string 145 | default: "-v" 146 | description: "Test executor parameters" 147 | steps: 148 | - run: 149 | name: "Launch test" 150 | command: | 151 | eval "$(micromamba shell hook --shell=bash)" 152 | micromamba activate project 153 | mkdir test-results 154 | << parameters.runner >> << parameters.params >> --junit-xml=test-results/junit.xml 155 | - store_test_results: 156 | path: test-results/junit.xml 157 | - store_artifacts: 158 | path: test-results/junit.xml 159 | inject-pypi: 160 | description: "Inject pypi credentials" 161 | steps: 162 | - run: 163 | name: "Setup Pypi" 164 | command: | 165 | echo -e "[pypi]" >> ~/.pypirc 166 | echo -e "username = __token__" >> ~/.pypirc 167 | echo -e "password = $PYPI_PASSWORD" >> ~/.pypirc 168 | 169 | package-project: 170 | description: "Package project" 171 | steps: 172 | - run: 173 | name: "Make Packages" 174 | command: | 175 | eval "$(micromamba shell hook --shell=bash)" 176 | micromamba activate project 177 | python -m build 178 | publish-project: 179 | description: "Send sdist and wheels to Pypi" 180 | steps: 181 | - run: 182 | name: "Publish" 183 | command: | 184 | eval "$(micromamba shell hook --shell=bash)" 185 | micromamba activate project 186 | twine upload dist/* 187 | 188 | 189 | # Workflow definition 190 | workflows: 191 | PR: 192 | jobs: 193 | - lint 194 | - build: 195 | matrix: *build-matrix 196 | name: "build-py<< matrix.python >>-pytest << matrix.pytest >>" 197 | filters: *PR-only 198 | requires: 199 | - lint 200 | 201 | deploy: 202 | jobs: 203 | - publish: 204 | filters: *official-tag 205 | 206 | nightly: 207 | triggers: 208 | - schedule: 209 | cron: "0 0 * * *" 210 | filters: *master-only 211 | jobs: 212 | - build: 213 | python: "3" 214 | pytest: "7" 215 | 216 | 217 | jobs: 218 | lint: 219 | docker: *image 220 | steps: 221 | - make-env: 222 | use_specific_requirements_file: requirements.dev.txt 223 | - lint-project 224 | build: 225 | docker: *image 226 | parameters: 227 | python: 228 | type: string 229 | pytest: 230 | type: string 231 | steps: 232 | - make-env: 233 | extra_deps: mock 234 | python: << parameters.python >> 235 | pytest: << parameters.pytest >> 236 | - test-project 237 | publish: 238 | docker: *image 239 | steps: 240 | - make-env: 241 | extra_deps: twine setuptools build 242 | channels: https://conda.anaconda.org/conda-forge defaults anaconda 243 | publish_mode: true 244 | - inject-pypi 245 | - package-project 246 | - publish-project -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. Linux, macOS, Windows] 28 | - Python version: [e.g.
3.9.7] 29 | - Pytest version: [e.g. 6.2.3] 30 | - pytest-monitor version: [e.g. 1.6.2] 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Description 14 | 15 | 18 | 19 | Fixes #(issue) 20 | 21 | # Type of change 22 | 23 | 26 | 27 | - [ ] Bug fix (non-breaking change which fixes an issue) 28 | - [ ] New feature (non-breaking change which adds functionality) 29 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 30 | - [ ] This change requires a documentation update 31 | 32 | # Checklist: 33 | 34 | 37 | 38 | - [ ] My code follows the style guidelines of this project 39 | - [ ] I have performed a self-review of my own code 40 | - [ ] I have commented my code, particularly in hard-to-understand areas 41 | - [ ] I have made corresponding changes to the documentation 42 | - [ ] My changes generate no new warnings 43 | - [ ] I have added tests that prove my fix is effective or that my feature works 44 | - [ ] New and existing unit tests pass locally with my changes (not just the [CI](https://link.to.ci)) 45 | - [ ] Any dependent changes have been merged and published in downstream modules 46 | - [ ] I have provided a link to the issue this PR addresses in the Description section above (If there is none yet, 47 | [create one](https://github.com/CFMTech/pytest-monitor/issues)!)
48 | - [ ] I have updated the [changelog](https://github.com/CFMTech/pytest-monitor/blob/master/docs/sources/changelog.rst) 49 | - [ ] I have labeled my PR using appropriate tags (in particular using status labels like [`Status: Code Review Needed`](https://github.com/jsd-spif/pymonitor/labels/Status%3A%20Code%20Review%20Needed), [`Business: Test Needed`](https://github.com/jsd-spif/pymonitor/labels/Business%3A%20Test%20Needed) or [`Status: In Progress`](https://github.com/jsd-spif/pymonitor/labels/Status%3A%20In%20Progress) if you are still working on the PR) 50 | 51 | Do not forget to @ the people who need to do the review. 52 | 53 | 56 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/.pymon 2 | .idea/ 3 | 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | env/ 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *,cover 50 | .hypothesis/ 51 | .pytest_cache 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | 61 | # Flask instance folder 62 | instance/ 63 | 64 | # Sphinx documentation 65 | docs/sources/_build/ 66 | 67 | # MkDocs documentation 68 | /site/ 69 | 70 | # PyBuilder 71 | target/ 72 | 73 | # IPython Notebook 74 | .ipynb_checkpoints 75 | 76 | # pyenv 77 | .python-version 78 | 79 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | image: continuumio/miniconda 2 | 3 | stages: 4 | - test 5 | - deploy 6 | 7 | before_script: 8 | - conda create -q -n pymon -y python=3.6 9 | - conda install -q -n pymon psutil memory_profiler pytest -c https://conda.anaconda.org/conda-forge -c defaults -c anaconda -y 10 | - source activate pymon 11 | - python setup.py develop 12 | - mkdir -p build/public 13 | - mkdir public 14 | 15 | pymon_run_test: 16 | stage: test 17 | script: 18 | - pytest 19 | 20 | pages: 21 | stage: deploy 22 | except: 23 | - branches 24 | script: 25 | - conda install --file docs/requirements.txt -c defaults -c conda-forge -c anaconda -c pkgs/main -y 26 | - cd docs/sources/ && make html && cd - 27 | - mv docs/sources/_build/html/* public/ 28 | artifacts: 29 | paths: 30 | - public/ 31 | expire_in: 1 year 32 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: local 3 | hooks: 4 | - id: black 5 | name: black 6 | entry: black 7 | language: system 8 | pass_filenames: true 9 | types: [python] 10 | - id: flake8 11 | name: flake8 12 | entry: flake8 --max-line-length=120 13 | language: system 14 | pass_filenames: true 15 |
types: [python] 16 | - id: isort 17 | name: isort 18 | entry: isort 19 | language: system 20 | pass_filenames: true 21 | types: [python] 22 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Build documentation in the docs/ directory with Sphinx 9 | sphinx: 10 | configuration: docs/sources/conf.py 11 | 12 | # Optionally build your docs in additional formats such as PDF and ePub 13 | formats: all 14 | 15 | conda: 16 | environment: docs/env.yml 17 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Project developed and led by Jean-Sébastien Dieu. 2 | 3 | Contributors include: 4 | - Raymond Gauthier (jraygauthier) added Python 3.5 support. 5 | - Kyle Altendorf (altendky) fixed bugs on session teardown. 6 | - Hannes Engelhardt (veritogen) added Bitbucket CI support. 7 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | ============================= 2 | Contribution, getting started 3 | ============================= 4 | 5 | Contributions are highly welcomed and appreciated. Every little help counts, 6 | so do not hesitate! 7 | 8 | .. contents:: 9 | :depth: 2 10 | :backlinks: none 11 | 12 | Create your own development environment 13 | --------------------------------------- 14 | We use conda as our main packaging system, though pip works as well. Nevertheless, 15 | the following instructions describe how to set up your development environment using conda. 16 | 17 | #. Create a new environment: 18 | 19 | conda create -n pytest-monitor-dev python=3 -c https://conda.anaconda.org/conda-forge -c defaults 20 | 21 | #. Install the dependencies 22 | 23 | conda install --file requirements.txt -n pytest-monitor-dev -c https://conda.anaconda.org/conda-forge -c defaults 24 | 25 | #. Activate your environment 26 | 27 | conda activate pytest-monitor-dev 28 | 29 | #. Install pytest-monitor in development mode 30 | 31 | python setup.py develop 32 | 33 | #. You're done! 34 | 35 | 36 | .. _submitfeedback: 37 | 38 | Feature requests and feedback 39 | ----------------------------- 40 | We'd like to hear about your proposals and suggestions. Feel free to 41 | `submit them as issues `_ and: 42 | 43 | * Explain in detail how they should work. 44 | * Keep the scope as narrow as possible. This will make it easier to implement. 45 | 46 | 47 | .. _reportbugs: 48 | 49 | Report bugs 50 | ----------- 51 | Report bugs for pytest-monitor in the issue tracker. Every filed bug should include: 52 | * Your operating system name and version. 53 | * Any details about your local setup that might be helpful in troubleshooting, specifically: 54 | * the Python interpreter version 55 | * installed libraries 56 | * and pytest version. 57 | * Detailed steps to reproduce the bug. 58 | 59 | .. _fixbugs: 60 | 61 | Fix bugs 62 | -------- 63 | 64 | Look through the `GitHub issues for bugs `_. 65 | 66 | :ref:`Talk ` to developers to find out how you can fix specific bugs.
67 | 68 | Implement features 69 | ------------------ 70 | 71 | Look through the `GitHub issues for enhancements `_. 72 | 73 | :ref:`Talk ` to developers to find out how you can implement specific 74 | features. 75 | 76 | .. _`pull requests`: 77 | .. _pull-requests: 78 | 79 | Preparing Pull Requests 80 | ----------------------- 81 | 82 | Short version 83 | ~~~~~~~~~~~~~ 84 | 85 | #. Fork the repository. 86 | #. Enable and install `pre-commit `_ to ensure style-guides and code checks are followed. 87 | #. Target ``master`` for bugfixes and doc changes. 88 | #. Target ``features`` for new features or functionality changes. 89 | #. Follow **PEP-8** for naming and `black `_ for formatting. 90 | #. Tests are run using ``tox``:: 91 | 92 | tox -e linting,py37 93 | 94 | The test environments above are usually enough to cover most cases locally. 95 | 96 | #. Write a ``changelog`` entry: ``changelog/2574.bugfix.rst``, use issue id number 97 | and one of ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or 98 | ``trivial`` for the issue type. 99 | #. Unless your change is a trivial or a documentation fix (e.g., a typo or reword of a small section) please 100 | add yourself to the ``AUTHORS`` file, in alphabetical order. 101 | 102 | 103 | Long version 104 | ~~~~~~~~~~~~ 105 | 106 | What is a "pull request"? It informs the project's core developers about the 107 | changes you want to review and merge. Pull requests are stored on 108 | `GitHub servers `_. 109 | Once you send a pull request, we can discuss its potential modifications and 110 | even add more commits to it later on. There's an excellent tutorial on how Pull 111 | Requests work in the 112 | `GitHub Help Center `_. 113 | 114 | Here is a simple overview, with pytest-specific bits: 115 | 116 | #. Fork the 117 | `pytest GitHub repository `__. It's 118 | fine to use ``pytest`` as your fork repository name because it will live 119 | under your user. 120 | 121 | #. Clone your fork locally using `git `_ and create a branch:: 122 | 123 | $ git clone git@github.com:YOUR_GITHUB_USERNAME/pytest.git 124 | $ cd pytest 125 | # now, to fix a bug create your own branch off "master": 126 | 127 | $ git checkout -b fix/your-bugfix-branch-name master 128 | 129 | # or to instead add a feature create your own branch off "master": 130 | 131 | $ git checkout -b feature/your-feature-branch-name master 132 | 133 | Given we have "major.minor.micro" version numbers, bugfixes will usually 134 | be released in micro releases whereas features will be released in 135 | minor releases and incompatible changes in major releases. 136 | 137 | If you need some help with Git, follow this quick start 138 | guide: https://git.wiki.kernel.org/index.php/QuickStart 139 | 140 | #. Install `pre-commit `_ and its hook on the pytest repo: 141 | 142 | **Note: pre-commit must be installed as admin, as it will not function otherwise**:: 143 | 144 | $ pip install --user pre-commit 145 | $ pre-commit install 146 | 147 | Afterwards ``pre-commit`` will run whenever you commit. 148 | 149 | https://pre-commit.com/ is a framework for managing and maintaining multi-language pre-commit hooks 150 | to ensure code-style and code formatting is consistent. 151 | 152 | #. Install tox 153 | 154 | Tox is used to run all the tests and will automatically setup virtualenvs 155 | to run the tests in. 156 | (will implicitly use http://www.virtualenv.org/en/latest/):: 157 | 158 | $ pip install tox 159 | 160 | #. Run all the tests 161 | 162 | You need to have Python 3.7 available in your system. 
Now 163 | running tests is as simple as issuing this command:: 164 | 165 | $ tox -e linting,py37 166 | 167 | This command will run tests via the "tox" tool against Python 3.7 168 | and also perform "lint" coding-style checks. 169 | 170 | #. You can now edit your local working copy and run the tests again as necessary. Please follow PEP-8 for naming. 171 | 172 | You can pass different options to ``tox``. For example, to run tests on Python 3.7 and pass options 173 | (e.g. enter pdb on failure) to pytest, you can do:: 174 | 175 | $ tox -e py37 -- --pdb 176 | 177 | Or to only run tests in a particular test module on Python 3.7:: 178 | 179 | $ tox -e py37 -- testing/test_config.py 180 | 181 | 182 | When committing, ``pre-commit`` will re-format the files if necessary. 183 | 184 | #. If instead of using ``tox`` you prefer to run the tests directly, then we suggest creating a virtual environment and using 185 | an editable install with the ``testing`` extra:: 186 | 187 | $ python3 -m venv .venv 188 | $ source .venv/bin/activate # Linux 189 | $ .venv/Scripts/activate.bat # Windows 190 | $ pip install -e ".[testing]" 191 | 192 | Afterwards, you can edit the files and run pytest normally:: 193 | 194 | $ pytest testing/test_config.py 195 | 196 | 197 | #. Commit and push once your tests pass and you are happy with your change(s):: 198 | 199 | $ git commit -a -m "" 200 | $ git push -u 201 | 202 | #. Create a new changelog entry in ``changelog``. The file should be named ``<issueid>.<type>.rst``, 203 | where *issueid* is the number of the issue related to the change and *type* is one of 204 | ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or ``trivial``. You may skip creating a 205 | changelog entry if the change doesn't affect the documented behaviour of Pytest. 206 | 207 | #. Add yourself to the ``AUTHORS`` file if not there yet, in alphabetical order. 208 | 209 | #. Finally, submit a pull request through the GitHub website using this data:: 210 | 211 | head-fork: YOUR_GITHUB_USERNAME/pytest 212 | compare: your-branch-name 213 | 214 | base-fork: pytest-dev/pytest 215 | base: master # if it's a bugfix 216 | base: features # if it's a feature 217 | 218 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2020 Capital Fund Management 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE.
23 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.rst 3 | 4 | recursive-exclude * __pycache__ 5 | recursive-exclude * *.py[co] 6 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | .. image:: docs/sources/_static/pytestmonitor_readme.png 2 | :width: 160 3 | :align: center 4 | :alt: Pytest-Monitor 5 | 6 | ------ 7 | 8 | ============== 9 | pytest-monitor 10 | ============== 11 | 12 | .. image:: https://readthedocs.org/projects/pytest-monitor/badge/?version=latest 13 | :target: https://pytest-monitor.readthedocs.io/en/latest/?badge=latest 14 | :alt: Documentation Status 15 | 16 | .. image:: https://img.shields.io/pypi/v/pytest-monitor.svg 17 | :target: https://pypi.org/project/pytest-monitor 18 | :alt: PyPI version 19 | 20 | .. image:: https://img.shields.io/pypi/pyversions/pytest-monitor.svg 21 | :target: https://circleci.com/gh/jsd-spif/pymonitor.svg?style=svg&circle-token=cdf89a7212139aff0cc236227cb519363981de0b 22 | :alt: Python versions 23 | 24 | .. image:: https://circleci.com/gh/CFMTech/pytest-monitor/tree/master.svg?style=shield&circle-token=054adaaf6a19f4f55a4f0ad419649f1807e70ea9 25 | :target: https://circleci.com/gh/CFMTech/pytest-monitor/tree/master 26 | :alt: See Build Status on Circle CI 27 | 28 | .. image:: https://anaconda.org/conda-forge/pytest-monitor/badges/platforms.svg 29 | :target: https://anaconda.org/conda-forge/pytest-monitor 30 | 31 | .. image:: https://anaconda.org/conda-forge/pytest-monitor/badges/version.svg 32 | :target: https://anaconda.org/conda-forge/pytest-monitor 33 | 34 | .. image:: https://img.shields.io/badge/License-MIT-blue.svg 35 | :target: https://opensource.org/licenses/MIT 36 | 37 | 38 | Pytest-monitor is a pytest plugin designed for analyzing resource usage. 39 | 40 | ---- 41 | 42 | 43 | Features 44 | -------- 45 | 46 | - Analyze your resource consumption through test functions: 47 | 48 | * memory consumption 49 | * time duration 50 | * CPU usage 51 | - Keep a history of your resource consumption measurements. 52 | - Compare how your code behaves between different environments. 53 | 54 | 55 | Usage 56 | ----- 57 | 58 | Simply run *pytest* as usual: *pytest-monitor* is active by default as soon as it is installed. 59 | After running your first session, a ``.pymon`` SQLite database will be accessible in the directory where pytest was run.
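The collected measures can then be inspected with any SQLite client. As a minimal sketch using Python's built-in ``sqlite3`` module (the ``TEST_METRICS`` table name below is an assumption; the column names follow the sample data shown below, so adapt both to the actual schema of your database)::

    import sqlite3

    # Open the database produced by the last pytest session.
    db = sqlite3.connect(".pymon")
    # List the five most memory-hungry test items. TEST_METRICS is assumed
    # to be the table holding the per-test measures.
    query = (
        "SELECT ITEM, TOTAL_TIME, MEM_USAGE FROM TEST_METRICS "
        "ORDER BY MEM_USAGE DESC LIMIT 5"
    )
    for item, total_time, mem_usage in db.execute(query):
        print(f"{item}: {total_time:.3f}s, {mem_usage:.1f} MB")
    db.close()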
60 | 61 | Example of information collected for the execution context: 62 | 63 | +-----------------------------------+-----------+-------------------+---------+-------------------------------------------+---------------+--------------------+------------+-------------------------------+-------------------------------+--------------------------------------------------+ 64 | | ENV_H| CPU_COUNT| CPU_FREQUENCY_MHZ| CPU_TYPE| CPU_VENDOR| RAM_TOTAL_MB | MACHINE_NODE |MACHINE_TYPE| MACHINE_ARCH | SYSTEM_INFO | PYTHON_INFO| 65 | +===================================+===========+===================+=========+===========================================+===============+====================+============+===============================+===============================+==================================================+ 66 | | 8294b1326007d9f4c8a1680f9590c23d | 36 | 3000 | x86_64 | Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz | 772249 | some.host.vm.fr | x86_64 | 64bit | Linux - 3.10.0-693.el7.x86_64 | 3.6.8 (default, Jun 28 2019, 11:09:04) \n[GCC ...| 67 | +-----------------------------------+-----------+-------------------+---------+-------------------------------------------+---------------+--------------------+------------+-------------------------------+-------------------------------+--------------------------------------------------+ 68 | 69 | Here is an example of collected data stored in the result database: 70 | 71 | +------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+ 72 | | RUN_DATE| ENV_H| SCM_ID| ITEM_START_TIME| ITEM| KIND| COMPONENT| TOTAL_TIME| USER_TIME| KERNEL_TIME| CPU_USAGE| MEM_USAGE| 73 | +==============================+==================================+==========================================+============================+========================================+==========+==========+============+===========+=============+============+===========+ 74 | | 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:36.890477 | pkg1.test_mod1/test_sleep1 | function | None | 1.005669 | 0.54 | 0.06 | 0.596618 | 1.781250 | 75 | +------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+ 76 | | 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:39.912029 | pkg1.test_mod1/test_heavy[10-10] | function | None | 0.029627 | 0.55 | 0.08 | 21.264498 | 1.781250 | 77 | +------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+ 78 | | 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:39.948922 | pkg1.test_mod1/test_heavy[100-100] | function | None | 0.028262 | 0.56 | 0.09 | 22.998773 | 1.781250 | 79 | 
+------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+ 80 | | 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:39.983869 | pkg1.test_mod1/test_heavy[1000-1000] | function | None | 0.030131 | 0.56 | 0.10 | 21.904277 | 2.132812 | 81 | +------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+ 82 | | 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:40.020823 | pkg1.test_mod1/test_heavy[10000-10000] | function | None | 0.060060 | 0.57 | 0.14 | 11.821601 | 41.292969 | 83 | +------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+ 84 | | 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:40.093490 | pkg1.test_mod2/test_sleep_400ms | function | None | 0.404860 | 0.58 | 0.15 | 1.803093 | 2.320312 | 85 | +------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+ 86 | | 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:40.510525 | pkg2.test_mod_a/test_master_sleep | function | None | 5.006039 | 5.57 | 0.15 | 1.142620 | 2.320312 | 87 | +------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+ 88 | | 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:45.530780 | pkg3.test_mod_cl/test_method1 | function | None | 0.030505 | 5.58 | 0.16 | 188.164762 | 2.320312 | 89 | +------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+ 90 | | 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:50.582954 | pkg4.test_mod_a/test_force_monitor | function | test | 1.005015 | 11.57 | 0.17 | 11.681416 | 2.320312 | 91 | +------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+ 92 | 93 | Documentation 94 | ------------- 95 | 96 | A full documentation is `available `_. 
97 | 98 | Installation 99 | ------------ 100 | 101 | You can install *pytest-monitor* via *conda* (through the `conda-forge` channel):: 102 | 103 | $ conda install pytest-monitor -c https://conda.anaconda.org/conda-forge 104 | 105 | Another possibility is to install *pytest-monitor* via `pip`_ from `PyPI`_:: 106 | 107 | $ pip install pytest-monitor 108 | 109 | 110 | Requirements 111 | ------------ 112 | 113 | You will need a valid Python 3.5+ interpreter. To get measures, we rely on: 114 | 115 | - *psutil* to extract CPU usage 116 | - *memory_profiler* to collect memory usage 117 | - and *pytest* (obviously!) 118 | 119 | **Note: this plugin doesn't work with unittest** 120 | 121 | Storage backends 122 | ---------------- 123 | By default, pytest-monitor stores its results in a local SQLite3 database, making them easy to access. 124 | If you need a more powerful way to analyze your results, check out the 125 | `monitor-server-api`_, which provides both a REST API for storing and historizing your results and an API for querying your data. 126 | An alternative service (using MongoDB) can be used thanks to a contribution from @dremdem: `pytest-monitor-backend`_. 127 | 128 | 129 | Contributing 130 | ------------ 131 | 132 | Contributions are very welcome. Tests can be run with `tox`_. Before submitting a pull request, please ensure 133 | that: 134 | 135 | * both internal tests and examples are passing. 136 | * internal tests have been written if necessary. 137 | * if your contribution provides a new feature, make sure to provide an example and update the documentation accordingly. 138 | 139 | License 140 | ------- 141 | 142 | This code is distributed under the `MIT`_ license. *pytest-monitor* is free, open-source software. 143 | 144 | 145 | Issues 146 | ------ 147 | 148 | If you encounter any problem, please `file an issue`_ along with a detailed description. 149 | 150 | Author 151 | ------ 152 | 153 | The main author of `pytest-monitor` is Jean-Sébastien Dieu, who can be reached at jdieu@salsify.fr. 154 | 155 | ---- 156 | 157 | This `pytest`_ plugin was generated with `Cookiecutter`_ along with `@hackebrot`_'s `cookiecutter-pytest-plugin`_ template. 158 | 159 | .. _`Cookiecutter`: https://github.com/audreyr/cookiecutter 160 | .. _`@hackebrot`: https://github.com/hackebrot 161 | .. _`MIT`: http://opensource.org/licenses/MIT 162 | .. _`BSD-3`: http://opensource.org/licenses/BSD-3-Clause 163 | .. _`GNU GPL v3.0`: http://www.gnu.org/licenses/gpl-3.0.txt 164 | .. _`Apache Software License 2.0`: http://www.apache.org/licenses/LICENSE-2.0 165 | .. _`cookiecutter-pytest-plugin`: https://github.com/pytest-dev/cookiecutter-pytest-plugin 166 | .. _`file an issue`: https://github.com/CFMTech/pytest-monitor/issues 167 | .. _`pytest`: https://github.com/pytest-dev/pytest 168 | .. _`tox`: https://tox.readthedocs.io/en/latest/ 169 | .. _`pip`: https://pypi.org/project/pip/ 170 | .. _`PyPI`: https://pypi.org/project 171 | .. _`monitor-server-api`: https://github.com/CFMTech/monitor-server-api 172 | ..
_`pytest-monitor-backend`: https://github.com/dremdem/pytest-monitor-backend 173 | -------------------------------------------------------------------------------- /docs/env.yml: -------------------------------------------------------------------------------- 1 | name: docenv 2 | 3 | channels: 4 | - anaconda 5 | 6 | dependencies: 7 | - python==3.7 8 | - pip: 9 | - alabaster==0.7.12 10 | - asn1crypto==1.3.0 11 | - Babel==2.8.0 12 | - certifi==2019.11.28 13 | - cffi==1.13.2 14 | - chardet==3.0.4 15 | - cryptography==2.8 16 | - docutils==0.16 17 | - idna==2.8 18 | - imagesize==1.2.0 19 | - Jinja2==2.11.1 20 | - lz4==3.0.2 21 | - MarkupSafe==1.1.1 22 | - packaging==20.1 23 | - pycparser==2.19 24 | - Pygments==2.5.2 25 | - pyOpenSSL==19.1.0 26 | - pyparsing==2.4.6 27 | - PySocks==1.7.1 28 | - pytz==2019.3 29 | - releases==1.6.3 30 | - requests==2.22.0 31 | - semantic-version==2.6.0 32 | - six==1.14.0 33 | - snowballstemmer==2.0.0 34 | - Sphinx==2.3.1 35 | - sphinx-rtd-theme==0.4.3 36 | - sphinxcontrib-applehelp==1.0.1 37 | - sphinxcontrib-devhelp==1.0.1 38 | - sphinxcontrib-htmlhelp==1.0.2 39 | - sphinxcontrib-jsmath==1.0.1 40 | - sphinxcontrib-qthelp==1.0.2 41 | - sphinxcontrib-serializinghtml==1.1.3 42 | - urllib3==1.25.8 43 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | alabaster 2 | babel 3 | sphinx 4 | sphinx-releases 5 | sphinx_rtd_theme 6 | semantic_version==2.6.* 7 | make 8 | pygraphviz 9 | -------------------------------------------------------------------------------- /docs/sources/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pytest-cookiecutterplugin_name.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pytest-cookiecutterplugin_name.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 
101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/pytest-cookiecutterplugin_name" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pytest-cookiecutterplugin_name" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 
188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/sources/_static/db_relationship.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CFMTech/pytest-monitor/30585e4fae0d1c8e97ca226060d07024e2a00fb3/docs/sources/_static/db_relationship.png -------------------------------------------------------------------------------- /docs/sources/_static/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CFMTech/pytest-monitor/30585e4fae0d1c8e97ca226060d07024e2a00fb3/docs/sources/_static/logo.png -------------------------------------------------------------------------------- /docs/sources/_static/pytestmonitor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CFMTech/pytest-monitor/30585e4fae0d1c8e97ca226060d07024e2a00fb3/docs/sources/_static/pytestmonitor.png -------------------------------------------------------------------------------- /docs/sources/_static/pytestmonitor_alpha.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CFMTech/pytest-monitor/30585e4fae0d1c8e97ca226060d07024e2a00fb3/docs/sources/_static/pytestmonitor_alpha.png -------------------------------------------------------------------------------- /docs/sources/_static/pytestmonitor_readme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CFMTech/pytest-monitor/30585e4fae0d1c8e97ca226060d07024e2a00fb3/docs/sources/_static/pytestmonitor_readme.png -------------------------------------------------------------------------------- /docs/sources/changelog.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Changelog 3 | ========= 4 | 5 | * :release:`to be discussed` 6 | * :feature:`#75` Automatically gather CI build information for Bitbucket CI. 7 | 8 | * :release:`1.6.6 <2023-05-06>` 9 | * :bug:`#64` Prepare version 1.7.0 of pytest-monitor. Last version to support Python <= 3.7 and all pytest <= 5.* 10 | * :bug:`#0` Improve and fix some CI issues, notably one that could cause the Python version used to be more recent than the one requested. 11 | 12 | * :release:`1.6.5 <2022-10-16>` 13 | * :bug:`#60` Make sure that when psutil cannot fetch the CPU frequency, the fallback mechanism is used. 14 | 15 | * :release:`1.6.4 <2022-05-18>` 16 | * :bug:`#56` Force the CPU frequency to 0 and emit a warning when unable to fetch it from the system. 17 | * :bug:`#54` Fix a bug that crashed the monitor upon non-ASCII characters in commit logs under Perforce. Improved P4 change number extraction. 18 | 19 | * :release:`1.6.3 <2021-12-22>` 20 | * :bug:`#50` Fix a bug where a skipping fixture resulted in an exception during teardown. 21 | 22 | * :release:`1.6.2 <2021-08-24>` 23 | * :bug:`#40` Fix a bug that caused the garbage collector to be disabled by default. 24 | 25 | * :release:`1.6.1 <2021-08-23>` 26 | * :bug:`#43` Fix a bug that prevented session tags from being sent correctly.
27 | * :bug:`#40` Force the garbage collector to run between tests (better result accuracy) 28 | 29 | * :release:`1.6.0 <2021-04-16>` 30 | * :feature:`#0` Support for Python 3.5 31 | * :feature:`#35` Better support for Doctest items. 32 | * :feature:`#24` Prefer JSON data type for storing session extended information instead of plain text. 33 | 34 | 35 | * :release:`1.5.1 <2021-02-05>` 36 | * :bug:`#31` Rename option --remote to --remote-server, as it seems to conflict with some plugins. 37 | * :bug:`#23` Fix requirements minimum version. 38 | 39 | * :release:`1.5.0 <2020-11-20>` 40 | * :feature:`25` Automatically gather CI build information (supported CIs are Drone CI, GitLab CI, Jenkins CI, Travis CI and Circle CI) 41 | * :bug:`#23 major` psutil minimum requirement is now 5.1.0 42 | * :bug:`#28 major` Fix a bug that caused output to be printed multiple times 43 | 44 | * :release:`1.4.0 <2020-06-04>` 45 | * :feature:`21` Use JSON format to populate the RUN_DESCRIPTION field (through the --description and --tag options) 46 | 47 | * :release:`1.3.0 <2020-05-12>` 48 | * :feature:`19` Normalized HTTP codes used for sending metrics to a remote server. 49 | 50 | * :release:`1.2.0 <2020-04-17>` 51 | * :feature:`13` Change default analysis scope to function. 52 | * :bug:`12 major` No execution contexts pushed when using a remote server. 53 | * :bug:`14 major` A local database is always created even with the --no-db option passed. 54 | 55 | * :release:`1.1.1 <2020-03-31>` 56 | * :bug:`9` Fix remote server interface for sending measures. 57 | 58 | * :release:`1.1.0 <2020-03-30>` 59 | * :feature:`5` Extend item information and separate item from its variants. 60 | * :feature:`3` Compute user time and kernel time on a per-test basis for clarity and ease of exploitation. 61 | * :feature:`4` Added an option to add a description to a pytest run. 62 | 63 | * :release:`1.0.1 <2020-03-18>` 64 | * :bug:`2` pytest-monitor hangs infinitely when a pytest outcome (skip, fail...) is issued. 65 | 66 | * :release:`1.0.0 <2020-02-20>` 67 | * :feature:`0` Initial release 68 | -------------------------------------------------------------------------------- /docs/sources/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # pytest-monitor documentation build configuration file, created by 4 | # sphinx-quickstart on Thu Oct 1 00:43:18 2015. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import pathlib 16 | 17 | 18 | def read_version(): 19 | init = pathlib.Path(__file__).parent.parent.parent / "pytest_monitor" / "__init__.py" 20 | with init.open("r") as pkg_init_f: 21 | version_read = [line.strip() for line in pkg_init_f if line.startswith("__version__")] 22 | if len(version_read) > 1: 23 | raise ValueError('Multiple versions found in "pytest_monitor" package!') 24 | if not version_read: 25 | raise ValueError('No version found in "pytest_monitor" package!') 26 | return version_read[0].split("=", 1)[1].strip("\" '") 27 | 28 | 29 | # If extensions (or modules to document with autodoc) are in another directory, 30 | # add these directories to sys.path here. If the directory is relative to the 31 | # documentation root, use os.path.abspath to make it absolute, like shown here.
32 | # sys.path.insert(0, os.path.abspath('.')) 33 | 34 | # -- General configuration ------------------------------------------------ 35 | 36 | # If your documentation needs a minimal Sphinx version, state it here. 37 | # needs_sphinx = '1.0' 38 | 39 | # Add any Sphinx extension module names here, as strings. They can be 40 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 41 | # ones. 42 | extensions = [ 43 | "sphinx.ext.ifconfig", 44 | "sphinx.ext.todo", 45 | "sphinx.ext.graphviz", 46 | "releases", 47 | ] 48 | 49 | # Add any paths that contain templates here, relative to this directory. 50 | templates_path = ["_templates"] 51 | 52 | # The suffix(es) of source filenames. 53 | # You can specify multiple suffix as a list of string: 54 | # source_suffix = ['.rst', '.md'] 55 | source_suffix = ".rst" 56 | 57 | # The encoding of source files. 58 | # source_encoding = 'utf-8-sig' 59 | 60 | # The master toctree document. 61 | master_doc = "index" 62 | 63 | # General information about the project. 64 | project = "pytest-monitor" 65 | copyright = "2019, Jean-Sébastien Dieu" # noqa A001 66 | author = "Jean-Sébastien Dieu" 67 | 68 | # The version info for the project you're documenting, acts as replacement for 69 | # |version| and |release|, also used in various other places throughout the 70 | # built documents. 71 | # 72 | # The short X.Y version. 73 | version = read_version() 74 | # The full version, including alpha/beta/rc tags. 75 | release = f"pytest-monitor v{version}" 76 | 77 | # The language for content autogenerated by Sphinx. Refer to documentation 78 | # for a list of supported languages. 79 | # 80 | # This is also used if you do content translation via gettext catalogs. 81 | # Usually you set "language" from the command line for these cases. 82 | language = None 83 | 84 | # There are two options for replacing |today|: either, you set today to some 85 | # non-false value, then it is used: 86 | # today = '' 87 | # Else, today_fmt is used as the format for a strftime call. 88 | # today_fmt = '%B %d, %Y' 89 | 90 | # List of patterns, relative to source directory, that match files and 91 | # directories to ignore when looking for source files. 92 | exclude_patterns = ["_build"] 93 | 94 | # The reST default role (used for this markup: `text`) to use for all 95 | # documents. 96 | # default_role = None 97 | 98 | # If true, '()' will be appended to :func: etc. cross-reference text. 99 | # add_function_parentheses = True 100 | 101 | # If true, the current module name will be prepended to all description 102 | # unit titles (such as .. function::). 103 | # add_module_names = True 104 | 105 | # If true, sectionauthor and moduleauthor directives will be shown in the 106 | # output. They are ignored by default. 107 | # show_authors = False 108 | 109 | # The name of the Pygments (syntax highlighting) style to use. 110 | pygments_style = "sphinx" 111 | 112 | # A list of ignored prefixes for module index sorting. 113 | # modindex_common_prefix = [] 114 | 115 | # If true, keep warnings as "system message" paragraphs in the built documents. 116 | # keep_warnings = False 117 | 118 | # If true, `todo` and `todoList` produce output, else they produce nothing. 119 | todo_include_todos = True 120 | todo_emit_warnings = True 121 | 122 | # -- Options for HTML output ---------------------------------------------- 123 | 124 | # The theme to use for HTML and HTML Help pages. See the documentation for 125 | # a list of builtin themes. 
126 | html_theme = "sphinx_rtd_theme" 127 | 128 | # Theme options are theme-specific and customize the look and feel of a theme 129 | # further. For a list of options available for each theme, see the 130 | # documentation. 131 | # html_theme_options = {} 132 | 133 | # Add any paths that contain custom themes here, relative to this directory. 134 | # html_theme_path = [] 135 | 136 | # The name for this set of Sphinx documents. If None, it defaults to 137 | # " v documentation". 138 | # html_title = None 139 | 140 | # A shorter title for the navigation bar. Default is the same as html_title. 141 | # html_short_title = None 142 | 143 | # The name of an image file (relative to this directory) to place at the top 144 | # of the sidebar. 145 | html_logo = "_static/pytestmonitor_alpha.png" 146 | 147 | # The name of an image file (within the static path) to use as favicon of the 148 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 149 | # pixels large. 150 | # html_favicon = None 151 | 152 | # Add any paths that contain custom static files (such as style sheets) here, 153 | # relative to this directory. They are copied after the builtin static files, 154 | # so a file named "default.css" will overwrite the builtin "default.css". 155 | html_static_path = ["_static"] 156 | 157 | # Add any extra paths that contain custom files (such as robots.txt or 158 | # .htaccess) here, relative to this directory. These files are copied 159 | # directly to the root of the documentation. 160 | # html_extra_path = [] 161 | 162 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 163 | # using the given strftime format. 164 | # html_last_updated_fmt = '%b %d, %Y' 165 | 166 | # If true, SmartyPants will be used to convert quotes and dashes to 167 | # typographically correct entities. 168 | # html_use_smartypants = True 169 | 170 | # Custom sidebar templates, maps document names to template names. 171 | # html_sidebars = {} 172 | 173 | # Additional templates that should be rendered to pages, maps page names to 174 | # template names. 175 | # html_additional_pages = {} 176 | 177 | # If false, no module index is generated. 178 | # html_domain_indices = True 179 | 180 | # If false, no index is generated. 181 | # html_use_index = True 182 | 183 | # If true, the index is split into individual pages for each letter. 184 | # html_split_index = False 185 | 186 | # If true, links to the reST sources are added to the pages. 187 | # html_show_sourcelink = True 188 | 189 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 190 | # html_show_sphinx = True 191 | 192 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 193 | # html_show_copyright = True 194 | 195 | # If true, an OpenSearch description file will be output, and all pages will 196 | # contain a tag referring to it. The value of this option must be the 197 | # base URL from which the finished HTML is served. 198 | # html_use_opensearch = '' 199 | 200 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 201 | # html_file_suffix = None 202 | 203 | # Language to be used for generating the HTML full-text search index. 204 | # Sphinx supports the following languages: 205 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 206 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 207 | # html_search_language = 'en' 208 | 209 | # A dictionary with options for the search language support, empty by default. 
210 | # Now only 'ja' uses this config value
211 | # html_search_options = {'type': 'default'}
212 | 
213 | # The name of a javascript file (relative to the configuration directory) that
214 | # implements a search results scorer. If empty, the default will be used.
215 | # html_search_scorer = 'scorer.js'
216 | 
217 | # Output file base name for HTML help builder.
218 | htmlhelp_basename = "pytestmonitor-doc"
219 | 
--------------------------------------------------------------------------------
/docs/sources/configuration.rst:
--------------------------------------------------------------------------------
1 | ========================
2 | Configuring your session
3 | ========================
4 | 
5 | `pytest-monitor` gives you flexibility for running your test suite.
6 | In this section, we discuss the available options and how they influence the `pytest` session.
7 | 
8 | Scope Restriction
9 | -----------------
10 | 
11 | `pytest-monitor` is able to restrict the scope of the analysis. By default,
12 | only test functions discovered by pytest are monitored.
13 | 
14 | Sometimes, you might want to monitor a whole module or test session. This can be
15 | achieved thanks to the *\-\-restrict-scope-to* option.
16 | 
17 | If a scope restriction is set, then the monitoring will be performed at the selected levels.
18 | For example, monitoring at both function and module level can be achieved with the following command:
19 | 
20 | .. code-block:: shell
21 | 
22 |     pytest --restrict-scope-to function,module
23 | 
24 | Accepted values are:
25 | 
26 | * function: test functions will be monitored individually, leading to one entry per test function.
27 | * module: each discovered module will be monitored regardless of the others.
28 | * class: test class objects will be monitored individually.
29 | * session: monitor the whole session.
30 | 
31 | It is important to realize that using multiple scopes has an impact on the monitoring measures. For example, the `pytest-monitor` code that monitors functions does consume resources for each function (notably compute time). As a consequence, the resources consumed by their module will include the resources consumed by `pytest-monitor` for each function. If individual functions were not monitored, the resource consumption reported for the module would therefore be lower.
32 | 
33 | Due to the way `pytest` handles test modules, some specificities apply when monitoring modules:
34 | 
35 | * The total measured elapsed time includes the setup/teardown process for each function.
36 |   On the other hand, a function object measures only the duration of the function run (without the setup and teardown parts).
37 | * Consumed memory will be the peak of memory usage during the whole module run.
38 | 
39 | 
40 | Handling parameterized tests
41 | ----------------------------
42 | 
43 | Parameterized tests can be introspected by `pytest-monitor` during the setup phase: their real
44 | name is based on the parameter values. This uses the string representation of the parameters (so make sure that this representation suits your needs).
45 | 
46 | Let's consider the following test:
47 | 
48 | .. code-block:: python
49 | 
50 |     @pytest.mark.parametrize(('asint', 'asstr'), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")])
51 |     def test_p(asint, asstr):
52 |         assert asint == int(asstr)
53 | 
54 | By default, `pytest-monitor` will generate the following entries:
55 | 
56 | * test_p[10-10]
57 | * test_p[100-100]
58 | * test_p[1000-1000]
59 | * test_p[10000-10000]
60 | 
61 | 
62 | You can ask `pytest-monitor` to tag parameters with their names (as provided by ``@pytest.mark.parametrize``), with the following option:
63 | 
64 | .. code-block:: shell
65 | 
66 |     pytest --parametrization-explicit
67 | 
68 | which will lead to the following entries:
69 | 
70 | * test_p[asint_10-asstr_10]
71 | * test_p[asint_100-asstr_100]
72 | * test_p[asint_1000-asstr_1000]
73 | * test_p[asint_10000-asstr_10000]
74 | 
75 | 
76 | Disable monitoring
77 | ------------------
78 | 
79 | If for some reason you need to disable monitoring, pass the *\-\-no-monitor* option.
80 | 
81 | 
82 | Describing a run
83 | ----------------
84 | 
85 | Sometimes, you might want to compare runs made on an identical state of your code. In such cases, relying only on the SCM
86 | reference and the run date of the session is not sufficient. For that, `pytest-monitor` can assist you by tagging
87 | your session with a description and tags.
88 | 
89 | 
90 | Description and tags
91 | ~~~~~~~~~~~~~~~~~~~~
92 | The description should be used to provide a brief summary of your run, while tags can be used to
93 | record specific information you want to focus on during your analysis.
94 | Setting a description is as simple as this:
95 | 
96 | .. code-block:: shell
97 | 
98 |     bash $> pytest --description "Any run description you want"
99 | 
100 | 
101 | Flagging your session with specific information is just as simple:
102 | 
103 | .. code-block:: shell
104 | 
105 |     bash $> pytest --tag pandas=1.0.1 --tag numpy=1.17
106 | 
107 | This will result in a session with the following description:
108 | 
109 | .. code-block:: text
110 | 
111 |     {
112 |         "pandas": "1.0.1",
113 |         "numpy": "1.17"
114 |     }
115 | 
116 | 
117 | You can of course combine both options to fully describe your session:
118 | 
119 | .. code-block:: shell
120 | 
121 |     bash $> pytest --tag pandas=1.0.1 --tag numpy=1.17 --description "Your summary"
122 | 
123 | This will result in a session with the following description:
124 | 
125 | .. code-block:: text
126 | 
127 |     {
128 |         "msg": "Your summary",
129 |         "pandas": "1.0.1",
130 |         "numpy": "1.17"
131 |     }
132 | 
133 | Describing a CI build
134 | ~~~~~~~~~~~~~~~~~~~~~
135 | For convenience, pytest-monitor automatically extends the session's description with some information
136 | extracted from the CI build. For that purpose, pytest-monitor reads the environment
137 | at the start of the test session, looking for:
138 | * **pipeline_branch**, which can either represent a CI pipeline name (preferentially) or the source code branch name.
139 | * **pipeline_build_no**, which is the pipeline build number (if available) or the pipeline ID if any.
140 | * **__ci__**, which identifies the CI system used.
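
For instance, on Circle CI, a session run with ``--description "Your summary"`` could end up with a description like this (all values here are purely illustrative):

.. code-block:: text

    {
        "msg": "Your summary",
        "pipeline_branch": "build-and-test",
        "pipeline_build_no": "1234",
        "__ci__": "circleci"
    }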
141 | 
142 | Currently, pytest-monitor supports the following CI systems:
143 | * Gitlab CI
144 | * Travis CI
145 | * Jenkins
146 | * Drone CI
147 | * Circle CI
148 | * Bitbucket CI
149 | 
150 | The following table explains how these fields are mapped:
151 | 
152 | +--------------+-----------------------------------+-----------------------+---------------+
153 | | CI           | pipeline_branch                   | pipeline_build_no     | __ci__        |
154 | +==============+===================================+=======================+===============+
155 | | Jenkins CI   | BRANCH_NAME if set else JOB_NAME  | BUILD_NUMBER          | jenkinsci     |
156 | +--------------+-----------------------------------+-----------------------+---------------+
157 | | Drone CI     | DRONE_REPO_BRANCH                 | DRONE_BUILD_NUMBER    | droneci       |
158 | +--------------+-----------------------------------+-----------------------+---------------+
159 | | Circle CI    | CIRCLE_JOB                        | CIRCLE_BUILD_NUM      | circleci      |
160 | +--------------+-----------------------------------+-----------------------+---------------+
161 | | Gitlab CI    | CI_JOB_NAME                       | CI_PIPELINE_ID        | gitlabci      |
162 | +--------------+-----------------------------------+-----------------------+---------------+
163 | | Travis CI    | TRAVIS_BUILD_ID                   | TRAVIS_BUILD_NUMBER   | travisci      |
164 | +--------------+-----------------------------------+-----------------------+---------------+
165 | | Bitbucket CI | BITBUCKET_BRANCH                  | BITBUCKET_BUILD_NUMBER| bitbucketci   |
166 | +--------------+-----------------------------------+-----------------------+---------------+
167 | 
168 | Note that none of these fields will be added if:
169 | * the CI context is incomplete, or
170 | * the CI context cannot be computed.
171 | 
172 | Parameters affecting measures
173 | -----------------------------
174 | By default, pytest-monitor runs the garbage collector prior to executing each test function.
175 | This leads to finer memory measurements. If you want to disable this call to the
176 | garbage collector, just set the `--no-gc` option on the command line:
177 | 
178 | .. code-block:: shell
179 | 
180 |     bash $> pytest --no-gc
181 | 
182 | Forcing CPU frequency
183 | ---------------------
184 | Under some circumstances, you may want to set the CPU frequency instead of asking `pytest-monitor` to compute it.
185 | To do so, you can either:
186 | - ask `pytest-monitor` to use a preset value if it does not manage to compute the CPU frequency,
187 | - or tell it not to try computing the CPU frequency and to use your preset value.
188 | 
189 | Two environment variables control this behaviour:
190 | - `PYTEST_MONITOR_CPU_FREQ` allows you to preset a value for the CPU frequency. It must be a float-convertible value.
191 |   This value will be used if `pytest-monitor` cannot compute the CPU frequency. Otherwise, `0.0` will be used as a
192 |   default value.
193 | - `PYTEST_MONITOR_FORCE_CPU_FREQ` instructs `pytest-monitor` whether to try computing the CPU frequency. It expects an
194 |   integer-convertible value. If not set, or if the integer representation of the value is `0`, then `pytest-monitor` will
195 |   try to compute the CPU frequency, defaulting to the use case described for the previous environment variable.
196 |   If it is set and not equal to `0`, then the value held by the environment variable `PYTEST_MONITOR_CPU_FREQ` is used
197 |   (`0.0` if not set).
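
As a quick illustration of these two variables, the following invocation skips frequency detection entirely and forces a preset value (the 2500.0 MHz below is purely illustrative):

.. code-block:: shell

    bash $> PYTEST_MONITOR_FORCE_CPU_FREQ=1 PYTEST_MONITOR_CPU_FREQ=2500.0 pytest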
198 | 
--------------------------------------------------------------------------------
/docs/sources/contributing.rst:
--------------------------------------------------------------------------------
1 | ==================
2 | Contribution guide
3 | ==================
4 | 
5 | If you want to contribute to this project, you are welcome to do so!
6 | 
7 | Create your own development environment
8 | ---------------------------------------
9 | We use conda as our main packaging system, though pip works as well.
10 | 
11 | The following instructions describe how to create your development environment using conda:
12 | 
13 | #. Create a new environment:
14 | 
15 |    .. code-block:: bash
16 | 
17 |       conda create -n pytest-monitor-dev python=3 -c https://conda.anaconda.org/conda-forge -c defaults
18 | 
19 | #. Install the dependencies:
20 | 
21 |    .. code-block:: bash
22 | 
23 |       conda install --file requirements.dev.txt -n pytest-monitor-dev -c https://conda.anaconda.org/conda-forge -c defaults
24 | 
25 | #. Make sure pip is installed, and install it if missing:
26 | 
27 |    .. code-block:: bash
28 | 
29 |       # Check for pip
30 |       conda list | grep pip
31 |       # Install if needed
32 |       conda install -n pytest-monitor-dev pip -c https://conda.anaconda.org/conda-forge
33 | 
34 | #. Activate your environment:
35 | 
36 |    .. code-block:: bash
37 | 
38 |       conda activate pytest-monitor-dev
39 | 
40 | #. Install `pytest-monitor` in development mode:
41 | 
42 |    .. code-block:: bash
43 | 
44 |       python -m pip install -e ".[dev]"
45 | 
46 | #. Install the pre-commit hooks:
47 |    .. code-block:: bash
48 | 
49 |       pre-commit install
50 | 
51 | #. You're done!
52 | 
53 | Feature requests and feedback
54 | -----------------------------
55 | 
56 | We would be happy to hear about your proposals and suggestions. Feel free to
57 | `submit them as issues `_ and:
58 | 
59 | * Explain in detail the expected behavior.
60 | * Keep the scope as narrow as possible. This will make them easier to implement.
61 | 
62 | 
63 | .. _reportbugs:
64 | 
65 | Bug reporting
66 | -------------
67 | 
68 | Report bugs for `pytest-monitor` in the `issue tracker `_. Every filed bug should include:
69 | 
70 | * Your operating system name and version.
71 | * Any details about your local setup that might be helpful in troubleshooting, specifically:
72 |   * the Python interpreter version,
73 |   * installed libraries,
74 |   * and your `pytest` version.
75 | * Detailed steps to reproduce the bug.
76 | 
77 | .. _fixbugs:
78 | 
79 | Bug fixing
80 | ----------
81 | 
82 | Look through the `GitHub issues for bugs `_.
83 | Talk to developers to find out how you can fix specific bugs.
84 | 
85 | Feature implementation
86 | ----------------------
87 | 
88 | Look through the `GitHub issues for enhancements `_.
89 | 
90 | Talk to developers to find out how you can implement specific features.
91 | 
92 | Thank you!
--------------------------------------------------------------------------------
/docs/sources/index.rst:
--------------------------------------------------------------------------------
1 | .. pytest-monitor documentation master file, created by
2 |    sphinx-quickstart on Thu Oct 1 00:43:18 2015.
3 |    You can adapt this file completely to your liking, but it should at least
4 |    contain the root `toctree` directive.
5 | 
6 | Welcome to pytest-monitor's documentation!
7 | ===============================================================
8 | 
9 | Contents:
10 | 
11 | .. toctree::
12 |    :maxdepth: 2
13 | 
14 |    introduction
15 |    installation
16 |    configuration
17 |    run
18 |    operating
19 |    remote
20 |    contributing
21 |    changelog
22 | 
23 | Indices and tables
24 | ==================
25 | 
26 | * :ref:`genindex`
27 | * :ref:`modindex`
28 | * :ref:`search`
29 | 
30 | 
--------------------------------------------------------------------------------
/docs/sources/installation.rst:
--------------------------------------------------------------------------------
1 | ============
2 | Installation
3 | ============
4 | 
5 | `pytest-monitor` is a plugin for `pytest`.
6 | 
7 | Supported environments
8 | ----------------------
9 | 
10 | `pytest-monitor` currently works on *Linux* and *macOS*. Support for *Windows* is experimental and not tested.
11 | 
12 | **You will need pytest 4.4+ to run pytest-monitor.**
13 | 
14 | We support all versions of Python >= 3.8.
15 | 
16 | 
17 | From conda
18 | ----------
19 | 
20 | Simply run the following command to get it installed in your current environment:
21 | 
22 | .. code-block:: bash
23 | 
24 |     conda install pytest-monitor -c https://conda.anaconda.org/conda-forge
25 | 
26 | 
27 | From pip
28 | --------
29 | 
30 | Simply run the following command to get it installed:
31 | 
32 | .. code-block:: bash
33 | 
34 |     pip install pytest-monitor
35 | 
--------------------------------------------------------------------------------
/docs/sources/introduction.rst:
--------------------------------------------------------------------------------
1 | ============
2 | Introduction
3 | ============
4 | 
5 | `pytest-monitor` tracks the resources (like memory and compute time) consumed by a test suite, so that you
6 | can make sure that your code does not use too much of them.
7 | 
8 | Thanks to `pytest-monitor`, you can check resource consumption, in particular through continuous integration, by monitoring the consumption of test functions. These tests can be functional (as usual) or dedicated to resource consumption checks.
9 | 
10 | Use cases
11 | ---------
12 | 
13 | Examples of use cases include technical stack updates and code evolution.
14 | 
15 | Technical stack updates
16 | ~~~~~~~~~~~~~~~~~~~~~~~
17 | 
18 | In the Python world, libraries often depend on several packages. By updating some (or all) of the dependencies,
19 | you update code that you do not own and therefore do not control. Tracking your application's resource footprint
20 | can prevent unwanted resource consumption, and can thus validate the versions of the packages that you depend on.
21 | 
22 | Code evolution
23 | ~~~~~~~~~~~~~~
24 | 
25 | Extending your application with new features, or fixing its bugs, might have an impact on the core of your program. The performance of large applications or libraries can be difficult to assess, but by monitoring resource consumption, `pytest-monitor` allows you to check that despite code updates, the performance of your code remains within desirable limits.
26 | 
27 | 
28 | Usage
29 | -----
30 | 
31 | Simply run pytest as usual: pytest-monitor is active by default as soon as it is installed. After running your first session, a .pymon sqlite database will be accessible in the directory where pytest was run.
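
For example, a minimal first session (illustrating nothing more than the default behaviour described above):

.. code-block:: shell

    bash $> pytest
    bash $> ls .pymon    # the results database created by pytest-monitor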
32 | -------------------------------------------------------------------------------- /docs/sources/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | echo. coverage to run coverage check of the documentation if enabled 41 | goto end 42 | ) 43 | 44 | if "%1" == "clean" ( 45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 46 | del /q /s %BUILDDIR%\* 47 | goto end 48 | ) 49 | 50 | 51 | REM Check if sphinx-build is available and fallback to Python version if any 52 | %SPHINXBUILD% 2> nul 53 | if errorlevel 9009 goto sphinx_python 54 | goto sphinx_ok 55 | 56 | :sphinx_python 57 | 58 | set SPHINXBUILD=python -m sphinx.__init__ 59 | %SPHINXBUILD% 2> nul 60 | if errorlevel 9009 ( 61 | echo. 62 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 63 | echo.installed, then set the SPHINXBUILD environment variable to point 64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 65 | echo.may add the Sphinx directory to PATH. 66 | echo. 67 | echo.If you don't have Sphinx installed, grab it from 68 | echo.http://sphinx-doc.org/ 69 | exit /b 1 70 | ) 71 | 72 | :sphinx_ok 73 | 74 | 75 | if "%1" == "html" ( 76 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 80 | goto end 81 | ) 82 | 83 | if "%1" == "dirhtml" ( 84 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 88 | goto end 89 | ) 90 | 91 | if "%1" == "singlehtml" ( 92 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 93 | if errorlevel 1 exit /b 1 94 | echo. 95 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 
96 | goto end 97 | ) 98 | 99 | if "%1" == "pickle" ( 100 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 101 | if errorlevel 1 exit /b 1 102 | echo. 103 | echo.Build finished; now you can process the pickle files. 104 | goto end 105 | ) 106 | 107 | if "%1" == "json" ( 108 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 109 | if errorlevel 1 exit /b 1 110 | echo. 111 | echo.Build finished; now you can process the JSON files. 112 | goto end 113 | ) 114 | 115 | if "%1" == "htmlhelp" ( 116 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 117 | if errorlevel 1 exit /b 1 118 | echo. 119 | echo.Build finished; now you can run HTML Help Workshop with the ^ 120 | .hhp project file in %BUILDDIR%/htmlhelp. 121 | goto end 122 | ) 123 | 124 | if "%1" == "qthelp" ( 125 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 129 | .qhcp project file in %BUILDDIR%/qthelp, like this: 130 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\pytest-cookiecutterplugin_name.qhcp 131 | echo.To view the help file: 132 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\pytest-cookiecutterplugin_name.ghc 133 | goto end 134 | ) 135 | 136 | if "%1" == "devhelp" ( 137 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. 141 | goto end 142 | ) 143 | 144 | if "%1" == "epub" ( 145 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 149 | goto end 150 | ) 151 | 152 | if "%1" == "latex" ( 153 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 157 | goto end 158 | ) 159 | 160 | if "%1" == "latexpdf" ( 161 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 162 | cd %BUILDDIR%/latex 163 | make all-pdf 164 | cd %~dp0 165 | echo. 166 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdfja" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf-ja 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "text" ( 181 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 182 | if errorlevel 1 exit /b 1 183 | echo. 184 | echo.Build finished. The text files are in %BUILDDIR%/text. 185 | goto end 186 | ) 187 | 188 | if "%1" == "man" ( 189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 190 | if errorlevel 1 exit /b 1 191 | echo. 192 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 193 | goto end 194 | ) 195 | 196 | if "%1" == "texinfo" ( 197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 198 | if errorlevel 1 exit /b 1 199 | echo. 200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 201 | goto end 202 | ) 203 | 204 | if "%1" == "gettext" ( 205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 206 | if errorlevel 1 exit /b 1 207 | echo. 208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 209 | goto end 210 | ) 211 | 212 | if "%1" == "changes" ( 213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 214 | if errorlevel 1 exit /b 1 215 | echo. 
216 | echo.The overview file is in %BUILDDIR%/changes.
217 | goto end
218 | )
219 | 
220 | if "%1" == "linkcheck" (
221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
222 | if errorlevel 1 exit /b 1
223 | echo.
224 | echo.Link check complete; look for any errors in the above output ^
225 | or in %BUILDDIR%/linkcheck/output.txt.
226 | goto end
227 | )
228 | 
229 | if "%1" == "doctest" (
230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
231 | if errorlevel 1 exit /b 1
232 | echo.
233 | echo.Testing of doctests in the sources finished, look at the ^
234 | results in %BUILDDIR%/doctest/output.txt.
235 | goto end
236 | )
237 | 
238 | if "%1" == "coverage" (
239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
240 | if errorlevel 1 exit /b 1
241 | echo.
242 | echo.Testing of coverage in the sources finished, look at the ^
243 | results in %BUILDDIR%/coverage/python.txt.
244 | goto end
245 | )
246 | 
247 | if "%1" == "xml" (
248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
249 | if errorlevel 1 exit /b 1
250 | echo.
251 | echo.Build finished. The XML files are in %BUILDDIR%/xml.
252 | goto end
253 | )
254 | 
255 | if "%1" == "pseudoxml" (
256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
257 | if errorlevel 1 exit /b 1
258 | echo.
259 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
260 | goto end
261 | )
262 | 
263 | :end
264 | 
--------------------------------------------------------------------------------
/docs/sources/operating.rst:
--------------------------------------------------------------------------------
1 | ==================
2 | Operating measures
3 | ==================
4 | 
5 | Storage
6 | -------
7 | 
8 | Once measures are collected, `pytest-monitor` either dumps them into a local database
9 | or sends them to a monitoring server.
10 | 
11 | In the case of local storage, a `sqlite3` database is used, as it is lightweight and
12 | is provided with many Python distributions (being part of the standard library).
13 | 
14 | Measures are stored in the `pytest` invocation directory, in a database file named **.pymon**.
15 | You are free to override the name of this database by setting the `--db` option:
16 | 
17 | .. code-block:: shell
18 | 
19 |     pytest --db /path/to/your/monitor/database
20 | 
21 | 
22 | You can also send your test results to a monitoring server (under development at the time of writing) in order to centralize
23 | your Metrics and Execution Context (see below):
24 | 
25 | .. code-block:: shell
26 | 
27 |     pytest --remote-server server:port
28 | 
29 | Execution Context, Metrics and Session
30 | --------------------------------------
31 | 
32 | We distinguish two kinds of measures:
33 | 
34 | - those describing the **Execution Context**, i.e. your machine (node name, CPU, memory…),
35 | - the **Metrics** related to the tests themselves (this can be the memory used, the CPU usage…).
36 | 
37 | Among test-related **metrics**, some do not depend on any individual test but on the session itself
38 | (session start date, SCM reference). For this reason, `pytest-monitor` uses
39 | a notion of session to which each test's metrics are linked.
40 | 
41 | Additionally, each test is linked to an Execution Context so that comparisons between runs are possible.
42 | 
43 | 
44 | Model
45 | -----
46 | 
47 | The local database associates each test's Metrics with the specific context in which it was run:
48 | 
49 | .. image:: _static/db_relationship.png
50 | 
51 | 
52 | Execution Context
53 | ~~~~~~~~~~~~~~~~~
54 | 
55 | Execution Contexts are computed prior to the start of the `pytest`
56 | session. An Execution Context describes much of the machine settings:
57 | 
58 | CPU_COUNT (INTEGER)
59 |     Number of online CPUs the machine can use.
60 | CPU_FREQUENCY_MHZ (INTEGER)
61 |     Base frequency of the CPUs (in megahertz). Set to 0 if unable to fetch it.
62 | CPU_VENDOR (TEXT 256 CHAR)
63 |     Full CPU vendor string.
64 | RAM_TOTAL_MB (INTEGER)
65 |     Total usable RAM (physical memory) in megabytes.
66 | MACHINE_NODE (TEXT 512 CHAR)
67 |     Fully qualified domain name of the machine.
68 | MACHINE_TYPE (TEXT 32 CHAR)
69 |     Machine type.
70 | MACHINE_ARCH (TEXT 16 CHAR)
71 |     Addressing mode used (64 bits…).
72 | SYSTEM_INFO (TEXT 256 CHAR)
73 |     Operating system name and release level.
74 | PYTHON_INFO (TEXT 512 CHAR)
75 |     Python information (version, compilation mode used and so on…).
76 | ENV_H (TEXT 64 CHAR)
77 |     Hash string used to uniquely identify an execution context.
78 | 
79 | In the local database, Execution Contexts are stored in table `EXECUTION_CONTEXTS`.
80 | 
81 | 
82 | Sessions
83 | ~~~~~~~~
84 | SESSION_H (TEXT 64 CHAR)
85 |     Hash string used to uniquely identify a session run.
86 | RUN_DATE (TEXT 64 CHAR)
87 |     Time at which the `pytest` session was started. The full format is
88 |     'YYYY-MM-DDTHH:MM:SS.uuuuuu' (ISO 8601 format with UTC time). The fractional second part is omitted if it is zero.
89 | SCM_ID (TEXT 128 CHAR)
90 |     Full reference to the source code management system, if any.
91 | RUN_DESCRIPTION (TEXT 1024 CHAR)
92 |     A free text field that you can use to describe a session run.
93 | 
94 | In the local database, Sessions are stored in the table `TEST_SESSIONS`.
95 | 
96 | 
97 | Metrics
98 | ~~~~~~~
99 | 
100 | Metrics are collected at test, class and/or module level. For both classes and modules, some of the
101 | metrics can be skewed due to the technical limitations described earlier.
102 | 
103 | SESSION_H (TEXT 64 CHAR)
104 |     Session context used for this test.
105 | ENV_H (TEXT 64 CHAR)
106 |     Execution Context used for this test.
107 | ITEM_START_TIME (TEXT 64 CHAR)
108 |     Time at which the test item was launched. The full format is
109 |     'YYYY-MM-DDTHH:MM:SS.uuuuuu' (ISO 8601 format with UTC time). The fractional second part is omitted if it is zero.
110 | ITEM_PATH (TEXT 4096 CHAR)
111 |     Path of the item, using an import-compatible string specification.
112 | ITEM (TEXT 2048 CHAR)
113 |     Initial item name, without any variant.
114 | ITEM_VARIANT (TEXT 2048 CHAR)
115 |     Full item name, with the parametrization used, if any.
116 | ITEM_FS_LOC (TEXT 2048 CHAR)
117 |     Item's module path, relative to the pytest invocation directory.
118 | KIND (TEXT 64 CHAR)
119 |     Type of item (function, class, module…).
120 | COMPONENT (TEXT 512 CHAR), NULLABLE
121 |     Component to which the test belongs, if any (this is used when sending results to a server, for identifying each source of Metrics).
122 | TOTAL_TIME (FLOAT)
123 |     Total time spent running the item (in seconds).
124 | USER_TIME (FLOAT)
125 |     Time spent in user mode (in seconds).
126 | KERNEL_TIME (FLOAT)
127 |     Time spent in kernel mode (in seconds).
128 | CPU_USAGE (FLOAT)
129 |     System-wide CPU usage as a percentage (100 % is equivalent to one core).
130 | MEM_USAGE (FLOAT)
131 |     Maximum resident memory used during the test execution (in megabytes).
132 | 
133 | In the local database, these Metrics are stored in table `TEST_METRICS`.
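
Because the local storage is plain `sqlite3`, the model above can be explored with the Python standard library alone. The following minimal sketch (an illustration only, assuming a default **.pymon** database exists in the current directory) lists the ten most memory-hungry items:

.. code-block:: python

    import sqlite3

    # Open the local results database produced by pytest-monitor.
    cnx = sqlite3.connect(".pymon")
    query = """
        SELECT ITEM_VARIANT, MEM_USAGE, TOTAL_TIME
        FROM TEST_METRICS
        ORDER BY MEM_USAGE DESC
        LIMIT 10
    """
    # Each row maps to the columns described above.
    for item_variant, mem_usage, total_time in cnx.execute(query):
        print(f"{item_variant}: {mem_usage:.1f} MB in {total_time:.2f} s")
    cnx.close()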
134 | 
--------------------------------------------------------------------------------
/docs/sources/remote.rst:
--------------------------------------------------------------------------------
1 | Use of a remote server
2 | ======================
3 | 
4 | You can easily send your metrics to a remote server. This can be useful when running
5 | tests in parallel with plugins such as *pytest-xdist* or *pytest-parallel*.
6 | To do so, instruct pytest with the remote server address to use:
7 | 
8 | .. code-block:: shell
9 | 
10 |     bash $> pytest --remote-server myremote.server.net:port
11 | 
12 | This way, *pytest-monitor* will automatically send data to and query the remote server as
13 | needed. Note that *pytest-monitor* will revert to its normal behaviour if:
14 | 
15 | - it cannot query the context or the session for existence
16 | - it cannot create a new context or a new session
17 | 
18 | 
19 | Implementing a remote server
20 | ============================
21 | 
22 | How pytest-monitor interacts with a remote server
23 | --------------------------------------------------
24 | 
25 | The following sequence is used by *pytest-monitor* when using a remote server:
26 | 
27 | 1. Ask the remote server if the **Execution Context** is known.
28 | 2. Insert the **Execution Context** if the server knows nothing about it.
29 | 3. Ask the remote server if the **Session** is known.
30 | 4. Insert the **Session** if the server knows nothing about it.
31 | 5. Insert results once measures have been collected.
32 | 
33 | Used HTTP codes
34 | ---------------
35 | Three codes are used by *pytest-monitor* when asked to work with a remote server:
36 | 
37 | - 200 (OK) is used to indicate that a query has led to a non-empty result.
38 | - 201 (CREATED) is expected by *pytest-monitor* when sending a new entry (**Execution Context**, **Session** or any **Metric**).
39 | - 204 (NO CONTENT), though not checked explicitly, should be returned when a request leads to no results.
40 | 
41 | Mandatory routes
42 | ----------------
43 | The following routes are expected to be reachable:
44 | 
45 | GET /contexts/
46 | 
47 |     Query the system for an **Execution Context** with the given hash.
48 | 
49 |     **Return Codes**: Must return *200* (*OK*) if the **Execution Context** exists, *204* (*NO CONTENT*) otherwise.
50 | 
51 | GET /sessions/
52 | 
53 |     Query the system for a **Session** with the given hash.
54 | 
55 |     **Return Codes**: Must return *200* (*OK*) if the **Session** exists, *204* (*NO CONTENT*) otherwise.
56 | 
57 | POST /contexts/
58 | 
59 |     Request the system to create a new entry for the given **Execution Context**.
60 |     Data are sent as JSON parameters:
61 | 
62 |     .. code-block:: json
63 | 
64 |         {
65 |             cpu_count: int,
66 |             cpu_frequency: int,
67 |             cpu_type: str,
68 |             cpu_vendor: str,
69 |             ram_total: int,
70 |             machine_node: str,
71 |             machine_type: str,
72 |             machine_arch: str,
73 |             system_info: str,
74 |             python_info: str,
75 |             h: str
76 |         }
77 | 
78 |     **Return Codes**: Must return *201* (*CREATED*) if the **Execution Context** has been created.
79 | 
80 | 
81 | POST /sessions/
82 | 
83 |     Request the system to create a new entry for the given **Session**.
84 |     Data are sent as JSON parameters:
85 | 
86 |     .. code-block:: json
87 | 
88 |         {
89 |             session_h: str,
90 |             run_date: str,
91 |             scm_ref: str,
92 |             description: str
93 |         }
94 | 
95 |     **Return Codes**: Must return *201* (*CREATED*) if the **Session** has been created.
96 | 
97 | POST /metrics/
98 | 
99 |     Request the system to create a new **Metrics** entry.
100 |     Data are sent as JSON parameters:
101 | 
102 |     .. code-block:: json
103 | 
104 |         {
105 |             session_h: str,
106 |             context_h: str,
107 |             item_start_time: str,
108 |             item_path: str,
109 |             item: str,
110 |             item_variant: str,
111 |             item_fs_loc: str,
112 |             kind: str,
113 |             component: str,
114 |             total_time: float,
115 |             user_time: float,
116 |             kernel_time: float,
117 |             cpu_usage: float,
118 |             mem_usage: float
119 |         }
120 | 
121 |     **Return Codes**: Must return *201* (*CREATED*) if the **Metrics** entry has been created.
--------------------------------------------------------------------------------
/docs/sources/run.rst:
--------------------------------------------------------------------------------
1 | ========================
2 | Managing your test suite
3 | ========================
4 | 
5 | `pytest-monitor` does not require any specific setup: it is active by default.
6 | Thus all your tests are analyzed by default in order to collect monitoring information.
7 | 
8 | 
9 | About collecting and storing results
10 | ------------------------------------
11 | 
12 | `pytest-monitor` makes a clear distinction between the execution context and the test metrics.
13 | This distinction can be seen clearly in the code and the initialization sequence:
14 | 
15 | 1. Collect environment values.
16 |    Various pieces of information about the machine are collected.
17 | 2. Store the context.
18 |    The Execution Context collected in step #1 is recorded if not yet known.
19 | 3. Prepare the run.
20 |    In order to provide more accurate measurements, we "warm up" the context and take an initial set of measurements.
21 |    Some will be used for adjusting later measurements.
22 | 4. Run tests and enable measurements.
23 |    Depending on the item type (function, class or module), we launch the relevant measurements.
24 |    Each time a monitored item ends, the measurement results (Metrics) are recorded right away.
25 | 5. End session.
26 |    If sending the monitoring results to a remote server has been requested, this is when `pytest-monitor` does it.
27 | 
28 | 
29 | Selecting tests to monitor
30 | --------------------------
31 | 
32 | By default, all tests are monitored, even small ones which would not require any specific monitoring.
33 | It is possible to more finely control which tests will be monitored by `pytest-monitor`. This is done through the use of `pytest` markers.
34 | 
35 | `pytest-monitor` offers two markers for this:
36 | 
37 | ``@pytest.mark.monitor_skip_test``
38 |     marks your test for execution, but without any monitoring.
39 | 
40 | ``@pytest.mark.monitor_skip_test_if(cond)``
41 |     tells `pytest-monitor` to execute the test but to skip
42 |     monitoring if the condition is true.
43 | 
44 | Here is an example:
45 | 
46 | .. code-block:: python
47 | 
48 |     import pytest
49 |     import sys
50 | 
51 | 
52 |     def test_execute_and_monitor():
53 |         assert True
54 | 
55 |     @pytest.mark.monitor_skip_test
56 |     def test_execute_do_not_monitor():
57 |         assert True
58 | 
59 |     @pytest.mark.monitor_skip_test_if(sys.version_info >= (3,))
60 |     def test_execute_do_not_monitor_on_py3():
61 |         assert True
62 | 
63 | 
64 | Disabling monitoring except for some tests
65 | ------------------------------------------
66 | 
67 | `pytest` offers global markers. For example, one can set the default to no monitoring:
68 | 
69 | .. code-block:: python
70 | 
71 |     import pytest
72 | 
73 |     # With the following global module marker,
74 |     # monitoring is disabled by default:
75 |     pytestmark = [pytest.mark.monitor_skip_test]
76 | 
77 | In this case, it is necessary to explicitly activate individual monitoring. This is
78 | accomplished with:
79 | 
80 | ``@pytest.mark.monitor_test``
81 |     marks your test as to be executed and monitored, even if monitoring
82 |     is disabled for the module.
83 | 
84 | ``@pytest.mark.monitor_test_if(cond)``
85 |     tells `pytest-monitor` to execute the test and to monitor results
86 |     if and only if the condition is true, regardless of the
87 |     module monitor setup.
88 | 
89 | 
90 | Continuing the example above:
91 | 
92 | .. code-block:: python
93 | 
94 |     import sys
95 |     import time
96 |     import pytest
97 | 
98 |     def test_executed_not_monitored():
99 |         time.sleep(1)
100 |         assert True
101 | 
102 |     def test_executed_not_monitored_2():
103 |         time.sleep(2)
104 |         assert True
105 | 
106 |     @pytest.mark.monitor_test
107 |     def test_executed_and_monitored():
108 |         assert True
109 | 
110 |     @pytest.mark.monitor_test_if(sys.version_info >= (3, 7))
111 |     def test_executed_and_monitored_if_py37():
112 |         assert True
113 | 
114 | 
115 | Associating your tests with a component
116 | ---------------------------------------
117 | 
118 | `pytest-monitor` allows you to *tag* each test in the database with a "**component**" name. This allows you to easily identify tests that come from a specific part of your application, or to distinguish test results for two different projects that use the same `pytest-monitor` database.
119 | 
120 | Setting up a component name can be done at module level:
121 | 
122 | .. code-block:: python
123 | 
124 |     import time
125 |     import pytest
126 | 
127 | 
128 |     pytest_monitor_component = "my_component"  # Component name stored in the results database
129 | 
130 |     def test_monitored():
131 |         t_a = time.time()
132 |         b_continue = True
133 |         while b_continue:
134 |             t_delta = time.time() - t_a
135 |             b_continue = t_delta < 1
136 |         assert not b_continue
137 | 
138 | If no `pytest_monitor_component` variable is defined, the component is set to the empty string.
139 | In projects with many modules, this can be tedious. `pytest-monitor` therefore allows you to force a fixed component name for all the tests:
140 | 
141 | .. code-block:: bash
142 | 
143 |     $ pytest --force-component YOUR_COMPONENT_NAME
144 | 
145 | This will force the component value to be set to the one you provided, whatever the value of
146 | *pytest_monitor_component* in your test module, if any.
147 | 
148 | If you need to use a global component name for all your tests while allowing some modules to have a specific component name, you can ask `pytest-monitor` to add a prefix to any module-level component name:
149 | 
150 | .. code-block:: bash
151 | 
152 |     $ pytest --component-prefix YOUR_COMPONENT_NAME
153 | 
154 | This way, all tests detected by `pytest` will have their component prefixed with the given value (tests for modules with no `pytest_monitor_component` variable are simply tagged with the prefix).
155 | 
156 | For instance, the following test module:
157 | 
158 | .. code-block:: python
159 | 
160 |     import time
161 |     import pytest
162 | 
163 | 
164 |     pytest_monitor_component = "component_A"
165 | 
166 |     def test_monitored():
167 |         t_a = time.time()
168 |         b_continue = True
169 |         while b_continue:
170 |             t_delta = time.time() - t_a
171 |             b_continue = t_delta < 1
172 |         assert not b_continue
173 | 
174 | will yield the following values for the component field, depending on the chosen command-line option:
175 | 
176 | +------------------------------------------+-----------------------+
177 | | Command line used                        | Component value       |
178 | +==========================================+=======================+
179 | | pytest --force-component PROJECT_A       | PROJECT_A             |
180 | +------------------------------------------+-----------------------+
181 | | pytest --component-prefix PROJECT_A      | PROJECT_A.component_A |
182 | +------------------------------------------+-----------------------+
183 | 
184 | 
--------------------------------------------------------------------------------
/examples/pkg1/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CFMTech/pytest-monitor/30585e4fae0d1c8e97ca226060d07024e2a00fb3/examples/pkg1/__init__.py
--------------------------------------------------------------------------------
/examples/pkg1/test_mod1.py:
--------------------------------------------------------------------------------
1 | import time
2 | 
3 | import pytest
4 | 
5 | 
6 | def test_sleep1():
7 |     time.sleep(1)
8 | 
9 | 
10 | @pytest.mark.monitor_skip_test()
11 | def test_sleep2():
12 |     time.sleep(2)
13 | 
14 | 
15 | @pytest.mark.parametrize(("range_max", "other"), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")])
16 | def test_heavy(range_max, other):
17 |     assert len(["a" * i for i in range(range_max)]) == range_max
--------------------------------------------------------------------------------
/examples/pkg1/test_mod2.py:
--------------------------------------------------------------------------------
1 | import time
2 | 
3 | 
4 | def test_sleep_400ms():
5 |     time.sleep(0.4)
--------------------------------------------------------------------------------
/examples/pkg2/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CFMTech/pytest-monitor/30585e4fae0d1c8e97ca226060d07024e2a00fb3/examples/pkg2/__init__.py
--------------------------------------------------------------------------------
/examples/pkg2/test_mod_a.py:
--------------------------------------------------------------------------------
1 | import time
2 | 
3 | 
4 | def test_master_sleep():
5 |     t_a = time.time()
6 |     b_continue = True
7 |     while b_continue:
8 |         t_delta = time.time() - t_a
9 |         b_continue = t_delta < 5
--------------------------------------------------------------------------------
/examples/pkg3/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CFMTech/pytest-monitor/30585e4fae0d1c8e97ca226060d07024e2a00fb3/examples/pkg3/__init__.py
--------------------------------------------------------------------------------
/examples/pkg3/test_mod_cl.py:
--------------------------------------------------------------------------------
1 | import time
2 | 
3 | 
4 | class TestClass:
5 |     def setup_method(self, test_method):
6 |         self.__value = test_method.__name__
7 |         time.sleep(1)
8 | 
9 |     def test_method1(self):
10 |         time.sleep(0.5)
11 |         assert self.__value == "test_method1"
12 | 
--------------------------------------------------------------------------------
/examples/pkg4/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CFMTech/pytest-monitor/30585e4fae0d1c8e97ca226060d07024e2a00fb3/examples/pkg4/__init__.py
--------------------------------------------------------------------------------
/examples/pkg4/test_mod_a.py:
--------------------------------------------------------------------------------
1 | import time
2 | 
3 | import pytest
4 | 
5 | pytestmark = pytest.mark.monitor_skip_test
6 | 
7 | pytest_monitor_component = "test"
8 | 
9 | 
10 | def test_not_monitored():
11 |     t_a = time.time()
12 |     b_continue = True
13 |     while b_continue:
14 |         t_delta = time.time() - t_a
15 |         b_continue = t_delta < 5
16 | 
17 | 
18 | @pytest.mark.monitor_test()
19 | def test_force_monitor():
20 |     t_a = time.time()
21 |     b_continue = True
22 |     while b_continue:
23 |         t_delta = time.time() - t_a
24 |         b_continue = t_delta < 5
25 | 
--------------------------------------------------------------------------------
/examples/pkg5/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CFMTech/pytest-monitor/30585e4fae0d1c8e97ca226060d07024e2a00fb3/examples/pkg5/__init__.py
--------------------------------------------------------------------------------
/examples/pkg5/doctest.py:
--------------------------------------------------------------------------------
1 | def run(a, b):
2 |     """
3 |     >>> a = 3
4 |     >>> b = 30
5 |     >>> run(a, b)
6 |     33
7 |     """
8 |     return a + b
9 | 
10 | 
11 | def try_doctest():
12 |     """
13 |     >>> try_doctest()
14 |     33
15 |     """
16 |     return run(3, 30)
17 | 
--------------------------------------------------------------------------------
/examples/pkg5/test_special_pytest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | 
4 | @pytest.mark.skip(reason="Some special test to skip")
5 | def test_is_skipped():
6 |     assert True
7 | 
8 | 
9 | def test_that_one_is_skipped_too():
10 |     pytest.skip("Test executed and instructed to be skipped from its body")
11 | 
12 | 
13 | def test_import_or_skip():
14 |     pytest.importorskip("this_module_does_not_exists")
15 | 
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools"]
3 | build-backend = "setuptools.build_meta"
4 | 
5 | [tool.distutils.bdist_wheel]
6 | universal = false
7 | 
8 | [project]
9 | name = "pytest-monitor"
10 | authors = [
11 |     {name = "Jean-Sébastien Dieu", email = "dieu.jsebastien@yahoo.com"},
12 | ]
13 | classifiers = [
14 |     "Development Status :: 5 - Production/Stable",
15 |     "Framework :: Pytest",
16 |     "Intended Audience :: Developers",
17 |     "Topic :: Software Development :: Testing",
18 |     "Programming Language :: Python",
19 |     "Programming Language :: Python :: 3",
20 |     "Programming Language :: Python :: 3.8",
21 |     "Programming Language :: Python :: 3.9",
22 |     "Programming Language :: Python :: 3.10",
23 |     "Programming Language :: Python :: 3.11",
24 |     "Programming Language :: Python :: Implementation :: CPython",
25 |     "Programming Language :: Python :: Implementation :: PyPy",
26 |     "Operating System :: OS Independent",
27 |     "License :: OSI Approved :: MIT License",
28 | ]
29 | dependencies = [
30 |     "pytest",
31 |     "requests",
32 |     "psutil>=5.1.0",
33 |     "memory_profiler>=0.58",
34 |     "wheel",
35 | ]
36 | description = "A pytest plugin designed for analyzing resource usage during tests."
37 | license = {text = "MIT"}
38 | maintainers = [
39 |     {name = "Jean-Sébastien Dieu", email = "dieu.jsebastien@yahoo.com"},
40 | ]
41 | readme = "README.rst"
42 | requires-python = ">=3.8"
43 | version = "1.7.0"
44 | 
45 | [project.urls]
46 | "Source" = "https://github.com/CFMTech/pytest-monitor"
47 | "Tracker" = "https://github.com/CFMTech/pytest-monitor/issues"
48 | "Documentation" = "https://pytest-monitor.readthedocs.io/"
49 | "Homepage" = "https://pytest-monitor.readthedocs.io/"
50 | 
51 | [project.entry-points.pytest11]
52 | monitor = "pytest_monitor.pytest_monitor"
53 | 
54 | [project.optional-dependencies]
55 | dev = [
56 |     "black",
57 |     "isort",
58 |     "flake8==6.0.0",
59 |     "flake8-builtins==2.1.0",
60 |     "flake8-simplify==0.19.3",
61 |     "flake8-comprehensions==3.10.1",
62 |     "flake8-pytest-style==1.6.0",
63 |     "flake8-return==1.2.0",
64 |     "flake8-pyproject==1.2.3",
65 |     "pre-commit==3.3.3"
66 | ]
67 | 
68 | 
69 | [tool.flake8]
70 | max-line-length = 120
71 | 
72 | [tool.black]
73 | line-length = 120
74 | 
75 | [tool.isort]
76 | profile = "black"
77 | src_paths = ["pytest_monitor"]
78 | 
--------------------------------------------------------------------------------
/pytest_monitor/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib.metadata
2 | 
3 | __version__ = importlib.metadata.version("pytest-monitor")
--------------------------------------------------------------------------------
/pytest_monitor/handler.py:
--------------------------------------------------------------------------------
1 | import sqlite3
2 | 
3 | 
4 | class DBHandler:
5 |     def __init__(self, db_path):
6 |         self.__db = db_path
7 |         self.__cnx = sqlite3.connect(self.__db) if db_path else None
8 |         if self.__cnx is not None: self.prepare()  # guard: nothing to prepare when no db path is given
9 | 
10 |     def query(self, what, bind_to, many=False):
11 |         cursor = self.__cnx.cursor()
12 |         cursor.execute(what, bind_to)
13 |         return cursor.fetchall() if many else cursor.fetchone()
14 | 
15 |     def insert_session(self, h, run_date, scm_id, description):
16 |         with self.__cnx:
17 |             self.__cnx.execute(
18 |                 "insert into TEST_SESSIONS(SESSION_H, RUN_DATE, SCM_ID, RUN_DESCRIPTION) values (?,?,?,?)",
19 |                 (h, run_date, scm_id, description),
20 |             )
21 | 
22 |     def insert_metric(
23 |         self,
24 |         session_id,
25 |         env_id,
26 |         item_start_date,
27 |         item,
28 |         item_path,
29 |         item_variant,
30 |         item_loc,
31 |         kind,
32 |         component,
33 |         total_time,
34 |         user_time,
35 |         kernel_time,
36 |         cpu_usage,
37 |         mem_usage,
38 |     ):
39 |         with self.__cnx:
40 |             self.__cnx.execute(
41 |                 "insert into TEST_METRICS(SESSION_H,ENV_H,ITEM_START_TIME,ITEM,"
42 |                 "ITEM_PATH,ITEM_VARIANT,ITEM_FS_LOC,KIND,COMPONENT,TOTAL_TIME,"
43 |                 "USER_TIME,KERNEL_TIME,CPU_USAGE,MEM_USAGE) "
44 |                 "values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
45 |                 (
46 |                     session_id,
47 |                     env_id,
48 |                     item_start_date,
49 |                     item,
50 |                     item_path,
51 |                     item_variant,
52 |                     item_loc,
53 |                     kind,
54 |                     component,
55 |                     total_time,
56 |                     user_time,
57 |                     kernel_time,
58 |                     cpu_usage,
59 |                     mem_usage,
60 |                 ),
61 |             )
62 | 
63 |     def insert_execution_context(self, exc_context):
64 |         with self.__cnx:
65 |             self.__cnx.execute(
66 |                 "insert into EXECUTION_CONTEXTS(CPU_COUNT,CPU_FREQUENCY_MHZ,CPU_TYPE,CPU_VENDOR,"
67 |                 "RAM_TOTAL_MB,MACHINE_NODE,MACHINE_TYPE,MACHINE_ARCH,SYSTEM_INFO,"
68 |                 "PYTHON_INFO,ENV_H) values (?,?,?,?,?,?,?,?,?,?,?)",
69 |                 (
70 |                     exc_context.cpu_count,
71 |                     exc_context.cpu_frequency,
72 |                     exc_context.cpu_type,
73 |                     exc_context.cpu_vendor,
74 |                     exc_context.ram_total,
75 |                     exc_context.fqdn,
76 |                     exc_context.machine,
77 |                     exc_context.architecture,
78 |                     exc_context.system_info,
79 |                     exc_context.python_info,
80 |                     exc_context.compute_hash(),
81 |                 ),
82 |             )
83 | 
84 |     def prepare(self):
85 |         cursor = self.__cnx.cursor()
86 |         cursor.execute(
87 |             """
88 | CREATE TABLE IF NOT EXISTS TEST_SESSIONS(
89 |     SESSION_H varchar(64) primary key not null unique, -- Session identifier
90 |     RUN_DATE varchar(64), -- Date of test run
91 |     SCM_ID varchar(128), -- SCM change id
92 |     RUN_DESCRIPTION json
93 | );"""
94 |         )
95 |         cursor.execute(
96 |             """
97 | CREATE TABLE IF NOT EXISTS TEST_METRICS (
98 |     SESSION_H varchar(64), -- Session identifier
99 |     ENV_H varchar(64), -- Environment description identifier
100 |     ITEM_START_TIME varchar(64), -- Effective start time of the test
101 |     ITEM_PATH varchar(4096), -- Path of the item, following Python import specification
102 |     ITEM varchar(2048), -- Name of the item
103 |     ITEM_VARIANT varchar(2048), -- Optional parametrization of an item.
104 |     ITEM_FS_LOC varchar(2048), -- Relative path from pytest invocation directory to the item's module.
105 |     KIND varchar(64), -- Package, Module or function
106 |     COMPONENT varchar(512) NULL, -- Tested component if any
107 |     TOTAL_TIME float, -- Total time spent running the item
108 |     USER_TIME float, -- time spent in user space
109 |     KERNEL_TIME float, -- time spent in kernel space
110 |     CPU_USAGE float, -- cpu usage
111 |     MEM_USAGE float, -- Max resident memory used.
112 |     FOREIGN KEY (ENV_H) REFERENCES EXECUTION_CONTEXTS(ENV_H),
113 |     FOREIGN KEY (SESSION_H) REFERENCES TEST_SESSIONS(SESSION_H)
114 | );"""
115 |         )
116 |         cursor.execute(
117 |             """
118 | CREATE TABLE IF NOT EXISTS EXECUTION_CONTEXTS (
119 |     ENV_H varchar(64) primary key not null unique,
120 |     CPU_COUNT integer,
121 |     CPU_FREQUENCY_MHZ integer,
122 |     CPU_TYPE varchar(64),
123 |     CPU_VENDOR varchar(256),
124 |     RAM_TOTAL_MB integer,
125 |     MACHINE_NODE varchar(512),
126 |     MACHINE_TYPE varchar(32),
127 |     MACHINE_ARCH varchar(16),
128 |     SYSTEM_INFO varchar(256),
129 |     PYTHON_INFO varchar(512)
130 | );
131 | """
132 |         )
133 |         self.__cnx.commit()
134 | 
--------------------------------------------------------------------------------
/pytest_monitor/pytest_monitor.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import gc
3 | import time
4 | import warnings
5 | 
6 | import memory_profiler
7 | import pytest
8 | 
9 | from pytest_monitor.session import PyTestMonitorSession
10 | 
11 | # These dictionaries are used to compute members set on each item.
12 | # KEY is the marker set on a test function 13 | # value is a tuple: 14 | # expect_args: boolean 15 | # internal marker attribute name: str 16 | # callable that sets the member's value 17 | # default value 18 | PYTEST_MONITOR_VALID_MARKERS = { 19 | "monitor_skip_test": (False, "monitor_skip_test", lambda x: True, False), 20 | "monitor_skip_test_if": (True, "monitor_skip_test", lambda x: bool(x), False), 21 | "monitor_test": (False, "monitor_force_test", lambda x: True, False), 22 | "monitor_test_if": (True, "monitor_force_test", lambda x: bool(x), False), 23 | } 24 | PYTEST_MONITOR_DEPRECATED_MARKERS = {} 25 | PYTEST_MONITOR_ITEM_LOC_MEMBER = "_location" if tuple(int(x) for x in pytest.__version__.split(".")[:2]) < (5, 3) else "location" 26 | 27 | PYTEST_MONITORING_ENABLED = True 28 | 29 | 30 | def pytest_addoption(parser): 31 | group = parser.getgroup("monitor") 32 | group.addoption( 33 | "--restrict-scope-to", 34 | dest="mtr_scope", 35 | default="function", 36 | help="Select the scope to monitor. By default, only function is monitored. " 37 | "Values are function, class, module, session. You can set one or more of these " 38 | "by listing them using a comma-separated list", 39 | ) 40 | group.addoption( 41 | "--parametrization-explicit", 42 | dest="mtr_want_explicit_ids", 43 | action="store_true", 44 | help="Set this option to distinguish parametrized tests given their values." 45 | " This requires the parameters to be stringifiable.", 46 | ) 47 | group.addoption("--no-monitor", action="store_true", dest="mtr_none", help="Disable all traces") 48 | group.addoption( 49 | "--remote-server", 50 | action="store", 51 | dest="mtr_remote", 52 | help="Remote server to send the results to. Format is <host>:<port>", 53 | ) 54 | group.addoption( 55 | "--db", 56 | action="store", 57 | dest="mtr_db_out", 58 | default=".pymon", 59 | help="Use the given sqlite database for storing results.", 60 | ) 61 | group.addoption( 62 | "--no-db", 63 | action="store_true", 64 | dest="mtr_no_db", 65 | help="Do not store results in local db.", 66 | ) 67 | group.addoption( 68 | "--force-component", 69 | action="store", 70 | dest="mtr_force_component", 71 | help="Force the component to be set at the given value for all the tests run" " in this session.", 72 | ) 73 | group.addoption( 74 | "--component-prefix", 75 | action="store", 76 | dest="mtr_component_prefix", 77 | help="Prefix each component found with the given value (applies to all tests" " run in this session).", 78 | ) 79 | group.addoption( 80 | "--no-gc", 81 | action="store_true", 82 | dest="mtr_disable_gc", 83 | help="Disable garbage collection between tests (may lead to unreliable measures)", 84 | ) 85 | group.addoption( 86 | "--description", 87 | action="store", 88 | default="", 89 | dest="mtr_description", 90 | help="Use this option to provide a small summary about this run.", 91 | ) 92 | group.addoption( 93 | "--tag", 94 | action="append", 95 | dest="mtr_tags", 96 | default=[], 97 | help="Provide meaningful flags to your run. This can help you in your analysis.", 98 | ) 99 | 100 | 101 | def pytest_configure(config): 102 | config.addinivalue_line("markers", "monitor_skip_test: mark test to be executed but not monitored.") 103 | config.addinivalue_line( 104 | "markers", 105 | "monitor_skip_test_if(cond): mark test to be executed but " "not monitored if cond is verified.", 106 | ) 107 | config.addinivalue_line( 108 | "markers", 109 | "monitor_test: mark test to be monitored (default behaviour)."
110 | " This can turn handy to whitelist some test when you have disabled" 111 | " monitoring on a whole module.", 112 | ) 113 | config.addinivalue_line( 114 | "markers", 115 | "monitor_test_if(cond): mark test to be monitored if and only if cond" 116 | " is verified. This can help you in whitelisting tests to be monitored" 117 | " depending on some external conditions.", 118 | ) 119 | 120 | 121 | def pytest_runtest_setup(item): 122 | """ 123 | Validate marker setup and print warnings if usage of deprecated marker is identified. 124 | Setting marker attribute to the discovered item is done after the above described verification. 125 | :param item: Test item 126 | """ 127 | if not PYTEST_MONITORING_ENABLED: 128 | return 129 | item_markers = {mark.name: mark for mark in item.iter_markers() if mark and mark.name.startswith("monitor_")} 130 | mark_to_del = [] 131 | for set_marker in item_markers.keys(): 132 | if set_marker not in PYTEST_MONITOR_VALID_MARKERS: 133 | warnings.warn("Nothing known about marker {}. Marker will be dropped.".format(set_marker)) 134 | mark_to_del.append(set_marker) 135 | if set_marker in PYTEST_MONITOR_DEPRECATED_MARKERS: 136 | warnings.warn(f"Marker {set_marker} is deprecated. Consider upgrading your tests") 137 | 138 | for marker in mark_to_del: 139 | del item_markers[marker] 140 | 141 | all_valid_markers = PYTEST_MONITOR_VALID_MARKERS 142 | all_valid_markers.update(PYTEST_MONITOR_DEPRECATED_MARKERS) 143 | # Setting instantiated markers 144 | for marker, _ in item_markers.items(): 145 | with_args, attr, fun_val, _ = all_valid_markers[marker] 146 | attr_val = fun_val(item_markers[marker].args[0]) if with_args else fun_val(None) 147 | setattr(item, attr, attr_val) 148 | 149 | # Setting other markers to default values 150 | for marker, marker_value in all_valid_markers.items(): 151 | with_args, attr, _, default = marker_value 152 | if not hasattr(item, attr): 153 | setattr(item, attr, default) 154 | 155 | # Finalize marker processing by enforcing some marker's value 156 | if item.monitor_force_test: 157 | # This test has been explicitly flagged as 'to be monitored'. 158 | item.monitor_skip_test = False 159 | 160 | 161 | @pytest.hookimpl(tryfirst=True, hookwrapper=True) 162 | def pytest_runtest_makereport(item, call): 163 | """ 164 | Used to identify the current call to add times. 165 | :param item: Test item 166 | :param call: call instance associated to the given item 167 | """ 168 | outcome = yield 169 | rep = outcome.get_result() 170 | 171 | if rep.when == "call": 172 | setattr(item, "test_run_duration", call.stop - call.start) 173 | setattr(item, "test_effective_start_time", call.start) 174 | 175 | 176 | def pytest_runtest_call(item): 177 | if not PYTEST_MONITORING_ENABLED: 178 | return 179 | setattr(item, "monitor_results", False) 180 | if hasattr(item, "module"): 181 | setattr( 182 | item, 183 | "monitor_component", 184 | getattr(item.module, "pytest_monitor_component", ""), 185 | ) 186 | else: 187 | setattr(item, "monitor_skip_test", True) 188 | 189 | 190 | @pytest.hookimpl 191 | def pytest_pyfunc_call(pyfuncitem): 192 | """ 193 | Core sniffer logic. We encapsulate the test function in a sniffer function to collect 194 | memory results. 
195 | """ 196 | 197 | def wrapped_function(): 198 | try: 199 | funcargs = pyfuncitem.funcargs 200 | testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} 201 | pyfuncitem.obj(**testargs) 202 | except Exception: 203 | raise 204 | except BaseException as e: 205 | return e 206 | 207 | def prof(): 208 | m = memory_profiler.memory_usage((wrapped_function, ()), max_iterations=1, max_usage=True, retval=True) 209 | if isinstance(m[1], BaseException): # Do we have any outcome? 210 | raise m[1] 211 | memuse = m[0][0] if type(m[0]) is list else m[0] 212 | setattr(pyfuncitem, "mem_usage", memuse) 213 | setattr(pyfuncitem, "monitor_results", True) 214 | 215 | if not PYTEST_MONITORING_ENABLED: 216 | wrapped_function() 217 | else: 218 | if not pyfuncitem.session.config.option.mtr_disable_gc: 219 | gc.collect() 220 | prof() 221 | return True 222 | 223 | 224 | def pytest_make_parametrize_id(config, val, argname): 225 | if config.option.mtr_want_explicit_ids: 226 | return f"{argname}={val}" 227 | return None 228 | 229 | 230 | @pytest.hookimpl(hookwrapper=True) 231 | def pytest_sessionstart(session): 232 | """ 233 | Instantiate a monitor session to save collected metrics. 234 | We yield at the end to let pytest pursue the execution. 235 | """ 236 | if session.config.option.mtr_force_component and session.config.option.mtr_component_prefix: 237 | raise pytest.UsageError("Invalid usage: --force-component and --component-prefix are incompatible options!") 238 | if session.config.option.mtr_no_db and not session.config.option.mtr_remote and not session.config.option.mtr_none: 239 | warnings.warn("pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring.") 240 | session.config.option.mtr_none = True 241 | component = session.config.option.mtr_force_component or session.config.option.mtr_component_prefix 242 | if session.config.option.mtr_component_prefix: 243 | component += ".{user_component}" 244 | if not component: 245 | component = "{user_component}" 246 | db = ( 247 | None 248 | if (session.config.option.mtr_none or session.config.option.mtr_no_db) 249 | else session.config.option.mtr_db_out 250 | ) 251 | remote = None if session.config.option.mtr_none else session.config.option.mtr_remote 252 | session.pytest_monitor = PyTestMonitorSession( 253 | db=db, remote=remote, component=component, scope=session.config.option.mtr_scope 254 | ) 255 | global PYTEST_MONITORING_ENABLED 256 | PYTEST_MONITORING_ENABLED = not session.config.option.mtr_none 257 | session.pytest_monitor.compute_info(session.config.option.mtr_description, session.config.option.mtr_tags) 258 | yield 259 | 260 | 261 | @pytest.fixture(autouse=True, scope="module") 262 | def _prf_module_tracer(request): 263 | if not PYTEST_MONITORING_ENABLED: 264 | yield 265 | else: 266 | t_a = time.time() 267 | ptimes_a = request.session.pytest_monitor.process.cpu_times() 268 | yield 269 | ptimes_b = request.session.pytest_monitor.process.cpu_times() 270 | t_z = time.time() 271 | rss = request.session.pytest_monitor.process.memory_info().rss / 1024**2 272 | component = getattr(request.module, "pytest_monitor_component", "") 273 | item = request.node.name[:-3] 274 | pypath = request.module.__name__[: -len(item) - 1] 275 | request.session.pytest_monitor.add_test_info( 276 | item, 277 | pypath, 278 | "", 279 | request.node._nodeid, 280 | "module", 281 | component, 282 | t_a, 283 | t_z - t_a, 284 | ptimes_b.user - ptimes_a.user, 285 | ptimes_b.system - ptimes_a.system, 286 | rss, 287 | ) 288 | 289 | 290 | 
@pytest.fixture(autouse=True) 291 | def _prf_tracer(request): 292 | if not PYTEST_MONITORING_ENABLED: 293 | yield 294 | else: 295 | ptimes_a = request.session.pytest_monitor.process.cpu_times() 296 | yield 297 | ptimes_b = request.session.pytest_monitor.process.cpu_times() 298 | if not request.node.monitor_skip_test and getattr(request.node, "monitor_results", False): 299 | item_name = request.node.originalname or request.node.name 300 | item_loc = getattr(request.node, PYTEST_MONITOR_ITEM_LOC_MEMBER)[0] 301 | request.session.pytest_monitor.add_test_info( 302 | item_name, 303 | request.module.__name__, 304 | request.node.name, 305 | item_loc, 306 | "function", 307 | request.node.monitor_component, 308 | request.node.test_effective_start_time, 309 | request.node.test_run_duration, 310 | ptimes_b.user - ptimes_a.user, 311 | ptimes_b.system - ptimes_a.system, 312 | request.node.mem_usage, 313 | ) 314 | -------------------------------------------------------------------------------- /pytest_monitor/session.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import hashlib 3 | import json 4 | import os 5 | import warnings 6 | from http import HTTPStatus 7 | 8 | import memory_profiler 9 | import psutil 10 | import requests 11 | 12 | from pytest_monitor.handler import DBHandler 13 | from pytest_monitor.sys_utils import ( 14 | ExecutionContext, 15 | collect_ci_info, 16 | determine_scm_revision, 17 | ) 18 | 19 | 20 | class PyTestMonitorSession: 21 | def __init__(self, db=None, remote=None, component="", scope=None, tracing=True): 22 | self.__db = None 23 | if db: 24 | self.__db = DBHandler(db) 25 | self.__monitor_enabled = tracing 26 | self.__remote = remote 27 | self.__component = component 28 | self.__session = "" 29 | self.__scope = scope or [] 30 | self.__eid = (None, None) 31 | self.__mem_usage_base = None 32 | self.__process = psutil.Process(os.getpid()) 33 | 34 | @property 35 | def monitoring_enabled(self): 36 | return self.__monitor_enabled 37 | 38 | @property 39 | def remote_env_id(self): 40 | return self.__eid[1] 41 | 42 | @property 43 | def db_env_id(self): 44 | return self.__eid[0] 45 | 46 | @property 47 | def process(self): 48 | return self.__process 49 | 50 | def get_env_id(self, env): 51 | db, remote = None, None 52 | if self.__db: 53 | row = self.__db.query("SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", (env.compute_hash(),)) 54 | db = row[0] if row else None 55 | if self.__remote: 56 | r = requests.get(f"{self.__remote}/contexts/{env.compute_hash()}") 57 | remote = None 58 | if r.status_code == HTTPStatus.OK: 59 | remote = json.loads(r.text) 60 | if remote["contexts"]: 61 | remote = remote["contexts"][0]["h"] 62 | else: 63 | remote = None 64 | return db, remote 65 | 66 | def compute_info(self, description, tags): 67 | run_date = datetime.datetime.now().isoformat() 68 | scm = determine_scm_revision() 69 | h = hashlib.md5() 70 | h.update(scm.encode()) 71 | h.update(run_date.encode()) 72 | h.update(description.encode()) 73 | self.__session = h.hexdigest() 74 | # From description + tags to JSON format 75 | d = collect_ci_info() 76 | if description: 77 | d["description"] = description 78 | for tag in tags: 79 | if type(tag) is str: 80 | _tag_info = tag.split("=", 1) 81 | d[_tag_info[0]] = _tag_info[1] 82 | else: 83 | for sub_tag in tag: 84 | _tag_info = sub_tag.split("=", 1) 85 | d[_tag_info[0]] = _tag_info[1] 86 | description = json.dumps(d) 87 | # Now get memory usage base and create the database 88 | self.prepare() 89 | 
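# Persist the execution context (hardware and Python info) before the session row:
# TEST_METRICS entries reference it through the ENV_H foreign key.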
self.set_environment_info(ExecutionContext()) 90 | if self.__db: 91 | self.__db.insert_session(self.__session, run_date, scm, description) 92 | if self.__remote: 93 | r = requests.post( 94 | f"{self.__remote}/sessions/", 95 | json={ 96 | "session_h": self.__session, 97 | "run_date": run_date, 98 | "scm_ref": scm, 99 | "description": json.loads(description), 100 | }, 101 | ) 102 | if r.status_code != HTTPStatus.CREATED: 103 | self.__remote = "" 104 | msg = f"Cannot insert session in remote monitor server ({r.status_code})! Deactivating...')" 105 | warnings.warn(msg) 106 | 107 | def set_environment_info(self, env): 108 | self.__eid = self.get_env_id(env) 109 | db_id, remote_id = self.__eid 110 | if self.__db and db_id is None: 111 | self.__db.insert_execution_context(env) 112 | db_id = self.__db.query("select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?", (env.compute_hash(),))[0] 113 | if self.__remote and remote_id is None: 114 | # We must postpone that to be run at the end of the pytest session. 115 | r = requests.post(f"{self.__remote}/contexts/", json=env.to_dict()) 116 | if r.status_code != HTTPStatus.CREATED: 117 | warnings.warn(f"Cannot insert execution context in remote server (rc={r.status_code}! Deactivating...") 118 | self.__remote = "" 119 | else: 120 | remote_id = json.loads(r.text)["h"] 121 | self.__eid = db_id, remote_id 122 | 123 | def prepare(self): 124 | def dummy(): 125 | return True 126 | 127 | memuse = memory_profiler.memory_usage((dummy,), max_iterations=1, max_usage=True) 128 | self.__mem_usage_base = memuse[0] if type(memuse) is list else memuse 129 | 130 | def add_test_info( 131 | self, 132 | item, 133 | item_path, 134 | item_variant, 135 | item_loc, 136 | kind, 137 | component, 138 | item_start_time, 139 | total_time, 140 | user_time, 141 | kernel_time, 142 | mem_usage, 143 | ): 144 | if kind not in self.__scope: 145 | return 146 | mem_usage = float(mem_usage) - self.__mem_usage_base 147 | cpu_usage = (user_time + kernel_time) / total_time 148 | item_start_time = datetime.datetime.fromtimestamp(item_start_time).isoformat() 149 | final_component = self.__component.format(user_component=component) 150 | if final_component.endswith("."): 151 | final_component = final_component[:-1] 152 | item_variant = item_variant.replace("-", ", ") # No choice 153 | if self.__db and self.db_env_id is not None: 154 | self.__db.insert_metric( 155 | self.__session, 156 | self.db_env_id, 157 | item_start_time, 158 | item, 159 | item_path, 160 | item_variant, 161 | item_loc, 162 | kind, 163 | final_component, 164 | total_time, 165 | user_time, 166 | kernel_time, 167 | cpu_usage, 168 | mem_usage, 169 | ) 170 | if self.__remote and self.remote_env_id is not None: 171 | r = requests.post( 172 | f"{self.__remote}/metrics/", 173 | json={ 174 | "session_h": self.__session, 175 | "context_h": self.remote_env_id, 176 | "item_start_time": item_start_time, 177 | "item_path": item_path, 178 | "item": item, 179 | "item_variant": item_variant, 180 | "item_fs_loc": item_loc, 181 | "kind": kind, 182 | "component": final_component, 183 | "total_time": total_time, 184 | "user_time": user_time, 185 | "kernel_time": kernel_time, 186 | "cpu_usage": cpu_usage, 187 | "mem_usage": mem_usage, 188 | }, 189 | ) 190 | if r.status_code != HTTPStatus.CREATED: 191 | self.__remote = "" 192 | msg = f"Cannot insert values in remote monitor server ({r.status_code})! 
Deactivating...')" 193 | warnings.warn(msg) 194 | -------------------------------------------------------------------------------- /pytest_monitor/sys_utils.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import multiprocessing 3 | import os 4 | import platform 5 | import socket 6 | import subprocess 7 | import sys 8 | import warnings 9 | 10 | import psutil 11 | 12 | 13 | def collect_ci_info(): 14 | # Test for jenkins 15 | if "BUILD_NUMBER" in os.environ and ("BRANCH_NAME" in os.environ or "JOB_NAME" in os.environ): 16 | br = os.environ["BRANCH_NAME"] if "BRANCH_NAME" in os.environ else os.environ["JOB_NAME"] 17 | return { 18 | "pipeline_branch": br, 19 | "pipeline_build_no": os.environ["BUILD_NUMBER"], 20 | "__ci__": "jenkinsci", 21 | } 22 | # Test for CircleCI 23 | if "CIRCLE_JOB" in os.environ and "CIRCLE_BUILD_NUM" in os.environ: 24 | return { 25 | "pipeline_branch": os.environ["CIRCLE_JOB"], 26 | "pipeline_build_no": os.environ["CIRCLE_BUILD_NUM"], 27 | "__ci__": "circleci", 28 | } 29 | # Test for TravisCI 30 | if "TRAVIS_BUILD_NUMBER" in os.environ and "TRAVIS_BUILD_ID" in os.environ: 31 | return { 32 | "pipeline_branch": os.environ["TRAVIS_BUILD_ID"], 33 | "pipeline_build_no": os.environ["TRAVIS_BUILD_NUMBER"], 34 | "__ci__": "travisci", 35 | } 36 | # Test for DroneCI 37 | if "DRONE_REPO_BRANCH" in os.environ and "DRONE_BUILD_NUMBER" in os.environ: 38 | return { 39 | "pipeline_branch": os.environ["DRONE_REPO_BRANCH"], 40 | "pipeline_build_no": os.environ["DRONE_BUILD_NUMBER"], 41 | "__ci__": "droneci", 42 | } 43 | # Test for Gitlab CI 44 | if "CI_JOB_NAME" in os.environ and "CI_PIPELINE_ID" in os.environ: 45 | return { 46 | "pipeline_branch": os.environ["CI_JOB_NAME"], 47 | "pipeline_build_no": os.environ["CI_PIPELINE_ID"], 48 | "__ci__": "gitlabci", 49 | } 50 | # Test for Bitbucket CI 51 | if "BITBUCKET_BRANCH" in os.environ and "BITBUCKET_BUILD_NUMBER" in os.environ: 52 | return { 53 | "pipeline_branch": os.environ["BITBUCKET_BRANCH"], 54 | "pipeline_build_no": os.environ["BITBUCKET_BUILD_NUMBER"], 55 | "__ci__": "bitbucketci", 56 | } 57 | return {} 58 | 59 | 60 | def determine_scm_revision(): 61 | for scm, cmd in (("git", r"git rev-parse HEAD"), ("p4", r"p4 changes -m1 \#have")): 62 | p = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE) 63 | p_out, _ = p.communicate() 64 | if p.returncode == 0: 65 | scm_ref = p_out.decode(errors="ignore").split("\n", maxsplit=1)[0] 66 | if scm == "p4": 67 | scm_ref = scm_ref.split()[1] 68 | return scm_ref 69 | return "" 70 | 71 | 72 | def _get_cpu_string(): 73 | if platform.system().lower() == "darwin": 74 | old_path = os.environ["PATH"] 75 | os.environ["PATH"] = old_path + ":" + "/usr/sbin" 76 | ret = subprocess.check_output("sysctl -n machdep.cpu.brand_string", shell=True) 77 | os.environ["PATH"] = old_path 78 | return ret.decode().strip() 79 | if platform.system().lower() == "linux": 80 | with open("/proc/cpuinfo", "r", encoding="utf-8") as f: 81 | lines = [i for i in f if i.startswith("model name")] 82 | if lines: 83 | return lines[0].split(":")[1].strip() 84 | return platform.processor() 85 | 86 | 87 | class ExecutionContext: 88 | def __init__(self): 89 | self.__cpu_count = multiprocessing.cpu_count() 90 | self.__cpu_vendor = _get_cpu_string() 91 | if int(os.environ.get("PYTEST_MONITOR_FORCE_CPU_FREQ", "0")): 92 | self._read_cpu_freq_from_env() 93 | else: 94 | try: 95 | self.__cpu_freq_base = psutil.cpu_freq().current 96 | except (AttributeError, 
NotImplementedError, FileNotFoundError): 97 | warnings.warn("Unable to fetch CPU frequency. Trying to read it from environment..") 98 | self._read_cpu_freq_from_env() 99 | self.__proc_typ = platform.processor() 100 | self.__tot_mem = int(psutil.virtual_memory().total / 1024**2) 101 | self.__fqdn = socket.getfqdn() 102 | self.__machine = platform.machine() 103 | self.__arch = platform.architecture()[0] 104 | self.__system = f"{platform.system()} - {platform.release()}" 105 | self.__py_ver = sys.version 106 | 107 | def _read_cpu_freq_from_env(self): 108 | try: 109 | self.__cpu_freq_base = float(os.environ.get("PYTEST_MONITOR_CPU_FREQ", "0.")) 110 | except (ValueError, TypeError): 111 | warnings.warn("Wrong type/value while reading cpu frequency from environment. Forcing to 0.0.") 112 | self.__cpu_freq_base = 0.0 113 | 114 | def to_dict(self): 115 | return { 116 | "cpu_count": self.cpu_count, 117 | "cpu_frequency": self.cpu_frequency, 118 | "cpu_type": self.cpu_type, 119 | "cpu_vendor": self.cpu_vendor, 120 | "ram_total": self.ram_total, 121 | "machine_node": self.fqdn, 122 | "machine_type": self.machine, 123 | "machine_arch": self.architecture, 124 | "system_info": self.system_info, 125 | "python_info": self.python_info, 126 | "h": self.compute_hash(), 127 | } 128 | 129 | @property 130 | def cpu_count(self): 131 | return self.__cpu_count 132 | 133 | @property 134 | def cpu_frequency(self): 135 | return self.__cpu_freq_base 136 | 137 | @property 138 | def cpu_type(self): 139 | return self.__proc_typ 140 | 141 | @property 142 | def cpu_vendor(self): 143 | return self.__cpu_vendor 144 | 145 | @property 146 | def ram_total(self): 147 | return self.__tot_mem 148 | 149 | @property 150 | def fqdn(self): 151 | return self.__fqdn 152 | 153 | @property 154 | def machine(self): 155 | return self.__machine 156 | 157 | @property 158 | def architecture(self): 159 | return self.__arch 160 | 161 | @property 162 | def system_info(self): 163 | return self.__system 164 | 165 | @property 166 | def python_info(self): 167 | return self.__py_ver 168 | 169 | def compute_hash(self): 170 | hr = hashlib.md5() 171 | hr.update(str(self.__cpu_count).encode()) 172 | hr.update(str(self.__cpu_freq_base).encode()) 173 | hr.update(str(self.__proc_typ).encode()) 174 | hr.update(str(self.__tot_mem).encode()) 175 | hr.update(str(self.__fqdn).encode()) 176 | hr.update(str(self.__machine).encode()) 177 | hr.update(str(self.__arch).encode()) 178 | hr.update(str(self.__system).encode()) 179 | hr.update(str(self.__py_ver).encode()) 180 | return hr.hexdigest() 181 | -------------------------------------------------------------------------------- /requirements.dev.txt: -------------------------------------------------------------------------------- 1 | psutil>=5.1.0 2 | memory_profiler>=0.58 3 | pytest 4 | requests 5 | black 6 | isort 7 | flake8=6.1.0 8 | flake8-builtins=2.1.0 9 | flake8-simplify=0.19.3 10 | flake8-comprehensions=3.10.1 11 | flake8-pytest-style=1.6.0 12 | flake8-return=1.2.0 13 | flake8-pyproject=1.2.3 14 | pre-commit=3.3.3 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | psutil>=5.1.0 2 | memory_profiler>=0.58 3 | pytest 4 | requests 5 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | pytest_plugins = ["pytester"] 2 | 
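The test modules below all follow the same recipe: write a throwaway test file through the pytester fixture enabled above, run pytest on it, then open the .pymon SQLite database that the plugin leaves behind. As a reference for inspecting such a database by hand, here is a minimal sketch (assuming a .pymon file produced by a prior monitored run; the column names come from the schema created in pytest_monitor/handler.py):

import sqlite3

cnx = sqlite3.connect(".pymon")  # database written by a previous monitored run
slowest = cnx.execute(
    "SELECT ITEM, TOTAL_TIME, MEM_USAGE FROM TEST_METRICS ORDER BY TOTAL_TIME DESC LIMIT 5"
).fetchall()
for item, total_time, mem_usage in slowest:
    print(f"{item}: {total_time:.2f}s wall time, {mem_usage:.1f} MB peak memory")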
-------------------------------------------------------------------------------- /tests/test_monitor.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import json 3 | import pathlib 4 | import sqlite3 5 | 6 | import pytest 7 | 8 | 9 | def test_monitor_basic_test(testdir): 10 | """Make sure that pytest-monitor does the job without impacting user tests.""" 11 | # create a temporary pytest test module 12 | testdir.makepyfile( 13 | """ 14 | import time 15 | 16 | 17 | def test_ok(): 18 | time.sleep(0.5) 19 | x = ['a' * i for i in range(100)] 20 | assert len(x) == 100 21 | 22 | """ 23 | ) 24 | 25 | # run pytest with the following cmd args 26 | result = testdir.runpytest("-vv", "--tag", "version=12.3.5") 27 | 28 | # fnmatch_lines does an assertion internally 29 | result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 30 | 31 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 32 | assert pymon_path.exists() 33 | 34 | # make sure that that we get a '0' exit code for the test suite 35 | result.assert_outcomes(passed=1) 36 | 37 | db = sqlite3.connect(str(pymon_path)) 38 | cursor = db.cursor() 39 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 40 | assert len(cursor.fetchall()) == 1 41 | cursor = db.cursor() 42 | tags = json.loads(cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0]) 43 | assert "description" not in tags 44 | assert "version" in tags 45 | assert tags["version"] == "12.3.5" 46 | 47 | 48 | def test_monitor_basic_test_description(testdir): 49 | """Make sure that pytest-monitor does the job without impacting user tests.""" 50 | # create a temporary pytest test module 51 | testdir.makepyfile( 52 | """ 53 | import time 54 | 55 | 56 | def test_ok(): 57 | time.sleep(0.5) 58 | x = ['a' * i for i in range(100)] 59 | assert len(x) == 100 60 | 61 | """ 62 | ) 63 | 64 | # run pytest with the following cmd args 65 | result = testdir.runpytest("-vv", "--description", '"Test"', "--tag", "version=12.3.5") 66 | 67 | # fnmatch_lines does an assertion internally 68 | result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 69 | 70 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 71 | assert pymon_path.exists() 72 | 73 | # make sure that that we get a '0' exit code for the test suite 74 | result.assert_outcomes(passed=1) 75 | 76 | db = sqlite3.connect(str(pymon_path)) 77 | cursor = db.cursor() 78 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 79 | assert len(cursor.fetchall()) == 1 80 | cursor = db.cursor() 81 | tags = json.loads(cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0]) 82 | assert "description" in tags 83 | assert tags["description"] == '"Test"' 84 | assert "version" in tags 85 | assert tags["version"] == "12.3.5" 86 | 87 | 88 | def test_monitor_pytest_skip_marker(testdir): 89 | """Make sure that pytest-monitor does the job without impacting user tests.""" 90 | # create a temporary pytest test module 91 | testdir.makepyfile( 92 | """ 93 | import pytest 94 | import time 95 | 96 | @pytest.mark.skip("Some reason") 97 | def test_skipped(): 98 | assert True 99 | 100 | """ 101 | ) 102 | 103 | # run pytest with the following cmd args 104 | result = testdir.runpytest("-v") 105 | 106 | # fnmatch_lines does an assertion internally 107 | result.stdout.fnmatch_lines(["*::test_skipped SKIPPED*"]) 108 | 109 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 110 | assert pymon_path.exists() 111 | 112 | # make sure that that we get a '0' exit code for the testsuite 113 | 
result.assert_outcomes(skipped=1) 114 | 115 | db = sqlite3.connect(str(pymon_path)) 116 | cursor = db.cursor() 117 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 118 | assert not len(cursor.fetchall()) 119 | 120 | 121 | def test_monitor_pytest_skip_marker_on_fixture(testdir): 122 | """Make sure that pytest-monitor does the job without impacting user tests.""" 123 | # create a temporary pytest test module 124 | testdir.makepyfile( 125 | """ 126 | import pytest 127 | import time 128 | 129 | @pytest.fixture 130 | def a_fixture(): 131 | pytest.skip("because this is the scenario being tested") 132 | 133 | def test_skipped(a_fixture): 134 | assert True 135 | 136 | """ 137 | ) 138 | 139 | # run pytest with the following cmd args 140 | result = testdir.runpytest("-v") 141 | 142 | # fnmatch_lines does an assertion internally 143 | result.stdout.fnmatch_lines(["*::test_skipped SKIPPED*"]) 144 | 145 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 146 | assert pymon_path.exists() 147 | 148 | # make sure that that we get a '0' exit code for the testsuite 149 | result.assert_outcomes(skipped=1) 150 | 151 | db = sqlite3.connect(str(pymon_path)) 152 | cursor = db.cursor() 153 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 154 | assert not len(cursor.fetchall()) 155 | 156 | 157 | def test_bad_markers(testdir): 158 | """Make sure that pytest-monitor warns about unknown markers.""" 159 | # create a temporary pytest test module 160 | testdir.makepyfile( 161 | """ 162 | import pytest 163 | import time 164 | 165 | 166 | @pytest.mark.monitor_bad_marker 167 | def test_ok(): 168 | time.sleep(0.1) 169 | x = ['a' * i for i in range(100)] 170 | assert len(x) == 100 171 | 172 | """ 173 | ) 174 | 175 | # run pytest with the following cmd args 176 | result = testdir.runpytest("-v") 177 | 178 | # fnmatch_lines does an assertion internally 179 | result.stdout.fnmatch_lines(["*::test_ok PASSED*", "*Nothing known about marker monitor_bad_marker*"]) 180 | 181 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 182 | assert pymon_path.exists() 183 | 184 | # make sure that that we get a '0' exit code for the testsuite 185 | result.assert_outcomes(passed=1) 186 | 187 | db = sqlite3.connect(str(pymon_path)) 188 | cursor = db.cursor() 189 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 190 | assert len(cursor.fetchall()) == 1 191 | 192 | 193 | def test_monitor_skip_module(testdir): 194 | """Make sure that pytest-monitor correctly understand the monitor_skip_test marker.""" 195 | # create a temporary pytest test module 196 | testdir.makepyfile( 197 | """ 198 | import pytest 199 | import time 200 | 201 | pytestmark = pytest.mark.monitor_skip_test 202 | 203 | def test_ok_not_monitored(): 204 | time.sleep(0.1) 205 | x = ['a' * i for i in range(100)] 206 | assert len(x) == 100 207 | 208 | def test_another_function_ok_not_monitored(): 209 | assert True 210 | """ 211 | ) 212 | 213 | # run pytest with the following cmd args 214 | result = testdir.runpytest("-v") 215 | 216 | # fnmatch_lines does an assertion internally 217 | result.stdout.fnmatch_lines( 218 | [ 219 | "*::test_ok_not_monitored PASSED*", 220 | "*::test_another_function_ok_not_monitored PASSED*", 221 | ] 222 | ) 223 | 224 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 225 | assert pymon_path.exists() 226 | 227 | # make sure that that we get a '0' exit code for the testsuite 228 | result.assert_outcomes(passed=2) 229 | 230 | db = sqlite3.connect(str(pymon_path)) 231 | cursor = db.cursor() 232 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 233 | 
assert not len(cursor.fetchall())  # Nothing ran 234 | 235 | 236 | def test_monitor_skip_test(testdir): 237 | """Make sure that pytest-monitor correctly understands the monitor_skip_test marker.""" 238 | # create a temporary pytest test module 239 | testdir.makepyfile( 240 | """ 241 | import pytest 242 | import time 243 | 244 | 245 | @pytest.mark.monitor_skip_test 246 | def test_not_monitored(): 247 | time.sleep(0.1) 248 | x = ['a' * i for i in range(100)] 249 | assert len(x) == 100 250 | 251 | """ 252 | ) 253 | 254 | # run pytest with the following cmd args 255 | result = testdir.runpytest("-v") 256 | 257 | # fnmatch_lines does an assertion internally 258 | result.stdout.fnmatch_lines(["*::test_not_monitored PASSED*"]) 259 | 260 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 261 | assert pymon_path.exists() 262 | 263 | # make sure that we get a '0' exit code for the testsuite 264 | result.assert_outcomes(passed=1) 265 | 266 | db = sqlite3.connect(str(pymon_path)) 267 | cursor = db.cursor() 268 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 269 | assert not len(cursor.fetchall())  # nothing monitored 270 | 271 | 272 | def test_monitor_skip_test_if(testdir): 273 | """Make sure that pytest-monitor correctly understands the monitor_skip_test_if marker.""" 274 | # create a temporary pytest test module 275 | testdir.makepyfile( 276 | """ 277 | import pytest 278 | import time 279 | 280 | 281 | @pytest.mark.monitor_skip_test_if(True) 282 | def test_not_monitored(): 283 | time.sleep(0.1) 284 | x = ['a' * i for i in range(100)] 285 | assert len(x) == 100 286 | 287 | 288 | @pytest.mark.monitor_skip_test_if(False) 289 | def test_monitored(): 290 | time.sleep(0.1) 291 | x = ['a' * i for i in range(100)] 292 | assert len(x) == 100 293 | 294 | """ 295 | ) 296 | 297 | # run pytest with the following cmd args 298 | result = testdir.runpytest("-v") 299 | 300 | # fnmatch_lines does an assertion internally 301 | result.stdout.fnmatch_lines(["*::test_not_monitored PASSED*", "*::test_monitored PASSED*"]) 302 | 303 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 304 | assert pymon_path.exists() 305 | 306 | # make sure that we get a '0' exit code for the testsuite 307 | result.assert_outcomes(passed=2) 308 | 309 | db = sqlite3.connect(str(pymon_path)) 310 | cursor = db.cursor() 311 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 312 | assert len(cursor.fetchall()) == 1 313 | 314 | 315 | def test_monitor_no_db(testdir): 316 | """Make sure that pytest-monitor disables monitoring when no storage backend is requested.""" 317 | # create a temporary pytest test module 318 | testdir.makepyfile( 319 | """ 320 | import pytest 321 | import time 322 | 323 | 324 | def test_it(): 325 | time.sleep(0.1) 326 | x = ['a' * i for i in range(100)] 327 | assert len(x) == 100 328 | 329 | 330 | def test_that(): 331 | time.sleep(0.1) 332 | x = ['a' * i for i in range(100)] 333 | assert len(x) == 100 334 | 335 | """ 336 | ) 337 | 338 | wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring."
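# With --no-db and no --remote-server there is nowhere to store results, so
# pytest_sessionstart is expected to emit exactly this warning and disable
# monitoring altogether.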
339 | with pytest.warns(UserWarning, match=wrn): 340 | # run pytest with the following cmd args 341 | result = testdir.runpytest("--no-db", "-v") 342 | 343 | # fnmatch_lines does an assertion internally 344 | result.stdout.fnmatch_lines(["*::test_it PASSED*", "*::test_that PASSED*"]) 345 | 346 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 347 | assert not pymon_path.exists() 348 | 349 | # make sure that that we get a '0' exit code for the testsuite 350 | result.assert_outcomes(passed=2) 351 | 352 | 353 | def test_monitor_basic_output(testdir): 354 | """Make sure that pytest-monitor does not repeat captured output (issue #26).""" 355 | # create a temporary pytest test module 356 | testdir.makepyfile( 357 | """ 358 | def test_it(): 359 | print('Hello World') 360 | """ 361 | ) 362 | 363 | wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring." 364 | with pytest.warns(UserWarning, match=wrn): 365 | # run pytest with the following cmd args 366 | result = testdir.runpytest("--no-db", "-s", "-vv") 367 | 368 | # fnmatch_lines does an assertion internally 369 | result.stdout.fnmatch_lines(["*::test_it Hello World*"]) 370 | assert "Hello World" != result.stdout.get_lines_after("*Hello World")[0] 371 | 372 | # make sure that that we get a '0' exit code for the testsuite 373 | result.assert_outcomes(passed=1) 374 | 375 | 376 | def test_monitor_with_doctest(testdir): 377 | """Make sure that pytest-monitor does not fail to run doctest.""" 378 | # create a temporary pytest test module 379 | testdir.makepyfile( 380 | ''' 381 | def run(a, b): 382 | """ 383 | >>> run(3, 30) 384 | 33 385 | """ 386 | return a + b 387 | ''' 388 | ) 389 | 390 | # run pytest with the following cmd args 391 | result = testdir.runpytest("--doctest-modules", "-vv") 392 | 393 | # make sure that that we get a '0' exit code for the testsuite 394 | result.assert_outcomes(passed=1) 395 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 396 | assert pymon_path.exists() 397 | 398 | db = sqlite3.connect(str(pymon_path)) 399 | cursor = db.cursor() 400 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 401 | assert not len(cursor.fetchall()) 402 | 403 | pymon_path.unlink() 404 | result = testdir.runpytest("--doctest-modules", "--no-monitor", "-vv") 405 | 406 | # make sure that that we get a '0' exit code for the testsuite 407 | result.assert_outcomes(passed=1) 408 | assert not pymon_path.exists() 409 | -------------------------------------------------------------------------------- /tests/test_monitor_component.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import pathlib 3 | import sqlite3 4 | 5 | 6 | def test_monitor_no_component(testdir): 7 | """Make sure that pytest-monitor has an empty component by default""" 8 | # create a temporary pytest test module 9 | testdir.makepyfile( 10 | """ 11 | import time 12 | 13 | 14 | def test_ok(): 15 | time.sleep(0.5) 16 | x = ['a' * i for i in range(100)] 17 | assert len(x) == 100 18 | 19 | """ 20 | ) 21 | 22 | # run pytest with the following cmd args 23 | result = testdir.runpytest("-v") 24 | 25 | # fnmatch_lines does an assertion internally 26 | result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 27 | 28 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 29 | assert pymon_path.exists() 30 | 31 | # make sure that that we get a '0' exit code for the testsuite 32 | result.assert_outcomes(passed=1) 33 | 34 | db = sqlite3.connect(str(pymon_path)) 35 | cursor = db.cursor() 36 | 
cursor.execute("SELECT ITEM FROM TEST_METRICS;") 37 | assert len(cursor.fetchall()) == 1 38 | cursor.execute("SELECT ITEM FROM TEST_METRICS WHERE COMPONENT != '' AND ITEM LIKE '%test_ok';") 39 | assert not len(cursor.fetchall()) 40 | 41 | 42 | def test_monitor_force_component(testdir): 43 | """Make sure that pytest-monitor forces the component name if required""" 44 | # create a temporary pytest test module 45 | testdir.makepyfile( 46 | """ 47 | import time 48 | 49 | 50 | def test_force_ok(): 51 | time.sleep(0.5) 52 | x = ['a' * i for i in range(100)] 53 | assert len(x) == 100 54 | 55 | """ 56 | ) 57 | 58 | # run pytest with the following cmd args 59 | result = testdir.runpytest("--force-component", "my_component", "-v") 60 | 61 | # fnmatch_lines does an assertion internally 62 | result.stdout.fnmatch_lines(["*::test_force_ok PASSED*"]) 63 | 64 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 65 | assert pymon_path.exists() 66 | 67 | # make sure that that we get a '0' exit code for the testsuite 68 | result.assert_outcomes(passed=1) 69 | 70 | db = sqlite3.connect(str(pymon_path)) 71 | cursor = db.cursor() 72 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 73 | assert len(cursor.fetchall()) == 1 74 | cursor.execute( 75 | "SELECT ITEM FROM TEST_METRICS" " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_force_ok%';" 76 | ) 77 | assert len(cursor.fetchall()) == 1 78 | 79 | 80 | def test_monitor_prefix_component(testdir): 81 | """Make sure that pytest-monitor has a prefixed component""" 82 | # create a temporary pytest test module 83 | testdir.makepyfile( 84 | """ 85 | import time 86 | 87 | pytest_monitor_component = 'internal' 88 | 89 | def test_prefix_ok(): 90 | time.sleep(0.5) 91 | x = ['a' * i for i in range(100)] 92 | assert len(x) == 100 93 | 94 | """ 95 | ) 96 | 97 | # run pytest with the following cmd args 98 | result = testdir.runpytest("--component-prefix", "my_component", "-v") 99 | 100 | # fnmatch_lines does an assertion internally 101 | result.stdout.fnmatch_lines(["*::test_prefix_ok PASSED*"]) 102 | 103 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 104 | assert pymon_path.exists() 105 | 106 | # make sure that that we get a '0' exit code for the testsuite 107 | result.assert_outcomes(passed=1) 108 | 109 | db = sqlite3.connect(str(pymon_path)) 110 | cursor = db.cursor() 111 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 112 | assert len(cursor.fetchall()) == 1 113 | cursor.execute( 114 | "SELECT ITEM FROM TEST_METRICS" " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';" 115 | ) 116 | assert not len(cursor.fetchall()) 117 | cursor.execute( 118 | "SELECT ITEM FROM TEST_METRICS" " WHERE COMPONENT == 'my_component.internal' AND ITEM LIKE '%test_prefix_ok%';" 119 | ) 120 | assert len(cursor.fetchall()) == 1 121 | 122 | 123 | def test_monitor_prefix_without_component(testdir): 124 | """Make sure that pytest-monitor has a prefixed component""" 125 | # create a temporary pytest test module 126 | testdir.makepyfile( 127 | """ 128 | import time 129 | 130 | 131 | def test_prefix_ok(): 132 | time.sleep(0.5) 133 | x = ['a' * i for i in range(100)] 134 | assert len(x) == 100 135 | 136 | """ 137 | ) 138 | 139 | # run pytest with the following cmd args 140 | result = testdir.runpytest("--component-prefix", "my_component", "-v") 141 | 142 | # fnmatch_lines does an assertion internally 143 | result.stdout.fnmatch_lines(["*::test_prefix_ok PASSED*"]) 144 | 145 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 146 | assert pymon_path.exists() 147 | 148 | # 
make sure that that we get a '0' exit code for the testsuite 149 | result.assert_outcomes(passed=1) 150 | 151 | db = sqlite3.connect(str(pymon_path)) 152 | cursor = db.cursor() 153 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 154 | assert len(cursor.fetchall()) == 1 155 | cursor.execute( 156 | "SELECT ITEM FROM TEST_METRICS" " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';" 157 | ) 158 | assert len(cursor.fetchall()) == 1 159 | -------------------------------------------------------------------------------- /tests/test_monitor_context.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pathlib 3 | import sqlite3 4 | 5 | import mock 6 | import pytest 7 | 8 | CPU_FREQ_PATH = "pytest_monitor.sys_utils.psutil.cpu_freq" 9 | 10 | TEST_CONTENT = """ 11 | import time 12 | 13 | 14 | def test_ok(): 15 | time.sleep(0.5) 16 | x = ['a' * i for i in range(100)] 17 | assert len(x) == 100 18 | """ 19 | 20 | 21 | def get_nb_metrics_with_cpu_freq(path): 22 | db_path = path / ".pymon" 23 | db = sqlite3.connect(db_path.as_posix()) 24 | cursor = db.cursor() 25 | cursor.execute("SELECT ITEM FROM TEST_METRICS;") 26 | nb_metrics = len(cursor.fetchall()) 27 | cursor = db.cursor() 28 | cursor.execute("SELECT CPU_FREQUENCY_MHZ FROM EXECUTION_CONTEXTS;") 29 | rows = cursor.fetchall() 30 | assert len(rows) == 1 31 | cpu_freq = rows[0][0] 32 | return nb_metrics, cpu_freq 33 | 34 | 35 | def test_force_cpu_freq_set_0_use_psutil(testdir): 36 | """Test that when force mode is set, we do not call psutil to fetch CPU's frequency""" 37 | # create a temporary pytest test module 38 | testdir.makepyfile(TEST_CONTENT) 39 | 40 | with mock.patch(CPU_FREQ_PATH, return_value=1500) as cpu_freq_mock: 41 | os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"] = "0" 42 | os.environ["PYTEST_MONITOR_CPU_FREQ"] = "3000" 43 | # run pytest with the following cmd args 44 | result = testdir.runpytest("-vv") 45 | del os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"] 46 | del os.environ["PYTEST_MONITOR_CPU_FREQ"] 47 | cpu_freq_mock.assert_called() 48 | 49 | # fnmatch_lines does an assertion internally 50 | result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 51 | # make sure that we get a '0' exit code for the test suite 52 | result.assert_outcomes(passed=1) 53 | 54 | nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir))) 55 | 56 | assert (nb_metrics, cpu_freq) == (1, 3000) 57 | 58 | 59 | def test_force_cpu_freq(testdir): 60 | """Test that when force mode is set, we do not call psutil to fetch CPU's frequency""" 61 | # create a temporary pytest test module 62 | testdir.makepyfile(TEST_CONTENT) 63 | 64 | with mock.patch(CPU_FREQ_PATH, return_value=1500) as cpu_freq_mock: 65 | os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"] = "1" 66 | os.environ["PYTEST_MONITOR_CPU_FREQ"] = "3000" 67 | # run pytest with the following cmd args 68 | result = testdir.runpytest("-vv") 69 | del os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"] 70 | del os.environ["PYTEST_MONITOR_CPU_FREQ"] 71 | cpu_freq_mock.assert_not_called() 72 | 73 | # fnmatch_lines does an assertion internally 74 | result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 75 | # make sure that we get a '0' exit code for the test suite 76 | result.assert_outcomes(passed=1) 77 | 78 | nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir))) 79 | 80 | assert (nb_metrics, cpu_freq) == (1, 3000) 81 | 82 | 83 | @pytest.mark.parametrize("effect", [AttributeError, NotImplementedError, FileNotFoundError]) 84 | 
def test_when_cpu_freq_cannot_fetch_frequency_set_freq_by_using_fallback(effect, testdir): 85 | """Make sure that pytest-monitor fallback takes value of CPU FREQ from special env var""" 86 | # create a temporary pytest test module 87 | testdir.makepyfile(TEST_CONTENT) 88 | 89 | with mock.patch(CPU_FREQ_PATH, side_effect=effect) as cpu_freq_mock: 90 | os.environ["PYTEST_MONITOR_CPU_FREQ"] = "3000" 91 | # run pytest with the following cmd args 92 | result = testdir.runpytest("-vv") 93 | del os.environ["PYTEST_MONITOR_CPU_FREQ"] 94 | cpu_freq_mock.assert_called() 95 | 96 | # fnmatch_lines does an assertion internally 97 | result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 98 | # make sure that we get a '0' exit code for the test suite 99 | result.assert_outcomes(passed=1) 100 | 101 | nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir))) 102 | 103 | assert (nb_metrics, cpu_freq) == (1, 3000) 104 | 105 | 106 | @pytest.mark.parametrize("effect", [AttributeError, NotImplementedError, FileNotFoundError]) 107 | def test_when_cpu_freq_cannot_fetch_frequency_set_freq_to_0(effect, testdir): 108 | """Make sure that pytest-monitor's fallback mechanism is efficient enough.""" 109 | # create a temporary pytest test module 110 | testdir.makepyfile(TEST_CONTENT) 111 | 112 | with mock.patch(CPU_FREQ_PATH, side_effect=effect) as cpu_freq_mock: 113 | # run pytest with the following cmd args 114 | result = testdir.runpytest("-vv") 115 | cpu_freq_mock.assert_called() 116 | 117 | # fnmatch_lines does an assertion internally 118 | result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 119 | # make sure that we get a '0' exit code for the test suite 120 | result.assert_outcomes(passed=1) 121 | 122 | nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir))) 123 | 124 | assert (nb_metrics, cpu_freq) == (1, 0) 125 | 126 | 127 | @mock.patch("pytest_monitor.sys_utils.psutil.cpu_freq", return_value=None) 128 | def test_when_cpu_freq_cannot_fetch_frequency(cpu_freq_mock, testdir): 129 | """Make sure that pytest-monitor does the job when we have issue in collecing context resources""" 130 | # create a temporary pytest test module 131 | testdir.makepyfile(TEST_CONTENT) 132 | 133 | # run pytest with the following cmd args 134 | result = testdir.runpytest("-vv") 135 | 136 | # fnmatch_lines does an assertion internally 137 | result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 138 | # make sure that we get a '0' exit code for the test suite 139 | result.assert_outcomes(passed=1) 140 | 141 | nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir))) 142 | 143 | assert (nb_metrics, cpu_freq) == (1, 0) 144 | -------------------------------------------------------------------------------- /tests/test_monitor_in_ci.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | import pathlib 4 | import sqlite3 5 | 6 | 7 | def test_monitor_no_ci(testdir): 8 | """Make sure that pytest-monitor does not insert CI information.""" 9 | # create a temporary pytest test module 10 | testdir.makepyfile( 11 | """ 12 | import time 13 | 14 | 15 | def test_ok(): 16 | time.sleep(0.5) 17 | x = ['a' * i for i in range(100)] 18 | assert len(x) == 100 19 | 20 | """ 21 | ) 22 | 23 | envs = {} 24 | for k in [ 25 | "CIRCLE_BUILD_NUM", 26 | "CIRCLE_JOB", 27 | "DRONE_REPO_BRANCH", 28 | "DRONE_BUILD_NUMBER", 29 | "BUILD_NUMBER", 30 | "JOB_NUMBER", 31 | "JOB_NAME", 32 | "TRAVIS_BUILD_ID", 33 | "TRAVIS_BUILD_NUMBER", 34 | 
"CI_PIPELINE_ID", 35 | "CI_JOB_NAME", 36 | "BITBUCKET_BRANCH", 37 | "BITBUCKET_BUILD_NUMBER", 38 | ]: 39 | if k in os.environ: 40 | envs[k] = os.environ[k] 41 | del os.environ[k] 42 | 43 | # run pytest with the following cmd args 44 | result = testdir.runpytest("-v") 45 | 46 | # fnmatch_lines does an assertion internally 47 | result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 48 | 49 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 50 | assert pymon_path.exists() 51 | 52 | # make sure that that we get a '0' exit code for the testsuite 53 | result.assert_outcomes(passed=1) 54 | 55 | db = sqlite3.connect(str(pymon_path)) 56 | cursor = db.cursor() 57 | cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;") 58 | desc = cursor.fetchall() 59 | assert len(desc) == 1 # current test 60 | assert desc[0][0] == "{}" 61 | for k in envs.keys(): 62 | os.environ[k] = envs[k] 63 | 64 | 65 | def test_monitor_jenkins_ci(testdir): 66 | """Make sure that pytest-monitor correctly handle Jenkins CI information.""" 67 | # create a temporary pytest test module 68 | testdir.makepyfile( 69 | """ 70 | import time 71 | 72 | 73 | def test_ok(): 74 | time.sleep(0.5) 75 | x = ['a' * i for i in range(100)] 76 | assert len(x) == 100 77 | 78 | """ 79 | ) 80 | 81 | def check_that(the_result, match): 82 | # fnmatch_lines does an assertion internally 83 | the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 84 | 85 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 86 | assert pymon_path.exists() 87 | 88 | # make sure that that we get a '0' exit code for the testsuite 89 | the_result.assert_outcomes(passed=1) 90 | 91 | db = sqlite3.connect(str(pymon_path)) 92 | cursor = db.cursor() 93 | cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;") 94 | desc = cursor.fetchall() 95 | assert len(desc) == 1 # current test 96 | assert desc[0][0] == match 97 | pymon_path.unlink() 98 | 99 | run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "jenkinsci"}' 100 | 101 | envs = {} 102 | for k in [ 103 | "CIRCLE_BUILD_NUM", 104 | "CIRCLE_JOB", 105 | "DRONE_REPO_BRANCH", 106 | "DRONE_BUILD_NUMBER", 107 | "BUILD_NUMBER", 108 | "JOB_NUMBER", 109 | "JOB_NAME", 110 | "TRAVIS_BUILD_ID", 111 | "TRAVIS_BUILD_NUMBER", 112 | "CI_PIPELINE_ID", 113 | "CI_JOB_NAME", 114 | "BITBUCKET_BRANCH", 115 | "BITBUCKET_BUILD_NUMBER", 116 | ]: 117 | if k in os.environ: 118 | envs[k] = os.environ[k] 119 | del os.environ[k] 120 | 121 | for env, exp in [ 122 | ({"BUILD_NUMBER": "123"}, "{}"), 123 | ({"BUILD_NUMBER": "123", "JOB_NAME": "test"}, run_description), 124 | ({"BUILD_NUMBER": "123", "BRANCH_NAME": "test"}, run_description), 125 | ( 126 | {"BUILD_NUMBER": "123", "JOB_NAME": "test-123", "BRANCH_NAME": "test"}, 127 | run_description, 128 | ), 129 | ]: 130 | if "BUILD_NUMBER" in os.environ: 131 | del os.environ["BUILD_NUMBER"] 132 | if "JOB_NUMBER" in os.environ: 133 | del os.environ["JOB_NAME"] 134 | if "BRANCH_NUMBER" in os.environ: 135 | del os.environ["BRANCH_NAME"] 136 | 137 | for k, v in env.items(): 138 | os.environ[k] = v 139 | 140 | result = testdir.runpytest("-v") 141 | check_that(result, match=exp) 142 | 143 | if "BUILD_NUMBER" in os.environ: 144 | del os.environ["BUILD_NUMBER"] 145 | if "JOB_NUMBER" in os.environ: 146 | del os.environ["JOB_NAME"] 147 | if "BRANCH_NUMBER" in os.environ: 148 | del os.environ["BRANCH_NAME"] 149 | 150 | 151 | def test_monitor_gitlab_ci(testdir): 152 | """Make sure that pytest-monitor correctly handle Gitlab CI information.""" 153 | # create a temporary pytest test module 
154 | testdir.makepyfile( 155 | """ 156 | import time 157 | 158 | 159 | def test_ok(): 160 | time.sleep(0.5) 161 | x = ['a' * i for i in range(100)] 162 | assert len(x) == 100 163 | 164 | """ 165 | ) 166 | 167 | def check_that(the_result, match): 168 | # fnmatch_lines does an assertion internally 169 | the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 170 | 171 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 172 | assert pymon_path.exists() 173 | 174 | # make sure that that we get a '0' exit code for the testsuite 175 | the_result.assert_outcomes(passed=1) 176 | 177 | db = sqlite3.connect(str(pymon_path)) 178 | cursor = db.cursor() 179 | cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;") 180 | desc = cursor.fetchall() 181 | assert len(desc) == 1 # current test 182 | assert desc[0][0] == match 183 | pymon_path.unlink() 184 | 185 | run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "gitlabci"}' 186 | envs = {} 187 | for k in [ 188 | "CIRCLE_BUILD_NUM", 189 | "CIRCLE_JOB", 190 | "DRONE_REPO_BRANCH", 191 | "DRONE_BUILD_NUMBER", 192 | "BUILD_NUMBER", 193 | "JOB_NUMBER", 194 | "JOB_NAME", 195 | "TRAVIS_BUILD_ID", 196 | "TRAVIS_BUILD_NUMBER", 197 | "CI_PIPELINE_ID", 198 | "CI_JOB_NAME", 199 | "BITBUCKET_BRANCH", 200 | "BITBUCKET_BUILD_NUMBER", 201 | ]: 202 | if k in os.environ: 203 | envs[k] = os.environ[k] 204 | del os.environ[k] 205 | 206 | for env, exp in [ 207 | ({"CI_PIPELINE_ID": "123"}, "{}"), 208 | ({"CI_PIPELINE_ID": "123", "CI_JOB_NAME": "test"}, run_description), 209 | ({"CI_JOB_NAME": "123"}, "{}"), 210 | ]: 211 | if "CI_PIPELINE_ID" in os.environ: 212 | del os.environ["CI_PIPELINE_ID"] 213 | if "CI_JOB_NAME" in os.environ: 214 | del os.environ["CI_JOB_NAME"] 215 | 216 | for k, v in env.items(): 217 | os.environ[k] = v 218 | 219 | result = testdir.runpytest("-v") 220 | check_that(result, match=exp) 221 | 222 | if "CI_PIPELINE_ID" in os.environ: 223 | del os.environ["CI_PIPELINE_ID"] 224 | if "CI_JOB_NAME" in os.environ: 225 | del os.environ["CI_JOB_NAME"] 226 | 227 | 228 | def test_monitor_travis_ci(testdir): 229 | """Make sure that pytest-monitor correctly handle Travis CI information.""" 230 | # create a temporary pytest test module 231 | testdir.makepyfile( 232 | """ 233 | import time 234 | 235 | 236 | def test_ok(): 237 | time.sleep(0.5) 238 | x = ['a' * i for i in range(100)] 239 | assert len(x) == 100 240 | 241 | """ 242 | ) 243 | 244 | def check_that(the_result, match): 245 | # fnmatch_lines does an assertion internally 246 | the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 247 | 248 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 249 | assert pymon_path.exists() 250 | 251 | # make sure that that we get a '0' exit code for the testsuite 252 | the_result.assert_outcomes(passed=1) 253 | 254 | db = sqlite3.connect(str(pymon_path)) 255 | cursor = db.cursor() 256 | cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;") 257 | desc = cursor.fetchall() 258 | assert len(desc) == 1 # current test 259 | assert desc[0][0] == match 260 | pymon_path.unlink() 261 | 262 | run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "travisci"}' 263 | envs = {} 264 | for k in [ 265 | "CIRCLE_BUILD_NUM", 266 | "CIRCLE_JOB", 267 | "DRONE_REPO_BRANCH", 268 | "DRONE_BUILD_NUMBER", 269 | "BUILD_NUMBER", 270 | "JOB_NUMBER", 271 | "JOB_NAME", 272 | "TRAVIS_BUILD_ID", 273 | "TRAVIS_BUILD_NUMBER", 274 | "CI_PIPELINE_ID", 275 | "CI_JOB_NAME", 276 | "BITBUCKET_BRANCH", 277 | "BITBUCKET_BUILD_NUMBER", 278 | ]: 
]: 279 | if k in os.environ: 280 | envs[k] = os.environ[k] 281 | del os.environ[k] 282 | 283 | for env, exp in [ 284 | ({"TRAVIS_BUILD_NUMBER": "123"}, "{}"), 285 | ({"TRAVIS_BUILD_NUMBER": "123", "TRAVIS_BUILD_ID": "test"}, run_description), 286 | ({"TRAVIS_BUILD_ID": "test-123"}, "{}"), 287 | ]: 288 | if "TRAVIS_BUILD_NUMBER" in os.environ: 289 | del os.environ["TRAVIS_BUILD_NUMBER"] 290 | if "TRAVIS_BUILD_ID" in os.environ: 291 | del os.environ["TRAVIS_BUILD_ID"] 292 | 293 | for k, v in env.items(): 294 | os.environ[k] = v 295 | 296 | result = testdir.runpytest("-v") 297 | check_that(result, match=exp) 298 | 299 | if "TRAVIS_BUILD_NUMBER" in os.environ: 300 | del os.environ["TRAVIS_BUILD_NUMBER"] 301 | if "TRAVIS_BUILD_ID" in os.environ: 302 | del os.environ["TRAVIS_BUILD_ID"] 303 | 304 | 305 | def test_monitor_circle_ci(testdir): 306 | """Make sure that pytest-monitor correctly handle Circle CI information.""" 307 | # create a temporary pytest test module 308 | testdir.makepyfile( 309 | """ 310 | import time 311 | 312 | 313 | def test_ok(): 314 | time.sleep(0.5) 315 | x = ['a' * i for i in range(100)] 316 | assert len(x) == 100 317 | 318 | """ 319 | ) 320 | 321 | def check_that(the_result, match): 322 | # fnmatch_lines does an assertion internally 323 | the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) 324 | 325 | pymon_path = pathlib.Path(str(testdir)) / ".pymon" 326 | assert pymon_path.exists() 327 | 328 | # make sure that we get a '0' exit code for the testsuite 329 | the_result.assert_outcomes(passed=1) 330 | 331 | db = sqlite3.connect(str(pymon_path)) 332 | cursor = db.cursor() 333 | cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;") 334 | desc = cursor.fetchall() 335 | assert len(desc) == 1 # current test 336 | assert desc[0][0] == match 337 | pymon_path.unlink() 338 | 339 | run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "circleci"}' 340 | envs = {} 341 | for k in [ 342 | "CIRCLE_BUILD_NUM", 343 | "CIRCLE_JOB", 344 | "DRONE_REPO_BRANCH", 345 | "DRONE_BUILD_NUMBER", 346 | "BUILD_NUMBER", 347 | "JOB_NUMBER", 348 | "JOB_NAME", 349 | "TRAVIS_BUILD_ID", 350 | "TRAVIS_BUILD_NUMBER", 351 | "CI_PIPELINE_ID", 352 | "CI_JOB_NAME", 353 | "BITBUCKET_BRANCH", 354 | "BITBUCKET_BUILD_NUMBER", 355 | ]: 356 | if k in os.environ: 357 | envs[k] = os.environ[k] 358 | del os.environ[k] 359 | 360 | for env, exp in [ 361 | ({"CIRCLE_BUILD_NUM": "123"}, "{}"), 362 | ({"CIRCLE_BUILD_NUM": "123", "CIRCLE_JOB": "test"}, run_description), 363 | ({"CIRCLE_JOB": "test"}, "{}"), 364 | ]: 365 | if "CIRCLE_BUILD_NUM" in os.environ: 366 | del os.environ["CIRCLE_BUILD_NUM"] 367 | if "CIRCLE_JOB" in os.environ: 368 | del os.environ["CIRCLE_JOB"] 369 | 370 | for k, v in env.items(): 371 | os.environ[k] = v 372 | 373 | result = testdir.runpytest("-v") 374 | check_that(result, match=exp) 375 | 376 | if "CIRCLE_BUILD_NUM" in os.environ: 377 | del os.environ["CIRCLE_BUILD_NUM"] 378 | if "CIRCLE_JOB" in os.environ: 379 | del os.environ["CIRCLE_JOB"] 380 | 381 | 382 | def test_monitor_drone_ci(testdir): 383 | """Make sure that pytest-monitor correctly handle Drone CI information.""" 384 | # create a temporary pytest test module 385 | testdir.makepyfile( 386 | """ 387 | import time 388 | 389 | 390 | def test_ok(): 391 | time.sleep(0.5) 392 | x = ['a' * i for i in range(100)] 393 | assert len(x) == 100 394 | 395 | """ 396 | ) 397 | 398 | def check_that(the_result, match): 399 | # fnmatch_lines does an assertion internally 400 |
401 | 
402 |         pymon_path = pathlib.Path(str(testdir)) / ".pymon"
403 |         assert pymon_path.exists()
404 | 
405 |         # make sure that we get a '0' exit code for the test suite
406 |         the_result.assert_outcomes(passed=1)
407 | 
408 |         db = sqlite3.connect(str(pymon_path))
409 |         cursor = db.cursor()
410 |         cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;")
411 |         desc = cursor.fetchall()
412 |         assert len(desc) == 1  # current test
413 |         assert desc[0][0] == match
414 |         pymon_path.unlink()
415 | 
416 |     run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "droneci"}'
417 |     envs = {}
418 |     for k in [
419 |         "CIRCLE_BUILD_NUM",
420 |         "CIRCLE_JOB",
421 |         "DRONE_REPO_BRANCH",
422 |         "DRONE_BUILD_NUMBER",
423 |         "BUILD_NUMBER",
424 |         "JOB_NUMBER",
425 |         "JOB_NAME",
426 |         "TRAVIS_BUILD_ID",
427 |         "TRAVIS_BUILD_NUMBER",
428 |         "CI_PIPELINE_ID",
429 |         "CI_JOB_NAME",
430 |         "BITBUCKET_BRANCH",
431 |         "BITBUCKET_BUILD_NUMBER",
432 |     ]:
433 |         if k in os.environ:
434 |             envs[k] = os.environ[k]
435 |             del os.environ[k]
436 | 
437 |     for env, exp in [
438 |         ({"DRONE_BUILD_NUMBER": "123"}, "{}"),
439 |         ({"DRONE_BUILD_NUMBER": "123", "DRONE_REPO_BRANCH": "test"}, run_description),
440 |         ({"DRONE_REPO_BRANCH": "test"}, "{}"),
441 |     ]:
442 |         if "DRONE_REPO_BRANCH" in os.environ:
443 |             del os.environ["DRONE_REPO_BRANCH"]
444 |         if "DRONE_BUILD_NUMBER" in os.environ:
445 |             del os.environ["DRONE_BUILD_NUMBER"]
446 | 
447 |         for k, v in env.items():
448 |             os.environ[k] = v
449 | 
450 |         result = testdir.runpytest("-v")
451 |         check_that(result, match=exp)
452 | 
453 |     for k in ("DRONE_REPO_BRANCH", "DRONE_BUILD_NUMBER"):
454 |         os.environ.pop(k, None)
455 |     # restore the environment variables captured at the start of the test
456 |     os.environ.update(envs)
457 | 
458 | 
459 | def test_monitor_bitbucket_ci(testdir):
460 |     """Make sure that pytest-monitor correctly handles Bitbucket CI information."""
461 |     # create a temporary pytest test module
462 |     testdir.makepyfile(
463 |         """
464 |         import time
465 | 
466 | 
467 |         def test_ok():
468 |             time.sleep(0.5)
469 |             x = ['a' * i for i in range(100)]
470 |             assert len(x) == 100
471 | 
472 |     """
473 |     )
474 | 
475 |     def check_that(the_result, match):
476 |         # fnmatch_lines does an assertion internally
477 |         the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
478 | 
479 |         pymon_path = pathlib.Path(str(testdir)) / ".pymon"
480 |         assert pymon_path.exists()
481 | 
482 |         # make sure that we get a '0' exit code for the test suite
483 |         the_result.assert_outcomes(passed=1)
484 | 
485 |         db = sqlite3.connect(str(pymon_path))
486 |         cursor = db.cursor()
487 |         cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;")
488 |         desc = cursor.fetchall()
489 |         assert len(desc) == 1  # current test
490 |         assert desc[0][0] == match
491 |         pymon_path.unlink()
492 | 
493 |     run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "bitbucketci"}'
494 |     envs = {}
495 |     for k in [
496 |         "CIRCLE_BUILD_NUM",
497 |         "CIRCLE_JOB",
498 |         "DRONE_REPO_BRANCH",
499 |         "DRONE_BUILD_NUMBER",
500 |         "BUILD_NUMBER",
501 |         "JOB_NUMBER",
502 |         "JOB_NAME",
503 |         "TRAVIS_BUILD_ID",
504 |         "TRAVIS_BUILD_NUMBER",
505 |         "CI_PIPELINE_ID",
506 |         "CI_JOB_NAME",
507 |         "BITBUCKET_BRANCH",
508 |         "BITBUCKET_BUILD_NUMBER",
509 |     ]:
510 |         if k in os.environ:
511 |             envs[k] = os.environ[k]
512 |             del os.environ[k]
513 | 
514 |     for env, exp in [
515 |         ({"BITBUCKET_BUILD_NUMBER": "123"}, "{}"),
516 |         ({"BITBUCKET_BUILD_NUMBER": "123", "BITBUCKET_BRANCH": "test"}, run_description),
517 |         ({"BITBUCKET_BRANCH": "test"}, "{}"),
518 |     ]:
519 |         if "BITBUCKET_BRANCH" in os.environ:
520 |             del os.environ["BITBUCKET_BRANCH"]
521 |         if "BITBUCKET_BUILD_NUMBER" in os.environ:
522 |             del os.environ["BITBUCKET_BUILD_NUMBER"]
523 | 
524 |         for k, v in env.items():
525 |             os.environ[k] = v
526 | 
527 |         result = testdir.runpytest("-v")
528 |         check_that(result, match=exp)
529 | 
530 |     for k in ("BITBUCKET_BRANCH", "BITBUCKET_BUILD_NUMBER"):
531 |         os.environ.pop(k, None)
532 |     # restore the environment variables captured at the start of the test
533 |     os.environ.update(envs)
534 | 
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
 1 | # For more information about tox, see https://tox.readthedocs.io/en/latest/
 2 | [tox]
 3 | envlist = py38,py39,py310,py311,flake8
 4 | 
 5 | [testenv]
 6 | deps = pytest>=6.1
 7 | commands = pytest {posargs:tests}
 8 | 
 9 | [testenv:flake8]
10 | skip_install = true
11 | deps = flake8
12 | commands = flake8 pytest_monitor tests
13 | 
--------------------------------------------------------------------------------
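Note on the CI tests above: each of the five tests scrubs the same list of CI variables by mutating os.environ directly and stashing the previous values in envs before restoring them at the end. If an assertion fires mid-test, the restore never runs and state can leak into later tests. A minimal sketch of how this bookkeeping could be centralized in tests/conftest.py instead, using pytest's built-in monkeypatch fixture; the fixture name scrub_ci_environment and its placement are illustrative assumptions, not part of pytest-monitor:

import pytest

# Every CI variable that the tests above scrub before seeding their own values.
CI_ENV_VARS = [
    "CIRCLE_BUILD_NUM",
    "CIRCLE_JOB",
    "DRONE_REPO_BRANCH",
    "DRONE_BUILD_NUMBER",
    "BUILD_NUMBER",
    "JOB_NUMBER",
    "JOB_NAME",
    "TRAVIS_BUILD_ID",
    "TRAVIS_BUILD_NUMBER",
    "CI_PIPELINE_ID",
    "CI_JOB_NAME",
    "BITBUCKET_BRANCH",
    "BITBUCKET_BUILD_NUMBER",
]


@pytest.fixture
def scrub_ci_environment(monkeypatch):
    """Start the test from a CI-free environment, restoring it afterwards."""
    for name in CI_ENV_VARS:
        # raising=False: silently skip variables that are not set locally.
        monkeypatch.delenv(name, raising=False)
    # monkeypatch reverts every delenv/setenv at teardown, even on failure,
    # so tests no longer need to save and restore os.environ by hand.
    return monkeypatch

A test would then request the fixture and seed only the variables for the scenario under test, e.g. scrub_ci_environment.setenv("CIRCLE_BUILD_NUM", "123"), instead of editing os.environ in place between pytest runs.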