├── .gitignore ├── .travis.yml ├── CHANGES.rst ├── LICENSE.txt ├── MANIFEST.in ├── Makefile ├── README.rst ├── pytest_catchlog ├── __init__.py ├── common.py ├── fixture.py └── plugin.py ├── setup.cfg ├── setup.py ├── tasks.py ├── tests ├── conftest.py ├── perf │ ├── __init__.py │ ├── bench │ │ ├── conftest.py │ │ ├── pytest.ini │ │ ├── test_log.py │ │ └── test_runtest_hook.py │ ├── conftest.py │ ├── data.py │ ├── plot.py │ └── test_perf_run.py ├── test_compat.py ├── test_fixture.py └── test_reporting.py └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | ### Python template 2 | # Byte-compiled / optimized / Native Library Files 3 | __pycache__/ 4 | *.py[cod] 5 | *.so 6 | 7 | # Distribution / packaging 8 | .Python 9 | /env/ 10 | /build/ 11 | /develop-eggs/ 12 | /dist/ 13 | /downloads/ 14 | /eggs/ 15 | /lib/ 16 | /lib64/ 17 | /parts/ 18 | /sdist/ 19 | /var/ 20 | *.egg-info/ 21 | .installed.cfg 22 | *.egg 23 | 24 | # Unit test / coverage reports 25 | htmlcov/ 26 | .benchmarks/ 27 | .tox/ 28 | .coverage 29 | .cache 30 | nosetests.xml 31 | coverage.xml 32 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: false 2 | language: python 3 | python: 4 | - "2.6" 5 | - "2.7" 6 | - "3.3" 7 | - "3.4" 8 | - "3.5" 9 | - "pypy" 10 | - "pypy3" 11 | matrix: 12 | include: 13 | - python: "2.7" 14 | env: TOXENV=perf 15 | 16 | before_install: | 17 | pip install tox virtualenv 18 | if [ ! -v TOXENV ] 19 | then 20 | __py_ver=$TRAVIS_PYTHON_VERSION 21 | __tox_dfl=${__py_ver/[0-9].[0-9]/py${__py_ver/.}} 22 | export TOXENV=${TOXENV:-$__tox_dfl} 23 | fi 24 | function announce() 25 | { echo -e "$ANSI_GREEN$@${ANSI_RESET}"; } 26 | function tox() 27 | { announce "Running tox in TOXENV=$TOXENV"; env tox "$@"; } 28 | 29 | install: tox --notest 30 | script: tox 31 | 32 | before_cache: 33 | - rm -rf $HOME/.cache/pip/log 34 | cache: 35 | directories: 36 | - $HOME/.cache/pip 37 | 38 | deploy: 39 | provider: pypi 40 | distributions: 'sdist bdist_wheel' 41 | on: 42 | branch: 'master' 43 | tags: true 44 | password: 45 | secure: >- 46 | Yg5m1M3mhezSOdEA2bfQ/0tU4T5/kh9DjH11lvNVZA7QSY4AWp0Ri 47 | 38Ea5bk7HP24nlFAXFTrrWr5UJ2ZjdJ/P04tFIWA4yP1H+HU9IF8/ 48 | 3FSauKjT2uzd0Cy7aP3PB5k2RFNPdtmHqCJr9o5yjAgT74Pv/dM5k 49 | t8Qj3h5szdpI= 50 | user: 'pytest-catchlog-ci' 51 | -------------------------------------------------------------------------------- /CHANGES.rst: -------------------------------------------------------------------------------- 1 | Changelog 2 | ========= 3 | 4 | List of notable changes between pytest-catchlog releases. 5 | 6 | .. %UNRELEASED_SECTION% 7 | 8 | `1.2.2`_ 9 | ------------- 10 | 11 | Released on 2016-01-24 UTC. 12 | 13 | - [Bugfix] `#30`_ `#31`_ - Fix ``unicode`` vs ``str`` compatibility issues between Python2 and Python3. 14 | (Thanks goes to `@sirex`_ for reporting the issue and providing a fix!) 15 | 16 | .. _#30: https://github.com/eisensheng/pytest-catchlog/issues/30 17 | .. _#31: https://github.com/eisensheng/pytest-catchlog/issues/31 18 | .. _@sirex: https://github.com/sirex 19 | 20 | 21 | `1.2.1`_ 22 | ------------- 23 | 24 | Released on 2015-12-07. 25 | 26 | - [Bugfix] #18 - Allow ``caplog.records()`` to be modified. Thanks to Eldar Abusalimov for the PR and Marco Nenciarini for reporting the issue. 27 | - [Bugfix] #15 #17 - Restore Python 2.6 compatibility. (Thanks to Marco Nenciarini!) 28 | 29 | .. 
attention:: 30 | Deprecation warning: the following objects (i.e. functions, properties) 31 | are slated for removal in the next major release. 32 | 33 | - ``caplog.at_level`` and ``caplog.set_level`` should be used instead of 34 | ``caplog.atLevel`` and ``caplog.setLevel``. 35 | 36 | The methods ``caplog.atLevel`` and ``caplog.setLevel`` are still 37 | available but deprecated and not supported since they don't follow 38 | the PEP8 convention for method names. 39 | 40 | - ``caplog.text``, ``caplog.records`` and 41 | ``caplog.record_tuples`` were turned into properties. 42 | They still can be used as regular methods for backward compatibility, 43 | but that syntax is considered deprecated and scheduled for removal in 44 | the next major release. 45 | 46 | 47 | Version 1.2 48 | ----------- 49 | 50 | Released on 2015-11-08. 51 | 52 | - [Feature] #6 - Configure logging message and date format through ini file. 53 | - [Feature] #7 - Also catch logs from setup and teardown stages. 54 | - [Feature] #7 - Replace deprecated ``__multicall__`` use to support future Py.test releases. 55 | - [Feature] #11 - reintroduce ``setLevel`` and ``atLevel`` to retain backward compatibility with pytest-capturelog. Also the members ``text``, ``records`` and ``record_tuples`` of the ``caplog`` fixture can be used as properties now. 56 | 57 | Special thanks for this release goes to Eldar Abusalimov. He provided all of the changed features. 58 | 59 | 60 | Version 1.1 61 | ----------- 62 | 63 | Released on 2015-06-07. 64 | 65 | - #2 - Explicitly state Python3 support and add configuration for running 66 | tests with tox on multiple Python versions. (Thanks to Jeremy Bowman!) 67 | - Add an option to silence logs completely on the terminal. 68 | 69 | 70 | Version 1.0 71 | ----------- 72 | 73 | Released on 2014-12-08. 74 | 75 | - Add ``record_tuples`` for comparing recorded log entries against expected 76 | log entries with their logger name, severity and formatted message. 77 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Original work Copyright (c) 2010 Meme Dough 4 | Modified work Copyright (c) 2014 Arthur Skowronek 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 
23 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include MANIFEST.in Makefile LICENSE.txt README.rst CHANGES.rst setup.cfg
2 | graft tests
3 | 
4 | global-exclude *pyc
5 | prune __pycache__
6 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: docs
2 | .SILENT: init-devel test test-tox test-coverage
3 | 
4 | all: clean test
5 | 
6 | clean-coverage:
7 | 	-rm .coverage*
8 | 	-rm coverage.xml
9 | 	-rm -rfv htmlcov
10 | 
11 | clean-pyc:
12 | 	-find . -path './.tox' -prune -or \
13 | 	    -name '__pycache__' -exec rm -rv {} +
14 | 	-find . -path './.tox' -prune -or \
15 | 	    \( -name '*.pyc' -or -name '*.pyo' \) -exec rm -rv {} +
16 | 
17 | clean: clean-pyc clean-coverage
18 | 	-rm -rv build dist *.egg-info
19 | 
20 | test:
21 | 	py.test -v tests
22 | 
23 | test-coverage:
24 | 	coverage erase
25 | 	coverage run --source=pytest_catchlog --branch -m pytest -v
26 | 	coverage report
27 | 	coverage xml
28 | 
29 | audit:
30 | 	flake8 pytest_catchlog
31 | 
32 | wheel:
33 | 	python setup.py bdist_wheel
34 | 
35 | sdist:
36 | 	python setup.py sdist
37 | 
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | pytest-catchlog (deprecated!)
2 | =============================
3 | 
4 | .. image:: https://badges.gitter.im/Join%20Chat.svg
5 |    :alt: Join the chat at https://gitter.im/eisensheng/pytest-catchlog
6 |    :target: https://gitter.im/eisensheng/pytest-catchlog?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
7 | 
8 | py.test plugin to catch log messages. This is a fork of `pytest-capturelog`_.
9 | 
10 | **As of pytest 3.3, the functionality of this plugin has been merged to the pytest core.** See the pytest `logging documentation`_ for details.
11 | 
12 | .. _`pytest-capturelog`: https://pypi.python.org/pypi/pytest-capturelog/
13 | .. _`logging documentation`: https://docs.pytest.org/en/latest/logging.html
14 | 
15 | 
16 | Installation
17 | ------------
18 | 
19 | The `pytest-catchlog`_ package may be installed with pip or easy_install::
20 | 
21 |     pip install pytest-catchlog
22 |     easy_install pytest-catchlog
23 | 
24 | .. _`pytest-catchlog`: http://pypi.python.org/pypi/pytest-catchlog/
25 | 
26 | 
27 | Usage
28 | -----
29 | 
30 | If the plugin is installed, log messages are captured by default and,
31 | for each failed test, shown in the same manner as captured stdout and
32 | stderr.
33 | 
34 | Running without options::
35 | 
36 |     py.test
37 | 
38 | Shows failed tests like so::
39 | 
40 |     ----------------------- Captured stdlog call ----------------------
41 |     test_reporting.py 26 INFO text going to logger
42 |     ----------------------- Captured stdout call ----------------------
43 |     text going to stdout
44 |     ----------------------- Captured stderr call ----------------------
45 |     text going to stderr
46 |     ==================== 2 failed in 0.02 seconds =====================
47 | 
48 | By default each captured log message shows the module, line number,
49 | log level and message. Showing the exact module and line number is
50 | useful for testing and debugging. If desired, the log format and date
51 | format can be set to anything that the logging module supports.
52 | 
53 | Running pytest specifying formatting options::
54 | 
55 |     py.test --log-format="%(asctime)s %(levelname)s %(message)s" \
56 |             --log-date-format="%Y-%m-%d %H:%M:%S"
57 | 
58 | Shows failed tests like so::
59 | 
60 |     ----------------------- Captured stdlog call ----------------------
61 |     2010-04-10 14:48:44 INFO text going to logger
62 |     ----------------------- Captured stdout call ----------------------
63 |     text going to stdout
64 |     ----------------------- Captured stderr call ----------------------
65 |     text going to stderr
66 |     ==================== 2 failed in 0.02 seconds =====================
67 | 
68 | These options can also be customized through a configuration file::
69 | 
70 |     [pytest]
71 |     log_format = %(asctime)s %(levelname)s %(message)s
72 |     log_date_format = %Y-%m-%d %H:%M:%S
73 | 
74 | Although the same effect could be achieved through the ``addopts`` setting,
75 | the dedicated options should be preferred since they don't force other
76 | developers to have ``pytest-catchlog`` installed (while at the same time,
77 | the ``addopts`` approach would fail with an 'unrecognized arguments'
78 | error). Command line arguments take precedence.
79 | 
80 | Further, it is possible to disable reporting logs on failed tests
81 | completely with::
82 | 
83 |     py.test --no-print-logs
84 | 
85 | Or in your ``pytest.ini``::
86 | 
87 |     [pytest]
88 |     log_print=False
89 | 
90 | 
91 | Shows failed tests in the normal manner, as if no logs were captured::
92 | 
93 |     ----------------------- Captured stdout call ----------------------
94 |     text going to stdout
95 |     ----------------------- Captured stderr call ----------------------
96 |     text going to stderr
97 |     ==================== 2 failed in 0.02 seconds =====================
98 | 
99 | Inside tests it is possible to change the log level for the captured
100 | log messages. This is supported by the ``caplog`` fixture::
101 | 
102 |     def test_foo(caplog):
103 |         caplog.set_level(logging.INFO)
104 |         pass
105 | 
106 | By default the level is set on the handler used to catch the log
107 | messages; however, as a convenience, it is also possible to set the log
108 | level of any logger::
109 | 
110 |     def test_foo(caplog):
111 |         caplog.set_level(logging.CRITICAL, logger='root.baz')
112 |         pass
113 | 
114 | It is also possible to use a context manager to temporarily change the
115 | log level::
116 | 
117 |     def test_bar(caplog):
118 |         with caplog.at_level(logging.INFO):
119 |             pass
120 | 
121 | Again, by default the level of the handler is affected but the level
122 | of any logger can be changed instead with::
123 | 
124 |     def test_bar(caplog):
125 |         with caplog.at_level(logging.CRITICAL, logger='root.baz'):
126 |             pass
127 | 
128 | Lastly, all the logs sent to the logger during the test run are made
129 | available on the fixture in the form of both the LogRecord instances
130 | and the final log text. This is useful when you want to assert on
131 | the contents of a message::
132 | 
133 |     def test_baz(caplog):
134 |         func_under_test()
135 |         for record in caplog.records:
136 |             assert record.levelname != 'CRITICAL'
137 |         assert 'wally' not in caplog.text
138 | 
139 | For all the available attributes of the log records see the
140 | ``logging.LogRecord`` class.
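Since each entry in ``caplog.records`` is a plain ``logging.LogRecord``, any of
its attributes can be used in assertions. A minimal sketch (``func_under_test``,
the ``myapp`` logger name and the expected message are placeholders for your
own code)::

    def test_messages(caplog):
        func_under_test()
        messages = [r.getMessage() for r in caplog.records
                    if r.name.startswith('myapp')]
        assert 'task finished' in messages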
141 | 
142 | You can also resort to ``record_tuples`` if all you want to do is ensure
143 | that certain messages have been logged under a given logger name with a
144 | given severity and message::
145 | 
146 |     def test_foo(caplog):
147 |         logging.getLogger().info('boo %s', 'arg')
148 | 
149 |         assert caplog.record_tuples == [
150 |             ('root', logging.INFO, 'boo arg'),
151 |         ]
152 | 
153 | You can call ``caplog.clear()`` to reset the captured log records in a test::
154 | 
155 |     def test_something_with_clearing_records(caplog):
156 |         some_method_that_creates_log_records()
157 |         caplog.clear()
158 |         your_test_method()
159 |         assert ['Foo'] == [rec.message for rec in caplog.records]
160 | 
161 | Live Logs
162 | ~~~~~~~~~
163 | 
164 | By default, catchlog will output any logging records with a level equal to or
165 | higher than WARNING. In order to actually see these logs in the console you
166 | have to disable pytest output capture by passing ``-s``.
167 | 
168 | You can specify the logging level for which log records with equal or higher
169 | level are printed to the console by passing ``--log-cli-level``. This setting
170 | accepts the logging level names as seen in Python's documentation, or an
171 | integer as the logging level number.
172 | 
173 | Additionally, you can also specify ``--log-cli-format`` and ``--log-cli-date-format``
174 | which mirror and default to ``--log-format`` and ``--log-date-format`` if not
175 | provided, but are applied only to the console logging handler.
176 | 
177 | All of the CLI log options can also be set in the configuration INI file. The option
178 | names are:
179 | 
180 | * ``log_cli_level``
181 | * ``log_cli_format``
182 | * ``log_cli_date_format``
183 | 
184 | If you need to record the whole test suite's logging calls to a file, you can
185 | pass
186 | ``--log-file=/path/to/log/file``. This log file is opened in write mode which means
187 | that it will be overwritten on each test run session.
188 | 
189 | You can also specify the logging level for the log file by passing
190 | ``--log-file-level``. This setting accepts the logging level names as seen in
191 | Python's documentation (i.e. upper-cased level names), or an integer level number.
192 | 
193 | Additionally, you can also specify ``--log-file-format`` and ``--log-file-date-format``
194 | which mirror ``--log-format`` and ``--log-date-format`` but are applied only to the
195 | log file logging handler.
196 | 
197 | All of the log file options can also be set in the configuration INI file. The option
198 | names are:
199 | 
200 | * ``log_file``
201 | * ``log_file_level``
202 | * ``log_file_format``
203 | * ``log_file_date_format``
204 | 
--------------------------------------------------------------------------------
/pytest_catchlog/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | __version__ = '1.2.2'
3 | 
--------------------------------------------------------------------------------
/pytest_catchlog/common.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import absolute_import, division, print_function
3 | 
4 | import logging
5 | from contextlib import closing, contextmanager
6 | 
7 | import py
8 | 
9 | 
10 | def get_logger_obj(logger=None):
11 |     """Get a logger object that can be specified by its name, or passed as is.
12 | 
13 |     Defaults to the root logger.
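    For example (illustrative; the 'foo' names are placeholders)::

        get_logger_obj()                          # the root logger
        get_logger_obj('foo.bar')                 # looked up by name
        get_logger_obj(logging.getLogger('foo'))  # returned unchanged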
14 |     """
15 |     if logger is None or isinstance(logger, py.builtin._basestring):
16 |         logger = logging.getLogger(logger)
17 |     return logger
18 | 
19 | 
20 | @contextmanager
21 | def logging_at_level(level, logger=None):
22 |     """Context manager that sets the level for capturing of logs."""
23 |     logger = get_logger_obj(logger)
24 | 
25 |     orig_level = logger.level
26 |     logger.setLevel(level)
27 |     try:
28 |         yield
29 |     finally:
30 |         logger.setLevel(orig_level)
31 | 
32 | 
33 | @contextmanager
34 | def logging_using_handler(handler, logger=None):
35 |     """Context manager that safely registers a given handler."""
36 |     logger = get_logger_obj(logger)
37 | 
38 |     if handler in logger.handlers:  # reentrancy
39 |         # Adding the same handler twice would confuse logging system.
40 |         # Just don't do that.
41 |         yield
42 |     else:
43 |         logger.addHandler(handler)
44 |         try:
45 |             yield
46 |         finally:
47 |             logger.removeHandler(handler)
48 | 
49 | 
50 | @contextmanager
51 | def catching_logs(handler, filter=None, formatter=None,
52 |                   level=logging.NOTSET, logger=None):
53 |     """Context manager that prepares the whole logging machinery properly."""
54 |     logger = get_logger_obj(logger)
55 | 
56 |     if filter is not None:
57 |         handler.addFilter(filter)
58 |     if formatter is not None:
59 |         handler.setFormatter(formatter)
60 |     handler.setLevel(level)
61 | 
62 |     with closing(handler):
63 |         with logging_using_handler(handler, logger):
64 |             with logging_at_level(min(handler.level, logger.level), logger):
65 | 
66 |                 yield handler
67 | 
--------------------------------------------------------------------------------
/pytest_catchlog/fixture.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import absolute_import, division, print_function
3 | 
4 | import functools
5 | import logging
6 | 
7 | import pytest
8 | import py
9 | 
10 | from pytest_catchlog.common import catching_logs, logging_at_level
11 | 
12 | 
13 | class LogCaptureFixture(object):
14 |     """Provides access and control of log capturing."""
15 | 
16 |     def __init__(self, item):
17 |         """Creates a new funcarg."""
18 |         self._item = item
19 | 
20 |     @property
21 |     def handler(self):
22 |         return self._item.catch_log_handler
23 | 
24 |     @property
25 |     def text(self):
26 |         """Returns the log text."""
27 |         return self.handler.stream.getvalue()
28 | 
29 |     @property
30 |     def records(self):
31 |         """Returns the list of log records."""
32 |         return self.handler.records
33 | 
34 |     @property
35 |     def record_tuples(self):
36 |         """Returns a list of stripped-down versions of the log records,
37 |         intended for use in assertion comparison.
38 | 
39 |         The format of the tuple is:
40 | 
41 |             (logger_name, log_level, message)
42 |         """
43 |         return [(r.name, r.levelno, r.getMessage()) for r in self.records]
44 | 
45 |     def clear(self):
46 |         """Reset the list of log records."""
47 |         self.handler.records = []
48 | 
49 |     def set_level(self, level, logger=None):
50 |         """Sets the level for capturing of logs.
51 | 
52 |         By default, the level is set on the handler used to capture
53 |         logs. Specify a logger name to instead set the level of any
54 |         logger.
55 |         """
56 | 
57 |         obj = logger and logging.getLogger(logger) or self.handler
58 |         obj.setLevel(level)
59 | 
60 |     def at_level(self, level, logger=None):
61 |         """Context manager that sets the level for capturing of logs.
62 | 
63 |         By default, the level is set on the handler used to capture
64 |         logs. Specify a logger name to instead set the level of any
65 |         logger.
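        A sketch of typical usage (the logger name is illustrative)::

            with caplog.at_level(logging.DEBUG, logger='foo.bar'):
                ...  # 'foo.bar' emits records at DEBUG and above here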
66 | """ 67 | 68 | obj = logger and logging.getLogger(logger) or self.handler 69 | return logging_at_level(level, obj) 70 | 71 | 72 | class CallablePropertyMixin(object): 73 | """Backward compatibility for functions that became properties.""" 74 | 75 | @classmethod 76 | def compat_property(cls, func): 77 | if isinstance(func, property): 78 | make_property = func.getter 79 | func = func.fget 80 | else: 81 | make_property = property 82 | 83 | @functools.wraps(func) 84 | def getter(self): 85 | naked_value = func(self) 86 | ret = cls(naked_value) 87 | ret._naked_value = naked_value 88 | ret._warn_compat = self._warn_compat 89 | ret._prop_name = func.__name__ 90 | return ret 91 | 92 | return make_property(getter) 93 | 94 | def __call__(self): 95 | new = "'caplog.{0}' property".format(self._prop_name) 96 | if self._prop_name == 'records': 97 | new += ' (or caplog.clear())' 98 | self._warn_compat(old="'caplog.{0}()' syntax".format(self._prop_name), 99 | new=new) 100 | return self._naked_value # to let legacy clients modify the object 101 | 102 | 103 | class CallableList(CallablePropertyMixin, list): 104 | pass 105 | 106 | 107 | class CallableStr(CallablePropertyMixin, py.builtin.text): 108 | pass 109 | 110 | 111 | class CompatLogCaptureFixture(LogCaptureFixture): 112 | """Backward compatibility with pytest-capturelog.""" 113 | 114 | def _warn_compat(self, old, new): 115 | self._item.warn(code='L1', 116 | message=("{0} is deprecated, use {1} instead" 117 | .format(old, new))) 118 | 119 | @CallableStr.compat_property 120 | def text(self): 121 | return super(CompatLogCaptureFixture, self).text 122 | 123 | @CallableList.compat_property 124 | def records(self): 125 | return super(CompatLogCaptureFixture, self).records 126 | 127 | @CallableList.compat_property 128 | def record_tuples(self): 129 | return super(CompatLogCaptureFixture, self).record_tuples 130 | 131 | def setLevel(self, level, logger=None): 132 | self._warn_compat(old="'caplog.setLevel()'", 133 | new="'caplog.set_level()'") 134 | return self.set_level(level, logger) 135 | 136 | def atLevel(self, level, logger=None): 137 | self._warn_compat(old="'caplog.atLevel()'", 138 | new="'caplog.at_level()'") 139 | return self.at_level(level, logger) 140 | 141 | 142 | @pytest.fixture 143 | def caplog(request): 144 | """Access and control log capturing. 145 | 146 | Captured logs are available through the following methods:: 147 | 148 | * caplog.text() -> string containing formatted log output 149 | * caplog.records() -> list of logging.LogRecord instances 150 | * caplog.record_tuples() -> list of (logger_name, level, message) tuples 151 | """ 152 | return CompatLogCaptureFixture(request.node) 153 | 154 | capturelog = caplog 155 | -------------------------------------------------------------------------------- /pytest_catchlog/plugin.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import absolute_import, division, print_function 3 | 4 | import logging 5 | import sys 6 | from contextlib import closing, contextmanager 7 | 8 | import pytest 9 | import py 10 | 11 | from pytest_catchlog.common import catching_logs 12 | 13 | # Let the fixtures be discoverable by pytest. 
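# (Importing them into this module's namespace is enough: this is the module
# that the 'pytest11' entry point in setup.py registers, so pytest picks the
# fixtures up from here.)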
14 | from pytest_catchlog.fixture import caplog, capturelog
15 | 
16 | 
17 | DEFAULT_LOG_FORMAT = '%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s'
18 | DEFAULT_LOG_DATE_FORMAT = '%H:%M:%S'
19 | 
20 | 
21 | def add_option_ini(parser, option, dest, default=None, **kwargs):
22 |     parser.addini(dest, default=default,
23 |                   help='default value for ' + option)
24 |     parser.getgroup('catchlog').addoption(option, dest=dest, **kwargs)
25 | 
26 | 
27 | def get_option_ini(config, name):
28 |     ret = config.getoption(name)  # 'default' arg won't work as expected
29 |     if ret is None:
30 |         ret = config.getini(name)
31 |     return ret
32 | 
33 | 
34 | def pytest_addoption(parser):
35 |     """Add options to control log capturing."""
36 | 
37 |     group = parser.getgroup('catchlog', 'Log catching')
38 |     add_option_ini(parser,
39 |         '--no-print-logs',
40 |         dest='log_print', action='store_const', const=False, default=True,
41 |         help='disable printing caught logs on failed tests.'
42 |     )
43 |     add_option_ini(
44 |         parser,
45 |         '--log-level',
46 |         dest='log_level', default=None,
47 |         help='logging level used by the logging module'
48 |     )
49 |     add_option_ini(parser,
50 |         '--log-format',
51 |         dest='log_format', default=DEFAULT_LOG_FORMAT,
52 |         help='log format as used by the logging module.'
53 |     )
54 |     add_option_ini(parser,
55 |         '--log-date-format',
56 |         dest='log_date_format', default=DEFAULT_LOG_DATE_FORMAT,
57 |         help='log date format as used by the logging module.'
58 |     )
59 |     add_option_ini(
60 |         parser,
61 |         '--log-cli-level',
62 |         dest='log_cli_level', default=None,
63 |         help='cli logging level.'
64 |     )
65 |     add_option_ini(
66 |         parser,
67 |         '--log-cli-format',
68 |         dest='log_cli_format', default=None,
69 |         help='log format as used by the logging module.'
70 |     )
71 |     add_option_ini(
72 |         parser,
73 |         '--log-cli-date-format',
74 |         dest='log_cli_date_format', default=None,
75 |         help='log date format as used by the logging module.'
76 |     )
77 |     add_option_ini(
78 |         parser,
79 |         '--log-file',
80 |         dest='log_file', default=None,
81 |         help='path to a file where logging will be written to.'
82 |     )
83 |     add_option_ini(
84 |         parser,
85 |         '--log-file-level',
86 |         dest='log_file_level', default=None,
87 |         help='log file logging level.'
88 |     )
89 |     add_option_ini(
90 |         parser,
91 |         '--log-file-format',
92 |         dest='log_file_format', default=DEFAULT_LOG_FORMAT,
93 |         help='log format as used by the logging module.'
94 |     )
95 |     add_option_ini(
96 |         parser,
97 |         '--log-file-date-format',
98 |         dest='log_file_date_format', default=DEFAULT_LOG_DATE_FORMAT,
99 |         help='log date format as used by the logging module.'
100 |     )
101 | 
102 | 
103 | 
104 | def get_actual_log_level(config, setting_name):
105 |     """Return the actual logging level."""
106 |     log_level = get_option_ini(config, setting_name)
107 |     if not log_level:
108 |         return
109 |     if isinstance(log_level, py.builtin.text):
110 |         log_level = log_level.upper()
111 |     try:
112 |         return int(getattr(logging, log_level, log_level))
113 |     except ValueError:
114 |         # Python logging does not recognise this as a logging level
115 |         raise pytest.UsageError(
116 |             "'{0}' is not recognized as a logging level name for "
117 |             "'{1}'. Please consider passing the "
118 |             "logging level num instead.".format(
119 |                 log_level,
120 |                 setting_name))
121 | 
122 | 
123 | def pytest_configure(config):
124 |     """Always register the log catcher plugin with py.test or tests can't
125 |     find the fixture function.
126 | """ 127 | log_cli_level = get_actual_log_level(config, 'log_cli_level') 128 | if log_cli_level is None: 129 | # No specific CLI logging level was provided, let's check 130 | # log_level for a fallback 131 | log_cli_level = get_actual_log_level(config, 'log_level') 132 | if log_cli_level is None: 133 | # No log_level was provided, default to WARNING 134 | log_cli_level = logging.WARNING 135 | config._catchlog_log_cli_level = log_cli_level 136 | config._catchlog_log_file = get_option_ini(config, 'log_file') 137 | if config._catchlog_log_file: 138 | log_file_level = get_actual_log_level(config, 'log_file_level') 139 | if log_file_level is None: 140 | # No log_level was provided, default to WARNING 141 | log_file_level = logging.WARNING 142 | config._catchlog_log_file_level = log_file_level 143 | config.pluginmanager.register(CatchLogPlugin(config), '_catch_log') 144 | 145 | 146 | class CatchLogPlugin(object): 147 | """Attaches to the logging module and captures log messages for each test. 148 | """ 149 | 150 | def __init__(self, config): 151 | """Creates a new plugin to capture log messages. 152 | 153 | The formatter can be safely shared across all handlers so 154 | create a single one for the entire test session here. 155 | """ 156 | print_logs = get_option_ini(config, 'log_print') 157 | if not isinstance(print_logs, bool): 158 | if print_logs.lower() in ('true', 'yes', '1'): 159 | print_logs = True 160 | elif print_logs.lower() in ('false', 'no', '0'): 161 | print_logs = False 162 | self.print_logs = print_logs 163 | self.formatter = logging.Formatter( 164 | get_option_ini(config, 'log_format'), 165 | get_option_ini(config, 'log_date_format')) 166 | self.log_cli_handler = logging.StreamHandler(sys.stderr) 167 | log_cli_format = get_option_ini(config, 'log_cli_format') 168 | if not log_cli_format: 169 | # No CLI specific format was provided, use log_format 170 | log_cli_format = get_option_ini(config, 'log_format') 171 | log_cli_date_format = get_option_ini(config, 'log_cli_date_format') 172 | if not log_cli_date_format: 173 | # No CLI specific date format was provided, use log_date_format 174 | log_cli_date_format = get_option_ini(config, 'log_date_format') 175 | log_cli_formatter = logging.Formatter( 176 | log_cli_format, 177 | datefmt=log_cli_date_format) 178 | self.log_cli_handler.setFormatter(log_cli_formatter) 179 | if config._catchlog_log_file: 180 | log_file_format = get_option_ini(config, 'log_file_format') 181 | if not log_file_format: 182 | # No log file specific format was provided, use log_format 183 | log_file_format = get_option_ini(config, 'log_format') 184 | log_file_date_format = get_option_ini(config, 'log_file_date_format') 185 | if not log_file_date_format: 186 | # No log file specific date format was provided, use log_date_format 187 | log_file_date_format = get_option_ini(config, 'log_date_format') 188 | self.log_file_handler = logging.FileHandler( 189 | config._catchlog_log_file, 190 | # Each pytest runtests session will write to a clean logfile 191 | mode='w', 192 | ) 193 | log_file_formatter = logging.Formatter( 194 | log_file_format, 195 | datefmt=log_file_date_format) 196 | self.log_file_handler.setFormatter(log_file_formatter) 197 | else: 198 | self.log_file_handler = None 199 | 200 | @contextmanager 201 | def _runtest_for(self, item, when): 202 | """Implements the internals of pytest_runtest_xxx() hook.""" 203 | with catching_logs(LogCaptureHandler(), 204 | formatter=self.formatter) as log_handler: 205 | item.catch_log_handler = log_handler 206 | try: 207 
| yield  # run test
208 |             finally:
209 |                 del item.catch_log_handler
210 | 
211 |         if self.print_logs:
212 |             # Add a captured log section to the report.
213 |             log = log_handler.stream.getvalue().strip()
214 |             item.add_report_section(when, 'log', log)
215 | 
216 |     @pytest.mark.hookwrapper
217 |     def pytest_runtest_setup(self, item):
218 |         with self._runtest_for(item, 'setup'):
219 |             yield
220 | 
221 |     @pytest.mark.hookwrapper
222 |     def pytest_runtest_call(self, item):
223 |         with self._runtest_for(item, 'call'):
224 |             yield
225 | 
226 |     @pytest.mark.hookwrapper
227 |     def pytest_runtest_teardown(self, item):
228 |         with self._runtest_for(item, 'teardown'):
229 |             yield
230 | 
231 |     @pytest.mark.hookwrapper
232 |     def pytest_runtestloop(self, session):
233 |         """Runs all collected test items."""
234 |         with catching_logs(self.log_cli_handler,
235 |                            level=session.config._catchlog_log_cli_level):
236 |             if self.log_file_handler is not None:
237 |                 with catching_logs(self.log_file_handler,
238 |                                    level=session.config._catchlog_log_file_level):
239 |                     yield  # run all the tests
240 |             else:
241 |                 yield  # run all the tests
242 | 
243 | 
244 | class LogCaptureHandler(logging.StreamHandler):
245 |     """A logging handler that stores log records and the log text."""
246 | 
247 |     def __init__(self):
248 |         """Creates a new log handler."""
249 | 
250 |         logging.StreamHandler.__init__(self)
251 |         self.stream = py.io.TextIO()
252 |         self.records = []
253 | 
254 |     def close(self):
255 |         """Close this log handler and its underlying stream."""
256 | 
257 |         logging.StreamHandler.close(self)
258 |         self.stream.close()
259 | 
260 |     def emit(self, record):
261 |         """Keep the log records in a list in addition to the log text."""
262 | 
263 |         self.records.append(record)
264 |         logging.StreamHandler.emit(self, record)
265 | 
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [wheel]
2 | universal = 1
3 | 
4 | [sdist]
5 | formats = zip
6 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import io
3 | import os
4 | import re
5 | 
6 | from setuptools import setup, find_packages
7 | 
8 | 
9 | def _read_text_file(file_name):
10 |     file_path = os.path.join(os.path.dirname(__file__), file_name)
11 |     with io.open(file_path, encoding='utf-8') as f_stream:
12 |         return f_stream.read()
13 | 
14 | 
15 | def _get_version():
16 |     return re.search(r"__version__\s*=\s*'([^']+)'\s*",
17 |                      _read_text_file('pytest_catchlog/__init__.py')).group(1)
18 | 
19 | 
20 | setup(name='pytest-catchlog',
21 |       version=_get_version(),
22 |       description=('py.test plugin to catch log messages.'
23 | ' This is a fork of pytest-capturelog.'), 24 | long_description='\n'.join([_read_text_file('README.rst'), 25 | _read_text_file('CHANGES.rst'), ]), 26 | author='Arthur Skowronek (Fork Author)', # original author: Meme Dough 27 | author_email='eisensheng@mailbox.org', 28 | url='https://github.com/eisensheng/pytest-catchlog', 29 | packages=find_packages(exclude=['tests']), 30 | install_requires=['py>=1.1.1', 'pytest>=2.6'], 31 | entry_points={'pytest11': ['pytest_catchlog = pytest_catchlog.plugin']}, 32 | license='MIT License', 33 | zip_safe=False, 34 | keywords='py.test pytest logging', 35 | classifiers=['Development Status :: 4 - Beta', 36 | 'Intended Audience :: Developers', 37 | 'License :: OSI Approved :: MIT License', 38 | 'Operating System :: OS Independent', 39 | 'Programming Language :: Python', 40 | 'Programming Language :: Python :: 2.6', 41 | 'Programming Language :: Python :: 2.7', 42 | 'Programming Language :: Python :: 3', 43 | 'Programming Language :: Python :: 3.3', 44 | 'Programming Language :: Python :: 3.4', 45 | 'Programming Language :: Python :: 3.5', 46 | 'Programming Language :: Python :: Implementation :: CPython', 47 | 'Programming Language :: Python :: Implementation :: PyPy', 48 | 'Topic :: Software Development :: Testing']) 49 | -------------------------------------------------------------------------------- /tasks.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | import re 4 | import io 5 | from contextlib import contextmanager 6 | from datetime import datetime 7 | 8 | from invoke import task, run 9 | 10 | VERSION_FILE = 'pytest_catchlog/__init__.py' 11 | CHANGE_LOG_FILE = 'CHANGES.rst' 12 | 13 | 14 | def _path_abs_join(*nodes): 15 | return os.path.abspath(os.path.join(os.path.dirname(__file__), *nodes)) 16 | 17 | 18 | def _path_open(*nodes, **kwargs): 19 | return io.open(_path_abs_join(*nodes), **kwargs) 20 | 21 | 22 | def _shell_quote(s): 23 | """Quote given string to be suitable as input for bash as argument.""" 24 | if not s: 25 | return "''" 26 | if re.search(r'[^\w@%+=:,./-]', s) is None: 27 | return s 28 | return "'" + s.replace("'", "'\"'\"'") + "'" 29 | 30 | 31 | def _git_do(*commands, **kwargs): 32 | """Execute arbitrary git commands.""" 33 | kwargs.setdefault('hide', 'out') 34 | results = [run('git ' + command, **kwargs).stdout.strip('\n') 35 | for command in commands] 36 | return results if len(commands) > 1 else results[0] 37 | 38 | 39 | def _git_checkout(branch_name): 40 | """Switches to the given branch name.""" 41 | return _git_do('checkout ' + _shell_quote(branch_name)) 42 | 43 | 44 | @contextmanager 45 | def _git_work_on(branch_name): 46 | """Work on given branch. Preserves current git branch.""" 47 | original_branch = _git_do('rev-parse --abbrev-ref HEAD') 48 | try: 49 | if original_branch != branch_name: 50 | _git_checkout(branch_name) 51 | yield 52 | finally: 53 | if original_branch and original_branch != branch_name: 54 | _git_checkout(original_branch) 55 | 56 | 57 | def _version_find_existing(): 58 | """Returns set of existing versions in this repository. 59 | 60 | This information is backed by previously used version tags 61 | stored in the git repository. 
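    For example, the tags ``v1.2.2`` and ``1.2`` would yield
    ``{(1, 2, 2), (1, 2, 0)}`` (illustrative; missing parts default to 0).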
62 |     """
63 |     git_tags = [y.strip() for y in _git_do('tag -l').split('\n')]
64 | 
65 |     _version_re = re.compile(r'^v?(\d+)(?:\.(\d+)(?:\.(\d+))?)?$')
66 |     return {tuple(int(n) if n else 0 for n in m.groups())
67 |             for m in (_version_re.match(t) for t in git_tags if t) if m}
68 | 
69 | 
70 | def _version_find_latest():
71 |     """Returns the most recent used version number.
72 | 
73 |     This information is backed by previously used version tags
74 |     stored in the git repository.
75 |     """
76 |     return max(_version_find_existing())
77 | 
78 | 
79 | def _version_guess_next(position='minor'):
80 |     """Guess next version.
81 | 
82 |     A guess for the next version is determined by incrementing the given
83 |     position (minor by default) of the latest existing version.
84 |     """
85 |     try:
86 |         latest_version = list(_version_find_latest())
87 |     except ValueError:
88 |         latest_version = [0, 0, 0]
89 | 
90 |     position_index = {'major': 0, 'minor': 1, 'patch': 2}[position]
91 |     latest_version[position_index] += 1
92 |     latest_version[position_index + 1:] = [0] * (2 - position_index)
93 |     return tuple(latest_version)
94 | 
95 | 
96 | def _version_format(version):
97 |     """Return version in dotted string format."""
98 |     return '.'.join(str(x) for x in version)
99 | 
100 | 
101 | def _patch_file(file_path, line_callback):
102 |     """Patch given file with result from line callback.
103 | 
104 |     Each line will be passed to the line callback.
105 |     The return value of the given callback will determine
106 |     the new content for the file.
107 | 
108 |     :param str file_path:
109 |         The file to patch.
110 |     :param callable line_callback:
111 |         The patch function to run over each line.
112 |     :return:
113 |         Whether the file has changed or not.
114 |     :rtype:
115 |         bool
116 |     """
117 |     new_file_content, file_changed = [], False
118 |     with _path_open(file_path) as in_stream:
119 |         for l in (x.strip('\n') for x in in_stream):
120 |             alt_lines = line_callback(l) or [l]
121 |             if alt_lines != [l]:
122 |                 file_changed = True
123 |             new_file_content += (x + u'\n' for x in alt_lines)
124 | 
125 |     new_file_name = file_path + '.new'
126 |     with _path_open(new_file_name, mode='w') as out_stream:
127 |         out_stream.writelines(new_file_content)
128 |         out_stream.flush()
129 |         os.fsync(out_stream.fileno())
130 |     os.rename(new_file_name, file_path)
131 | 
132 |     return file_changed
133 | 
134 | 
135 | def _patch_version(new_version):
136 |     """Patch given version into version file."""
137 |     _patch_version_re = re.compile(r"""^(\s*__version__\s*=\s*(?:"|'))"""
138 |                                    r"""(?:[^'"]*)(?:("|')\s*)$""")
139 | 
140 |     def __line_callback(line):
141 |         match = _patch_version_re.match(line)
142 |         if match:
143 |             line_head, line_tail = match.groups()
144 |             return [line_head + new_version + line_tail]
145 |     return _patch_file(VERSION_FILE, __line_callback)
146 | 
147 | 
148 | def _patch_change_log(new_version):
149 |     """Patch given version into change log file."""
150 |     def __line_callback(line):
151 |         if line == u'`Unreleased`_':
152 |             return [u'`{}`_'.format(new_version)]
153 |         elif line == u'Yet to be released.':
154 |             return [datetime.utcnow().strftime(u'Released on %Y-%m-%d UTC.')]
155 |     return _patch_file(CHANGE_LOG_FILE, __line_callback)
156 | 
157 | 
158 | @task(name='changelog-add-stub')
159 | def changelog_add_stub():
160 |     """Add new version changes stub to changelog file."""
161 |     def __line_callback(line):
162 |         if line == u'.. %UNRELEASED_SECTION%':
163 |             return [u'..
%UNRELEASED_SECTION%', 164 | u'', 165 | u'`Unreleased`_', 166 | u'-------------', 167 | u'', 168 | u'Yet to be released.', 169 | u''] 170 | return _patch_file(CHANGE_LOG_FILE, __line_callback) 171 | 172 | 173 | @task() 174 | def mkrelease(position='minor'): 175 | """Merge development state into Master Branch and tags a new Release.""" 176 | next_version = _version_format(_version_guess_next(position)) 177 | with _git_work_on('develop'): 178 | patched_files = [] 179 | if _patch_version(next_version): 180 | patched_files.append(VERSION_FILE) 181 | 182 | if _patch_change_log(next_version): 183 | patched_files.append(CHANGE_LOG_FILE) 184 | 185 | if patched_files: 186 | patched_files = ' '.join(_shell_quote(x) for x in patched_files) 187 | _git_do('diff --color=always -- ' + patched_files, 188 | ("commit -m 'Bump Version to {0}' -- {1}" 189 | .format(next_version, patched_files)), 190 | hide=None) 191 | 192 | with _git_work_on('master'): 193 | message = _shell_quote('Release {0}'.format(next_version)) 194 | _git_do('merge --no-ff --no-edit -m {0} develop'.format(message), 195 | "tag -a -m {0} {1}".format(message, next_version)) 196 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import absolute_import, division, print_function 3 | 4 | import pytest 5 | 6 | 7 | pytest_plugins = 'pytester' 8 | 9 | 10 | def pytest_addoption(parser): 11 | parser.addoption('--run-perf', 12 | action='store', dest='run_perf', 13 | choices=['yes', 'no', 'only', 'check'], 14 | nargs='?', default='check', const='yes', 15 | help='Run performance tests (can be slow)', 16 | ) 17 | 18 | parser.addoption('--perf-graph', 19 | action='store', dest='perf_graph_name', 20 | nargs='?', default=None, const='graph.svg', 21 | help='Plot a graph using data found in --benchmark-storage', 22 | ) 23 | parser.addoption('--perf-expr', 24 | action='store', dest='perf_expr_primary', 25 | default='log_emit', 26 | help='Benchmark (or expression combining benchmarks) to plot', 27 | ) 28 | parser.addoption('--perf-expr-secondary', 29 | action='store', dest='perf_expr_secondary', 30 | default='caplog - stub', 31 | help=('Benchmark (or expression combining benchmarks) to plot ' 32 | 'as a secondary line'), 33 | ) 34 | -------------------------------------------------------------------------------- /tests/perf/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eisensheng/pytest-catchlog/f6b05b0afb8f8934b33e0c78a495ebfd8b3599d6/tests/perf/__init__.py -------------------------------------------------------------------------------- /tests/perf/bench/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import pytest 4 | 5 | 6 | class CatchLogStub(object): 7 | """Provides a no-op 'caplog' fixture fallback.""" 8 | 9 | @pytest.yield_fixture 10 | def caplog(self): 11 | yield 12 | 13 | 14 | @pytest.mark.trylast 15 | def pytest_configure(config): 16 | if not pytest.config.pluginmanager.hasplugin('pytest_catchlog'): 17 | config.pluginmanager.register(CatchLogStub(), 'caplog_stub') 18 | -------------------------------------------------------------------------------- /tests/perf/bench/pytest.ini: -------------------------------------------------------------------------------- 1 | # This 
file prevents upward conftests from being loaded. 2 | # 3 | # You can run performance test bench manually as follows: 4 | # 5 | # $ cd tests/perf/bench && py.test 6 | # 7 | # OR 8 | # 9 | # $ py.test tests/perf/bench --confcutdir=tests/perf/bench 10 | # 11 | [pytest] 12 | -------------------------------------------------------------------------------- /tests/perf/bench/test_log.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import logging 4 | 5 | 6 | logger = logging.getLogger('pytest_catchlog.test.perf') 7 | 8 | 9 | def test_log_emit(benchmark): 10 | benchmark(logger.info, 'Testing %s performance: %s', 11 | 'catchlog', 'emit a single log record') 12 | -------------------------------------------------------------------------------- /tests/perf/bench/test_runtest_hook.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import logging 4 | 5 | import pytest 6 | 7 | 8 | logger = logging.getLogger('pytest_catchlog.test.perf') 9 | 10 | 11 | @pytest.fixture(autouse=True) 12 | def bench_runtest(request, benchmark): 13 | # Using benchmark.weave to patch a runtest hook doesn't seem to work with 14 | # pytest 2.8.3; for some reason hook gets called more than once, before 15 | # running benchmark cleanup finalizer, resulting in the 16 | # "FixtureAlreadyUsed: Fixture can only be used once" error. 17 | # 18 | # Use plain old monkey patching instead. 19 | ihook = request.node.ihook 20 | saved_hook = ihook.pytest_runtest_call 21 | 22 | def patched_hook(*args, **kwargs): 23 | ihook.pytest_runtest_call = saved_hook # restore early 24 | return benchmark(saved_hook, *args, **kwargs) 25 | 26 | ihook.pytest_runtest_call = patched_hook 27 | benchmark.group = 'runtest' 28 | 29 | 30 | @pytest.yield_fixture # because 'caplog' is also a yield_fixture 31 | def stub(): 32 | """No-op stub used in place of 'caplog'. 33 | 34 | Helps to measure the inevitable overhead of the pytest fixture injector to 35 | let us exclude it later on. 
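    (The default --perf-expr-secondary expression, 'caplog - stub', plots
    exactly this difference.)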
36 | """ 37 | yield 38 | 39 | 40 | def test_fixture_stub(stub): 41 | logger.info('Testing %r hook performance: %s', 42 | 'catchlog', 'pure runtest hookwrapper overhead') 43 | 44 | 45 | def test_caplog_fixture(caplog): 46 | logger.info('Testing %r hook performance: %s', 47 | 'catchlog', 'hookwrapper + caplog fixture overhead') 48 | -------------------------------------------------------------------------------- /tests/perf/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import os 4 | import os.path 5 | try: 6 | from builtins import __dict__ as builtins 7 | except ImportError: # Python 2 8 | from __builtin__ import __dict__ as builtins 9 | 10 | import py 11 | import pytest 12 | 13 | from .data import gen_dict, load_benchmarks_from_files, ls_bench_storage 14 | from .plot import make_plot 15 | 16 | 17 | BENCH_DIR = py.path.local(__file__).dirpath('bench') 18 | 19 | 20 | mode_args_map = { 21 | 'default': [], 22 | 'noprint': ['--no-print-logs'], 23 | 'nocapture': ['-s'], 24 | 'off': ['-p', 'no:pytest_catchlog'], 25 | } 26 | 27 | 28 | def cleanup_garbage_files(filenames, terminalreporter, dry_run=False): 29 | if not filenames: 30 | return 31 | writeln = terminalreporter.write_line 32 | 33 | writeln('perf-test: Benchmark data files missing for some run modes', 34 | yellow=True, bold=(not dry_run)) 35 | 36 | if dry_run: 37 | caption = 'Excess files to be removed on the next run' 38 | else: 39 | caption = 'Removing excess files' 40 | 41 | writeln('perf-test: {0}:'.format(caption), yellow=True, bold=(not dry_run)) 42 | 43 | for filename in filenames: 44 | writeln('\t{0}'.format(filename), light=dry_run, bold=(not dry_run)) 45 | if not dry_run: 46 | os.remove(filename) 47 | 48 | 49 | def pytest_configure(config): 50 | if config.getoption('run_perf') in ('yes', 'only'): 51 | terminalreporter = config.pluginmanager.get_plugin("terminalreporter") 52 | bench_storage = config.getoption('--benchmark-storage') 53 | _, _, garbage = ls_bench_storage(bench_storage, 54 | modes=sorted(mode_args_map)) 55 | cleanup_garbage_files(garbage, terminalreporter) 56 | 57 | if config.getoption('perf_graph_name'): 58 | expr = config.getoption('perf_expr_primary') 59 | expr2 = config.getoption('perf_expr_secondary') 60 | if not (expr or expr2): 61 | raise pytest.UsageError('perf-graph: Nothing to plot, ' 62 | 'see --perf-expr') 63 | 64 | 65 | def path_in_dir(path, dir=py.path.local(__file__).dirpath()): 66 | return (dir.common(path) == dir) 67 | 68 | 69 | def pytest_ignore_collect(path): 70 | if path_in_dir(path, BENCH_DIR): 71 | # Tests from that directory never run through test discovery. 72 | # 73 | # Instead, they run either with the BENCH_DIR specified explicitly 74 | # both as a target and a 'confcutdir' or when the BENCH_DIR itself 75 | # is a 'rootdir' (CWD). In any case this conftest is not processed 76 | # at all, and this hook doesn't run when running tests from the 77 | # BENCH_DIR. 
78 |         return True
79 | 
80 | 
81 | def pytest_itemcollected(item):
82 |     """This is only called for local tests (under tests/perf/)."""
83 |     if path_in_dir(item.fspath):
84 |         item.add_marker('_perf_test')
85 | 
86 | 
87 | def pytest_collection_modifyitems(session, config, items):
88 |     """This is called after collection (of all tests) has been performed."""
89 |     run_perf = config.getoption('run_perf')
90 |     if run_perf in ('yes', 'check'):
91 |         return
92 |     perf_only = (run_perf == 'only')
93 | 
94 |     items[:] = [item
95 |                 for item in items
96 |                 if bool(item.get_marker('_perf_test')) == perf_only]
97 | 
98 | 
99 | def pytest_terminal_summary(terminalreporter):
100 |     """Add additional section in terminal summary reporting."""
101 |     config = terminalreporter.config
102 | 
103 |     if config.getoption('run_perf') in ('yes', 'only'):
104 |         passed = terminalreporter.stats.get('passed', [])
105 |         for rep in passed:
106 |             if '_perf_test' not in rep.keywords:
107 |                 continue
108 | 
109 |             out = '\n'.join(s for _, s in rep.get_sections('Captured stdout'))
110 |             if out:
111 |                 fspath, lineno, name = rep.location
112 |                 terminalreporter.write_sep("-", 'Report from {0}'.format(name))
113 |                 terminalreporter.write(out)
114 | 
115 |     handle_perf_graph(config, terminalreporter)
116 | 
117 | 
118 | def handle_perf_graph(config, terminalreporter):
119 |     bench_storage = config.getoption('--benchmark-storage')
120 |     (trial_names,
121 |      benchmark_files,
122 |      garbage) = ls_bench_storage(bench_storage, modes=sorted(mode_args_map))
123 |     cleanup_garbage_files(garbage, terminalreporter, dry_run=True)
124 | 
125 |     benchmarks = load_benchmarks_from_files(benchmark_files, trial_names)
126 | 
127 |     output_file = config.getoption('perf_graph_name')
128 |     if not output_file:
129 |         return
130 |     output_file = os.path.join(bench_storage, output_file)
131 | 
132 |     if not trial_names:
133 |         terminalreporter.write_line(
134 |             'perf-graph: No benchmarks found in {0}'.format(bench_storage),
135 |             yellow=True, bold=True)
136 |         return
137 | 
138 |     expr = config.getoption('perf_expr_primary')
139 |     expr2 = config.getoption('perf_expr_secondary')
140 | 
141 |     history = eval_benchmark_expr(expr, benchmarks)
142 |     history2 = eval_benchmark_expr(expr2, benchmarks)
143 | 
144 |     plot = make_plot(
145 |         trial_names=trial_names,
146 |         history=history,
147 |         history2=history2,
148 |         expr=expr,
149 |         expr2=expr2,
150 |     )
151 |     plot.render_to_file(output_file)
152 | 
153 |     terminalreporter.write_line(
154 |         'perf-graph: Saved graph into {0}'.format(output_file),
155 |         green=True, bold=True)
156 | 
157 | 
158 | @gen_dict
159 | def eval_benchmark_expr(expr, benchenvs, **kwargs):
160 |     for mode, envlist in benchenvs.items():
161 |         yield mode, [BenchmarkEnv(benchenv, **kwargs).evaluate(expr)
162 |                      for benchenv in envlist]
163 | 
164 | 
165 | class BenchmarkEnv(dict):
166 |     """Holds results of benchmark tests.
167 | 
168 |     Allows unambiguous access through a key substring: e.g. just 'foo' for
169 |     'test_foo_stuff' (but at the same time there must not be 'test_some_food').
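    A sketch of the lookup rule (keys and values are hypothetical)::

        env = BenchmarkEnv({'test_foo_stuff': 1.5})
        env['foo']               # -> 1.5, resolved via substring match
        env.evaluate('foo * 2')  # -> 3.0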
170 | """ 171 | __slots__ = () 172 | 173 | class UndefinedValue(ValueError): 174 | pass 175 | 176 | def __missing__(self, lookup): 177 | if lookup in builtins: 178 | raise KeyError # to make builtins handled as usual 179 | 180 | found = None 181 | for key in self: 182 | if lookup in key: # substring match 183 | if found is not None: 184 | raise pytest.UsageError('Ambiguous benchmark ID: ' 185 | 'multiple tests match {lookup!r} ' 186 | '(at least {found!r} and {key!r})' 187 | .format(**locals())) 188 | found = key 189 | 190 | ret = self.get(found) # treat unknown tests just as missing values 191 | if ret is None: 192 | # If an expression involves None (i.e. undefined), 193 | # the result must be also None. 194 | raise self.UndefinedValue('Missing proper benchmark value') 195 | 196 | return ret 197 | 198 | def evaluate(self, expr): 199 | """Evaluate an expression using this environment as globals.""" 200 | try: 201 | return eval(expr.strip() or 'None', {}, self) 202 | except self.UndefinedValue: 203 | return None 204 | 205 | 206 | @pytest.fixture(params=sorted(mode_args_map)) 207 | def mode(request): 208 | return request.param 209 | 210 | 211 | @pytest.fixture 212 | def mode_args(mode): 213 | return mode_args_map[mode] 214 | 215 | 216 | @pytest.fixture 217 | def storage_dir(pytestconfig, mode): 218 | storage = pytestconfig.getoption('--benchmark-storage') 219 | return os.path.join(storage, mode) 220 | 221 | 222 | @pytest.fixture 223 | def bench_dir(): 224 | return BENCH_DIR 225 | -------------------------------------------------------------------------------- /tests/perf/data.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import io 4 | import json 5 | import os.path 6 | from collections import defaultdict 7 | from functools import wraps 8 | from glob import glob 9 | from itertools import chain 10 | 11 | 12 | def gen_dict(func): 13 | @wraps(func) 14 | def decorated(*args, **kwargs): 15 | return dict(func(*args, **kwargs)) 16 | return decorated 17 | 18 | 19 | def ls_bench_storage(bench_storage, modes): 20 | # NNNN just reflects the pytest-benchmark result files naming scheme: 21 | # NNNN_commit*.json, that is, 0001_commit*.json, 0002_commit*.json, ... 
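    # A complete trial therefore has one result file per run mode, e.g.
    # (hypothetical paths):
    #   <storage>/default/0001_abcdef0.json
    #   <storage>/noprint/0001_abcdef0.json
    # Trials missing a mode, or with duplicate NNNN files, end up in the
    # garbage set below.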
22 | nnnn_files_map = defaultdict(dict) # {'NNNN': {'mode': 'filename'}} 23 | garbage_files = set() 24 | 25 | for mode in modes: 26 | for filename in glob(os.path.join(bench_storage, mode, 27 | '[0-9][0-9][0-9][0-9]_*.json')): 28 | mode_dirname, basename = os.path.split(filename) 29 | nnnn = os.path.splitext(basename)[0][:12] # NNNN_commit 30 | mode_nnnn_files = glob(os.path.join(mode_dirname, nnnn + '*.json')) 31 | if len(mode_nnnn_files) != 1: 32 | garbage_files.update(mode_nnnn_files) 33 | else: 34 | nnnn_files_map[nnnn][mode] = filename 35 | 36 | benchmark_files = defaultdict(dict) # {'mode': {'NNNN': 'filename'}} 37 | 38 | for nnnn, nnnn_files in nnnn_files_map.items(): 39 | if len(nnnn_files) != len(modes): 40 | # for gf in nnnn_files.values(): 41 | # print('>>>', gf) 42 | garbage_files.update(nnnn_files.values()) 43 | else: 44 | for mode, filename in nnnn_files.items(): 45 | benchmark_files[mode][nnnn] = filename 46 | 47 | return sorted(nnnn_files_map), dict(benchmark_files), sorted(garbage_files) 48 | 49 | 50 | @gen_dict # {'mode': {'NNNN': benchmark, ...}} 51 | def load_raw_benchmarks(benchmark_files): 52 | for mode, filemap in benchmark_files.items(): 53 | trialmap = {} 54 | 55 | for trial_name, filename in filemap.items(): 56 | with io.open(filename, 'rU') as fh: 57 | trialmap[trial_name] = json.load(fh) 58 | 59 | yield mode, trialmap 60 | 61 | 62 | @gen_dict # {'mode': [{'test': min}...]} 63 | def prepare_benchmarks(raw_benchmarks, trial_names): 64 | for mode, trialmap in raw_benchmarks.items(): 65 | envlist = [] 66 | 67 | for trial_name in trial_names: 68 | trial = trialmap.get(trial_name, {}).get('benchmarks', []) 69 | 70 | benchenv = dict((bench['fullname'], bench['stats'].get('min')) 71 | for bench in trial) 72 | envlist.append(benchenv) 73 | 74 | yield mode, envlist 75 | 76 | 77 | def load_benchmarks(bench_storage, modes): 78 | trial_names, benchmark_files, _ = ls_bench_storage(bench_storage, modes) 79 | return load_benchmarks_from_files(benchmark_files, trial_names) 80 | 81 | 82 | def load_benchmarks_from_files(benchmark_files, trial_names): 83 | raw_benchmarks = load_raw_benchmarks(benchmark_files) 84 | benchmarks = prepare_benchmarks(raw_benchmarks, trial_names) 85 | return benchmarks 86 | -------------------------------------------------------------------------------- /tests/perf/plot.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import math 4 | 5 | import pygal 6 | from pygal.style import DefaultStyle 7 | 8 | try: 9 | import pygaljs 10 | except ImportError: 11 | opts = {} 12 | else: 13 | opts = {"js": [pygaljs.uri("2.0.x", "pygal-tooltips.js")]} 14 | 15 | opts["css"] = [ 16 | "file://style.css", 17 | "file://graph.css", 18 | """inline: 19 | .axis.x text { 20 | text-anchor: middle !important; 21 | } 22 | .tooltip .value { 23 | font-size: 1em !important; 24 | } 25 | """ 26 | ] 27 | 28 | 29 | def log_ceil(x): 30 | x = float(x) 31 | exponent = math.floor(math.log10(x)) 32 | exp_mult = math.pow(10, exponent) 33 | mantissa = x / exp_mult 34 | return math.ceil(mantissa) * exp_mult 35 | 36 | 37 | def history_range(history): 38 | max_ = max(v for serie in history.values() for v in serie) 39 | if max_ > 0: 40 | return (0, log_ceil(max_)) 41 | 42 | 43 | def make_plot(trial_names, history, history2, expr, expr2): 44 | style = DefaultStyle(colors=[ 45 | '#ED6C1D', # 3 46 | '#EDC51E', # 4 47 | '#BCED1E', # 5 48 | '#63ED1F', # 6 49 | '#1FED34', # 7 50 | '#ED1D27', # 2 
--------------------------------------------------------------------------------
/tests/perf/plot.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function

import math

import pygal
from pygal.style import DefaultStyle

try:
    import pygaljs
except ImportError:
    opts = {}
else:
    opts = {"js": [pygaljs.uri("2.0.x", "pygal-tooltips.js")]}

opts["css"] = [
    "file://style.css",
    "file://graph.css",
    """inline:
    .axis.x text {
        text-anchor: middle !important;
    }
    .tooltip .value {
        font-size: 1em !important;
    }
    """
]


def log_ceil(x):
    x = float(x)
    exponent = math.floor(math.log10(x))
    exp_mult = math.pow(10, exponent)
    mantissa = x / exp_mult
    return math.ceil(mantissa) * exp_mult


def history_range(history):
    max_ = max(v for serie in history.values() for v in serie)
    if max_ > 0:
        return (0, log_ceil(max_))


def make_plot(trial_names, history, history2, expr, expr2):
    style = DefaultStyle(colors=[
        '#ED6C1D',  # 3
        '#EDC51E',  # 4
        '#BCED1E',  # 5
        '#63ED1F',  # 6
        '#1FED34',  # 7
        '#ED1D27',  # 2
    ][:len(history)] + [
        '#A71DED',  # -3
        '#4F1EED',  # -4
        '#1E45ED',  # -5
        '#1F9EED',  # -6
        '#1FEDE4',  # -7
        '#ED1DDA',  # -2
    ][:len(history2)])

    plot = pygal.Line(
        title="Speed in seconds",
        x_title="Trial",
        x_labels=trial_names,
        x_label_rotation=15,
        include_x_axis=True,
        human_readable=True,
        range=history_range(history),
        secondary_range=history_range(history2),
        style=style,
        stroke_style={'width': 2, 'dasharray': '20, 4'},
        **opts
    )

    for mode in sorted(history):
        serie = [{'value': value, 'label': expr}
                 for value in history[mode]]
        plot.add(mode, serie, stroke_style={'dasharray': 'none'})

    for mode in sorted(history2):
        serie = [{'value': value, 'label': expr2}
                 for value in history2[mode]]
        plot.add(mode, serie, secondary=True)

    return plot
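
A hedged usage sketch of make_plot(): every trial name and timing below is
invented, the import context is assumed as above, and render_to_file() is
pygal's stock SVG renderer:

    from plot import make_plot  # hypothetical import context

    trial_names = ['0001_abc1234', '0002_def5678']
    history = {'fancy': [0.012, 0.011]}      # primary axis, seconds
    history2 = {'fancy': [0.0002, 0.0001]}   # secondary axis, seconds

    plot = make_plot(trial_names, history, history2,
                     expr='min(call)', expr2='min(overhead)')
    plot.render_to_file('perf.svg')
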
--------------------------------------------------------------------------------
/tests/perf/test_perf_run.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function

import os
import os.path
import sys
import subprocess

import pytest


PYTEST_PATH = (os.path.abspath(pytest.__file__.rstrip("oc"))
               .replace("$py.class", ".py"))

# Popen.wait(timeout=...) is Py3k only.
POPEN_CLEANUP_TIMEOUT = 3 if (sys.version_info[0] >= 3) else 0


@pytest.fixture
def popen(request):
    env = os.environ.copy()
    cwd = os.getcwd()
    pythonpath = [cwd]
    if env.get('PYTHONPATH'):
        pythonpath.append(env['PYTHONPATH'])
    env['PYTHONPATH'] = os.pathsep.join(pythonpath)

    def popen_wait(*args, **kwargs):
        __tracebackhide__ = True

        args = [str(arg) for arg in args]
        kwargs['env'] = dict(env, **kwargs.get('env', {}))

        print('Running', ' '.join(args))
        popen = subprocess.Popen(args, **kwargs)
        try:
            ret = popen.wait()
        except KeyboardInterrupt as e:
            # Before exiting, give the child one last chance to clean up.
            # Otherwise the benchmark storage may be read while the final
            # report is being prepared (see ls_bench_storage(), called from
            # handle_perf_graph()) and concurrently modified by the child
            # (pytest-benchmark writing its result files).
            try:
                if POPEN_CLEANUP_TIMEOUT:
                    popen.wait(timeout=POPEN_CLEANUP_TIMEOUT)
            finally:
                raise e
        else:
            assert ret == 0

    return popen_wait


@pytest.fixture
def color(pytestconfig):
    reporter = pytestconfig.pluginmanager.getplugin('terminalreporter')
    try:
        return 'yes' if reporter.writer.hasmarkup else 'no'
    except AttributeError:
        return pytestconfig.option.color


@pytest.fixture
def verbosity(pytestconfig):
    v = pytestconfig.option.verbose or -1
    return ('v' if v > 0 else 'q') * abs(v)


@pytest.fixture
def base_args(bench_dir, verbosity, color):
    return [
        bench_dir,
        '--confcutdir={0}'.format(bench_dir),
        '-x',
        '-{0}'.format(verbosity),
        '-rw',
        '--color={0}'.format(color),
    ]


@pytest.fixture
def bench_args(pytestconfig, storage_dir):
    if pytestconfig.getoption('run_perf') != 'check':
        return [
            '--benchmark-only',
            '--benchmark-disable-gc',
            '--benchmark-autosave',
            '--benchmark-storage={0}'.format(storage_dir),
        ]
    else:
        return ['--benchmark-disable']


@pytest.fixture
def perf_args(base_args, mode_args, bench_args):
    return base_args + mode_args + bench_args


def test_perf_run(popen, perf_args):
    popen(sys.executable, PYTEST_PATH, *perf_args)
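
A hand-rolled equivalent of the command these fixtures assemble, shown as a
sketch; the bench directory and storage path are assumptions based on the
repository layout, and the mode arguments (supplied by a conftest fixture)
are elided:

    import subprocess
    import sys

    # Roughly what test_perf_run() ends up spawning in benchmark mode.
    subprocess.check_call([
        sys.executable, '-m', 'pytest', 'tests/perf/bench',
        '--confcutdir=tests/perf/bench', '-x', '-q', '-rw', '--color=no',
        '--benchmark-only', '--benchmark-disable-gc',
        '--benchmark-autosave', '--benchmark-storage=.benchmarks/perf',
    ])
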
--------------------------------------------------------------------------------
/tests/test_compat.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import pytest


def test_camel_case_aliases(testdir):
    testdir.makepyfile('''
        import logging

        logger = logging.getLogger(__name__)

        def test_foo(caplog):
            caplog.setLevel(logging.INFO)
            logger.debug('boo!')

            with caplog.atLevel(logging.WARNING):
                logger.info('catch me if you can')
        ''')
    result = testdir.runpytest()
    assert result.ret == 0

    with pytest.raises(pytest.fail.Exception):
        result.stdout.fnmatch_lines(['*- Captured *log call -*'])

    result = testdir.runpytest('-rw')
    assert result.ret == 0
    result.stdout.fnmatch_lines('''
        =*warning summary*=
        *WL1*test_camel_case_aliases*caplog.setLevel()*deprecated*
        *WL1*test_camel_case_aliases*caplog.atLevel()*deprecated*
    ''')


def test_property_call(testdir):
    testdir.makepyfile('''
        import logging

        logger = logging.getLogger(__name__)

        def test_foo(caplog):
            logger.info('boo %s', 'arg')

            assert caplog.text == caplog.text() == str(caplog.text)
            assert caplog.records == caplog.records() == list(caplog.records)
            assert (caplog.record_tuples ==
                    caplog.record_tuples() == list(caplog.record_tuples))
        ''')
    result = testdir.runpytest()
    assert result.ret == 0

    result = testdir.runpytest('-rw')
    assert result.ret == 0
    result.stdout.fnmatch_lines('''
        =*warning summary*=
        *WL1*test_property_call*caplog.text()*deprecated*
        *WL1*test_property_call*caplog.records()*deprecated*
        *WL1*test_property_call*caplog.record_tuples()*deprecated*
    ''')


def test_records_modification(testdir):
    testdir.makepyfile('''
        import logging

        logger = logging.getLogger(__name__)

        def test_foo(caplog):
            logger.info('boo %s', 'arg')
            assert caplog.records
            assert caplog.records()

            del caplog.records()[:]  # legacy syntax
            assert not caplog.records
            assert not caplog.records()

            logger.info('foo %s', 'arg')
            assert caplog.records
            assert caplog.records()
        ''')
    result = testdir.runpytest()
    assert result.ret == 0
--------------------------------------------------------------------------------
/tests/test_fixture.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import sys
import logging


logger = logging.getLogger(__name__)
sublogger = logging.getLogger(__name__ + '.baz')

u = (lambda x: x.decode('utf-8')) if sys.version_info < (3,) else (lambda x: x)


def test_fixture_help(testdir):
    result = testdir.runpytest('--fixtures')
    result.stdout.fnmatch_lines(['*caplog*'])


def test_change_level(caplog):
    caplog.set_level(logging.INFO)
    logger.debug('handler DEBUG level')
    logger.info('handler INFO level')

    caplog.set_level(logging.CRITICAL, logger=sublogger.name)
    sublogger.warning('logger WARNING level')
    sublogger.critical('logger CRITICAL level')

    assert 'DEBUG' not in caplog.text
    assert 'INFO' in caplog.text
    assert 'WARNING' not in caplog.text
    assert 'CRITICAL' in caplog.text


def test_with_statement(caplog):
    with caplog.at_level(logging.INFO):
        logger.debug('handler DEBUG level')
        logger.info('handler INFO level')

        with caplog.at_level(logging.CRITICAL, logger=sublogger.name):
            sublogger.warning('logger WARNING level')
            sublogger.critical('logger CRITICAL level')

    assert 'DEBUG' not in caplog.text
    assert 'INFO' in caplog.text
    assert 'WARNING' not in caplog.text
    assert 'CRITICAL' in caplog.text


def test_log_access(caplog):
    logger.info('boo %s', 'arg')
    assert caplog.records[0].levelname == 'INFO'
    assert caplog.records[0].msg == 'boo %s'
    assert 'boo arg' in caplog.text


def test_record_tuples(caplog):
    logger.info('boo %s', 'arg')

    assert caplog.record_tuples == [
        (__name__, logging.INFO, 'boo arg'),
    ]


def test_unicode(caplog):
    logger.info(u('bū'))
    assert caplog.records[0].levelname == 'INFO'
    assert caplog.records[0].msg == u('bū')
    assert u('bū') in caplog.text


def test_clear(caplog):
    logger.info(u('bū'))
    assert len(caplog.records)
    caplog.clear()
    assert not len(caplog.records)


def test_special_warning_with_del_records_warning(testdir):
    p1 = testdir.makepyfile("""
        def test_del_records_inline(caplog):
            del caplog.records()[:]
        """)
    result = testdir.runpytest_subprocess(p1)
    result.stdout.fnmatch_lines([
        "WL1 test_*.py:1 'caplog.records()' syntax is deprecated,"
        " use 'caplog.records' property (or caplog.clear()) instead",
        "*1 pytest-warnings*",
    ])


def test_warning_with_setLevel(testdir):
    p1 = testdir.makepyfile("""
        def test_inline(caplog):
            caplog.setLevel(0)
        """)
    result = testdir.runpytest_subprocess(p1)
    result.stdout.fnmatch_lines([
        "WL1 test_*.py:1 'caplog.setLevel()' is deprecated,"
        " use 'caplog.set_level()' instead",
        "*1 pytest-warnings*",
    ])
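
The non-deprecated spellings of the caplog API exercised above, condensed
into one self-contained test; a minimal sketch assuming the plugin is
installed:

    import logging

    def test_modern_caplog_api(caplog):
        caplog.set_level(logging.INFO)
        logging.getLogger(__name__).info('boo %s', 'arg')
        # record_tuples pairs each record's logger name, level and
        # formatted message.
        assert caplog.record_tuples == [(__name__, logging.INFO, 'boo arg')]
        caplog.clear()
        assert not caplog.records
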
--------------------------------------------------------------------------------
/tests/test_reporting.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import os
import pytest


def test_nothing_logged(testdir):
    testdir.makepyfile('''
        import sys

        def test_foo():
            sys.stdout.write('text going to stdout')
            sys.stderr.write('text going to stderr')
            assert False
        ''')
    result = testdir.runpytest()
    assert result.ret == 1
    result.stdout.fnmatch_lines(['*- Captured stdout call -*',
                                 'text going to stdout'])
    result.stdout.fnmatch_lines(['*- Captured stderr call -*',
                                 'text going to stderr'])
    with pytest.raises(pytest.fail.Exception):
        result.stdout.fnmatch_lines(['*- Captured *log call -*'])


def test_messages_logged(testdir):
    testdir.makepyfile('''
        import sys
        import logging

        logger = logging.getLogger(__name__)

        def test_foo():
            sys.stdout.write('text going to stdout')
            sys.stderr.write('text going to stderr')
            logger.info('text going to logger')
            assert False
        ''')
    result = testdir.runpytest()
    assert result.ret == 1
    result.stdout.fnmatch_lines(['*- Captured *log call -*',
                                 '*text going to logger*'])
    result.stdout.fnmatch_lines(['*- Captured stdout call -*',
                                 'text going to stdout'])
    result.stdout.fnmatch_lines(['*- Captured stderr call -*',
                                 'text going to stderr'])


def test_setup_logging(testdir):
    testdir.makepyfile('''
        import logging

        logger = logging.getLogger(__name__)

        def setup_function(function):
            logger.info('text going to logger from setup')

        def test_foo():
            logger.info('text going to logger from call')
            assert False
        ''')
    result = testdir.runpytest()
    assert result.ret == 1
    result.stdout.fnmatch_lines(['*- Captured *log setup -*',
                                 '*text going to logger from setup*',
                                 '*- Captured *log call -*',
                                 '*text going to logger from call*'])


def test_teardown_logging(testdir):
    testdir.makepyfile('''
        import logging

        logger = logging.getLogger(__name__)

        def test_foo():
            logger.info('text going to logger from call')

        def teardown_function(function):
            logger.info('text going to logger from teardown')
            assert False
        ''')
    result = testdir.runpytest()
    assert result.ret == 1
    result.stdout.fnmatch_lines(['*- Captured *log call -*',
                                 '*text going to logger from call*',
                                 '*- Captured *log teardown -*',
                                 '*text going to logger from teardown*'])


def test_disable_log_capturing(testdir):
    testdir.makepyfile('''
        import sys
        import logging

        logger = logging.getLogger(__name__)

        def test_foo():
            sys.stdout.write('text going to stdout')
            logger.warning('catch me if you can!')
            sys.stderr.write('text going to stderr')
            assert False
        ''')
    result = testdir.runpytest('--no-print-logs')
    print(result.stdout)
    assert result.ret == 1
    result.stdout.fnmatch_lines(['*- Captured stdout call -*',
                                 'text going to stdout'])
    result.stdout.fnmatch_lines(['*- Captured stderr call -*',
                                 'text going to stderr'])
    with pytest.raises(pytest.fail.Exception):
        result.stdout.fnmatch_lines(['*- Captured *log call -*'])
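

# (Sketch) The flag used above can also be passed through pytest's
# documented programmatic entry point; the ini-file equivalent,
# log_print=False, is exercised by the next test:
#
#     import pytest
#     pytest.main(['--no-print-logs'])

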
def test_disable_log_capturing_ini(testdir):
    testdir.makeini(
        '''
        [pytest]
        log_print=False
        '''
    )
    testdir.makepyfile('''
        import sys
        import logging

        logger = logging.getLogger(__name__)

        def test_foo():
            sys.stdout.write('text going to stdout')
            logger.warning('catch me if you can!')
            sys.stderr.write('text going to stderr')
            assert False
        ''')
    result = testdir.runpytest()
    print(result.stdout)
    assert result.ret == 1
    result.stdout.fnmatch_lines(['*- Captured stdout call -*',
                                 'text going to stdout'])
    result.stdout.fnmatch_lines(['*- Captured stderr call -*',
                                 'text going to stderr'])
    with pytest.raises(pytest.fail.Exception):
        result.stdout.fnmatch_lines(['*- Captured *log call -*'])


def test_log_cli_default_level(testdir):
    # Default log CLI level
    testdir.makepyfile('''
        import pytest
        import logging
        def test_log_cli(request):
            plugin = request.config.pluginmanager.getplugin('_catch_log')
            assert plugin.log_cli_handler.level == logging.WARNING
            logging.getLogger('catchlog').info("This log message won't be shown")
            logging.getLogger('catchlog').warning("This log message will be shown")
            print('PASSED')
        ''')

    result = testdir.runpytest('-s')

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines([
        'test_log_cli_default_level.py PASSED',
    ])
    result.stderr.fnmatch_lines([
        "* This log message will be shown"
    ])
    # The suppressed message must not appear on any stderr line: if the
    # assert succeeds, pytest.fail() (not an AssertionError) propagates.
    for line in result.errlines:
        try:
            assert "This log message won't be shown" in line
            pytest.fail("A log message was shown and it shouldn't have been")
        except AssertionError:
            continue

    # make sure that we get a '0' exit code for the testsuite
    assert result.ret == 0


def test_log_cli_level(testdir):
    # Log CLI level set on the command line
    testdir.makepyfile('''
        import pytest
        import logging
        def test_log_cli(request):
            plugin = request.config.pluginmanager.getplugin('_catch_log')
            assert plugin.log_cli_handler.level == logging.INFO
            logging.getLogger('catchlog').debug("This log message won't be shown")
            logging.getLogger('catchlog').info("This log message will be shown")
            print('PASSED')
        ''')

    result = testdir.runpytest('-s', '--log-cli-level=INFO')

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines([
        'test_log_cli_level.py PASSED',
    ])
    result.stderr.fnmatch_lines([
        "* This log message will be shown"
    ])
    # Same pattern as above: the suppressed message must not appear.
    for line in result.errlines:
        try:
            assert "This log message won't be shown" in line
            pytest.fail("A log message was shown and it shouldn't have been")
        except AssertionError:
            continue

    # make sure that we get a '0' exit code for the testsuite
    assert result.ret == 0

    result = testdir.runpytest('-s', '--log-level=INFO')

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines([
        'test_log_cli_level.py PASSED',
    ])
    result.stderr.fnmatch_lines([
        "* This log message will be shown"
    ])
    for line in result.errlines:
        try:
            assert "This log message won't be shown" in line
            pytest.fail("A log message was shown and it shouldn't have been")
        except AssertionError:
            continue

    # make sure that we get a '0' exit code for the testsuite
    assert result.ret == 0
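

# (Sketch) The two command lines exercised above, via pytest.main():
#
#     import pytest
#     pytest.main(['-s', '--log-cli-level=INFO'])  # CLI handler level only
#     pytest.main(['-s', '--log-level=INFO'])      # general level, applied
#                                                  # to the CLI handler too

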
def test_log_cli_ini_level(testdir):
    testdir.makeini(
        """
        [pytest]
        log_cli_level = INFO
        """)
    testdir.makepyfile('''
        import pytest
        import logging
        def test_log_cli(request):
            plugin = request.config.pluginmanager.getplugin('_catch_log')
            assert plugin.log_cli_handler.level == logging.INFO
            logging.getLogger('catchlog').debug("This log message won't be shown")
            logging.getLogger('catchlog').info("This log message will be shown")
            print('PASSED')
        ''')

    result = testdir.runpytest('-s')

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines([
        'test_log_cli_ini_level.py PASSED',
    ])
    result.stderr.fnmatch_lines([
        "* This log message will be shown"
    ])
    for line in result.errlines:
        try:
            assert "This log message won't be shown" in line
            pytest.fail("A log message was shown and it shouldn't have been")
        except AssertionError:
            continue

    # make sure that we get a '0' exit code for the testsuite
    assert result.ret == 0


def test_log_file_cli(testdir):
    # Default log file level
    testdir.makepyfile('''
        import pytest
        import logging
        def test_log_file(request):
            plugin = request.config.pluginmanager.getplugin('_catch_log')
            assert plugin.log_file_handler.level == logging.WARNING
            logging.getLogger('catchlog').info("This log message won't be shown")
            logging.getLogger('catchlog').warning("This log message will be shown")
            print('PASSED')
        ''')

    log_file = testdir.tmpdir.join('pytest.log').strpath

    result = testdir.runpytest('-s', '--log-file={0}'.format(log_file))

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines([
        'test_log_file_cli.py PASSED',
    ])

    # make sure that we get a '0' exit code for the testsuite
    assert result.ret == 0
    assert os.path.isfile(log_file)
    with open(log_file) as rfh:
        contents = rfh.read()
        assert "This log message will be shown" in contents
        assert "This log message won't be shown" not in contents


def test_log_file_cli_level(testdir):
    # Log file level set on the command line
    testdir.makepyfile('''
        import pytest
        import logging
        def test_log_file(request):
            plugin = request.config.pluginmanager.getplugin('_catch_log')
            assert plugin.log_file_handler.level == logging.INFO
            logging.getLogger('catchlog').debug("This log message won't be shown")
            logging.getLogger('catchlog').info("This log message will be shown")
            print('PASSED')
        ''')

    log_file = testdir.tmpdir.join('pytest.log').strpath

    result = testdir.runpytest('-s',
                               '--log-file={0}'.format(log_file),
                               '--log-file-level=INFO')

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines([
        'test_log_file_cli_level.py PASSED',
    ])

    # make sure that we get a '0' exit code for the testsuite
    assert result.ret == 0
    assert os.path.isfile(log_file)
    with open(log_file) as rfh:
        contents = rfh.read()
        assert "This log message will be shown" in contents
        assert "This log message won't be shown" not in contents
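

# (Sketch) The file-logging flags exercised above, via pytest.main();
# 'pytest.log' is an example path:
#
#     import pytest
#     pytest.main(['-s', '--log-file=pytest.log', '--log-file-level=INFO'])

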
def test_log_file_ini(testdir):
    log_file = testdir.tmpdir.join('pytest.log').strpath

    testdir.makeini(
        """
        [pytest]
        log_file={0}
        """.format(log_file))
    testdir.makepyfile('''
        import pytest
        import logging
        def test_log_file(request):
            plugin = request.config.pluginmanager.getplugin('_catch_log')
            assert plugin.log_file_handler.level == logging.WARNING
            logging.getLogger('catchlog').info("This log message won't be shown")
            logging.getLogger('catchlog').warning("This log message will be shown")
            print('PASSED')
        ''')

    result = testdir.runpytest('-s')

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines([
        'test_log_file_ini.py PASSED',
    ])

    # make sure that we get a '0' exit code for the testsuite
    assert result.ret == 0
    assert os.path.isfile(log_file)
    with open(log_file) as rfh:
        contents = rfh.read()
        assert "This log message will be shown" in contents
        assert "This log message won't be shown" not in contents


def test_log_file_ini_level(testdir):
    log_file = testdir.tmpdir.join('pytest.log').strpath

    testdir.makeini(
        """
        [pytest]
        log_file={0}
        log_file_level = INFO
        """.format(log_file))
    testdir.makepyfile('''
        import pytest
        import logging
        def test_log_file(request):
            plugin = request.config.pluginmanager.getplugin('_catch_log')
            assert plugin.log_file_handler.level == logging.INFO
            logging.getLogger('catchlog').debug("This log message won't be shown")
            logging.getLogger('catchlog').info("This log message will be shown")
            print('PASSED')
        ''')

    result = testdir.runpytest('-s')

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines([
        'test_log_file_ini_level.py PASSED',
    ])

    # make sure that we get a '0' exit code for the testsuite
    assert result.ret == 0
    assert os.path.isfile(log_file)
    with open(log_file) as rfh:
        contents = rfh.read()
        assert "This log message will be shown" in contents
        assert "This log message won't be shown" not in contents
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
[tox]
envlist = py{26,27,33,34,35}, pypy{,3}, perf

[testenv]
deps =
    py==1.4.30
    pytest==3.0.1
    pytest-benchmark[aspect]==3.0.0
    pygal==2.1.1
    pygaljs==1.0.1
commands =
    {envpython} -m pytest {posargs:tests}

[testenv:perf]
basepython = python2.7
commands =
    {envpython} -m pytest --run-perf=only {posargs:tests}
--------------------------------------------------------------------------------
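
For completeness, the perf environment's pytest invocation can be reproduced
from the repository root without tox; a sketch assuming the perf conftest
(which defines the --run-perf option) is picked up from the tests tree:

    import pytest

    # Same command the [testenv:perf] section runs via {envpython}.
    pytest.main(['--run-perf=only', 'tests'])
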