├── docs ├── changelog.rst ├── sheet │ ├── static │ │ ├── background.png │ │ └── sheet.css_t │ ├── theme.conf │ └── layout.html ├── api │ ├── index.rst │ ├── utilities.rst │ ├── internal.rst │ ├── compat.rst │ ├── ticketing.rst │ ├── base.rst │ ├── notifiers.rst │ ├── more.rst │ ├── queues.rst │ └── handlers.rst ├── index.rst ├── compat.rst ├── unittesting.rst ├── ticketing.rst ├── libraries.rst ├── performance.rst ├── make.bat ├── stacks.rst ├── designexplained.rst ├── Makefile ├── features.rst ├── conf.py ├── setups.rst ├── quickstart.rst └── designdefense.rst ├── .hgignore ├── MANIFEST.in ├── setup.cfg ├── benchmark ├── bench_logger_creation.py ├── bench_disabled_logger.py ├── bench_logging_logger_creation.py ├── bench_test_handler.py ├── bench_file_handler.py ├── bench_logger_level_low.py ├── bench_file_handler_unicode.py ├── bench_stream_handler.py ├── bench_logging_file_handler.py ├── bench_logging_stream_handler.py ├── bench_logging_file_handler_unicode.py ├── bench_noop_filter.py ├── bench_disabled_introspection.py ├── bench_enabled_introspection.py ├── bench_logging_noop.py ├── bench_logging_logger_level_low.py ├── bench_noop.py ├── bench_redirect_to_logging.py ├── bench_redirect_from_logging.py ├── bench_logging_noop_filter.py ├── bench_stack_manipulation.py ├── bench_noop_filter_on_handler.py └── run.py ├── .gitignore ├── testwin32log.py ├── README ├── AUTHORS ├── twitter-secrets.txt ├── Makefile ├── tox.ini ├── logbook ├── _termcolors.py ├── __init__.py ├── testsuite │ └── __init__.py ├── _fallback.py ├── _speedups.pyx ├── _stringfmt.py ├── helpers.py ├── compat.py ├── notifiers.py ├── more.py ├── queues.py └── ticketing.py ├── LICENSE ├── CHANGES └── setup.py /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. 
include:: ../CHANGES 2 | -------------------------------------------------------------------------------- /.hgignore: -------------------------------------------------------------------------------- 1 | \.pyc$ 2 | \.egg-info$ 3 | docs/_build 4 | \.ropeproject 5 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include MANIFEST.in Makefile CHANGES logbook/_speedups.c logbook/_speedups.pyx 2 | -------------------------------------------------------------------------------- /docs/sheet/static/background.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zacharyvoase/logbook/master/docs/sheet/static/background.png -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [build_sphinx] 2 | source-dir = docs/ 3 | build-dir = docs/_build 4 | all_files = 1 5 | 6 | [upload_docs] 7 | upload-dir = docs/_build/html 8 | -------------------------------------------------------------------------------- /benchmark/bench_logger_creation.py: -------------------------------------------------------------------------------- 1 | """Test with no handler active""" 2 | from logbook import Logger 3 | 4 | 5 | def run(): 6 | for x in xrange(500): 7 | Logger('Test') 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .ropeproject 2 | .tox 3 | docs/_build 4 | logbook/_speedups.c 5 | logbook/_speedups.so 6 | Logbook.egg-info 7 | dist 8 | *.pyc 9 | env 10 | env* 11 | .coverage 12 | cover 13 | build 14 | -------------------------------------------------------------------------------- /testwin32log.py: -------------------------------------------------------------------------------- 1 | from logbook import NTEventLogHandler, Logger 2 | 3 | logger = Logger('MyLogger') 4 | handler = NTEventLogHandler('My Application') 5 | 6 | with handler.applicationbound(): 7 | logger.error('Testing') 8 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | Logbook is a nice logging replacement, but still under development. 
2 | 3 | It should be easy to setup, use and configure and support web applications :) 4 | 5 | For more information look at http://logbook.pocoo.org/ 6 | -------------------------------------------------------------------------------- /benchmark/bench_disabled_logger.py: -------------------------------------------------------------------------------- 1 | """Tests with the whole logger disabled""" 2 | from logbook import Logger 3 | 4 | 5 | log = Logger('Test logger') 6 | log.disabled = True 7 | 8 | 9 | def run(): 10 | for x in xrange(500): 11 | log.warning('this is not handled') 12 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Logbook is written and maintained by the Logbook Team and various 2 | contributors: 3 | 4 | Lead Developers: 5 | 6 | - Armin Ronacher 7 | - Georg Brandl 8 | 9 | Contributors: 10 | 11 | - Ronny Pfannschmidt 12 | - Daniel Neuhäuser 13 | - Kenneth Reitz -------------------------------------------------------------------------------- /benchmark/bench_logging_logger_creation.py: -------------------------------------------------------------------------------- 1 | """Test with no handler active""" 2 | from logging import getLogger 3 | 4 | 5 | root_logger = getLogger() 6 | 7 | 8 | def run(): 9 | for x in xrange(500): 10 | getLogger('Test') 11 | del root_logger.manager.loggerDict['Test'] 12 | -------------------------------------------------------------------------------- /benchmark/bench_test_handler.py: -------------------------------------------------------------------------------- 1 | """Tests the test handler""" 2 | from logbook import Logger, TestHandler 3 | 4 | 5 | log = Logger('Test logger') 6 | 7 | 8 | def run(): 9 | with TestHandler() as handler: 10 | for x in xrange(500): 11 | log.warning('this is not handled') 12 | -------------------------------------------------------------------------------- /twitter-secrets.txt: -------------------------------------------------------------------------------- 1 | Leaked Twitter Secrets 2 | 3 | Twitter for Android 4 | xauth: yes 5 | key: 3nVuSoBZnx6U4vzUxf5w 6 | secret: Bcs59EFbbsdF6Sl9Ng71smgStWEGwXXKSjYvPVt7qys 7 | 8 | Echofon: 9 | xauth: yes 10 | key: yqoymTNrS9ZDGsBnlFhIuw 11 | secret: OMai1whT3sT3XMskI7DZ7xiju5i5rAYJnxSEHaKYvEs 12 | -------------------------------------------------------------------------------- /docs/api/index.rst: -------------------------------------------------------------------------------- 1 | API Documentation 2 | ================= 3 | 4 | This part of the documentation documents all the classes and functions 5 | provided by Logbook. 6 | 7 | .. 
toctree:: 8 | 9 | base 10 | handlers 11 | utilities 12 | queues 13 | ticketing 14 | more 15 | notifiers 16 | compat 17 | internal 18 | -------------------------------------------------------------------------------- /docs/sheet/theme.conf: -------------------------------------------------------------------------------- 1 | [theme] 2 | inherit = basic 3 | stylesheet = sheet.css 4 | pygments_style = tango 5 | 6 | [options] 7 | bodyfont = 'Cantarell', 'Lucida Grande', sans-serif 8 | seriffont = 'OFL Sorts Mill Goudy TT', 'Georgia', 'Bitstream Vera Serif', serif 9 | monofont = 'Consolas', 'Inconsolata', 'Bitstream Vera Sans Mono', monospace 10 | -------------------------------------------------------------------------------- /benchmark/bench_file_handler.py: -------------------------------------------------------------------------------- 1 | """Benchmarks the file handler""" 2 | from logbook import Logger, FileHandler 3 | from tempfile import NamedTemporaryFile 4 | 5 | 6 | log = Logger('Test logger') 7 | 8 | 9 | def run(): 10 | f = NamedTemporaryFile() 11 | with FileHandler(f.name) as handler: 12 | for x in xrange(500): 13 | log.warning('this is handled') 14 | -------------------------------------------------------------------------------- /benchmark/bench_logger_level_low.py: -------------------------------------------------------------------------------- 1 | """Benchmarks too low logger levels""" 2 | from logbook import Logger, StreamHandler, ERROR 3 | from cStringIO import StringIO 4 | 5 | 6 | log = Logger('Test logger') 7 | log.level = ERROR 8 | 9 | 10 | def run(): 11 | out = StringIO() 12 | with StreamHandler(out): 13 | for x in xrange(500): 14 | log.warning('this is not handled') 15 | -------------------------------------------------------------------------------- /benchmark/bench_file_handler_unicode.py: -------------------------------------------------------------------------------- 1 | """Benchmarks the file handler with unicode""" 2 | from logbook import Logger, FileHandler 3 | from tempfile import NamedTemporaryFile 4 | 5 | 6 | log = Logger('Test logger') 7 | 8 | 9 | def run(): 10 | f = NamedTemporaryFile() 11 | with FileHandler(f.name) as handler: 12 | for x in xrange(500): 13 | log.warning(u'this is handled \x6f') 14 | -------------------------------------------------------------------------------- /benchmark/bench_stream_handler.py: -------------------------------------------------------------------------------- 1 | """Tests the stream handler""" 2 | from logbook import Logger, StreamHandler 3 | from cStringIO import StringIO 4 | 5 | 6 | log = Logger('Test logger') 7 | 8 | 9 | def run(): 10 | out = StringIO() 11 | with StreamHandler(out) as handler: 12 | for x in xrange(500): 13 | log.warning('this is not handled') 14 | assert out.getvalue().count('\n') == 500 15 | -------------------------------------------------------------------------------- /benchmark/bench_logging_file_handler.py: -------------------------------------------------------------------------------- 1 | """Tests logging file handler in comparison""" 2 | from logging import getLogger, FileHandler 3 | from tempfile import NamedTemporaryFile 4 | 5 | 6 | log = getLogger('Testlogger') 7 | 8 | 9 | def run(): 10 | f = NamedTemporaryFile() 11 | handler = FileHandler(f.name) 12 | log.addHandler(handler) 13 | for x in xrange(500): 14 | log.warning('this is handled') 15 | -------------------------------------------------------------------------------- /benchmark/bench_logging_stream_handler.py: 
-------------------------------------------------------------------------------- 1 | """Tests the stream handler in logging""" 2 | from logging import Logger, StreamHandler 3 | from cStringIO import StringIO 4 | 5 | 6 | log = Logger('Test logger') 7 | 8 | 9 | def run(): 10 | out = StringIO() 11 | log.addHandler(StreamHandler(out)) 12 | for x in xrange(500): 13 | log.warning('this is not handled') 14 | assert out.getvalue().count('\n') == 500 15 | -------------------------------------------------------------------------------- /benchmark/bench_logging_file_handler_unicode.py: -------------------------------------------------------------------------------- 1 | """Tests logging file handler in comparison""" 2 | from logging import getLogger, FileHandler 3 | from tempfile import NamedTemporaryFile 4 | 5 | 6 | log = getLogger('Testlogger') 7 | 8 | 9 | def run(): 10 | f = NamedTemporaryFile() 11 | handler = FileHandler(f.name) 12 | log.addHandler(handler) 13 | for x in xrange(500): 14 | log.warning(u'this is handled \x6f') 15 | -------------------------------------------------------------------------------- /benchmark/bench_noop_filter.py: -------------------------------------------------------------------------------- 1 | from logbook import Logger, StreamHandler, NullHandler 2 | from cStringIO import StringIO 3 | 4 | 5 | log = Logger('Test logger') 6 | 7 | 8 | def run(): 9 | out = StringIO() 10 | with NullHandler(): 11 | with StreamHandler(out, filter=lambda r, h: False) as handler: 12 | for x in xrange(500): 13 | log.warning('this is not handled') 14 | assert not out.getvalue() 15 | -------------------------------------------------------------------------------- /benchmark/bench_disabled_introspection.py: -------------------------------------------------------------------------------- 1 | """Tests with frame introspection disabled""" 2 | from logbook import Logger, NullHandler, Flags 3 | 4 | 5 | log = Logger('Test logger') 6 | 7 | 8 | class DummyHandler(NullHandler): 9 | blackhole = False 10 | 11 | 12 | def run(): 13 | with Flags(introspection=False): 14 | with DummyHandler() as handler: 15 | for x in xrange(500): 16 | log.warning('this is not handled') 17 | -------------------------------------------------------------------------------- /benchmark/bench_enabled_introspection.py: -------------------------------------------------------------------------------- 1 | """Tests with stack frame introspection enabled""" 2 | from logbook import Logger, NullHandler, Flags 3 | 4 | 5 | log = Logger('Test logger') 6 | 7 | 8 | class DummyHandler(NullHandler): 9 | blackhole = False 10 | 11 | 12 | def run(): 13 | with Flags(introspection=True): 14 | with DummyHandler() as handler: 15 | for x in xrange(500): 16 | log.warning('this is not handled') 17 | -------------------------------------------------------------------------------- /benchmark/bench_logging_noop.py: -------------------------------------------------------------------------------- 1 | """Tests with a logging handler becoming a noop for comparison""" 2 | from logging import getLogger, StreamHandler, ERROR 3 | from cStringIO import StringIO 4 | 5 | 6 | log = getLogger('Testlogger') 7 | 8 | 9 | def run(): 10 | out = StringIO() 11 | handler = StreamHandler(out) 12 | handler.setLevel(ERROR) 13 | log.addHandler(handler) 14 | for x in xrange(500): 15 | log.warning('this is not handled') 16 | -------------------------------------------------------------------------------- /benchmark/bench_logging_logger_level_low.py: 
-------------------------------------------------------------------------------- 1 | """Tests with a logging handler becoming a noop for comparison""" 2 | from logging import getLogger, StreamHandler, ERROR 3 | from cStringIO import StringIO 4 | 5 | 6 | log = getLogger('Testlogger') 7 | log.setLevel(ERROR) 8 | 9 | 10 | def run(): 11 | out = StringIO() 12 | handler = StreamHandler(out) 13 | log.addHandler(handler) 14 | for x in xrange(500): 15 | log.warning('this is not handled') 16 | -------------------------------------------------------------------------------- /benchmark/bench_noop.py: -------------------------------------------------------------------------------- 1 | """Test with no handler active""" 2 | from logbook import Logger, StreamHandler, NullHandler, ERROR 3 | from cStringIO import StringIO 4 | 5 | 6 | log = Logger('Test logger') 7 | 8 | 9 | def run(): 10 | out = StringIO() 11 | with NullHandler(): 12 | with StreamHandler(out, level=ERROR) as handler: 13 | for x in xrange(500): 14 | log.warning('this is not handled') 15 | assert not out.getvalue() 16 | -------------------------------------------------------------------------------- /docs/api/utilities.rst: -------------------------------------------------------------------------------- 1 | Utilities 2 | ========= 3 | 4 | This documents general purpose utility functions available in Logbook. 5 | 6 | .. module:: logbook 7 | 8 | .. autofunction:: debug 9 | 10 | .. autofunction:: info 11 | 12 | .. autofunction:: warn 13 | 14 | .. autofunction:: warning 15 | 16 | .. autofunction:: notice 17 | 18 | .. autofunction:: error 19 | 20 | .. autofunction:: exception 21 | 22 | .. autofunction:: catch_exceptions 23 | 24 | .. autofunction:: critical 25 | 26 | .. autofunction:: log 27 | -------------------------------------------------------------------------------- /benchmark/bench_redirect_to_logging.py: -------------------------------------------------------------------------------- 1 | """Tests redirects from logging to logbook""" 2 | from logging import getLogger, StreamHandler 3 | from logbook.compat import LoggingHandler 4 | from cStringIO import StringIO 5 | 6 | 7 | log = getLogger('Test logger') 8 | 9 | 10 | def run(): 11 | out = StringIO() 12 | log.addHandler(StreamHandler(out)) 13 | with LoggingHandler(): 14 | for x in xrange(500): 15 | log.warning('this is not handled') 16 | assert out.getvalue().count('\n') == 500 17 | -------------------------------------------------------------------------------- /benchmark/bench_redirect_from_logging.py: -------------------------------------------------------------------------------- 1 | """Tests redirects from logging to logbook""" 2 | from logging import getLogger 3 | from logbook import StreamHandler 4 | from logbook.compat import redirect_logging 5 | from cStringIO import StringIO 6 | 7 | 8 | redirect_logging() 9 | log = getLogger('Test logger') 10 | 11 | 12 | def run(): 13 | out = StringIO() 14 | with StreamHandler(out): 15 | for x in xrange(500): 16 | log.warning('this is not handled') 17 | assert out.getvalue().count('\n') == 500 18 | -------------------------------------------------------------------------------- /benchmark/bench_logging_noop_filter.py: -------------------------------------------------------------------------------- 1 | """Tests with a filter disabling a handler for comparsion in logging""" 2 | from logging import getLogger, StreamHandler, Filter 3 | from cStringIO import StringIO 4 | 5 | 6 | log = getLogger('Testlogger') 7 | 8 | 9 | class DisableFilter(Filter): 10 | 
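    # Returning False from filter() drops every record, so this measures logging's pure filter overhead (the handler never emits).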
def filter(self, record): 11 | return False 12 | 13 | 14 | def run(): 15 | out = StringIO() 16 | handler = StreamHandler(out) 17 | handler.addFilter(DisableFilter()) 18 | log.addHandler(handler) 19 | for x in xrange(500): 20 | log.warning('this is not handled') 21 | -------------------------------------------------------------------------------- /benchmark/bench_stack_manipulation.py: -------------------------------------------------------------------------------- 1 | """Tests basic stack manipulation performance""" 2 | from logbook import Handler, NullHandler, StreamHandler, FileHandler, \ 3 | ERROR, WARNING 4 | from tempfile import NamedTemporaryFile 5 | from cStringIO import StringIO 6 | 7 | 8 | def run(): 9 | f = NamedTemporaryFile() 10 | out = StringIO() 11 | with NullHandler(): 12 | with StreamHandler(out, level=WARNING): 13 | with FileHandler(f.name, level=ERROR): 14 | for x in xrange(100): 15 | list(Handler.stack_manager.iter_context_objects()) 16 | -------------------------------------------------------------------------------- /docs/api/internal.rst: -------------------------------------------------------------------------------- 1 | Internal API 2 | ============ 3 | 4 | This documents the internal API that might be useful for more advanced 5 | setups or custom handlers. 6 | 7 | .. module:: logbook.base 8 | 9 | .. autofunction:: dispatch_record 10 | 11 | .. autoclass:: StackedObject 12 | :members: 13 | 14 | .. autoclass:: RecordDispatcher 15 | :members: 16 | 17 | .. autoclass:: LoggerMixin 18 | :members: 19 | :inherited-members: 20 | 21 | .. module:: logbook.handlers 22 | 23 | .. autoclass:: RotatingFileHandlerBase 24 | :members: 25 | 26 | .. autoclass:: StringFormatterHandlerMixin 27 | :members: 28 | -------------------------------------------------------------------------------- /benchmark/bench_noop_filter_on_handler.py: -------------------------------------------------------------------------------- 1 | """Like the filter test, but with the should_handle implemented""" 2 | from logbook import Logger, StreamHandler, NullHandler 3 | from cStringIO import StringIO 4 | 5 | 6 | log = Logger('Test logger') 7 | 8 | 9 | class CustomStreamHandler(StreamHandler): 10 | def should_handle(self, record): 11 | return False 12 | 13 | 14 | def run(): 15 | out = StringIO() 16 | with NullHandler(): 17 | with CustomStreamHandler(out) as handler: 18 | for x in xrange(500): 19 | log.warning('this is not handled') 20 | assert not out.getvalue() 21 | -------------------------------------------------------------------------------- /docs/api/compat.rst: -------------------------------------------------------------------------------- 1 | Compatibility 2 | ============= 3 | 4 | This documents compatibility support with existing systems such as 5 | :mod:`logging` and :mod:`warnings`. 6 | 7 | .. module:: logbook.compat 8 | 9 | Logging Compatibility 10 | --------------------- 11 | 12 | .. autofunction:: redirect_logging 13 | 14 | .. autofunction:: redirected_logging 15 | 16 | .. autoclass:: RedirectLoggingHandler 17 | :members: 18 | 19 | .. autoclass:: LoggingHandler 20 | :members: 21 | 22 | 23 | Warnings Compatibility 24 | ---------------------- 25 | 26 | .. autofunction:: redirect_warnings 27 | 28 | .. 
autofunction:: redirected_warnings 29 | -------------------------------------------------------------------------------- /docs/api/ticketing.rst: -------------------------------------------------------------------------------- 1 | Ticketing Support 2 | ================= 3 | 4 | This documents the support classes for ticketing. With ticketing handlers 5 | log records are categorized by location and for every emitted log record a 6 | count is added. That way you know how often certain messages are 7 | triggered, at what times and when the last occurrence was. 8 | 9 | .. module:: logbook.ticketing 10 | 11 | .. autoclass:: TicketingBaseHandler 12 | :members: 13 | 14 | .. autoclass:: TicketingHandler 15 | :members: 16 | 17 | .. autoclass:: BackendBase 18 | :members: 19 | 20 | .. autoclass:: SQLAlchemyBackend 21 | 22 | .. autoclass:: MongoDBBackend 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: clean-pyc test 2 | 3 | clean-pyc: 4 | find . -name '*.pyc' -exec rm -f {} + 5 | find . -name '*.pyo' -exec rm -f {} + 6 | find . -name '*~' -exec rm -f {} + 7 | 8 | test: 9 | @python -c "import unittest as x; x.main('logbook.testsuite', 'suite')" 10 | 11 | toxtest: 12 | @tox 13 | 14 | bench: 15 | @python benchmark/run.py 16 | 17 | upload-docs: 18 | make -C docs html SPHINXOPTS=-Aonline=1 19 | python setup.py upload_docs 20 | 21 | logbook/_speedups.so: logbook/_speedups.pyx 22 | cython logbook/_speedups.pyx 23 | python setup.py build 24 | cp build/*/logbook/_speedups.so logbook 25 | 26 | cybuild: logbook/_speedups.so 27 | 28 | .PHONY: test upload-docs clean-pyc cybuild bench all 29 | -------------------------------------------------------------------------------- /docs/api/base.rst: -------------------------------------------------------------------------------- 1 | Core Interface 2 | ============== 3 | 4 | This implements the core interface. 5 | 6 | .. module:: logbook 7 | 8 | .. autoclass:: Logger 9 | :members: 10 | :inherited-members: 11 | 12 | .. autoclass:: LoggerGroup 13 | :members: 14 | 15 | .. autoclass:: LogRecord 16 | :members: 17 | 18 | .. autoclass:: Flags 19 | :members: 20 | :inherited-members: 21 | 22 | .. autoclass:: Processor 23 | :members: 24 | :inherited-members: 25 | 26 | .. autofunction:: get_level_name 27 | 28 | .. autofunction:: lookup_level 29 | 30 | .. data:: CRITICAL 31 | ERROR 32 | WARNING 33 | INFO 34 | DEBUG 35 | NOTSET 36 | 37 | The log level constants 38 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist=py24,py25,py26,py27,py31,docs 3 | 4 | [testenv] 5 | deps= 6 | SQLAlchemy 7 | pyzmq 8 | execnet 9 | Jinja2 10 | commands=python -c __import__('unittest').main('logbook.testsuite','suite') 11 | changedir={toxworkdir} 12 | 13 | [testenv:py24] 14 | deps= 15 | SQLAlchemy 16 | pysqlite 17 | simplejson 18 | multiprocessing 19 | pyzmq 20 | execnet 21 | Jinja2 22 | 23 | [testenv:py25] 24 | deps= 25 | SQLAlchemy 26 | simplejson 27 | multiprocessing 28 | pyzmq 29 | execnet 30 | Jinja2 31 | 32 | [testenv:py31] 33 | deps= 34 | SQLAlchemy 35 | execnet 36 | Jinja2 37 | 38 | [testenv:docs] 39 | deps= 40 | Sphinx 41 | changedir=docs 42 | commands= 43 | sphinx-build -W -b html . _build/html 44 | sphinx-build -W -b linkcheck . 
_build/linkcheck 45 | -------------------------------------------------------------------------------- /docs/api/notifiers.rst: -------------------------------------------------------------------------------- 1 | .. _notifiers: 2 | 3 | The Notifiers Module 4 | ==================== 5 | 6 | The notifiers module implements special handlers for various platforms 7 | that depend on external libraries. 10 | 11 | .. module:: logbook.notifiers 12 | 13 | .. autofunction:: create_notification_handler 14 | 15 | OSX Specific Handlers 16 | --------------------- 17 | 18 | .. autoclass:: GrowlHandler 19 | :members: 20 | 21 | Linux Specific Handlers 22 | ----------------------- 23 | 24 | .. autoclass:: LibNotifyHandler 25 | :members: 26 | 27 | Other Services 28 | -------------- 29 | 30 | .. autoclass:: BoxcarHandler 31 | :members: 32 | 33 | .. autoclass:: NotifoHandler 34 | :members: 35 | 36 | Base Interface 37 | -------------- 38 | 39 | .. autoclass:: NotificationBaseHandler 40 | :members: 41 | -------------------------------------------------------------------------------- /docs/sheet/layout.html: -------------------------------------------------------------------------------- 1 | {% extends "basic/layout.html" %} 2 | 3 | {% block extrahead %} 4 | {% if online %} 5 | 7 | {% endif %} 8 | {% endblock %} 9 | 10 | {% block header %} 11 |
12 | 18 | {% endblock %} 19 | 20 | {% block footer %} 21 | {% if online %} 22 | 23 | Fork me on GitHub 26 | 27 | {% endif %} 28 | {{ super() }} 29 |
30 | {% endblock %} 31 | -------------------------------------------------------------------------------- /docs/api/more.rst: -------------------------------------------------------------------------------- 1 | The More Module 2 | =============== 3 | 4 | The more module implements special handlers and other things that are 5 | beyond the scope of Logbook itself or depend on external libraries. 6 | Additionally there are some handlers in :mod:`logbook.ticketing`, 7 | :mod:`logbook.queues` and :mod:`logbook.notifiers`. 8 | 9 | .. module:: logbook.more 10 | 11 | Tagged Logging 12 | -------------- 13 | 14 | .. autoclass:: TaggingLogger 15 | :members: 16 | :inherited-members: 17 | 18 | .. autoclass:: TaggingHandler 19 | :members: 20 | 21 | Special Handlers 22 | ---------------- 23 | 24 | .. autoclass:: TwitterHandler 25 | :members: 26 | 27 | .. autoclass:: ExternalApplicationHandler 28 | :members: 29 | 30 | .. autoclass:: ExceptionHandler 31 | :members: 32 | 33 | Colorized Handlers 34 | ------------------ 35 | 36 | .. versionadded:: 0.3 37 | 38 | .. autoclass:: ColorizedStderrHandler 39 | 40 | .. autoclass:: ColorizingStreamHandlerMixin 41 | :members: 42 | 43 | Other 44 | ----- 45 | 46 | .. autoclass:: JinjaFormatter 47 | :members: 48 | -------------------------------------------------------------------------------- /docs/api/queues.rst: -------------------------------------------------------------------------------- 1 | Queue Support 2 | ============= 3 | 4 | The queue support module makes it possible to add log records to a queue 5 | system. This is useful for distributed setups where you want multiple 6 | processes to log to the same backend. Currently supported are ZeroMQ as 7 | well as the :mod:`multiprocessing` :class:`~multiprocessing.Queue` class. 8 | 9 | .. module:: logbook.queues 10 | 11 | ZeroMQ 12 | ------ 13 | 14 | .. autoclass:: ZeroMQHandler 15 | :members: 16 | 17 | .. autoclass:: ZeroMQSubscriber 18 | :members: 19 | :inherited-members: 20 | 21 | MultiProcessing 22 | --------------- 23 | 24 | .. autoclass:: MultiProcessingHandler 25 | :members: 26 | 27 | .. autoclass:: MultiProcessingSubscriber 28 | :members: 29 | :inherited-members: 30 | 31 | Other 32 | ----- 33 | 34 | .. autoclass:: ThreadedWrapperHandler 35 | :members: 36 | 37 | .. autoclass:: SubscriberGroup 38 | :members: 39 | 40 | Base Interface 41 | -------------- 42 | 43 | .. autoclass:: SubscriberBase 44 | :members: 45 | 46 | .. autoclass:: ThreadController 47 | :members: 48 | 49 | .. 
autoclass:: TWHThreadController 50 | :members: 51 | -------------------------------------------------------------------------------- /benchmark/run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Runs the benchmarks 4 | """ 5 | import sys 6 | import os 7 | import re 8 | from subprocess import Popen 9 | 10 | try: 11 | from pkg_resources import get_distribution 12 | version = get_distribution('Logbook').version 13 | except Exception: 14 | version = 'unknown version' 15 | 16 | 17 | _filename_re = re.compile(r'^bench_(.*?)\.py$') 18 | bench_directory = os.path.abspath(os.path.dirname(__file__)) 19 | 20 | 21 | def list_benchmarks(): 22 | result = [] 23 | for name in os.listdir(bench_directory): 24 | match = _filename_re.match(name) 25 | if match is not None: 26 | result.append(match.group(1)) 27 | result.sort(key=lambda x: (x.startswith('logging_'), x.lower())) 28 | return result 29 | 30 | 31 | def run_bench(name): 32 | sys.stdout.write('%-32s' % name) 33 | sys.stdout.flush() 34 | Popen([sys.executable, '-mtimeit', '-s', 35 | 'from bench_%s import run' % name, 36 | 'run()']).wait() 37 | 38 | 39 | def main(): 40 | print '=' * 80 41 | print 'Running benchmark with Logbook %s' % version 42 | print '-' * 80 43 | os.chdir(bench_directory) 44 | for bench in list_benchmarks(): 45 | run_bench(bench) 46 | print '-' * 80 47 | 48 | 49 | if __name__ == '__main__': 50 | main() 51 | -------------------------------------------------------------------------------- /logbook/_termcolors.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | logbook._termcolors 4 | ~~~~~~~~~~~~~~~~~~~ 5 | 6 | Provides terminal color mappings. 7 | 8 | :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. 9 | :license: BSD, see LICENSE for more details. 10 | """ 11 | 12 | esc = "\x1b[" 13 | 14 | codes = {} 15 | codes[""] = "" 16 | codes["reset"] = esc + "39;49;00m" 17 | 18 | dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue", 19 | "purple", "teal", "lightgray"] 20 | light_colors = ["darkgray", "red", "green", "yellow", "blue", 21 | "fuchsia", "turquoise", "white"] 22 | 23 | x = 30 24 | for d, l in zip(dark_colors, light_colors): 25 | codes[d] = esc + "%im" % x 26 | codes[l] = esc + "%i;01m" % x 27 | x += 1 28 | 29 | del d, l, x 30 | 31 | codes["darkteal"] = codes["turquoise"] 32 | codes["darkyellow"] = codes["brown"] 33 | codes["fuscia"] = codes["fuchsia"] 34 | 35 | 36 | def _str_to_type(obj, strtype): 37 | """Helper for ansiformat and colorize""" 38 | if isinstance(obj, type(strtype)): 39 | return obj 40 | return obj.encode('ascii') 41 | 42 | 43 | def colorize(color_key, text): 44 | """Returns an ANSI formatted text with the given color.""" 45 | return _str_to_type(codes[color_key], text) + text + \ 46 | _str_to_type(codes["reset"], text) 47 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010 by the Logbook Team, see AUTHORS for more details. 2 | 3 | Some rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are 7 | met: 8 | 9 | * Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 
11 | 12 | * Redistributions in binary form must reproduce the above 13 | copyright notice, this list of conditions and the following 14 | disclaimer in the documentation and/or other materials provided 15 | with the distribution. 16 | 17 | * The names of the contributors may not be used to endorse or 18 | promote products derived from this software without specific 19 | prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | -------------------------------------------------------------------------------- /docs/api/handlers.rst: -------------------------------------------------------------------------------- 1 | Handlers 2 | ======== 3 | 4 | This documents the base handler interface as well as the provided core 5 | handlers. There are additional handlers for special purposes in the 6 | :mod:`logbook.more`, :mod:`logbook.ticketing` and :mod:`logbook.queues` 7 | modules. 8 | 9 | .. module:: logbook 10 | 11 | Base Interface 12 | -------------- 13 | 14 | .. autoclass:: Handler 15 | :members: 16 | :inherited-members: 17 | 18 | .. autoclass:: NestedSetup 19 | :members: 20 | 21 | .. autoclass:: StringFormatter 22 | :members: 23 | 24 | Core Handlers 25 | ------------- 26 | 27 | .. autoclass:: StreamHandler 28 | :members: 29 | 30 | .. autoclass:: FileHandler 31 | :members: 32 | 33 | .. autoclass:: MonitoringFileHandler 34 | :members: 35 | 36 | .. autoclass:: StderrHandler 37 | :members: 38 | 39 | .. autoclass:: RotatingFileHandler 40 | :members: 41 | 42 | .. autoclass:: TimedRotatingFileHandler 43 | :members: 44 | 45 | .. autoclass:: TestHandler 46 | :members: 47 | 48 | .. autoclass:: MailHandler 49 | :members: 50 | 51 | .. autoclass:: SyslogHandler 52 | :members: 53 | 54 | .. autoclass:: NTEventLogHandler 55 | :members: 56 | 57 | .. autoclass:: NullHandler 58 | :members: 59 | 60 | .. autoclass:: WrapperHandler 61 | :members: 62 | 63 | .. autofunction:: create_syshandler 64 | 65 | Special Handlers 66 | ---------------- 67 | 68 | .. autoclass:: FingersCrossedHandler 69 | :members: 70 | 71 | .. autoclass:: GroupHandler 72 | :members: 73 | 74 | Mixin Classes 75 | ------------- 76 | 77 | .. autoclass:: StringFormatterHandlerMixin 78 | :members: 79 | 80 | .. autoclass:: HashingHandlerMixin 81 | :members: 82 | 83 | .. autoclass:: LimitingHandlerMixin 84 | :members: 85 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to Logbook 2 | ================== 3 | 4 | Logbook is a logging system for Python that replaces the standard library's 5 | logging module.
It was designed with both complex and simple applications 6 | in mind and the idea to make logging fun: 7 | 8 | >>> from logbook import Logger 9 | >>> log = Logger('Logbook') 10 | >>> log.info('Hello, World!') 11 | [2010-07-23 16:34] INFO: Logbook: Hello, World! 12 | 13 | What makes it fun? What about getting log messages on your phone or 14 | desktop notification system? :ref:`Logbook can do that `. 15 | 16 | This library is still under heavy development and the API is not fully 17 | finalized yet. Feedback is appreciated. The docs here only show a tiny, 18 | tiny feature set and are terribly incomplete. We will have better docs 19 | soon, but until then we hope this gives a sneak peak about how cool 20 | Logbook is. If you want more, have a look at the comprehensive 21 | `testsuite`_. 22 | 23 | Documentation 24 | ------------- 25 | 26 | .. toctree:: 27 | :maxdepth: 2 28 | 29 | features 30 | quickstart 31 | setups 32 | stacks 33 | performance 34 | libraries 35 | unittesting 36 | ticketing 37 | compat 38 | api/index 39 | designexplained 40 | designdefense 41 | changelog 42 | 43 | Project Information 44 | ------------------- 45 | 46 | .. cssclass:: toctree-l1 47 | 48 | * `Download from PyPI`_ 49 | * `Master repository on GitHub`_ 50 | * `Mailing list`_ 51 | * IRC: ``#pocoo`` on freenode 52 | 53 | .. _testsuite: https://github.com/mitsuhiko/logbook/tree/master/logbook/testsuite 54 | .. _Download from PyPI: http://pypi.python.org/pypi/Logbook 55 | .. _Master repository on GitHub: https://github.com/mitsuhiko/logbook 56 | .. _Mailing list: http://groups.google.com/group/pocoo-libs 57 | -------------------------------------------------------------------------------- /logbook/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | logbook 4 | ~~~~~~~ 5 | 6 | Simple logging library that aims to support desktop, command line 7 | and web applications alike. 8 | 9 | :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. 10 | :license: BSD, see LICENSE for more details. 
11 | """ 12 | 13 | from logbook.base import LogRecord, Logger, LoggerGroup, NestedSetup, \ 14 | Processor, Flags, get_level_name, lookup_level, dispatch_record, \ 15 | CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG, NOTSET 16 | from logbook.handlers import Handler, StreamHandler, FileHandler, \ 17 | MonitoringFileHandler, StderrHandler, RotatingFileHandler, \ 18 | TimedRotatingFileHandler, TestHandler, MailHandler, SyslogHandler, \ 19 | NullHandler, NTEventLogHandler, create_syshandler, StringFormatter, \ 20 | StringFormatterHandlerMixin, HashingHandlerMixin, \ 21 | LimitingHandlerMixin, WrapperHandler, FingersCrossedHandler, \ 22 | GroupHandler 23 | 24 | 25 | # create an anonymous default logger and provide all important 26 | # methods of that logger as global functions 27 | _default_logger = Logger('Generic') 28 | _default_logger.suppress_dispatcher = True 29 | debug = _default_logger.debug 30 | info = _default_logger.info 31 | warn = _default_logger.warn 32 | warning = _default_logger.warning 33 | notice = _default_logger.notice 34 | error = _default_logger.error 35 | exception = _default_logger.exception 36 | catch_exceptions = _default_logger.catch_exceptions 37 | critical = _default_logger.critical 38 | exception = _default_logger.exception 39 | catch_exceptions = _default_logger.catch_exceptions 40 | log = _default_logger.log 41 | del _default_logger 42 | 43 | 44 | # install a default global handler 45 | default_handler = StderrHandler() 46 | default_handler.push_application() 47 | -------------------------------------------------------------------------------- /docs/compat.rst: -------------------------------------------------------------------------------- 1 | .. _logging-compat: 2 | 3 | Logging Compatibility 4 | ===================== 5 | 6 | Logbook provides backwards compatibility with the logging library. When 7 | activated, the logging library will transparently redirect all the logging calls 8 | to your Logbook logging setup. 9 | 10 | Basic Setup 11 | ----------- 12 | 13 | If you import the compat system and call the 14 | :func:`~logbook.compat.redirect_logging` function, all logging calls that happen 15 | after this call will transparently be redirected to Logbook:: 16 | 17 | from logbook.compat import redirect_logging 18 | redirect_logging() 19 | 20 | This also means you don't have to call :func:`logging.basicConfig`: 21 | 22 | >>> from logbook.compat import redirect_logging 23 | >>> redirect_logging() 24 | >>> from logging import getLogger 25 | >>> log = getLogger('My Logger') 26 | >>> log.warn('This is a warning') 27 | [2010-07-25 00:24] WARNING: My Logger: This is a warning 28 | 29 | Advanced Setup 30 | -------------- 31 | 32 | The way this is implemented is with a 33 | :class:`~logbook.compat.RedirectLoggingHandler`. This class is a handler for 34 | the old logging system that sends records via an internal logbook logger to the 35 | active logbook handlers. 
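In fact, :func:`~logbook.compat.redirect_logging` does little more than attach
one of these handlers to the root logger of the :mod:`logging` system. A
rough, hand-written equivalent (a sketch only, not the exact implementation)
would be::

    import logging
    from logbook.compat import RedirectLoggingHandler

    del logging.root.handlers[:]  # drop any handlers logging configured itself
    logging.root.addHandler(RedirectLoggingHandler())
    logging.warning('this now ends up in the active Logbook handlers')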
This handler can then be added to specific logging 36 | loggers if you want: 37 | 38 | >>> from logging import getLogger 39 | >>> mylog = getLogger('My Log') 40 | >>> from logbook.compat import RedirectLoggingHandler 41 | >>> mylog.addHandler(RedirectLoggingHandler()) 42 | >>> otherlog = getLogger('Other Log') 43 | >>> otherlog.warn('logging is deprecated') 44 | No handlers could be found for logger "Other Log" 45 | >>> mylog.warn('but logbook is awesome') 46 | [2010-07-25 00:29] WARNING: My Log: but logbook is awesome 47 | 48 | Reverse Redirects 49 | ----------------- 50 | 51 | You can also redirect logbook records to logging, so the other way round. 52 | For this you just have to activate the 53 | :class:`~logbook.compat.LoggingHandler` for the thread or application:: 54 | 55 | from logbook import Logger 56 | from logbook.compat import LoggingHandler 57 | 58 | log = Logger('My app') 59 | with LoggingHandler(): 60 | log.warn('Going to logging') 61 | -------------------------------------------------------------------------------- /docs/unittesting.rst: -------------------------------------------------------------------------------- 1 | Unittesting Support 2 | =================== 3 | 4 | .. currentmodule:: logbook 5 | 6 | Logbook has builtin support for testing logging calls. There is a handler 7 | that can be hooked in and will catch all log records for inspection. Not 8 | only that, it also provides methods to test if certain things were logged. 9 | 10 | Basic Setup 11 | ----------- 12 | 13 | The interface to satisfaction is :class:`logbook.TestHandler`. Create it, 14 | and bind it, and you're done. If you are using classic :mod:`unittest` 15 | test cases, you might want to set it up in the before and after callback 16 | methods:: 17 | 18 | import logbook 19 | import unittest 20 | 21 | class LoggingTestCase(unittest.TestCase): 22 | 23 | def setUp(self): 24 | self.log_handler = logbook.TestHandler() 25 | self.log_handler.push_thread() 26 | 27 | def tearDown(self): 28 | self.log_handler.pop_thread() 29 | 30 | Alternatively you can also use it in a with statement in an individual 31 | test. This is also how this can work in nose and other testing systems:: 32 | 33 | def my_test(): 34 | with logbook.TestHandler() as log_handler: 35 | ... 36 | 37 | 38 | Test Handler Interface 39 | ---------------------- 40 | 41 | The test handler has a few attributes and methods to gain access to the 42 | logged messages. The most important ones are :attr:`~TestHandler.records` 43 | and :attr:`~TestHandler.formatted_records`. The first is a list of the 44 | captured :class:`~LogRecord`\s, the second a list of the formatted records 45 | as unicode strings: 46 | 47 | >>> from logbook import TestHandler, Logger 48 | >>> logger = Logger('Testing') 49 | >>> handler = TestHandler() 50 | >>> handler.push_thread() 51 | >>> logger.warn('Hello World') 52 | >>> handler.records 53 | [] 54 | >>> handler.formatted_records 55 | [u'[WARNING] Testing: Hello World'] 56 | 57 | 58 | .. _probe-log-records: 59 | 60 | Probe Log Records 61 | ----------------- 62 | 63 | The handler also provide some convenience methods to do assertions: 64 | 65 | >>> handler.has_warnings 66 | True 67 | >>> handler.has_errors 68 | False 69 | >>> handler.has_warning('Hello World') 70 | True 71 | 72 | Methods like :meth:`~logbook.TestHandler.has_warning` accept two 73 | arguments: 74 | 75 | `message` 76 | If provided and not `None` it will check if there is at least one log 77 | record where the message matches. 
78 | 79 | `channel` 80 | If provided and not `None` it will check if there is at least one log 81 | record where the logger name of the record matches. 82 | 83 | Example usage: 84 | 85 | >>> handler.has_warning('A different message') 86 | False 87 | >>> handler.has_warning('Hello World', channel='Testing') 88 | True 89 | >>> handler.has_warning(channel='Testing') 90 | True 91 | -------------------------------------------------------------------------------- /logbook/testsuite/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | logbook.testsuite 4 | ~~~~~~~~~~~~~~~~~ 5 | 6 | The logbook testsuite. 7 | 8 | :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. 9 | :license: BSD, see LICENSE for more details. 10 | """ 11 | import sys 12 | import unittest 13 | import logbook 14 | 15 | 16 | _skipped_modules = [] 17 | _missing = object() 18 | _func_ident = lambda f: f 19 | _func_none = lambda f: None 20 | 21 | 22 | class LogbookTestSuite(unittest.TestSuite): 23 | 24 | def run(self, result): 25 | try: 26 | return unittest.TestSuite.run(self, result) 27 | finally: 28 | sys.stderr.write('\n') 29 | for mod in _skipped_modules: 30 | msg = '*** Failed to import %s, tests skipped.\n' % mod 31 | sys.stderr.write(msg) 32 | 33 | 34 | class LogbookTestCase(unittest.TestCase): 35 | 36 | def setUp(self): 37 | self.log = logbook.Logger('testlogger') 38 | 39 | 40 | def make_fake_mail_handler(**kwargs): 41 | class FakeMailHandler(logbook.MailHandler): 42 | mails = [] 43 | 44 | def get_connection(self): 45 | return self 46 | 47 | def close_connection(self, con): 48 | pass 49 | 50 | def sendmail(self, fromaddr, recipients, mail): 51 | self.mails.append((fromaddr, recipients, mail)) 52 | 53 | kwargs.setdefault('level', logbook.ERROR) 54 | return FakeMailHandler('foo@example.com', ['bar@example.com'], **kwargs) 55 | 56 | 57 | def skip_if(condition): 58 | if condition: 59 | return _func_ident 60 | else: 61 | return _func_none 62 | 63 | 64 | def require(name): 65 | if name in _skipped_modules: 66 | return _func_none 67 | try: 68 | __import__(name) 69 | except ImportError: 70 | _skipped_modules.append(name) 71 | return _func_none 72 | return _func_ident 73 | 74 | 75 | def missing(name): 76 | def decorate(f): 77 | def wrapper(*args, **kwargs): 78 | old = sys.modules.get(name, _missing) 79 | sys.modules[name] = None 80 | try: 81 | f(*args, **kwargs) 82 | finally: 83 | if old is _missing: 84 | del sys.modules[name] 85 | else: 86 | sys.modules[name] = old 87 | return wrapper 88 | return decorate 89 | 90 | 91 | def suite(): 92 | loader = unittest.TestLoader() 93 | suite = LogbookTestSuite() 94 | suite.addTests(loader.loadTestsFromName('logbook.testsuite.test_regular')) 95 | if sys.version_info >= (2, 5): 96 | suite.addTests(loader.loadTestsFromName 97 | ('logbook.testsuite.test_contextmanager')) 98 | return suite 99 | 100 | 101 | if __name__ == '__main__': 102 | unittest.main(defaultTest='suite') 103 | -------------------------------------------------------------------------------- /docs/ticketing.rst: -------------------------------------------------------------------------------- 1 | Logging to Tickets 2 | ================== 3 | 4 | Logbook supports the concept of creating unique tickets for log records 5 | and keeping track of the number of times these log records were created. 
6 | The default implementation logs into a relational database, but there is a 7 | baseclass that can be subclassed to log into existing ticketing systems 8 | such as trac or other data stores. 9 | 10 | The ticketing handlers and store backends are all implemented in the 11 | module :mod:`logbook.ticketing`. 12 | 13 | How does it work? 14 | ----------------- 15 | 16 | When a ticketing handler is used, each call to a logbook logger is assigned 17 | a unique hash that is based on the name of the logger, the location of the 18 | call as well as the level of the message. The message itself is not taken 19 | into account as it might be changing depending on the arguments passed to 20 | it. 21 | 22 | Once that unique hash is created, the database is checked to see if there is 23 | already a ticket for that hash. If there is, a new occurrence is logged 24 | with all details available. Otherwise a new ticket is created. 25 | 26 | This makes it possible to analyze how often certain log messages are 27 | triggered and over what period of time. 28 | 29 | Why should I use it? 30 | -------------------- 31 | 32 | The ticketing handlers have the big advantage over a regular log handler 33 | that they will capture the full data of the log record in machine 34 | processable format. Whatever information was attached to the log record 35 | will be sent straight to the data store in JSON. 36 | 37 | This makes it easier to track down issues that might happen in production 38 | systems. Due to the higher overhead of ticketing logging over a standard 39 | logfile or something comparable it should only be used for higher log 40 | levels (:data:`~logbook.WARNING` or higher). 41 | 42 | Common Setups 43 | ------------- 44 | 45 | The builtin ticketing handler is called 46 | :class:`~logbook.ticketing.TicketingHandler`. In the default configuration 47 | it will connect to a relational database with the help of `SQLAlchemy`_ 48 | and log into two tables there: tickets go into ``${prefix}tickets`` and 49 | occurrences go into ``${prefix}occurrences``. The default table prefix is 50 | ``'logbook_'`` but can be overridden. If the tables do not exist already, 51 | the handler will create them. 52 | 53 | Here is an example setup that logs into a postgres database:: 54 | 55 | from logbook import ERROR 56 | from logbook.ticketing import TicketingHandler 57 | handler = TicketingHandler('postgres://localhost/database', 58 | level=ERROR) 59 | with handler: 60 | # everything in this block and thread will be handled by 61 | # the ticketing database handler 62 | ... 63 | 64 | Alternative backends can be swapped in by providing the `backend` 65 | parameter. There is a second implementation of a backend that is using 66 | MongoDB: :class:`~logbook.ticketing.MongoDBBackend`. 67 | 68 | .. _SQLAlchemy: http://sqlalchemy.org/ 69 | -------------------------------------------------------------------------------- /docs/libraries.rst: -------------------------------------------------------------------------------- 1 | Logbook in Libraries 2 | ==================== 3 | 4 | Logging becomes more useful the higher the number of components in a 5 | system that are using it. Logbook itself is not a widely supported 6 | library so far, but a handful of libraries are using :mod:`logging` 7 | already which can be redirected to Logbook if necessary. 8 | 9 | Logbook itself is easier to support for libraries than logging because it 10 | does away with the central logger registry and can easily be mocked in 11 | case the library is not available.
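As mentioned above, libraries that already use :mod:`logging` need no changes
at all: the application that uses them can pull their records into Logbook
through the compatibility layer (see :ref:`logging-compat`). A minimal
sketch, with ``somelib`` standing in for a hypothetical dependency that logs
via :mod:`logging`::

    from logbook import StderrHandler
    from logbook.compat import redirect_logging

    redirect_logging()
    with StderrHandler():
        import somelib  # hypothetical library using the logging module
        somelib.do_work()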
12 | 13 | Mocking Logbook 14 | --------------- 15 | 16 | If you want to support Logbook in your library but not depend on it you 17 | can copy/paste the following piece of code. It will attempt to import 18 | logbook and create a :class:`~logbook.Logger` and if it fails provide a 19 | class that just swallows all calls:: 20 | 21 | try: 22 | from logbook import Logger 23 | except ImportError: 24 | class Logger(object): 25 | def __init__(self, name, level=0): 26 | self.name = name 27 | self.level = level 28 | debug = info = warn = warning = notice = error = exception = \ 29 | critical = log = lambda *a, **kw: None 30 | 31 | log = Logger('My library') 32 | 33 | Best Practices 34 | -------------- 35 | 36 | - A library that wants to log to the Logbook system should generally be 37 | designed to provide an interface to the record dispatchers it is 38 | using. That does not have to be a reference to the record dispatcher 39 | itself, it is perfectly fine if there is a toggle to switch it on or 40 | off. 41 | 42 | - The channel name should be readable and descriptive. 43 | 44 | - For example, if you are a database library that wants to use the 45 | logging system to log all SQL statements issued in debug mode, you can 46 | enable and disable your record dispatcher based on that debug flag. 47 | 48 | - Libraries should never set up log setups except temporarily on a 49 | per-thread basis if it never changes the stack for a longer duration 50 | than a function call in a library. For example, hooking in a null 51 | handler for a call to a noisy function is fine, changing the global 52 | stack in a function and not reverting it at the end of the function is 53 | bad. 54 | 55 | Debug Loggers 56 | ------------- 57 | 58 | Sometimes you want to have loggers in place that are only really good for 59 | debugging. For example you might have a library that does a lot of 60 | server/client communication and for debugging purposes it would be nice if 61 | you can enable/disable that log output as necessary. 62 | 63 | In that case it makes sense to create a logger and disable that by default 64 | and give people a way to get hold of the logger to flip the flag. 65 | Additionally you can override the :attr:`~logbook.Logger.disabled` flag to 66 | automatically set it based on another value:: 67 | 68 | class MyLogger(Logger): 69 | @property 70 | def disabled(self): 71 | return not database_connection.debug 72 | database_connection.logger = MyLogger('mylibrary.dbconnection') 73 | -------------------------------------------------------------------------------- /docs/sheet/static/sheet.css_t: -------------------------------------------------------------------------------- 1 | /* 2 | * sheet.css 3 | * ~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet for the sheet theme. 6 | * 7 | * :copyright: Copyright 2010 by Armin Ronacher, Georg Brandl. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | @import url("basic.css"); 13 | 14 | body { 15 | text-align: center; 16 | font-family: {{ theme_bodyfont }}; 17 | margin: 0; 18 | padding: 0; 19 | background: #d5dde2; 20 | } 21 | 22 | .book { 23 | padding: 15px 25px 25px 85px; 24 | margin: 0 auto; 25 | width: 695px; 26 | text-align: left; 27 | background: white url("background.png") repeat-y; 28 | } 29 | 30 | a { 31 | font-weight: bold; 32 | color: #003366; 33 | } 34 | 35 | h1, h2, h3, h4, h5, h6 { 36 | font-family: {{ theme_seriffont }}; 37 | font-weight: normal; 38 | } 39 | 40 | h1 { font-size: 2.8em; } 41 | h2 { font-size: 2.2em; } 42 | 43 | .document { 44 | border-bottom: 1px solid #837D7C; 45 | border-top: 1px solid #837D7C; 46 | margin: 12px 0px; 47 | line-height: 1.5em; 48 | } 49 | 50 | .related a { 51 | margin: 0; 52 | font-size: 1.1em; 53 | font-weight: normal; 54 | letter-spacing: 3px; 55 | text-transform: uppercase; 56 | text-decoration: none; 57 | border-bottom: 1px dashed #ddd; 58 | color: #837D7C; 59 | } 60 | 61 | div.related ul { 62 | margin-right: -10px; 63 | padding: 0px; 64 | } 65 | 66 | .banner h1 { 67 | font-size: 4.5em; 68 | font-weight: normal; 69 | line-height: 1; 70 | letter-spacing: -3px; 71 | margin-top: 12px; 72 | margin-bottom: 12px; 73 | } 74 | 75 | .banner { 76 | color: #000000; 77 | letter-spacing: -1px; 78 | font-family: {{ theme_seriffont }}; 79 | margin-bottom: 24px; 80 | } 81 | 82 | .banner a { 83 | color: #000000; 84 | text-decoration: none; 85 | } 86 | 87 | .footer { 88 | color: #000000; 89 | font-size: 0.7em; 90 | text-align: center; 91 | line-height: 1; 92 | margin-top: 20px; 93 | font-family: {{ theme_monofont }}; 94 | font-weight: normal; 95 | letter-spacing: 2px; 96 | text-transform: uppercase; 97 | text-decoration: none; 98 | color: #837D7C; 99 | } 100 | 101 | .highlight pre { 102 | background-color: #f8f8f8; 103 | border-top: 1px solid #c8c8c8; 104 | border-bottom: 1px solid #c8c8c8; 105 | line-height: 120%; 106 | padding: 10px 6px; 107 | } 108 | 109 | .highlighttable .highlight pre { 110 | margin: 0px; 111 | } 112 | 113 | div.sphinxsidebar { 114 | margin-left: 0px; 115 | float: none; 116 | width: 100%; 117 | font-size: 0.8em; 118 | } 119 | 120 | .toctree-l1 a { 121 | text-decoration: none; 122 | } 123 | 124 | img.align-right { 125 | margin-left: 24px; 126 | } 127 | 128 | pre, tt { 129 | font-family: {{ theme_monofont }}; 130 | font-size: 15px!important; 131 | } 132 | 133 | dl.class dt { 134 | padding-left: 60px; 135 | text-indent: -60px; 136 | } 137 | 138 | tt.descname { 139 | font-size: 1em; 140 | } 141 | 142 | p.output-caption { 143 | font-size: small; 144 | margin: 0px; 145 | } 146 | -------------------------------------------------------------------------------- /docs/performance.rst: -------------------------------------------------------------------------------- 1 | Performance Tuning 2 | ================== 3 | 4 | The more logging calls you add to your application and libraries, the more 5 | overhead will you introduce. There are a couple things you can do to 6 | remedy this behavior. 7 | 8 | Debug-Only Logging 9 | ------------------ 10 | 11 | There are debug log calls, and there are debug log calls. Some debug log 12 | calls would sometimes be interesting in a production environment, others 13 | really only if you are on your local machine fiddling around with the 14 | code. 
Logbook internally makes sure to process as little of your logging 15 | call as necessary, but it will still have to walk the current stack to 16 | figure out if there are any active handlers or not. Depending on the 17 | number of handlers on the stack, the kind of handler and so on, more or 18 | less work will be done. 19 | 20 | Generally speaking, an unhandled logging call is cheap enough that you 21 | don't have to care about it. However, there is not only your logging call; 22 | there might also be some data you have to compute for the record. This 23 | will always be evaluated, even if the log record ends up being discarded. 24 | 25 | This is where the Python ``__debug__`` feature comes in handy. This 26 | variable is a special flag that is evaluated at the time Python 27 | compiles your script. It can eliminate code completely from your script 28 | so that it does not even exist in the compiled bytecode (this requires Python 29 | to be run with the ``-O`` switch):: 30 | 31 | if __debug__: 32 | info = get_wallcalculate_debug_info() 33 | logger.debug("Call to response() failed. Reason: {0}", info) 34 | 35 | Keep the Fingers Crossed 36 | ------------------------ 37 | 38 | Do you really need the debug info? If you find yourself only looking 39 | at the logfiles when errors occur, an option is to put in the 40 | :class:`~logbook.FingersCrossedHandler`. Logging into memory is always 41 | cheaper than logging to a filesystem. 42 | 43 | Keep the Stack Static 44 | --------------------- 45 | 46 | Whenever you push to or pop from one of the stacks you invalidate 47 | an internal cache that is used by logbook. This is an implementation 48 | detail, but this is how it works for the moment. That means that the 49 | first logging call after a push or pop will have a higher impact on 50 | performance than subsequent calls. So you should not attempt to 51 | push or pop from a stack for each logging call. Make sure to do the 52 | pushing and popping only as needed (at the start/end of the application or request). 53 | 54 | Disable Introspection 55 | --------------------- 56 | 57 | By default Logbook will try to pull in the interpreter frame of the caller 58 | that invoked a logging function. While this is a fast operation that 59 | usually does not slow down the execution of your script, it also means that 60 | for certain Python implementations it invalidates assumptions a JIT 61 | compiler might have made about the function body. Currently this is the case, 62 | for example, for applications running on PyPy. With a 63 | stock logbook setup on PyPy, the JIT wouldn't be able to work properly. 64 | 65 | In case you don't need the frame-based information (name of module, 66 | calling function, filename, line number) you can disable the introspection 67 | feature:: 68 | 69 | from logbook import Flags 70 | 71 | with Flags(introspection=False): 72 | # all logging calls here will not use introspection 73 | ... 74 | -------------------------------------------------------------------------------- /CHANGES: -------------------------------------------------------------------------------- 1 | Logbook Changelog 2 | ================= 3 | 4 | Here you can see the full list of changes between each Logbook release. 5 | 6 | Version 0.4 7 | ----------- 8 | 9 | Release date to be announced. Codename to be selected. 10 | 11 | - Added :class:`logbook.notifiers.NotifoHandler` 12 | - `channel` is now documented to be used for filtering purposes if 13 | wanted.
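To make the "Keep the Fingers Crossed" section above concrete, here is a minimal sketch; the file name and logger name are made up for illustration, while the classes themselves are regular Logbook APIs::

    from logbook import ERROR, FileHandler, FingersCrossedHandler, Logger

    log = Logger('My Application')
    handler = FingersCrossedHandler(FileHandler('app.log'), action_level=ERROR)
    with handler.applicationbound():
        log.debug('cheap: kept in memory only')
        log.error('boom')   # flushes the buffered debug record to app.log as well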
Previously this was an opaque string that was not intended 14 | for filtering of any kind. 15 | 16 | Version 0.3 17 | ----------- 18 | 19 | Released on October 23rd. Codename "Informant" 20 | 21 | - Added :class:`logbook.more.ColorizingStreamHandlerMixin` and 22 | :class:`logbook.more.ColorizedStderrHandler` 23 | - Deprecated :class:`logbook.RotatingFileHandlerBase` because the 24 | interface was not flexible enough. 25 | - Provided basic Python 3 compatibility. This did cause a few smaller 26 | API changes that caused minimal changes on Python 2 as well. The 27 | deprecation of the :class:`logbook.RotatingFileHandlerBase` was a 28 | result of this. 29 | - Added support for Python 2.4 30 | - Added batch emitting support for handlers which now makes it possible 31 | to use the :class:`logbook.more.FingersCrossedHandler` with the 32 | :class:`logbook.MailHandler`. 33 | - Moved the :class:`~logbook.FingersCrossedHandler` handler into the 34 | base package. The old location stays importable for a few releases. 35 | - Added :class:`logbook.GroupHandler` that buffers records until the 36 | handler is popped. 37 | - Added :class:`logbook.more.ExternalApplicationHandler` that executes 38 | an external application for each log record emitted. 39 | 40 | Version 0.2.1 41 | ------------- 42 | 43 | Bugfix release, Released on September 22nd. 44 | 45 | - Fixes Python 2.5 compatibility. 46 | 47 | Version 0.2 48 | ----------- 49 | 50 | Released on September 21st. Codename "Walls of Text" 51 | 52 | - Implemented default with statement for handlers which is an 53 | alias for `threadbound`. 54 | - `applicationbound` and `threadbound` return the handler now. 55 | - Implemented channel recording on the log records. 56 | - The :class:`logbook.more.FingersCrossedHandler` now is set to 57 | `ERROR` by default and has the ability to create new loggers 58 | from a factory function. 59 | - Implemented maximum buffer size for the 60 | :class:`logbook.more.FingersCrossedHandler` as well as a lock 61 | for thread safety. 62 | - Added ability to filter for context. 63 | - Moved bubbling flags and filters to the handler object. 64 | - Moved context processors on their own stack. 65 | - Removed the `iter_context_handlers` function. 66 | - Renamed `NestedHandlerSetup` to :class:`~logbook.NestedSetup` 67 | because it can now also configure processors. 68 | - Added the :class:`logbook.Processor` class. 69 | - There is no difference between logger attached handlers and 70 | context specific handlers any more. 71 | - Added a function to redirect warnings to logbook 72 | (:func:`logbook.compat.redirected_warnings`). 73 | - Fixed and improved :class:`logbook.LoggerGroup`. 74 | - The :class:`logbook.TestHandler` now keeps the record open 75 | for further inspection. 76 | - The traceback is now removed from a log record when the record 77 | is closed. The formatted traceback is a cached property 78 | instead of a function. 79 | - Added ticketing handlers that send logs directly into a database. 80 | - Added MongoDB backend for ticketing handlers 81 | - Added a :func:`logbook.base.dispatch_record` function to dispatch 82 | records to handlers independently of a logger (uses the default 83 | record dispatching logic). 84 | - Renamed `logger_name` to `channel`. 85 | - Added a multi processing log handler 86 | (:class:`logbook.more.MultiProcessingHandler`). 87 | - Added a twitter handler. 88 | - Added a ZeroMQ handler. 89 | - Added a Growl handler. 90 | - Added a Libnotify handler. 91 | - Added a monitoring file handler. 
92 | - Added a handler wrapper that moves the actual handling into a 93 | background thread. 94 | - The mail handler can now be configured to deliver each log record 95 | not more than n times in m seconds. 96 | - Added support for Python 2.5 97 | - Added a :class:`logbook.queues.SubscriberGroup` to deal with multiple 98 | subscribers. 99 | - Added a :class:`logbook.compat.LoggingHandler` for redirecting logbook 100 | log calls to the standard library's :mod:`logging` module. 101 | 102 | Version 0.1 103 | ----------- 104 | 105 | First public release. 106 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | r""" 2 | Logbook 3 | ------- 4 | 5 | An awesome logging implementation that is fun to use. 6 | 7 | Quickstart 8 | `````````` 9 | 10 | :: 11 | 12 | from logbook import Logger 13 | log = Logger('A Fancy Name') 14 | 15 | log.warn('Logbook is too awesome for most applications') 16 | log.error("Can't touch this") 17 | 18 | Works for web apps too 19 | `````````````````````` 20 | 21 | :: 22 | 23 | from logbook import MailHandler, Processor 24 | 25 | mailhandler = MailHandler(from_addr='servererror@example.com', 26 | recipients=['admin@example.com'], 27 | level='ERROR', format_string=u'''\ 28 | Subject: Application Error for {record.extra[path]} [{record.extra[method]}] 29 | 30 | Message type: {record.level_name} 31 | Location: {record.filename}:{record.lineno} 32 | Module: {record.module} 33 | Function: {record.func_name} 34 | Time: {record.time:%Y-%m-%d %H:%M:%S} 35 | Remote IP: {record.extra[ip]} 36 | Request: {record.extra[path]} [{record.extra[method]}] 37 | 38 | Message: 39 | 40 | {record.message} 41 | ''') 42 | 43 | def handle_request(request): 44 | def inject_extra(record, handler): 45 | record.extra['ip'] = request.remote_addr 46 | record.extra['method'] = request.method 47 | record.extra['path'] = request.path 48 | 49 | with Processor(inject_extra): 50 | with mailhandler: 51 | # execute code that might fail in the context of the 52 | # request. 
53 | """ 54 | 55 | import os 56 | import sys 57 | from setuptools import setup, Extension, Feature 58 | from distutils.command.build_ext import build_ext 59 | from distutils.errors import CCompilerError, DistutilsExecError, \ 60 | DistutilsPlatformError 61 | 62 | 63 | extra = {} 64 | cmdclass = {} 65 | 66 | 67 | class BuildFailed(Exception): 68 | pass 69 | 70 | 71 | ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) 72 | if sys.platform == 'win32' and sys.version_info > (2, 6): 73 | # 2.6's distutils.msvc9compiler can raise an IOError when failing to 74 | # find the compiler 75 | ext_errors += (IOError,) 76 | 77 | if sys.version_info >= (3, 0): 78 | extra['use_2to3'] = True 79 | 80 | 81 | class ve_build_ext(build_ext): 82 | """This class allows C extension building to fail.""" 83 | 84 | def run(self): 85 | try: 86 | build_ext.run(self) 87 | except DistutilsPlatformError: 88 | raise BuildFailed() 89 | 90 | def build_extension(self, ext): 91 | try: 92 | build_ext.build_extension(self, ext) 93 | except ext_errors: 94 | raise BuildFailed() 95 | 96 | cmdclass['build_ext'] = ve_build_ext 97 | # Don't try to compile the extension if we're running on PyPy 98 | if os.path.isfile('logbook/_speedups.c') and not hasattr(sys, "pypy_translation_info"): 99 | speedups = Feature('optional C speed-enhancement module', standard=True, 100 | ext_modules=[Extension('logbook._speedups', 101 | ['logbook/_speedups.c'])]) 102 | else: 103 | speedups = None 104 | 105 | 106 | def run_setup(with_binary): 107 | features = {} 108 | if with_binary and speedups is not None: 109 | features['speedups'] = speedups 110 | setup( 111 | name='Logbook', 112 | version='0.4', 113 | license='BSD', 114 | url='http://logbook.pocoo.org/', 115 | author='Armin Ronacher, Georg Brandl', 116 | author_email='armin.ronacher@active-4.com', 117 | description='A logging replacement for Python', 118 | long_description=__doc__, 119 | packages=['logbook', 'logbook.testsuite'], 120 | test_suite='logbook.testsuite.suite', 121 | zip_safe=False, 122 | platforms='any', 123 | cmdclass=cmdclass, 124 | features=features, 125 | **extra 126 | ) 127 | 128 | 129 | def echo(msg=''): 130 | sys.stdout.write(msg + '\n') 131 | 132 | 133 | try: 134 | run_setup(True) 135 | except BuildFailed: 136 | LINE = '=' * 74 137 | BUILD_EXT_WARNING = ('WARNING: The C extension could not be compiled, ' 138 | 'speedups are not enabled.') 139 | 140 | echo(LINE) 141 | echo(BUILD_EXT_WARNING) 142 | echo('Failure information, if any, is above.') 143 | echo('Retrying the build without the C extension now.') 144 | echo() 145 | 146 | run_setup(False) 147 | 148 | echo(LINE) 149 | echo(BUILD_EXT_WARNING) 150 | echo('Plain-Python installation succeeded.') 151 | echo(LINE) 152 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | if NOT "%PAPER%" == "" ( 11 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 12 | ) 13 | 14 | if "%1" == "" goto help 15 | 16 | if "%1" == "help" ( 17 | :help 18 | echo.Please use `make ^` where ^ is one of 19 | echo. html to make standalone HTML files 20 | echo. dirhtml to make HTML files named index.html in directories 21 | echo. 
singlehtml to make a single large HTML file 22 | echo. pickle to make pickle files 23 | echo. json to make JSON files 24 | echo. htmlhelp to make HTML files and a HTML help project 25 | echo. qthelp to make HTML files and a qthelp project 26 | echo. devhelp to make HTML files and a Devhelp project 27 | echo. epub to make an epub 28 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 29 | echo. text to make text files 30 | echo. man to make manual pages 31 | echo. changes to make an overview over all changed/added/deprecated items 32 | echo. linkcheck to check all external links for integrity 33 | echo. doctest to run all doctests embedded in the documentation if enabled 34 | goto end 35 | ) 36 | 37 | if "%1" == "clean" ( 38 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 39 | del /q /s %BUILDDIR%\* 40 | goto end 41 | ) 42 | 43 | if "%1" == "html" ( 44 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 45 | echo. 46 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 47 | goto end 48 | ) 49 | 50 | if "%1" == "dirhtml" ( 51 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 52 | echo. 53 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 54 | goto end 55 | ) 56 | 57 | if "%1" == "singlehtml" ( 58 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 59 | echo. 60 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 61 | goto end 62 | ) 63 | 64 | if "%1" == "pickle" ( 65 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 66 | echo. 67 | echo.Build finished; now you can process the pickle files. 68 | goto end 69 | ) 70 | 71 | if "%1" == "json" ( 72 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 73 | echo. 74 | echo.Build finished; now you can process the JSON files. 75 | goto end 76 | ) 77 | 78 | if "%1" == "htmlhelp" ( 79 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 80 | echo. 81 | echo.Build finished; now you can run HTML Help Workshop with the ^ 82 | .hhp project file in %BUILDDIR%/htmlhelp. 83 | goto end 84 | ) 85 | 86 | if "%1" == "qthelp" ( 87 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 88 | echo. 89 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 90 | .qhcp project file in %BUILDDIR%/qthelp, like this: 91 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Logbook.qhcp 92 | echo.To view the help file: 93 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Logbook.ghc 94 | goto end 95 | ) 96 | 97 | if "%1" == "devhelp" ( 98 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 99 | echo. 100 | echo.Build finished. 101 | goto end 102 | ) 103 | 104 | if "%1" == "epub" ( 105 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 106 | echo. 107 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 108 | goto end 109 | ) 110 | 111 | if "%1" == "latex" ( 112 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 113 | echo. 114 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 115 | goto end 116 | ) 117 | 118 | if "%1" == "text" ( 119 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 120 | echo. 121 | echo.Build finished. The text files are in %BUILDDIR%/text. 122 | goto end 123 | ) 124 | 125 | if "%1" == "man" ( 126 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 127 | echo. 128 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 129 | goto end 130 | ) 131 | 132 | if "%1" == "changes" ( 133 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 134 | echo. 
135 | echo.The overview file is in %BUILDDIR%/changes. 136 | goto end 137 | ) 138 | 139 | if "%1" == "linkcheck" ( 140 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 141 | echo. 142 | echo.Link check complete; look for any errors in the above output ^ 143 | or in %BUILDDIR%/linkcheck/output.txt. 144 | goto end 145 | ) 146 | 147 | if "%1" == "doctest" ( 148 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 149 | echo. 150 | echo.Testing of doctests in the sources finished, look at the ^ 151 | results in %BUILDDIR%/doctest/output.txt. 152 | goto end 153 | ) 154 | 155 | :end 156 | -------------------------------------------------------------------------------- /docs/stacks.rst: -------------------------------------------------------------------------------- 1 | Stacks in Logbook 2 | ================= 3 | 4 | Logbook currently keeps three stacks internally: 5 | 6 | - one for the :class:`~logbook.Handler`\s: each handler is processed from 7 | stack top to bottom. When a record has been handled, the 8 | :attr:`~logbook.Handler.bubble` flag of the handler determines whether it should still 9 | be processed by the next handler on the stack. 10 | - one for the :class:`~logbook.Processor`\s: each processor in the stack 11 | is applied to a record before the log record is handled by the 12 | handler. 13 | - one for the :class:`~logbook.Flags`: this stack manages simple flags 14 | such as how errors during logging should be handled or whether stackframe 15 | introspection should be used. 16 | 17 | General Stack Management 18 | ------------------------ 19 | 20 | Generally, all objects that are managed by stacks have a common 21 | interface (:class:`~logbook.base.StackedObject`) and can be used in 22 | combination with the :class:`~logbook.NestedSetup` class. 23 | 24 | Commonly stacked objects are used with a context manager (`with` 25 | statement):: 26 | 27 | with context_object.threadbound(): 28 | # this is managed for this thread only 29 | ... 30 | 31 | with context_object.applicationbound(): 32 | # this is managed for the whole application 33 | ... 34 | 35 | Alternatively you can also use `try`/`finally`:: 36 | 37 | context_object.push_thread() 38 | try: 39 | # this is managed for this thread only 40 | ... 41 | finally: 42 | context_object.pop_thread() 43 | 44 | context_object.push_application() 45 | try: 46 | # this is managed for the whole application 47 | ... 48 | finally: 49 | context_object.pop_application() 50 | 51 | It's very important that you always pop from the stack again, unless 52 | you really want the change to last until the application shuts down, 53 | which probably is not the case. 54 | 55 | If you want to push and pop multiple stacked objects at the same time, you 56 | can use the :class:`~logbook.NestedSetup`:: 57 | 58 | setup = NestedSetup([stacked_object1, stacked_object2]) 59 | with setup.threadbound(): 60 | # both objects are now bound to the thread's stack 61 | ... 62 | 63 | Sometimes a stacked object can be passed to one of the functions or 64 | methods in Logbook. If any stacked object can be passed, that argument is usually 65 | called the `setup`. This is for example the case when you specify a 66 | handler or processor for things like the 67 | :class:`~logbook.queues.ZeroMQSubscriber`. 68 | 69 | Handlers 70 | -------- 71 | 72 | Handlers make the most use of the stack machinery because not only do they 73 | stack, but they also specify how stack handling is supposed to work.
Each 74 | handler can decide if it wants to process the record, and it has a 75 | flag (the :attr:`~logbook.Handler.bubble` flag) which specifies whether the 76 | next handler in the chain should get the record passed to it as well. 77 | 78 | If a handler is bubbling, it will pass the record on to the next handler, 79 | even if it was properly handled. If it's not, it will stop the record from reaching 80 | handlers further down the chain. Additionally there are so-called 81 | "blackhole" handlers (:class:`~logbook.NullHandler`) which stop processing 82 | in any case when they are reached. If you push a blackhole handler on top 83 | of an existing infrastructure you can build up a separate one without 84 | performance impact. 85 | 86 | Processor 87 | --------- 88 | 89 | A processor can inject additional information into a log record when the 90 | record is handled. Processors are called as soon as at least one log handler is 91 | interested in handling the record. Before that happens, no processing 92 | takes place. 93 | 94 | Here is an example processor that injects the current working directory into 95 | the extra attribute of the record:: 96 | 97 | import os 98 | 99 | def inject_cwd(record): 100 | record.extra['cwd'] = os.getcwd() 101 | 102 | with Processor(inject_cwd): 103 | # all logging calls inside this block in this thread will now 104 | # have the current working directory information attached. 105 | ... 106 | 107 | Flags 108 | ----- 109 | 110 | The last pillar of logbook is the flags stack. This stack can be used to 111 | override settings of the logging system. Currently it can be used to 112 | change the behavior of logbook in case an exception happens during log handling 113 | (for instance if a log record is supposed to be delivered to the 114 | filesystem but it ran out of available space). Additionally there is a 115 | flag that disables frame introspection, which can result in a speedup on 116 | JIT-compiled Python interpreters. 117 | 118 | Here is an example of silenced error reporting:: 119 | 120 | with Flags(errors='silent'): 121 | # errors are now silent for this block 122 | ... 123 | -------------------------------------------------------------------------------- /docs/designexplained.rst: -------------------------------------------------------------------------------- 1 | The Design Explained 2 | ==================== 3 | 4 | This part of the documentation explains the design of Logbook in detail. 5 | This is not strictly necessary to make use of Logbook but might be helpful 6 | when writing custom handlers for Logbook or when using it in a more 7 | complex environment. 8 | 9 | Dispatchers and Channels 10 | ------------------------ 11 | 12 | Logbook does not use traditional loggers; instead, a logger is internally 13 | known as a :class:`~logbook.base.RecordDispatcher`. While a logger also has 14 | methods to create new log records, the base class for all record 15 | dispatchers itself only has ways to dispatch :class:`~logbook.LogRecord`\s 16 | to the handlers. A log record itself might have an attribute that points 17 | to the dispatcher that was responsible for dispatching, but it does not 18 | have to. 19 | 20 | If a log record was created from the builtin :class:`~logbook.Logger`, it 21 | will have its channel set to the name of the logger. But that itself is 22 | not a requirement. The only requirement for the channel is that it's a 23 | string with some human-readable origin information.
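Returning to the "Handlers" section of the stacks document above for a moment, here is a minimal sketch of how the bubble flag and a blackhole handler interact, using only documented Logbook classes (the logger name is made up)::

    from logbook import Logger, NullHandler, StderrHandler

    log = Logger('Demo')
    with NullHandler().applicationbound():                # blackhole at the bottom
        with StderrHandler(bubble=False).applicationbound():
            log.warn('handled by stderr and not bubbled further')
        log.warn('reaches only the blackhole and is discarded')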
It could be 24 | ``'Database'`` if the database issued the log record, 25 | ``'Process-4223'`` if the process with the PID 4223 issued it, and so on. 26 | 27 | For example, if you are logging from the :func:`logbook.log` function, the records 28 | will have a channel set, but no dispatcher: 29 | 30 | >>> from logbook import TestHandler, warn 31 | >>> handler = TestHandler() 32 | >>> handler.push_application() 33 | >>> warn('This is a warning') 34 | >>> handler.records[0].channel 35 | 'Generic' 36 | >>> handler.records[0].dispatcher is None 37 | True 38 | 39 | If you are logging from a custom logger, the dispatcher attribute points to 40 | the logger for as long as the logger is not garbage collected: 41 | 42 | >>> from logbook import Logger, TestHandler 43 | >>> logger = Logger('Console') 44 | >>> handler = TestHandler() 45 | >>> handler.push_application() 46 | >>> logger.warn('A warning') 47 | >>> handler.records[0].dispatcher is logger 48 | True 49 | 50 | You don't need a record dispatcher to dispatch a log record though. The 51 | default dispatching can be triggered with the 52 | :func:`~logbook.base.dispatch_record` function: 53 | 54 | >>> from logbook import dispatch_record, LogRecord, INFO 55 | >>> record = LogRecord('My channel', INFO, 'Hello World!') 56 | >>> dispatch_record(record) 57 | [2010-09-04 15:56] INFO: My channel: Hello World! 58 | 59 | It is pretty common for log records to be created without a dispatcher. 60 | Here are some common use cases for log records without a dispatcher: 61 | 62 | - log records that were redirected from a different logging system 63 | such as the standard library's :mod:`logging` module or the 64 | :mod:`warnings` module. 65 | - log records that came from different processes and do not have a 66 | dispatcher equivalent in the current process. 67 | - log records that came in over the network. 68 | 69 | The Log Record Container 70 | ------------------------ 71 | 72 | The :class:`~logbook.LogRecord` class is a simple container that 73 | holds all the information necessary for a log record. Usually log records are 74 | created from a :class:`~logbook.Logger` or one of the default log 75 | functions (:func:`logbook.warn` etc.) and immediately dispatched to the 76 | handlers. The logger will apply some additional knowledge to figure out 77 | where the record was created from and whether traceback information should be 78 | attached. 79 | 80 | Normally, when log records are dispatched they are closed immediately 81 | after all handlers have had their chance to write them down. On closing, the 82 | interpreter frame and traceback object will be removed from the log record 83 | to break up circular dependencies. 84 | 85 | Sometimes, however, it might be necessary to keep log records around for a 86 | longer time. Logbook provides three different ways to accomplish that: 87 | 88 | 1. Handlers can set the :attr:`~logbook.LogRecord.keep_open` attribute of 89 | a log record to `True` so that the record dispatcher will not close 90 | the object. This is for example used by the 91 | :class:`~logbook.TestHandler` so that unittests can still access 92 | interpreter frames and traceback objects if necessary. 93 | 2. Because some information on the log records depends on the interpreter 94 | frame (such as the location of the log call) it is possible to pull 95 | that related information directly into the log record so that it can 96 | safely be closed without losing that information (see 97 | :meth:`~logbook.LogRecord.pull_information`). 98 | 3.
Last but not least, log records can be converted to dictionaries and 99 | recreated from these. It is also possible to make these dictionaries 100 | safe for JSON export which is used by the 101 | :class:`~logbook.ticketing.TicketingHandler` to store information in a 102 | database or the :class:`~logbook.more.MultiProcessingHandler` to send 103 | information between processes. 104 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | 15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest 16 | 17 | help: 18 | @echo "Please use \`make ' where is one of" 19 | @echo " html to make standalone HTML files" 20 | @echo " dirhtml to make HTML files named index.html in directories" 21 | @echo " singlehtml to make a single large HTML file" 22 | @echo " pickle to make pickle files" 23 | @echo " json to make JSON files" 24 | @echo " htmlhelp to make HTML files and a HTML help project" 25 | @echo " qthelp to make HTML files and a qthelp project" 26 | @echo " devhelp to make HTML files and a Devhelp project" 27 | @echo " epub to make an epub" 28 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 29 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 30 | @echo " text to make text files" 31 | @echo " man to make manual pages" 32 | @echo " changes to make an overview of all changed/added/deprecated items" 33 | @echo " linkcheck to check all external links for integrity" 34 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 35 | 36 | clean: 37 | -rm -rf $(BUILDDIR)/* 38 | 39 | html: 40 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 41 | @echo 42 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 43 | 44 | dirhtml: 45 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 48 | 49 | singlehtml: 50 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 51 | @echo 52 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 53 | 54 | pickle: 55 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 56 | @echo 57 | @echo "Build finished; now you can process the pickle files." 58 | 59 | json: 60 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 61 | @echo 62 | @echo "Build finished; now you can process the JSON files." 63 | 64 | htmlhelp: 65 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 66 | @echo 67 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 68 | ".hhp project file in $(BUILDDIR)/htmlhelp." 
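A minimal sketch of point 3 above; it assumes the dictionary round-trip methods ``to_dict()``/``from_dict()`` of :class:`~logbook.LogRecord`, which is how handlers such as the multi processing handler ship records around::

    from logbook import INFO, LogRecord

    record = LogRecord('My channel', INFO, 'Hello World!')
    data = record.to_dict(json_safe=True)   # plain dict, safe for JSON export
    restored = LogRecord.from_dict(data)    # recreated, e.g. in another process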
69 | 70 | qthelp: 71 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 72 | @echo 73 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 74 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 75 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Logbook.qhcp" 76 | @echo "To view the help file:" 77 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Logbook.qhc" 78 | 79 | devhelp: 80 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 81 | @echo 82 | @echo "Build finished." 83 | @echo "To view the help file:" 84 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Logbook" 85 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Logbook" 86 | @echo "# devhelp" 87 | 88 | epub: 89 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 90 | @echo 91 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 92 | 93 | latex: 94 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 95 | @echo 96 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 97 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 98 | "(use \`make latexpdf' here to do that automatically)." 99 | 100 | latexpdf: 101 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 102 | @echo "Running LaTeX files through pdflatex..." 103 | make -C $(BUILDDIR)/latex all-pdf 104 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 105 | 106 | text: 107 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 108 | @echo 109 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 110 | 111 | man: 112 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 113 | @echo 114 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 115 | 116 | changes: 117 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 118 | @echo 119 | @echo "The overview file is in $(BUILDDIR)/changes." 120 | 121 | linkcheck: 122 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 123 | @echo 124 | @echo "Link check complete; look for any errors in the above output " \ 125 | "or in $(BUILDDIR)/linkcheck/output.txt." 126 | 127 | doctest: 128 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 129 | @echo "Testing of doctests in the sources finished, look at the " \ 130 | "results in $(BUILDDIR)/doctest/output.txt." 131 | -------------------------------------------------------------------------------- /logbook/_fallback.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | logbook._fallback 4 | ~~~~~~~~~~~~~~~~~ 5 | 6 | Fallback implementations in case speedups is not around. 7 | 8 | :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. 9 | :license: BSD, see LICENSE for more details. 10 | """ 11 | import threading 12 | from itertools import count 13 | from thread import get_ident as current_thread 14 | 15 | 16 | _missing = object() 17 | _MAX_CONTEXT_OBJECT_CACHE = 256 18 | 19 | 20 | def group_reflected_property(name, default, fallback=_missing): 21 | """Returns a property for a given name that falls back to the 22 | value of the group if set. If there is no such group, the 23 | provided default is used. 
24 | """ 25 | def _get(self): 26 | rv = getattr(self, '_' + name, _missing) 27 | if rv is not _missing and rv != fallback: 28 | return rv 29 | if self.group is None: 30 | return default 31 | return getattr(self.group, name) 32 | def _set(self, value): 33 | setattr(self, '_' + name, value) 34 | def _del(self): 35 | delattr(self, '_' + name) 36 | return property(_get, _set, _del) 37 | 38 | 39 | class _StackBound(object): 40 | 41 | def __init__(self, obj, push, pop): 42 | self.__obj = obj 43 | self.__push = push 44 | self.__pop = pop 45 | 46 | def __enter__(self): 47 | self.__push() 48 | return self.__obj 49 | 50 | def __exit__(self, exc_type, exc_value, tb): 51 | self.__pop() 52 | 53 | 54 | class StackedObject(object): 55 | """Baseclass for all objects that provide stack manipulation 56 | operations. 57 | """ 58 | 59 | def push_thread(self): 60 | """Pushes the stacked object to the thread stack.""" 61 | raise NotImplementedError() 62 | 63 | def pop_thread(self): 64 | """Pops the stacked object from the thread stack.""" 65 | raise NotImplementedError() 66 | 67 | def push_application(self): 68 | """Pushes the stacked object to the application stack.""" 69 | raise NotImplementedError() 70 | 71 | def pop_application(self): 72 | """Pops the stacked object from the application stack.""" 73 | raise NotImplementedError() 74 | 75 | def __enter__(self): 76 | self.push_thread() 77 | return self 78 | 79 | def __exit__(self, exc_type, exc_value, tb): 80 | self.pop_thread() 81 | 82 | def threadbound(self, _cls=_StackBound): 83 | """Can be used in combination with the `with` statement to 84 | execute code while the object is bound to the thread. 85 | """ 86 | return _cls(self, self.push_thread, self.pop_thread) 87 | 88 | def applicationbound(self, _cls=_StackBound): 89 | """Can be used in combination with the `with` statement to 90 | execute code while the object is bound to the application. 91 | """ 92 | return _cls(self, self.push_application, self.pop_application) 93 | 94 | 95 | class ContextStackManager(object): 96 | """Helper class for context objects that manages a stack of 97 | objects. 98 | """ 99 | 100 | def __init__(self): 101 | self._global = [] 102 | self._context_lock = threading.Lock() 103 | self._context = threading.local() 104 | self._cache = {} 105 | self._stackop = count().next 106 | 107 | def iter_context_objects(self): 108 | """Returns an iterator over all objects for the combined 109 | application and context cache. 
110 | """ 111 | tid = current_thread() 112 | objects = self._cache.get(tid) 113 | if objects is None: 114 | if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE: 115 | self._cache.clear() 116 | objects = self._global[:] 117 | objects.extend(getattr(self._context, 'stack', ())) 118 | objects.sort(reverse=True) 119 | objects = [x[1] for x in objects] 120 | self._cache[tid] = objects 121 | return iter(objects) 122 | 123 | def push_thread(self, obj): 124 | self._context_lock.acquire() 125 | try: 126 | self._cache.pop(current_thread(), None) 127 | item = (self._stackop(), obj) 128 | stack = getattr(self._context, 'stack', None) 129 | if stack is None: 130 | self._context.stack = [item] 131 | else: 132 | stack.append(item) 133 | finally: 134 | self._context_lock.release() 135 | 136 | def pop_thread(self): 137 | self._context_lock.acquire() 138 | try: 139 | self._cache.pop(current_thread(), None) 140 | stack = getattr(self._context, 'stack', None) 141 | assert stack, 'no objects on stack' 142 | return stack.pop()[1] 143 | finally: 144 | self._context_lock.release() 145 | 146 | def push_application(self, obj): 147 | self._global.append((self._stackop(), obj)) 148 | self._cache.clear() 149 | 150 | def pop_application(self): 151 | assert self._global, 'no objects on application stack' 152 | popped = self._global.pop()[1] 153 | self._cache.clear() 154 | return popped 155 | -------------------------------------------------------------------------------- /logbook/_speedups.pyx: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | logbook._speedups 4 | ~~~~~~~~~~~~~~~~~ 5 | 6 | Cython implementation of some core objects. 7 | 8 | :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. 9 | :license: BSD, see LICENSE for more details. 
10 | """ 11 | 12 | import thread 13 | import threading 14 | 15 | from cpython.dict cimport PyDict_Clear, PyDict_SetItem 16 | from cpython.list cimport PyList_New, PyList_Append, PyList_Sort, \ 17 | PyList_SET_ITEM, PyList_GET_SIZE 18 | from cpython.pythread cimport PyThread_type_lock, PyThread_allocate_lock, \ 19 | PyThread_release_lock, PyThread_acquire_lock, WAIT_LOCK 20 | 21 | cdef object _missing = object() 22 | 23 | cdef enum: 24 | _MAX_CONTEXT_OBJECT_CACHE = 256 25 | 26 | cdef current_thread = thread.get_ident 27 | 28 | 29 | cdef class group_reflected_property: 30 | cdef char* name 31 | cdef char* _name 32 | cdef object default 33 | cdef object fallback 34 | 35 | def __init__(self, char* name, object default, object fallback=_missing): 36 | self.name = name 37 | _name = '_' + name 38 | self._name = _name 39 | self.default = default 40 | self.fallback = fallback 41 | 42 | def __get__(self, obj, type): 43 | if obj is None: 44 | return self 45 | rv = getattr3(obj, self._name, _missing) 46 | if rv is not _missing and rv != self.fallback: 47 | return rv 48 | if obj.group is None: 49 | return self.default 50 | return getattr(obj.group, self.name) 51 | 52 | def __set__(self, obj, value): 53 | setattr(obj, self._name, value) 54 | 55 | def __del__(self, obj): 56 | delattr(obj, self._name) 57 | 58 | 59 | cdef class _StackItem: 60 | cdef int id 61 | cdef readonly object val 62 | 63 | def __init__(self, int id, object val): 64 | self.id = id 65 | self.val = val 66 | 67 | def __cmp__(self, _StackItem other): 68 | return cmp(other.id, self.id) 69 | 70 | 71 | cdef class _StackBound: 72 | cdef object obj 73 | cdef object push_func 74 | cdef object pop_func 75 | 76 | def __init__(self, obj, push, pop): 77 | self.obj = obj 78 | self.push_func = push 79 | self.pop_func = pop 80 | 81 | def __enter__(self): 82 | self.push_func() 83 | return self.obj 84 | 85 | def __exit__(self, exc_type, exc_value, tb): 86 | self.pop_func() 87 | 88 | 89 | cdef class StackedObject: 90 | """Baseclass for all objects that provide stack manipulation 91 | operations. 92 | """ 93 | 94 | cpdef push_thread(self): 95 | """Pushes the stacked object to the thread stack.""" 96 | raise NotImplementedError() 97 | 98 | cpdef pop_thread(self): 99 | """Pops the stacked object from the thread stack.""" 100 | raise NotImplementedError() 101 | 102 | cpdef push_application(self): 103 | """Pushes the stacked object to the application stack.""" 104 | raise NotImplementedError() 105 | 106 | cpdef pop_application(self): 107 | """Pops the stacked object from the application stack.""" 108 | raise NotImplementedError() 109 | 110 | def __enter__(self): 111 | self.push_thread() 112 | return self 113 | 114 | def __exit__(self, exc_type, exc_value, tb): 115 | self.pop_thread() 116 | 117 | cpdef threadbound(self): 118 | """Can be used in combination with the `with` statement to 119 | execute code while the object is bound to the thread. 120 | """ 121 | return _StackBound(self, self.push_thread, self.pop_thread) 122 | 123 | cpdef applicationbound(self): 124 | """Can be used in combination with the `with` statement to 125 | execute code while the object is bound to the application. 
126 | """ 127 | return _StackBound(self, self.push_application, self.pop_application) 128 | 129 | 130 | cdef class ContextStackManager: 131 | cdef list _global 132 | cdef PyThread_type_lock _context_lock 133 | cdef object _context 134 | cdef dict _cache 135 | cdef int _stackcnt 136 | 137 | def __init__(self): 138 | self._global = [] 139 | self._context_lock = PyThread_allocate_lock() 140 | self._context = threading.local() 141 | self._cache = {} 142 | self._stackcnt = 0 143 | 144 | cdef _stackop(self): 145 | self._stackcnt += 1 146 | return self._stackcnt 147 | 148 | cpdef iter_context_objects(self): 149 | tid = current_thread() 150 | objects = self._cache.get(tid) 151 | if objects is None: 152 | if PyList_GET_SIZE(self._cache) > _MAX_CONTEXT_OBJECT_CACHE: 153 | PyDict_Clear(self._cache) 154 | objects = self._global[:] 155 | objects.extend(getattr3(self._context, 'stack', ())) 156 | PyList_Sort(objects) 157 | objects = [(<_StackItem>x).val for x in objects] 158 | PyDict_SetItem(self._cache, tid, objects) 159 | return iter(objects) 160 | 161 | cpdef push_thread(self, obj): 162 | PyThread_acquire_lock(self._context_lock, WAIT_LOCK) 163 | try: 164 | self._cache.pop(current_thread(), None) 165 | item = _StackItem(self._stackop(), obj) 166 | stack = getattr3(self._context, 'stack', None) 167 | if stack is None: 168 | self._context.stack = [item] 169 | else: 170 | PyList_Append(stack, item) 171 | finally: 172 | PyThread_release_lock(self._context_lock) 173 | 174 | cpdef pop_thread(self): 175 | PyThread_acquire_lock(self._context_lock, WAIT_LOCK) 176 | try: 177 | self._cache.pop(current_thread(), None) 178 | stack = getattr3(self._context, 'stack', None) 179 | assert stack, 'no objects on stack' 180 | return (<_StackItem>stack.pop()).val 181 | finally: 182 | PyThread_release_lock(self._context_lock) 183 | 184 | cpdef push_application(self, obj): 185 | self._global.append(_StackItem(self._stackop(), obj)) 186 | PyDict_Clear(self._cache) 187 | 188 | cpdef pop_application(self): 189 | assert self._global, 'no objects on application stack' 190 | popped = (<_StackItem>self._global.pop()).val 191 | PyDict_Clear(self._cache) 192 | return popped 193 | -------------------------------------------------------------------------------- /docs/features.rst: -------------------------------------------------------------------------------- 1 | What does it do? 2 | ================ 3 | 4 | Although the Python standard library provides a logging system, you should 5 | consider having a look at Logbook for your applications. Currently 6 | logbook is an alpha version and should be considered a developer preview. 7 | 8 | But give it a try, we think it will work out for you and be fun to use :) 9 | 10 | Furthermore because it was prototyped in a couple of days, it leverages 11 | some features of Python that are not available in older Python releases. 12 | Logbook currently requires Python 2.4 or higher including Python 3 (3.1 or 13 | higher, 3.0 is not supported). 14 | 15 | Core Features 16 | ------------- 17 | 18 | - Logbook is based on the concept of loggers that are extensible by the 19 | application. 20 | - Each logger and handler, as well as other parts of the system, may inject 21 | additional information into the logging record that improves the usefulness 22 | of log entries. 23 | - Handlers can be set on an application-wide stack as well as a thread-wide 24 | stack. Setting a handler does not replace existing handlers, but gives it 25 | higher priority. 
Each handler has the ability to prevent records from 26 | propagating to lower-priority handlers. 27 | - Logbook comes with a useful default configuration that spits all the 28 | information to stderr in a useful manner. 29 | - All of the built-in handlers have a useful default configuration applied with 30 | formatters that provide all the available information in a format that 31 | makes the most sense for the given handler. For example, a default stream 32 | handler will try to put all the required information into one line, whereas 33 | an email handler will split it up into nicely formatted ASCII tables that 34 | span multiple lines. 35 | - Logbook has built-in handlers for streams, arbitrary files, files with time 36 | and size based rotation, a handler that delivers mails, a handler for the 37 | syslog daemon as well as the NT log file. 38 | - There is also a special "fingers crossed" handler that, in combination with 39 | the handler stack, has the ability to accumulate all logging messages and 40 | will deliver those once a certain severity level is exceeded. For example, it 41 | can withhold all logging messages for a specific request to a web 42 | application until an error record appears, in which case it will also send 43 | all withheld records to the handler it wraps. This way, you can always log 44 | lots of debugging records, but only get to see them when they can actually 45 | tell you something of interest. 46 | - It is possible to inject a handler for testing that records messages for 47 | assertions (a short sketch of this follows below). 48 | - Logbook was designed to be fast and with modern Python features in mind. 49 | For example, it uses context managers to handle the stack of handlers as 50 | well as new-style string formatting for all of the core log calls. 51 | - Built-in support for ZeroMQ and other means to distribute log messages 52 | between heavily distributed systems and multiple processes. 53 | - The Logbook system does not depend on log levels. In fact, custom log 54 | levels are not supported; instead, we strongly recommend using logging 55 | subclasses or log processors that inject tagged information into the log 56 | record for this purpose. 57 | - :pep:`8` naming and code style. 58 | 59 | Advantages over Logging 60 | ----------------------- 61 | 62 | If properly configured, Logbook's logging calls will be very cheap and 63 | provide a great performance improvement over an equivalent configuration 64 | of the standard library's logging module. While for some parts we are not 65 | quite at the performance we desire, there will be some further performance 66 | improvements in the upcoming versions. 67 | 68 | It also supports the ability to inject additional information for all 69 | logging calls happening in a specific thread or for the whole application. 70 | For example, this makes it possible for a web application to add 71 | request-specific information to each log record such as remote address, 72 | request URL, HTTP method and more. 73 | 74 | The logging system is (besides the stack) stateless and makes unit testing 75 | it very simple. If context managers are used, it is impossible to corrupt 76 | the stack, so each test can easily hook in custom log handlers. 77 | 78 | Cooperation 79 | ----------- 80 | 81 | Logbook is an add-on library for Python, working in an area where there 82 | are already a couple of contenders.
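The testing bullet above, together with the remark on unit testing, could look like this in practice; a short sketch using the :class:`~logbook.TestHandler` and its ``has_warning()`` helper::

    from logbook import Logger, TestHandler

    log = Logger('My Library')
    with TestHandler() as captured:
        log.warn('something looks off')

    assert captured.has_warning('something looks off')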
First of all, there is the standard 83 | library's :mod:`logging` module, and secondly there is the 84 | :mod:`warnings` module, which is used internally in Python to warn about 85 | invalid uses of APIs and more. We know that there are many situations 86 | where you want to use either of them, be it because they are integrated into 87 | a legacy system, part of a library outside of your control, or just because 88 | they are a better choice. 89 | 90 | Because of that, Logbook is two-way compatible with :mod:`logging` and 91 | one-way compatible with :mod:`warnings`. If you want, you can redirect all 92 | logging calls to the logbook handlers or the other way round, 93 | depending on what your desired setup looks like. That way you can enjoy 94 | the best of both worlds. 95 | 96 | It should be Fun 97 | ---------------- 98 | 99 | Logging should be fun. A good log setup makes debugging easier when 100 | things go rough. For good results you really have to start using logging 101 | before things actually break. Logbook comes with a couple of unusual log 102 | handlers to bring the fun back to logging. You can log to your personal 103 | Twitter feed, you can log to mobile devices, your desktop notification 104 | system and more. 105 | 106 | Logbook in a Nutshell 107 | --------------------- 108 | 109 | This is how easy it is to get started with Logbook:: 110 | 111 | from logbook import warn 112 | warn('This is a warning') 113 | 114 | That will use the default logging channel. But you can create as many as 115 | you like:: 116 | 117 | from logbook import Logger 118 | log = Logger('My Logger') 119 | log.warn('This is a warning') 120 | 121 | Roadmap 122 | ------- 123 | 124 | Here is a list of things you can expect in upcoming versions: 125 | 126 | - a C implementation of the internal stack management and record 127 | dispatching for higher performance. 128 | - a ticketing log handler that creates tickets in Trac and Redmine. 129 | - a web frontend for the ticketing database handler. 130 | -------------------------------------------------------------------------------- /logbook/_stringfmt.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | logbook._stringfmt 4 | ~~~~~~~~~~~~~~~~~~ 5 | 6 | Advanced string formatting for Python >= 2.4. 7 | This is a stripped version of 'stringformat', available at 8 | * http://pypi.python.org/pypi/StringFormat 9 | 10 | :copyright: (c) 2010 by Armin Ronacher, Georg Brandl, Florent Xicluna. 11 | :license: BSD, see LICENSE for more details. 12 | """ 13 | import re 14 | import datetime 15 | 16 | if hasattr(str, 'partition'): 17 | def partition(s, sep): 18 | return s.partition(sep) 19 | else: # Python 2.4 20 | def partition(s, sep): 21 | try: 22 | left, right = s.split(sep, 1) 23 | except ValueError: 24 | return s, '', '' 25 | return left, sep, right 26 | 27 | _integer_classes = (int, long) 28 | _date_classes = (datetime.datetime, datetime.date, datetime.time) 29 | _format_str_re = re.compile( 30 | r'((?<!{)(?:{{)+' # '{{' 31 | r'|(?:}})+(?!})' # '}}' 32 | r'|{(?:[^{](?:[^{}]+|{[^{}]*})*)?})' # replacement field 33 | ) 34 | _format_sub_re = re.compile(r'({[^{}]*})') # nested replacement field 35 | _format_spec_re = re.compile( 36 | r'((?:[^{}]?[<>=^])?)' # alignment 37 | r'([-+ ]?)' # sign 38 | r'(#?)' r'(\d*)' r'(,?)' # base prefix, minimal width, thousands sep 39 | r'((?:\.\d+)?)' # precision 40 | r'(.?)$' # type 41 | ) 42 | _field_part_re = re.compile( 43 | r'(?:(\[)|\.|^)' # start or '.'
or '[' 44 | r'((?(1)[^]]*|[^.[]*))' # part 45 | r'(?(1)(?:\]|$)([^.[]+)?)' # ']' and invalid tail 46 | ) 47 | 48 | if hasattr(re, '__version__'): 49 | _format_str_sub = _format_str_re.sub 50 | else: 51 | # Python 2.4 fails to preserve the Unicode type 52 | def _format_str_sub(repl, s): 53 | if isinstance(s, unicode): 54 | return unicode(_format_str_re.sub(repl, s)) 55 | return _format_str_re.sub(repl, s) 56 | 57 | 58 | def _strformat(value, format_spec=""): 59 | """Internal string formatter. 60 | 61 | It implements the Format Specification Mini-Language. 62 | """ 63 | m = _format_spec_re.match(str(format_spec)) 64 | if not m: 65 | raise ValueError('Invalid conversion specification') 66 | align, sign, prefix, width, comma, precision, conversion = m.groups() 67 | is_numeric = hasattr(value, '__float__') 68 | is_integer = is_numeric and isinstance(value, _integer_classes) 69 | if is_numeric and conversion == 'n': 70 | # Default to 'd' for ints and 'g' for floats 71 | conversion = is_integer and 'd' or 'g' 72 | if conversion == 'c': 73 | conversion = 's' 74 | value = chr(value % 256) 75 | rv = ('%' + prefix + precision + (conversion or 's')) % (value,) 76 | if sign not in '-' and value >= 0: 77 | # sign in (' ', '+') 78 | rv = sign + rv 79 | if width: 80 | zero = (width[0] == '0') 81 | width = int(width) 82 | else: 83 | zero = False 84 | width = 0 85 | # Fastpath when alignment is not required 86 | if width <= len(rv): 87 | return rv 88 | fill, align = align[:-1], align[-1:] 89 | if not fill: 90 | fill = zero and '0' or ' ' 91 | if align == '^': 92 | padding = width - len(rv) 93 | # tweak the formatting if the padding is odd 94 | if padding % 2: 95 | rv += fill 96 | rv = rv.center(width, fill) 97 | elif align == '=' or (zero and not align): 98 | if value < 0 or sign not in '-': 99 | rv = rv[0] + rv[1:].rjust(width - 1, fill) 100 | else: 101 | rv = rv.rjust(width, fill) 102 | elif align in ('>', '=') or (is_numeric and not align): 103 | # numeric value right aligned by default 104 | rv = rv.rjust(width, fill) 105 | else: 106 | rv = rv.ljust(width, fill) 107 | return rv 108 | 109 | 110 | def _format_field(value, parts, conv, spec): 111 | """Format a replacement field.""" 112 | for k, part, _ in parts: 113 | if k: 114 | if part.isdigit(): 115 | value = value[int(part)] 116 | else: 117 | value = value[part] 118 | else: 119 | value = getattr(value, part) 120 | if conv: 121 | value = ((conv == 'r') and '%r' or '%s') % (value,) 122 | if hasattr(value, '__format__'): 123 | value = value.__format__(spec) 124 | elif isinstance(value, _date_classes) and spec: 125 | value = value.strftime(str(spec)) 126 | else: 127 | value = _strformat(value, spec) 128 | return value 129 | 130 | 131 | class FormattableString(object): 132 | """Class which implements method format(). 133 | 134 | The method format() behaves like str.format() in python 2.6+. 135 | 136 | >>> FormattableString(u'{a:5}').format(a=42) 137 | ... # Same as u'{a:5}'.format(a=42) 138 | u' 42' 139 | 140 | """ 141 | 142 | __slots__ = '_index', '_kwords', '_nested', '_string', 'format_string' 143 | 144 | def __init__(self, format_string): 145 | self._index = 0 146 | self._kwords = {} 147 | self._nested = {} 148 | 149 | self.format_string = format_string 150 | self._string = _format_str_sub(self._prepare, format_string) 151 | 152 | def __eq__(self, other): 153 | if isinstance(other, FormattableString): 154 | return self.format_string == other.format_string 155 | # Compare equal with the original string. 
156 | return self.format_string == other 157 | 158 | def _prepare(self, match): 159 | # Called for each replacement field. 160 | part = match.group(0) 161 | if part[0] == part[-1]: 162 | # '{{' or '}}' 163 | assert part == part[0] * len(part) 164 | return part[:len(part) // 2] 165 | repl = part[1:-1] 166 | field, _, format_spec = partition(repl, ':') 167 | literal, sep, conversion = partition(field, '!') 168 | name_parts = _field_part_re.findall(literal) 169 | if literal[:1] in '.[': 170 | # Auto-numbering 171 | name = str(self._index) 172 | self._index += 1 173 | if not literal: 174 | del name_parts[0] 175 | else: 176 | name = name_parts.pop(0)[1] 177 | if name.isdigit() and self._index is not None: 178 | # Manual specification 179 | self._index = None 180 | if '{' in format_spec: 181 | format_spec = _format_sub_re.sub(self._prepare, format_spec) 182 | rv = (name_parts, conversion, format_spec) 183 | self._nested.setdefault(name, []).append(rv) 184 | else: 185 | rv = (name_parts, conversion, format_spec) 186 | self._kwords.setdefault(name, []).append(rv) 187 | return r'%%(%s)s' % id(rv) 188 | 189 | def format(self, *args, **kwargs): 190 | """Same as str.format() and unicode.format() in Python 2.6+.""" 191 | if args: 192 | kwargs.update(dict((str(i), value) 193 | for (i, value) in enumerate(args))) 194 | params = {} 195 | for name, items in self._kwords.items(): 196 | value = kwargs[name] 197 | for item in items: 198 | parts, conv, spec = item 199 | params[str(id(item))] = _format_field(value, parts, conv, spec) 200 | for name, items in self._nested.items(): 201 | value = kwargs[name] 202 | for item in items: 203 | parts, conv, spec = item 204 | spec = spec % params 205 | params[str(id(item))] = _format_field(value, parts, conv, spec) 206 | return self._string % params 207 | -------------------------------------------------------------------------------- /logbook/helpers.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | logbook.helpers 4 | ~~~~~~~~~~~~~~~ 5 | 6 | Various helper functions 7 | 8 | :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. 9 | :license: BSD, see LICENSE for more details. 10 | """ 11 | import os 12 | import re 13 | import sys 14 | import errno 15 | import time 16 | import random 17 | from datetime import datetime, timedelta 18 | 19 | 20 | # Python 2.4 compatibility 21 | 22 | try: 23 | any = any 24 | except NameError: 25 | def any(iterable): 26 | for item in iterable: 27 | if item: 28 | return True 29 | 30 | 31 | # Python 2.5 compatibility 32 | 33 | try: 34 | import json 35 | except ImportError: 36 | import simplejson as json 37 | 38 | if hasattr(str, 'format'): 39 | def F(format_string): 40 | return format_string 41 | else: 42 | from logbook._stringfmt import FormattableString as F 43 | 44 | # this regexp also matches incompatible dates like 20070101 because 45 | # some libraries (like the python xmlrpclib modules) use this 46 | _iso8601_re = re.compile( 47 | # date 48 | r'(\d{4})(?:-?(\d{2})(?:-?(\d{2}))?)?' 
49 | # time 50 | r'(?:T(\d{2}):(\d{2})(?::(\d{2}(?:\.\d+)?))?(Z|[+-]\d{2}:\d{2})?)?$' 51 | ) 52 | _missing = object() 53 | _py3 = sys.version_info >= (3, 0) 54 | if _py3: 55 | import io 56 | def b(x): return x.encode('ascii') 57 | def _is_text_stream(stream): return isinstance(stream, io.TextIOBase) 58 | else: 59 | def b(x): return x 60 | def _is_text_stream(x): return True 61 | 62 | 63 | can_rename_open_file = False 64 | if os.name == 'nt': # pragma: no cover 65 | _rename = lambda src, dst: False 66 | _rename_atomic = lambda src, dst: False 67 | 68 | try: 69 | import ctypes 70 | 71 | _MOVEFILE_REPLACE_EXISTING = 0x1 72 | _MOVEFILE_WRITE_THROUGH = 0x8 73 | _MoveFileEx = ctypes.windll.kernel32.MoveFileExW 74 | 75 | def _rename(src, dst): 76 | if not isinstance(src, unicode): 77 | src = unicode(src, sys.getfilesystemencoding()) 78 | if not isinstance(dst, unicode): 79 | dst = unicode(dst, sys.getfilesystemencoding()) 80 | if _rename_atomic(src, dst): 81 | return True 82 | retry = 0 83 | rv = False 84 | while not rv and retry < 100: 85 | rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING | 86 | _MOVEFILE_WRITE_THROUGH) 87 | if not rv: 88 | time.sleep(0.001) 89 | retry += 1 90 | return rv 91 | 92 | # new in Vista and Windows Server 2008 93 | _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction 94 | _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction 95 | _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW 96 | _CloseHandle = ctypes.windll.kernel32.CloseHandle 97 | can_rename_open_file = True 98 | 99 | def _rename_atomic(src, dst): 100 | ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Logbook rename') 101 | if ta == -1: 102 | return False 103 | try: 104 | retry = 0 105 | rv = False 106 | while not rv and retry < 100: 107 | rv = _MoveFileTransacted(src, dst, None, None, 108 | _MOVEFILE_REPLACE_EXISTING | 109 | _MOVEFILE_WRITE_THROUGH, ta) 110 | if rv: 111 | rv = _CommitTransaction(ta) 112 | break 113 | else: 114 | time.sleep(0.001) 115 | retry += 1 116 | return rv 117 | finally: 118 | _CloseHandle(ta) 119 | except Exception: 120 | pass 121 | 122 | def rename(src, dst): 123 | # Try atomic or pseudo-atomic rename 124 | if _rename(src, dst): 125 | return 126 | # Fall back to "move away and replace" 127 | try: 128 | os.rename(src, dst) 129 | except OSError, e: 130 | if e.errno != errno.EEXIST: 131 | raise 132 | old = "%s-%08x" % (dst, random.randint(0, sys.maxint)) 133 | os.rename(dst, old) 134 | os.rename(src, dst) 135 | try: 136 | os.unlink(old) 137 | except Exception: 138 | pass 139 | else: 140 | rename = os.rename 141 | can_rename_open_file = True 142 | 143 | 144 | def to_safe_json(data): 145 | """Makes a data structure safe for JSON silently discarding invalid 146 | objects from nested structures. This also converts dates. 
147 | """ 148 | def _convert(obj): 149 | if obj is None: 150 | return None 151 | elif not _py3 and isinstance(obj, str): 152 | return obj.decode('utf-8', 'replace') 153 | elif isinstance(obj, (bool, int, long, float, unicode)): 154 | return obj 155 | elif isinstance(obj, datetime): 156 | return format_iso8601(obj) 157 | elif isinstance(obj, list): 158 | return [_convert(x) for x in obj] 159 | elif isinstance(obj, tuple): 160 | return tuple(_convert(x) for x in obj) 161 | elif isinstance(obj, dict): 162 | rv = {} 163 | for key, value in obj.iteritems(): 164 | if not _py3 and isinstance(key, str): 165 | key = key.decode('utf-8', 'replace') 166 | else: 167 | key = unicode(key) 168 | rv[key] = _convert(value) 169 | return rv 170 | return _convert(data) 171 | 172 | 173 | def format_iso8601(d=None): 174 | """Returns a date in iso8601 format.""" 175 | if d is None: 176 | d = datetime.utcnow() 177 | rv = d.strftime('%Y-%m-%dT%H:%M:%S') 178 | if d.microsecond: 179 | rv += '.' + str(d.microsecond) 180 | return rv + 'Z' 181 | 182 | 183 | def parse_iso8601(value): 184 | """Parse an iso8601 date into a datetime object. The timezone is 185 | normalized to UTC. 186 | """ 187 | m = _iso8601_re.match(value) 188 | if m is None: 189 | raise ValueError('not a valid iso8601 date value') 190 | 191 | groups = m.groups() 192 | args = [] 193 | for group in groups[:-2]: 194 | if group is not None: 195 | group = int(group) 196 | args.append(group) 197 | seconds = groups[-2] 198 | if seconds is not None: 199 | if '.' in seconds: 200 | sec, usec = seconds.split('.') 201 | args.append(int(sec)) 202 | args.append(int(usec.ljust(6, '0'))) 203 | else: 204 | args.append(int(seconds)) 205 | 206 | rv = datetime(*args) 207 | tz = groups[-1] 208 | if tz and tz != 'Z': 209 | args = map(int, tz[1:].split(':')) 210 | delta = timedelta(hours=args[0], minutes=args[1]) 211 | if tz[0] == '+': 212 | rv -= delta 213 | else: 214 | rv += delta 215 | 216 | return rv 217 | 218 | 219 | def get_application_name(): 220 | if not sys.argv or not sys.argv[0]: 221 | return 'Python' 222 | return os.path.basename(sys.argv[0]).title() 223 | 224 | 225 | class cached_property(object): 226 | """A property that is lazily calculated and then cached.""" 227 | 228 | def __init__(self, func, name=None, doc=None): 229 | self.__name__ = name or func.__name__ 230 | self.__module__ = func.__module__ 231 | self.__doc__ = doc or func.__doc__ 232 | self.func = func 233 | 234 | def __get__(self, obj, type=None): 235 | if obj is None: 236 | return self 237 | value = obj.__dict__.get(self.__name__, _missing) 238 | if value is _missing: 239 | value = self.func(obj) 240 | obj.__dict__[self.__name__] = value 241 | return value 242 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Logbook documentation build configuration file, created by 4 | # sphinx-quickstart on Fri Jul 23 16:54:49 2010. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. 
If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | sys.path.extend((os.path.abspath('.'), os.path.abspath('..'))) 20 | 21 | # -- General configuration ----------------------------------------------------- 22 | 23 | # If your documentation needs a minimal Sphinx version, state it here. 24 | #needs_sphinx = '1.0' 25 | 26 | # Add any Sphinx extension module names here, as strings. They can be extensions 27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 28 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] 29 | 30 | # Add any paths that contain templates here, relative to this directory. 31 | templates_path = ['_templates'] 32 | 33 | # The suffix of source filenames. 34 | source_suffix = '.rst' 35 | 36 | # The encoding of source files. 37 | #source_encoding = 'utf-8-sig' 38 | 39 | # The master toctree document. 40 | master_doc = 'index' 41 | 42 | # General information about the project. 43 | project = u'Logbook' 44 | copyright = u'2010, Armin Ronacher, Georg Brandl' 45 | 46 | # The version info for the project you're documenting, acts as replacement for 47 | # |version| and |release|, also used in various other places throughout the 48 | # built documents. 49 | # 50 | # The short X.Y version. 51 | version = '0.4' 52 | # The full version, including alpha/beta/rc tags. 53 | release = '0.4' 54 | 55 | # The language for content autogenerated by Sphinx. Refer to documentation 56 | # for a list of supported languages. 57 | #language = None 58 | 59 | # There are two options for replacing |today|: either, you set today to some 60 | # non-false value, then it is used: 61 | #today = '' 62 | # Else, today_fmt is used as the format for a strftime call. 63 | #today_fmt = '%B %d, %Y' 64 | 65 | # List of patterns, relative to source directory, that match files and 66 | # directories to ignore when looking for source files. 67 | exclude_patterns = ['_build'] 68 | 69 | # The reST default role (used for this markup: `text`) to use for all documents. 70 | #default_role = None 71 | 72 | # If true, '()' will be appended to :func: etc. cross-reference text. 73 | #add_function_parentheses = True 74 | 75 | # If true, the current module name will be prepended to all description 76 | # unit titles (such as .. function::). 77 | #add_module_names = True 78 | 79 | # If true, sectionauthor and moduleauthor directives will be shown in the 80 | # output. They are ignored by default. 81 | #show_authors = False 82 | 83 | # The name of the Pygments (syntax highlighting) style to use. 84 | pygments_style = 'sphinx' 85 | 86 | # A list of ignored prefixes for module index sorting. 87 | #modindex_common_prefix = [] 88 | 89 | 90 | # -- Options for HTML output --------------------------------------------------- 91 | 92 | # The theme to use for HTML and HTML Help pages. See the documentation for 93 | # a list of builtin themes. 94 | html_theme = 'sheet' 95 | 96 | # Theme options are theme-specific and customize the look and feel of a theme 97 | # further. For a list of options available for each theme, see the 98 | # documentation. 99 | html_theme_options = { 100 | 'nosidebar': True, 101 | } 102 | 103 | # Add any paths that contain custom themes here, relative to this directory. 104 | html_theme_path = ['.'] 105 | 106 | # The name for this set of Sphinx documents. If None, it defaults to 107 | # " v documentation". 108 | html_title = "Logbook" 109 | 110 | # A shorter title for the navigation bar. 
Default is the same as html_title. 111 | html_short_title = "Logbook " + release 112 | 113 | # The name of an image file (relative to this directory) to place at the top 114 | # of the sidebar. 115 | #html_logo = None 116 | 117 | # The name of an image file (within the static path) to use as favicon of the 118 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 119 | # pixels large. 120 | #html_favicon = None 121 | 122 | # Add any paths that contain custom static files (such as style sheets) here, 123 | # relative to this directory. They are copied after the builtin static files, 124 | # so a file named "default.css" will overwrite the builtin "default.css". 125 | #html_static_path = ['_static'] 126 | 127 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 128 | # using the given strftime format. 129 | #html_last_updated_fmt = '%b %d, %Y' 130 | 131 | # If true, SmartyPants will be used to convert quotes and dashes to 132 | # typographically correct entities. 133 | #html_use_smartypants = True 134 | 135 | # Custom sidebar templates, maps document names to template names. 136 | #html_sidebars = {} 137 | 138 | # Additional templates that should be rendered to pages, maps page names to 139 | # template names. 140 | #html_additional_pages = {} 141 | 142 | # If false, no module index is generated. 143 | #html_domain_indices = True 144 | 145 | # If false, no index is generated. 146 | #html_use_index = True 147 | 148 | # If true, the index is split into individual pages for each letter. 149 | #html_split_index = False 150 | 151 | # If true, links to the reST sources are added to the pages. 152 | #html_show_sourcelink = True 153 | 154 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 155 | #html_show_sphinx = True 156 | 157 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 158 | #html_show_copyright = True 159 | 160 | html_add_permalinks = False 161 | 162 | # If true, an OpenSearch description file will be output, and all pages will 163 | # contain a tag referring to it. The value of this option must be the 164 | # base URL from which the finished HTML is served. 165 | #html_use_opensearch = '' 166 | 167 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 168 | #html_file_suffix = '' 169 | 170 | # Output file base name for HTML help builder. 171 | htmlhelp_basename = 'Logbookdoc' 172 | 173 | 174 | # -- Options for LaTeX output -------------------------------------------------- 175 | 176 | # The paper size ('letter' or 'a4'). 177 | #latex_paper_size = 'letter' 178 | 179 | # The font size ('10pt', '11pt' or '12pt'). 180 | #latex_font_size = '10pt' 181 | 182 | # Grouping the document tree into LaTeX files. List of tuples 183 | # (source start file, target name, title, author, documentclass [howto/manual]). 184 | latex_documents = [ 185 | ('index', 'Logbook.tex', u'Logbook Documentation', 186 | u'Armin Ronacher, Georg Brandl', 'manual'), 187 | ] 188 | 189 | # The name of an image file (relative to this directory) to place at the top of 190 | # the title page. 191 | #latex_logo = None 192 | 193 | # For "manual" documents, if this is true, then toplevel headings are parts, 194 | # not chapters. 195 | #latex_use_parts = False 196 | 197 | # If true, show page references after internal links. 198 | #latex_show_pagerefs = False 199 | 200 | # If true, show URL addresses after external links. 
201 | #latex_show_urls = False 202 | 203 | # Additional stuff for the LaTeX preamble. 204 | #latex_preamble = '' 205 | 206 | # Documents to append as an appendix to all manuals. 207 | #latex_appendices = [] 208 | 209 | # If false, no module index is generated. 210 | #latex_domain_indices = True 211 | 212 | 213 | # -- Options for manual page output -------------------------------------------- 214 | 215 | # One entry per manual page. List of tuples 216 | # (source start file, name, description, authors, manual section). 217 | man_pages = [ 218 | ('index', 'logbook', u'Logbook Documentation', 219 | [u'Armin Ronacher, Georg Brandl'], 1) 220 | ] 221 | 222 | intersphinx_mapping = { 223 | 'http://docs.python.org': None 224 | } 225 | -------------------------------------------------------------------------------- /docs/setups.rst: -------------------------------------------------------------------------------- 1 | Common Logbook Setups 2 | ===================== 3 | 4 | This part of the documentation shows how you can configure Logbook for 5 | different kinds of setups. 6 | 7 | 8 | Desktop Application Setup 9 | ------------------------- 10 | 11 | If you develop a desktop application (command line or GUI), you probably have a line 12 | like this in your code:: 13 | 14 | if __name__ == '__main__': 15 | main() 16 | 17 | This is what you should wrap with a ``with`` statement that sets up your log 18 | handler:: 19 | 20 | from logbook import FileHandler 21 | log_handler = FileHandler('application.log') 22 | 23 | if __name__ == '__main__': 24 | with log_handler.applicationbound(): 25 | main() 26 | 27 | Alternatively you can also just push a handler in there:: 28 | 29 | from logbook import FileHandler 30 | log_handler = FileHandler('application.log') 31 | log_handler.push_application() 32 | 33 | if __name__ == '__main__': 34 | main() 35 | 36 | Please keep in mind that you will have to pop the handlers in reverse order if 37 | you want to remove them from the stack, so it is recommended to use the context 38 | manager API if you plan on reverting the handlers. 39 | 40 | Web Application Setup 41 | --------------------- 42 | 43 | Typical modern web applications written in Python have two separate contexts 44 | where code might be executed: when the code is imported, as well as when a 45 | request is handled. The first case is easy to handle, just push a global file 46 | handler that writes everything into a file. 47 | 48 | But Logbook also gives you the ability to improve upon the logging. For 49 | example, you can easily create yourself a log handler that is used for 50 | request-bound logging that also injects additional information. 51 | 52 | For this you can either subclass the logger or you can bind to the handler with 53 | a function that is invoked before logging. The latter has the advantage that it 54 | will also be triggered for other logger instances which might be used by a 55 | different library. 
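Of the two approaches just mentioned, the handler-bound function is what the mail example below uses. The subclassing variant would look roughly like the following sketch (the class name and the injected ``component`` key are illustrative, not part of Logbook)::

    import logbook

    class FrontendLogger(logbook.Logger):
        # hypothetical logger subclass: tag every record this channel emits
        def process_record(self, record):
            logbook.Logger.process_record(self, record)
            record.extra['component'] = 'frontend'

As noted above, this only affects records created through this particular logger class, whereas a function bound to the handler is also triggered for loggers owned by third-party libraries.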
56 | 57 | Here is a simple WSGI example application that showcases sending error mails for 58 | errors happened during a WSGI application:: 59 | 60 | from logbook import MailHandler 61 | 62 | mail_handler = MailHandler('errors@example.com', 63 | ['admin@example.com'], 64 | format_string=u'''\ 65 | Subject: Application Error at {record.extra[url]} 66 | 67 | Message type: {record.level_name} 68 | Location: {record.filename}:{record.lineno} 69 | Module: {record.module} 70 | Function: {record.func_name} 71 | Time: {record.time:%Y-%m-%d %H:%M:%S} 72 | Remote IP: {record.extra[ip]} 73 | Request: {record.extra[url]} [{record.extra[method]}] 74 | 75 | Message: 76 | 77 | {record.message} 78 | ''', bubble=True) 79 | 80 | def application(environ, start_response): 81 | request = Request(environ) 82 | 83 | def inject_info(record, handler): 84 | record.extra.update( 85 | ip=request.remote_addr, 86 | method=request.method, 87 | url=request.url 88 | ) 89 | 90 | with mail_handler.threadbound(processor=inject_info): 91 | # standard WSGI processing happens here. If an error 92 | # is logged, a mail will be sent to the admin on 93 | # example.com 94 | ... 95 | 96 | Deeply Nested Setups 97 | -------------------- 98 | 99 | If you want deeply nested logger setups, you can use the 100 | :class:`~logbook.NestedSetup` class which simplifies that. This is best 101 | explained using an example:: 102 | 103 | import os 104 | from logbook import NestedSetup, NullHandler, FileHandler, \ 105 | MailHandler, Processor 106 | 107 | def inject_information(record): 108 | record.extra['cwd'] = os.getcwd() 109 | 110 | # a nested handler setup can be used to configure more complex setups 111 | setup = NestedSetup([ 112 | # make sure we never bubble up to the stderr handler 113 | # if we run out of setup handling 114 | NullHandler(), 115 | # then write messages that are at least warnings to to a logfile 116 | FileHandler('application.log', level='WARNING'), 117 | # errors should then be delivered by mail and also be kept 118 | # in the application log, so we let them bubble up. 119 | MailHandler('servererrors@example.com', 120 | ['admin@example.com'], 121 | level='ERROR', bubble=True), 122 | # while we're at it we can push a processor on its own stack to 123 | # record additional information. Because processors and handlers 124 | # go to different stacks it does not matter if the processor is 125 | # added here at the bottom or at the very beginning. Same would 126 | # be true for flags. 127 | Processor(inject_information) 128 | ]) 129 | 130 | Once such a complex setup is defined, the nested handler setup can be used as if 131 | it was a single handler:: 132 | 133 | with setup.threadbound(): 134 | # everything here is handled as specified by the rules above. 135 | ... 136 | 137 | 138 | Distributed Logging 139 | ------------------- 140 | 141 | For applications that are spread over multiple processes or even machines 142 | logging into a central system can be a pain. Logbook supports ZeroMQ to 143 | deal with that. You can set up a :class:`~logbook.queues.ZeroMQHandler` 144 | that acts as ZeroMQ publisher and will send log records encoded as JSON 145 | over the wire:: 146 | 147 | from logbook.queues import ZeroMQHandler 148 | handler = ZeroMQHandler('tcp://127.0.0.1:5000') 149 | 150 | Then you just need a separate process that can receive the log records and 151 | hand it over to another log handler using the 152 | :class:`~logbook.queues.ZeroMQSubscriber`. 
The usual setup is this:: 153 | 154 | from logbook.queues import ZeroMQSubscriber 155 | subscriber = ZeroMQSubscriber('tcp://127.0.0.1:5000') 156 | with my_handler: 157 | subscriber.dispatch_forever() 158 | 159 | You can also run that loop in a background thread with 160 | :meth:`~logbook.queues.ZeroMQSubscriber.dispatch_in_background`:: 161 | 162 | from logbook.queues import ZeroMQSubscriber 163 | subscriber = ZeroMQSubscriber('tcp://127.0.0.1:5000') 164 | subscriber.dispatch_in_background(my_handler) 165 | 166 | If you just want to use this in a :mod:`multiprocessing` environment you 167 | can use the :class:`~logbook.queues.MultiProcessingHandler` and 168 | :class:`~logbook.queues.MultiProcessingSubscriber` instead. They work the 169 | same way as the ZeroMQ equivalents but are connected through a 170 | :class:`multiprocessing.Queue`:: 171 | 172 | from multiprocessing import Queue 173 | from logbook.queues import MultiProcessingHandler, \ 174 | MultiProcessingSubscriber 175 | queue = Queue(-1) 176 | handler = MultiProcessingHandler(queue) 177 | subscriber = MultiProcessingSubscriber(queue) 178 | 179 | 180 | Redirecting Single Loggers 181 | -------------------------- 182 | 183 | If you want to have a single logger go to another logfile you have two 184 | options. First of all you can attach a handler to a specific record 185 | dispatcher. So just import the logger and attach something:: 186 | 187 | from yourapplication.yourmodule import logger 188 | logger.handlers.append(MyHandler(...)) 189 | 190 | Handlers attached directly to a record dispatcher will always take 191 | precedence over the stack based handlers. The bubble flag works as 192 | expected, so if you have a non-bubbling handler on your logger and it 193 | always handles, it will never be passed to other handlers. 194 | 195 | Secondly you can write a handler that looks at the logging channel and 196 | only accepts loggers of a specific kind. You can also do that with a 197 | filter function:: 198 | 199 | handler = MyHandler(filter=lambda r: r.channel == 'app.database') 200 | 201 | Keep in mind that the channel is intended to be a human readable string 202 | and is not necessarily unique. If you really need to keep loggers apart 203 | on a central point you might want to introduce some more meta information 204 | into the extra dictionary. 205 | 206 | You can also compare the dispatcher on the log record:: 207 | 208 | from yourapplication.yourmodule import logger 209 | handler = MyHandler(filter=lambda r: r.dispatcher is logger) 210 | 211 | This however has the disadvantage that the dispatcher entry on the log 212 | record is a weak reference and might go away unexpectedly and will not be 213 | there if log records are sent to a different process. 214 | 215 | Last but not least you can check if you can modify the stack around the 216 | execution of the code that triggers that logger For instance if the 217 | logger you are interested in is used by a specific subsystem, you can 218 | modify the stacks before calling into the system. 219 | -------------------------------------------------------------------------------- /docs/quickstart.rst: -------------------------------------------------------------------------------- 1 | Quickstart 2 | ========== 3 | 4 | .. currentmodule:: logbook 5 | 6 | Logbook makes it very easy to get started with logging. 
Just import the logger 7 | class, create yourself a logger and you are set: 8 | 9 | >>> from logbook import Logger 10 | >>> log = Logger('My Awesome Logger') 11 | >>> log.warn('This is too cool for stdlib') 12 | [2010-07-23 16:34] WARNING: My Awesome Logger: This is too cool for stdlib 13 | 14 | A logger is a so-called :class:`~logbook.base.RecordDispatcher`, which is 15 | commonly referred to as a "logging channel". The name you give such a channel 16 | is up to you and need not be unique although it's a good idea to keep it 17 | unique so that you can filter by it if you want. 18 | 19 | The basic interface is similar to what you may already know from the standard 20 | library's :mod:`logging` module. 21 | 22 | There are several logging levels, available as methods on the logger. The 23 | levels -- and their suggested meaning -- are: 24 | 25 | * ``critical`` -- for errors that lead to termination 26 | * ``error`` -- for errors that occur, but are handled 27 | * ``warning`` -- for exceptional circumstances that might not be errors 28 | * ``notice`` -- for non-error messages you usually want to see 29 | * ``info`` -- for messages you usually don't want to see 30 | * ``debug`` -- for debug messages 31 | 32 | Each of these levels is available as method on the :class:`Logger`. 33 | Additionally the ``warning`` level is aliased as :meth:`~Logger.warn`. 34 | 35 | Alternatively, there is the :meth:`~Logger.log` method that takes the logging 36 | level (string or integer) as an argument. 37 | 38 | Handlers 39 | -------- 40 | 41 | Each call to a logging method creates a log *record* which is then passed to 42 | *handlers*, which decide how to store or present the logging info. There are a 43 | multitude of available handlers, and of course you can also create your own: 44 | 45 | * :class:`StreamHandler` for logging to arbitrary streams 46 | * :class:`StderrHandler` for logging to stderr 47 | * :class:`FileHandler`, :class:`MonitoringFileHandler`, 48 | :class:`RotatingFileHandler` and :class:`TimedRotatingFileHandler` for 49 | logging to files 50 | * :class:`MailHandler` for logging via e-mail 51 | * :class:`SyslogHandler` for logging to the syslog daemon 52 | * :class:`NTEventLogHandler` for logging to the Windows NT event log 53 | 54 | On top of those there are a couple of handlers for special use cases: 55 | 56 | * :class:`logbook.FingersCrossedHandler` for logging into memory and 57 | delegating information to another handler when a certain level was 58 | exceeded, otherwise discarding all buffered records. 59 | * :class:`logbook.more.TaggingHandler` for dispatching log records that 60 | are tagged (used in combination with a 61 | :class:`logbook.more.TaggingLogger`) 62 | * :class:`logbook.queues.ZeroMQHandler` for logging to ZeroMQ 63 | * :class:`logbook.queues.MultiProcessingHandler` for logging from a child 64 | process to a handler from the outer process. 65 | * :class:`logbook.queues.ThreadedWrapperHandler` for moving the actual 66 | handling of a handler into a background thread and using a queue to 67 | deliver records to that thread. 68 | * :class:`logbook.notifiers.GrowlHandler` and 69 | :class:`logbook.notifiers.LibNotifyHandler` for logging to the OS X Growl 70 | or the linux notification daemon. 71 | * :class:`logbook.notifiers.BoxcarHandler` for logging to `boxcar`_. 72 | * :class:`logbook.more.TwitterHandler` for logging to twitter. 73 | * :class:`logbook.more.ExternalApplicationHandler` for logging to an 74 | external application such as the OS X ``say`` command. 
75 | * :class:`logbook.ticketing.TicketingHandler` for creating tickets from 76 | log records in a database or other data store. 77 | 78 | .. _boxcar: http://boxcar.io/ 79 | 80 | Registering Handlers 81 | -------------------- 82 | 83 | So how are handlers registered? If you are used to the standard Python logging 84 | system, it works a little bit differently here. Handlers can be registered for 85 | a thread or for a whole process or individually for a logger. However, it is 86 | strongly recommended not to add handlers to loggers unless there is a very good 87 | use case for that. 88 | 89 | If you want errors to go to syslog, you can set up logging like this:: 90 | 91 | from logbook import SyslogHandler 92 | 93 | error_handler = SyslogHandler('logbook example', level='ERROR') 94 | with error_handler.applicationbound(): 95 | # errors logged by the code executed here go to the 96 | # error handler 97 | ... 98 | 99 | This will send all errors to the syslog, while warnings and lower record 100 | levels still go to stderr. This is because the handler is not bubbling by 101 | default, which means that if a record is handled by the handler, it will 102 | not bubble up to a higher handler. If you want to display all records on 103 | stderr, even if they went to the syslog, you can enable bubbling by setting 104 | *bubble* to ``True``:: 105 | 106 | from logbook import SyslogHandler 107 | 108 | error_handler = SyslogHandler('logbook example', level='ERROR', bubble=True) 109 | with error_handler.applicationbound(): 110 | # errors logged by the code executed here go to the 111 | # error handler but also bubble up to the default 112 | # stderr handler. 113 | ... 114 | 115 | So what if you want to log only errors to the syslog and nothing to 116 | stderr? Then you can combine this with a :class:`NullHandler`:: 117 | 118 | from logbook import SyslogHandler, NullHandler 119 | 120 | error_handler = SyslogHandler('logbook example', level='ERROR') 121 | null_handler = NullHandler() 122 | 123 | with null_handler.applicationbound(): 124 | with error_handler.applicationbound(): 125 | # errors now go to the error_handler and everything else 126 | # is swallowed by the null handler so nothing ends up 127 | # on the default stderr handler 128 | ... 129 | 130 | Record Processors 131 | ----------------- 132 | 133 | What makes logbook interesting is the ability to automatically process log 134 | records. This is handy if you want additional information to be logged for 135 | everything you do. A good example use case is recording the IP of the current 136 | request in a web application. Or, in a daemon process, you might want to log 137 | the user and working directory of the process. 138 | 139 | A context processor can be injected at two places: you can either bind a 140 | processor to a stack like you do with handlers, or you can override 141 | the :meth:`.RecordDispatcher.process_record` method. 142 | 143 | Here is an example that injects the current working directory into the 144 | `extra` dictionary of a log record:: 145 | 146 | import os 147 | from logbook import Processor 148 | 149 | def inject_cwd(record): 150 | record.extra['cwd'] = os.getcwd() 151 | 152 | with my_handler.applicationbound(): 153 | with Processor(inject_cwd).applicationbound(): 154 | # everything logged here will have the current working 155 | # directory in the log record. 156 | ...
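Because processors and handlers live on separate stacks, more than one processor can be bound at the same time, and each record should then pass through all of them. A minimal, self-contained sketch (the second callback and the ``pid`` key are illustrative, not from the original docs)::

    import os
    from logbook import Processor, StderrHandler

    def inject_cwd(record):
        record.extra['cwd'] = os.getcwd()

    def inject_pid(record):
        # hypothetical second processor; any callable taking a record works
        record.extra['pid'] = os.getpid()

    with StderrHandler().applicationbound():
        with Processor(inject_cwd).applicationbound():
            with Processor(inject_pid).applicationbound():
                ...  # records logged here carry both extra values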
157 | 158 | The alternative is to inject information just for one logger, in which case 159 | you might want to subclass it:: 160 | 161 | import os 162 | 163 | class MyLogger(logbook.Logger): 164 | 165 | def process_record(self, record): 166 | logbook.Logger.process_record(self, record) 167 | record.extra['cwd'] = os.getcwd() 168 | 169 | 170 | Configuring the Logging Format 171 | ------------------------------ 172 | 173 | All handlers have a useful default log format you don't have to change to use 174 | logbook. However, if you start injecting custom information into log records, 175 | it makes sense to configure the log formatting so that you can see that 176 | information. 177 | 178 | There are two ways to configure formatting: you can either just change the 179 | format string or hook in a custom format function. 180 | 181 | All the handlers that come with logbook and that log into a string use the 182 | :class:`~logbook.StringFormatter` by default. Their constructors accept a 183 | format string which sets the :attr:`logbook.Handler.format_string` attribute. 184 | You can override this attribute, in which case a new string formatter is set: 185 | 186 | >>> from logbook import StderrHandler 187 | >>> handler = StderrHandler() 188 | >>> handler.format_string = '{record.channel}: {record.message}' 189 | >>> handler.formatter 190 | <logbook.handlers.StringFormatter object at 0x...> 191 | 192 | Alternatively you can also set a custom format function which is invoked 193 | with the record and handler as arguments: 194 | 195 | >>> def my_formatter(record, handler): 196 | ... return record.message 197 | ... 198 | >>> handler.formatter = my_formatter 199 | 200 | The format string used for the default string formatter has one variable called 201 | `record` available, which is the log record itself. All attributes can be 202 | looked up using the dotted syntax, and items in the `extra` dict are looked up 203 | using brackets. Note that if you are accessing an item in the extra dict that 204 | does not exist, an empty string is returned. 205 | 206 | Here is an example configuration that shows the current working directory from 207 | the example in the previous section:: 208 | 209 | handler = StderrHandler(format_string= 210 | '{record.channel}: {record.message} [{record.extra[cwd]}]') 211 | 212 | In the :mod:`~logbook.more` module there is a formatter that uses the Jinja2 213 | template engine to format log records, especially useful for multi-line log 214 | formatting such as mails (:class:`~logbook.more.JinjaFormatter`). 215 | -------------------------------------------------------------------------------- /logbook/compat.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | logbook.compat 4 | ~~~~~~~~~~~~~~ 5 | 6 | Backwards compatibility with stdlib's logging package and the 7 | warnings module. 8 | 9 | :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. 10 | :license: BSD, see LICENSE for more details. 11 | """ 12 | import sys 13 | import logging 14 | import warnings 15 | import logbook 16 | from datetime import date, datetime 17 | 18 | 19 | _epoch_ord = date(1970, 1, 1).toordinal() 20 | 21 | 22 | def redirect_logging(): 23 | """Permanently redirects the stdlib's logging output to logbook. This also 24 | removes all otherwise registered handlers on the root logger of 25 | the logging system but leaves the other loggers untouched.
26 | """ 27 | del logging.root.handlers[:] 28 | logging.root.addHandler(RedirectLoggingHandler()) 29 | 30 | 31 | class redirected_logging(object): 32 | """Temporarily redirects logging for all threads and reverts 33 | it later to the old handlers. Mainly used by the internal 34 | unittests:: 35 | 36 | from logbook.compat import redirected_logging 37 | with redirected_logging(): 38 | ... 39 | """ 40 | def __init__(self): 41 | self.old_handlers = logging.root.handlers[:] 42 | 43 | def start(self): 44 | redirect_logging() 45 | 46 | def end(self, etype=None, evalue=None, tb=None): 47 | logging.root.handlers[:] = self.old_handlers 48 | 49 | __enter__ = start 50 | __exit__ = end 51 | 52 | 53 | class RedirectLoggingHandler(logging.Handler): 54 | """A handler for the stdlib's logging system that redirects 55 | transparently to logbook. This is used by the 56 | :func:`redirect_logging` and :func:`redirected_logging` 57 | functions. 58 | 59 | If you want to customize the redirecting you can subclass it. 60 | """ 61 | 62 | def __init__(self): 63 | logging.Handler.__init__(self) 64 | 65 | def convert_level(self, level): 66 | """Converts a logging level into a logbook level.""" 67 | if level >= logging.CRITICAL: 68 | return logbook.CRITICAL 69 | if level >= logging.ERROR: 70 | return logbook.ERROR 71 | if level >= logging.WARNING: 72 | return logbook.WARNING 73 | if level >= logging.INFO: 74 | return logbook.INFO 75 | return logbook.DEBUG 76 | 77 | def find_extra(self, old_record): 78 | """Tries to find custom data from the old logging record. The 79 | return value is a dictionary that is merged with the log record 80 | extra dictionaries. 81 | """ 82 | rv = vars(old_record).copy() 83 | for key in ('name', 'msg', 'args', 'levelname', 'levelno', 84 | 'pathname', 'filename', 'module', 'exc_info', 85 | 'exc_text', 'lineno', 'funcName', 'created', 86 | 'msecs', 'relativeCreated', 'thread', 'threadName', 87 | 'processName', 'process'): 88 | rv.pop(key, None) 89 | return rv 90 | 91 | def find_caller(self, old_record): 92 | """Tries to find the caller that issued the call.""" 93 | frm = sys._getframe(2) 94 | while frm is not None: 95 | if frm.f_globals is globals() or \ 96 | frm.f_globals is logbook.base.__dict__ or \ 97 | frm.f_globals is logging.__dict__: 98 | frm = frm.f_back 99 | else: 100 | return frm 101 | 102 | def convert_time(self, timestamp): 103 | """Converts the UNIX timestamp of the old record into a 104 | datetime object as used by logbook. 105 | """ 106 | return datetime.utcfromtimestamp(timestamp) 107 | 108 | def convert_record(self, old_record): 109 | """Converts an old logging record into a logbook log record.""" 110 | record = logbook.LogRecord(old_record.name, 111 | self.convert_level(old_record.levelno), 112 | old_record.getMessage(), 113 | None, None, old_record.exc_info, 114 | self.find_extra(old_record), 115 | self.find_caller(old_record)) 116 | record.time = self.convert_time(old_record.created) 117 | return record 118 | 119 | def emit(self, record): 120 | logbook.dispatch_record(self.convert_record(record)) 121 | 122 | 123 | class LoggingHandler(logbook.Handler): 124 | """Does the opposite of the :class:`RedirectLoggingHandler`, it sends 125 | messages from logbook to logging. Because of that, it's a very bad 126 | idea to configure both. 127 | 128 | This handler is for logbook and will pass stuff over to a logger 129 | from the standard library. 
130 | 131 | Example usage:: 132 | 133 | from logbook.compat import LoggingHandler, warn 134 | with LoggingHandler(): 135 | warn('This goes to logging') 136 | """ 137 | 138 | def __init__(self, logger=None, level=logbook.NOTSET, filter=None, 139 | bubble=False): 140 | logbook.Handler.__init__(self, level, filter, bubble) 141 | if logger is None: 142 | logger = logging.getLogger() 143 | elif isinstance(logger, basestring): 144 | logger = logging.getLogger(logger) 145 | self.logger = logger 146 | 147 | def get_logger(self, record): 148 | """Returns the logger to use for this record. This implementation 149 | always return :attr:`logger`. 150 | """ 151 | return self.logger 152 | 153 | def convert_level(self, level): 154 | """Converts a logbook level into a logging level.""" 155 | if level >= logbook.CRITICAL: 156 | return logging.CRITICAL 157 | if level >= logbook.ERROR: 158 | return logging.ERROR 159 | if level >= logbook.WARNING: 160 | return logging.WARNING 161 | if level >= logbook.INFO: 162 | return logging.INFO 163 | return logging.DEBUG 164 | 165 | def convert_time(self, dt): 166 | """Converts a datetime object into a timestamp.""" 167 | year, month, day, hour, minute, second = dt.utctimetuple()[:6] 168 | days = date(year, month, 1).toordinal() - _epoch_ord + day - 1 169 | hours = days * 24 + hour 170 | minutes = hours * 60 + minute 171 | seconds = minutes * 60 + second 172 | return seconds 173 | 174 | def convert_record(self, old_record): 175 | """Converts a record from logbook to logging.""" 176 | if sys.version_info >= (2, 5): 177 | # make sure 2to3 does not screw this up 178 | optional_kwargs = {'func': getattr(old_record, 'func_name')} 179 | else: 180 | optional_kwargs = {} 181 | record = logging.LogRecord(old_record.channel, 182 | self.convert_level(old_record.level), 183 | old_record.filename, 184 | old_record.lineno, 185 | old_record.message, 186 | (), old_record.exc_info, 187 | **optional_kwargs) 188 | for key, value in old_record.extra.iteritems(): 189 | record.__dict__.setdefault(key, value) 190 | record.created = self.convert_time(old_record.time) 191 | return record 192 | 193 | def emit(self, record): 194 | self.get_logger(record).handle(self.convert_record(record)) 195 | 196 | 197 | def redirect_warnings(): 198 | """Like :func:`redirected_warnings` but will redirect all warnings 199 | to the shutdown of the interpreter:: 200 | 201 | from logbook.compat import redirect_warnings 202 | redirect_warnings() 203 | """ 204 | redirected_warnings().__enter__() 205 | 206 | 207 | class redirected_warnings(object): 208 | """A context manager that copies and restores the warnings filter upon 209 | exiting the context, and logs warnings using the logbook system. 210 | 211 | The :attr:`~logbook.LogRecord.channel` attribute of the log record will be 212 | the import name of the warning. 213 | 214 | Example usage:: 215 | 216 | from logbook.compat import redirected_warnings 217 | from warnings import warn 218 | 219 | with redirected_warnings(): 220 | warn(DeprecationWarning('logging should be deprecated')) 221 | """ 222 | 223 | def __init__(self): 224 | self._entered = False 225 | 226 | def message_to_unicode(self, message): 227 | try: 228 | return unicode(message) 229 | except UnicodeError: 230 | return str(message).decode('utf-8', 'replace') 231 | 232 | def make_record(self, message, exception, filename, lineno): 233 | category = exception.__name__ 234 | if exception.__module__ not in ('exceptions', 'builtins'): 235 | category = exception.__module__ + '.' 
+ category 236 | rv = logbook.LogRecord(category, logbook.WARNING, message) 237 | # we don't know the caller, but we get that information from the 238 | # warning system. Just attach them. 239 | rv.filename = filename 240 | rv.lineno = lineno 241 | return rv 242 | 243 | def start(self): 244 | if self._entered: # pragma: no cover 245 | raise RuntimeError("Cannot enter %r twice" % self) 246 | self._entered = True 247 | self._filters = warnings.filters 248 | warnings.filters = self._filters[:] 249 | self._showwarning = warnings.showwarning 250 | 251 | def showwarning(message, category, filename, lineno, 252 | file=None, line=None): 253 | message = self.message_to_unicode(message) 254 | record = self.make_record(message, category, filename, lineno) 255 | logbook.dispatch_record(record) 256 | warnings.showwarning = showwarning 257 | 258 | def end(self, etype=None, evalue=None, tb=None): 259 | if not self._entered: # pragma: no cover 260 | raise RuntimeError("Cannot exit %r without entering first" % self) 261 | warnings.filters = self._filters 262 | warnings.showwarning = self._showwarning 263 | 264 | __enter__ = start 265 | __exit__ = end 266 | -------------------------------------------------------------------------------- /logbook/notifiers.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | logbook.notifiers 4 | ~~~~~~~~~~~~~~~~~ 5 | 6 | System notify handlers for OSX and Linux. 7 | 8 | :copyright: (c) 2010 by Armin Ronacher, Christopher Grebs. 9 | :license: BSD, see LICENSE for more details. 10 | """ 11 | import os 12 | import sys 13 | import base64 14 | from time import time 15 | from urllib import urlencode 16 | from httplib import HTTPSConnection 17 | 18 | from logbook.base import NOTSET, ERROR, WARNING 19 | from logbook.handlers import Handler, LimitingHandlerMixin 20 | from logbook.helpers import get_application_name 21 | 22 | 23 | def create_notification_handler(application_name=None, level=NOTSET, icon=None): 24 | """Creates a handler perfectly fit the current platform. On Linux 25 | systems this creates a :class:`LibNotifyHandler`, on OS X systems it 26 | will create a :class:`GrowlHandler`. 27 | """ 28 | if sys.platform == 'darwin': 29 | return GrowlHandler(application_name, level=level, icon=icon) 30 | return LibNotifyHandler(application_name, level=level, icon=icon) 31 | 32 | 33 | class NotificationBaseHandler(Handler, LimitingHandlerMixin): 34 | """Baseclass for notification handlers.""" 35 | 36 | def __init__(self, application_name=None, record_limit=None, 37 | record_delta=None, level=NOTSET, filter=None, bubble=False): 38 | Handler.__init__(self, level, filter, bubble) 39 | LimitingHandlerMixin.__init__(self, record_limit, record_delta) 40 | if application_name is None: 41 | application_name = get_application_name() 42 | self.application_name = application_name 43 | 44 | def make_title(self, record): 45 | """Called to get the title from the record.""" 46 | return u'%s: %s' % (record.channel, record.level_name.title()) 47 | 48 | def make_text(self, record): 49 | """Called to get the text of the record.""" 50 | return record.message 51 | 52 | 53 | class GrowlHandler(NotificationBaseHandler): 54 | """A handler that dispatches to Growl. Requires that either growl-py or 55 | py-Growl are installed. 
56 | """ 57 | 58 | def __init__(self, application_name=None, icon=None, host=None, 59 | password=None, record_limit=None, record_delta=None, 60 | level=NOTSET, filter=None, bubble=False): 61 | NotificationBaseHandler.__init__(self, application_name, record_limit, 62 | record_delta, level, filter, bubble) 63 | 64 | # growl is using the deprecated md5 module, but we really don't need 65 | # to see that deprecation warning 66 | from warnings import filterwarnings 67 | filterwarnings(module='Growl', category=DeprecationWarning, 68 | action='ignore') 69 | 70 | try: 71 | import Growl 72 | self._growl = Growl 73 | except ImportError: 74 | raise RuntimeError('The growl module is not available. You have ' 75 | 'to install either growl-py or py-Growl to ' 76 | 'use the GrowlHandler.') 77 | 78 | if icon is not None: 79 | if not os.path.isfile(icon): 80 | raise IOError('Filename to an icon expected.') 81 | icon = self._growl.Image.imageFromPath(icon) 82 | else: 83 | try: 84 | icon = self._growl.Image.imageWithIconForCurrentApplication() 85 | except TypeError: 86 | icon = None 87 | 88 | self._notifier = self._growl.GrowlNotifier( 89 | applicationName=self.application_name, 90 | applicationIcon=icon, 91 | notifications=['Notset', 'Debug', 'Info', 'Notice', 'Warning', 92 | 'Error', 'Critical'], 93 | hostname=host, 94 | password=password 95 | ) 96 | self._notifier.register() 97 | 98 | def is_sticky(self, record): 99 | """Returns `True` if the sticky flag should be set for this record. 100 | The default implementation marks errors and criticals sticky. 101 | """ 102 | return record.level >= ERROR 103 | 104 | def get_priority(self, record): 105 | """Returns the priority flag for Growl. Errors and criticals are 106 | get highest priority (2), warnings get higher priority (1) and the 107 | rest gets 0. Growl allows values between -2 and 2. 108 | """ 109 | if record.level >= ERROR: 110 | return 2 111 | elif record.level == WARNING: 112 | return 1 113 | return 0 114 | 115 | def emit(self, record): 116 | if not self.check_delivery(record)[1]: 117 | return 118 | self._notifier.notify(record.level_name.title(), 119 | self.make_title(record), 120 | self.make_text(record), 121 | sticky=self.is_sticky(record), 122 | priority=self.get_priority(record)) 123 | 124 | 125 | class LibNotifyHandler(NotificationBaseHandler): 126 | """A handler that dispatches to libnotify. Requires pynotify installed. 127 | If `no_init` is set to `True` the initialization of libnotify is skipped. 128 | """ 129 | 130 | def __init__(self, application_name=None, icon=None, no_init=False, 131 | record_limit=None, record_delta=None, level=NOTSET, 132 | filter=None, bubble=False): 133 | NotificationBaseHandler.__init__(self, application_name, record_limit, 134 | record_delta, level, filter, bubble) 135 | 136 | try: 137 | import pynotify 138 | self._pynotify = pynotify 139 | except ImportError: 140 | raise RuntimeError('The pynotify library is required for ' 141 | 'the LibNotifyHandler.') 142 | 143 | self.icon = icon 144 | if not no_init: 145 | pynotify.init(self.application_name) 146 | 147 | def set_notifier_icon(self, notifier, icon): 148 | """Used to attach an icon on a notifier object.""" 149 | try: 150 | from gtk import gdk 151 | except ImportError: 152 | #TODO: raise a warning? 
153 | raise RuntimeError('The gtk.gdk module is required to set an icon.') 154 | 155 | if icon is not None: 156 | if not isinstance(icon, gdk.Pixbuf): 157 | icon = gdk.pixbuf_new_from_file(icon) 158 | notifier.set_icon_from_pixbuf(icon) 159 | 160 | def get_expires(self, record): 161 | """Returns either EXPIRES_DEFAULT or EXPIRES_NEVER for this record. 162 | The default implementation marks errors and criticals as EXPIRES_NEVER. 163 | """ 164 | pn = self._pynotify 165 | return pn.EXPIRES_NEVER if record.level >= ERROR else pn.EXPIRES_DEFAULT 166 | 167 | def get_urgency(self, record): 168 | """Returns the urgency flag for pynotify. Errors and criticals are 169 | get highest urgency (CRITICAL), warnings get higher priority (NORMAL) 170 | and the rest gets LOW. 171 | """ 172 | pn = self._pynotify 173 | if record.level >= ERROR: 174 | return pn.URGENCY_CRITICAL 175 | elif record.level == WARNING: 176 | return pn.URGENCY_NORMAL 177 | return pn.URGENCY_LOW 178 | 179 | def emit(self, record): 180 | if not self.check_delivery(record)[1]: 181 | return 182 | notifier = self._pynotify.Notification(self.make_title(record), 183 | self.make_text(record)) 184 | notifier.set_urgency(self.get_urgency(record)) 185 | notifier.set_timeout(self.get_expires(record)) 186 | self.set_notifier_icon(notifier, self.icon) 187 | notifier.show() 188 | 189 | 190 | class BoxcarHandler(NotificationBaseHandler): 191 | """Sends notifications to boxcar.io. Can be forwarded to your iPhone or 192 | other compatible device. 193 | """ 194 | api_url = 'https://boxcar.io/notifications/' 195 | 196 | def __init__(self, email, password, record_limit=None, record_delta=None, 197 | level=NOTSET, filter=None, bubble=False): 198 | NotificationBaseHandler.__init__(self, None, record_limit, record_delta, 199 | level, filter, bubble) 200 | self.email = email 201 | self.password = password 202 | 203 | def get_screen_name(self, record): 204 | """Returns the value of the screen name field.""" 205 | return record.level_name.title() 206 | 207 | def emit(self, record): 208 | if not self.check_delivery(record)[1]: 209 | return 210 | body = urlencode({ 211 | 'notification[from_screen_name]': 212 | self.get_screen_name(record).encode('utf-8'), 213 | 'notification[message]': 214 | self.make_text(record).encode('utf-8'), 215 | 'notification[from_remote_service_id]': str(int(time() * 100)) 216 | }) 217 | con = HTTPSConnection('boxcar.io') 218 | con.request('POST', '/notifications/', headers={ 219 | 'Authorization': 'Basic ' + 220 | base64.b64encode((u'%s:%s' % 221 | (self.email, self.password)).encode('utf-8')).strip(), 222 | }, body=body) 223 | con.close() 224 | 225 | 226 | class NotifoHandler(NotificationBaseHandler): 227 | """Sends notifications to notifo.com. Can be forwarded to your Desktop, 228 | iPhone, or other compatible device. 229 | """ 230 | 231 | def __init__(self, application_name=None, username=None, secret=None, 232 | record_limit=None, record_delta=None, level=NOTSET, filter=None, 233 | bubble=False, hide_level=False): 234 | try: 235 | import notifo 236 | except ImportError: 237 | raise RuntimeError( 238 | 'The notifo module is not available. You have ' 239 | 'to install notifo to use the NotifoHandler.' 
240 | ) 241 | NotificationBaseHandler.__init__(self, None, record_limit, record_delta, 242 | level, filter, bubble) 243 | self._notifo = notifo 244 | self.application_name = application_name 245 | self.username = username 246 | self.secret = secret 247 | self.hide_level = hide_level 248 | 249 | 250 | def emit(self, record): 251 | 252 | if self.hide_level: 253 | _level_name = None 254 | else: 255 | _level_name = self.level_name 256 | 257 | self._notifo.send_notification(self.username, self.secret, None, 258 | record.message, self.application_name, 259 | _level_name, None) 260 | -------------------------------------------------------------------------------- /docs/designdefense.rst: -------------------------------------------------------------------------------- 1 | Design Principles 2 | ================= 3 | 4 | .. currentmodule:: logbook 5 | 6 | Logbook is a logging library that breaks many expectations people have in 7 | logging libraries to support paradigms we think are more suitable for 8 | modern applications than the traditional Java inspired logging system that 9 | can also be found in the Python standard library and many more programming 10 | languages. 11 | 12 | This section of the documentation should help you understand the design of 13 | Logbook and why it was implemented like this. 14 | 15 | No Logger Registry 16 | ------------------ 17 | 18 | Logbook is unique in that it has the concept of logging channels but that 19 | it does not keep a global registry of them. In the standard library's 20 | logging module a logger is attached to a tree of loggers that are stored 21 | in the logging module itself as global state. 22 | 23 | In logbook a logger is just an opaque object that might or might not have 24 | a name and attached information such as log level or customizations, but 25 | the lifetime and availability of that object is controlled by the person 26 | creating that logger. 27 | 28 | The registry is necessary for the logging library to give the user the 29 | ability to configure these loggers. 30 | 31 | Logbook has a completely different concept of dispatching from loggers to 32 | the actual handlers which removes the requirement and usefulness of such a 33 | registry. The advantage of the logbook system is that it's a cheap 34 | operation to create a logger and that a logger can easily be garbage 35 | collected to remove all traces of it. 36 | 37 | Instead Logbook moves the burden of delivering a log record from the log 38 | channel's attached log to an independent entity that looks at the context 39 | of the execution to figure out where to deliver it. 40 | 41 | Context Sensitive Handler Stack 42 | ------------------------------- 43 | 44 | Python has two builtin ways to express implicit context: processes and 45 | threads. What this means is that if you have a function that is passed no 46 | arguments at all, you can figure out what thread called the function and 47 | what process you are sitting in. Logbook supports this context 48 | information and lets you bind a handler (or more!) for such a context. 49 | 50 | This is how this works: there are two stacks available at all times in 51 | Logbook. The first stack is the process wide stack. It is manipulated 52 | with :class:`Handler.push_application` and 53 | :class:`Handler.pop_application` (and of course the context manager 54 | :class:`Handler.applicationbound`). Then there is a second stack which is 55 | per thread. 
The manipulation of that stack happens with 56 | :class:`Handler.push_thread`, :class:`Handler.pop_thread` and the 57 | :class:`Handler.threadbound` contextmanager. 58 | 59 | Let's take a WSGI web application as first example. When a request comes 60 | in your WSGI server will most likely do one of the following two things: 61 | either spawn a new Python process (or reuse a process in a pool), or 62 | create a thread (or again, reuse something that already exists). Either 63 | way, we can now say that the context of process id and thread id is our 64 | playground. For this context we can define a log handler that is active 65 | in this context only for a certain time. In pseudocode this would look 66 | like this:: 67 | 68 | def my_application(environ, start_response): 69 | my_handler = FileHandler(...) 70 | my_handler.push_thread() 71 | try: 72 | # whatever happens here in terms of logging is handled 73 | # by the `my_handler` handler. 74 | ... 75 | finally: 76 | my_handler.pop_thread() 77 | 78 | Because this is a lot to type, you can also use the `with` statement to do 79 | the very same:: 80 | 81 | def my_application(environ, start_response): 82 | with FileHandler(...).threadbound() as my_handler: 83 | # whatever happens here in terms of logging is handled 84 | # by the `my_handler` handler. 85 | ... 86 | 87 | Additionally there is another place where you can put handlers: directly 88 | onto a logging channel (for example on a :class:`Logger`). 89 | 90 | This stack system might seem like overkill for a traditional system, but 91 | it allows complete decoupling from the log handling system and other 92 | systems that might log messages. 93 | 94 | Let's take a GUI application rather than a web application. You have an 95 | application that starts up, shuts down and at any point in between might 96 | fail or log messages. The typical default behaviour here would be to log 97 | into a logfile. Fair enough, that's how these applications work. 98 | 99 | But what's the point in logging if not even a single warning happened? 100 | The traditional solution with the logging library from Python is to set 101 | the level high (like `ERROR` or `WARNING`) and log into a file. When 102 | things break, you have a look at the file and hope it contains enough 103 | information. 104 | 105 | When you are in full control of the context of execution with a stack based 106 | system like Logbook has, there is a lot more you can do. 107 | 108 | For example you could immediately after your application boots up 109 | instanciate a :class:`~logbook.FingersCrossedHandler`. This handler 110 | buffers *all* log records in memory and does not emit them at all. What's 111 | the point? That handler activates when a certain threshold is reached. 112 | For example, when the first warning occurs you can write the buffered 113 | messages as well as the warning that just happened into a logfile and 114 | continue logging from that point. Because there is no point in logging 115 | when you will never look at that file anyways. 116 | 117 | But that alone is not the killer feature of a stack. In a GUI application 118 | there is the point where we are still initializing the windowing system. 119 | So a file is the best place to log messages. But once we have the GUI 120 | initialized, it would be very helpful to show error messages to a user in 121 | a console window or a dialog. So what we can do is to initialize at that 122 | point a new handler that logs into a dialog. 
123 |
124 | When a long-running task in the GUI starts, we can move it into a
125 | separate thread and intercept all the log calls for that thread into a
126 | separate window until the task has succeeded.
127 |
128 | Here is such a setup in pseudocode::
129 |
130 |     from logbook import FileHandler, WARNING
131 |     from logbook import FingersCrossedHandler
132 |
133 |     def main():
134 |         # first we set up a handler that logs everything (including debug
135 |         # messages), but only starts doing that when a warning happens
136 |         default_handler = FingersCrossedHandler(FileHandler(filename,
137 |                                                             delay=True),
138 |                                                 WARNING)
139 |         # this handler is now activated as the default handler for the
140 |         # whole process.  We do not bubble up to the default handler
141 |         # that logs to stderr.
142 |         with default_handler.applicationbound(bubble=False):
143 |             # now we initialize the GUI of the application
144 |             initialize_gui()
145 |             # at that point we can hook in our own handler that intercepts
146 |             # errors and displays them in a log window
147 |             with gui.log_handler.applicationbound():
148 |                 # run the gui mainloop
149 |                 gui.mainloop()
150 |
151 | This stack can also be used to inject additional information automatically
152 | into log records, which also replaces the need for custom log
153 | levels.
154 |
155 | No Custom Log Levels
156 | --------------------
157 |
158 | This change over logging was controversial, even between the two original
159 | core developers.  There clearly are use cases for custom log levels, but
160 | there is an inherent problem with them: they require a registry.  If you
161 | want custom log levels, you will have to register them somewhere or parts
162 | of the system will not know about them.  Now, we just spent a lot of time
163 | ripping out the registry with a stack-based approach to solve delivery
164 | problems, so why introduce global state again just for log levels?
165 |
166 | Instead we looked at the cases where custom log levels are useful and
167 | figured that in most situations custom log levels are used to put
168 | additional information into a log entry.  For example, it's not uncommon to
169 | have separate log levels to filter user input out of a logfile.
170 |
171 | We instead provide powerful tools to inject arbitrary additional data into
172 | log records with the concept of log processors.
173 |
174 | So, for example, if you want to log user input and tag it appropriately,
175 | you can override the :meth:`Logger.process_record` method::
176 |
177 |     class InputLogger(Logger):
178 |         def process_record(self, record):
179 |             record.extra['kind'] = 'input'
180 |
181 | A handler can then use this information to filter out input::
182 |
183 |     def no_input(record, handler):
184 |         return record.extra.get('kind') != 'input'
185 |
186 |     with MyHandler().threadbound(filter=no_input):
187 |         ...
188 |
189 | Injecting Context-Sensitive Information
190 | ---------------------------------------
191 |
192 | For many situations it's not only necessary to inject information on a
193 | per-channel basis but also for all logging calls from a given context.
194 | This is best explained for web applications again.  If you have some
195 | libraries doing logging in code that is triggered from a request, you might
196 | want to record the URL of that request for each log record so that you get
197 | an idea where a specific error happened.
198 |
199 | This can easily be accomplished by registering a custom processor when
200 | binding a handler to a thread::
201 |
202 |     def my_application(environ, start_response):
203 |         def inject_request_info(record, handler):
204 |             record.extra['path'] = environ['PATH_INFO']
205 |         with Processor(inject_request_info).threadbound():
206 |             with my_handler.threadbound():
207 |                 # rest of the request code here
208 |                 ...
209 |
210 | Logging Compatibility
211 | ---------------------
212 |
213 | The last pillar of logbook's design is compatibility with the standard
214 | library's logging system.  There are many existing libraries that
215 | log information with the standard library's logging module.  Having
216 | two separate logging systems in the same process is counterproductive and
217 | will cause separate logfiles to appear in the best case or complete chaos
218 | in the worst.
219 |
220 | Because of that, logbook provides ways to transparently redirect all
221 | logging records into the logbook stack-based record delivery system.  That
222 | way you can even continue to use the standard library's logging system to
223 | emit log messages and can take full advantage of logbook's powerful
224 | stack system.
225 |
226 | If you are curious, have a look at :ref:`logging-compat`.
227 |
-------------------------------------------------------------------------------- /logbook/more.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 |     logbook.more
4 |     ~~~~~~~~~~~~
5 |
6 |     Fancy stuff for logbook.
7 |
8 |     :copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
9 |     :license: BSD, see LICENSE for more details.
10 | """
11 | import re
12 | import os
13 | from cgi import parse_qsl
14 | from urllib import urlencode
15 |
16 | from logbook.base import RecordDispatcher, NOTSET, ERROR, NOTICE
17 | from logbook.handlers import Handler, StringFormatter, \
18 |     StringFormatterHandlerMixin, StderrHandler
19 | from logbook._termcolors import colorize
20 | from logbook.helpers import F
21 |
22 |
23 | _ws_re = re.compile(r'(\s+)(?u)')
24 | TWITTER_FORMAT_STRING = \
25 |     u'[{record.channel}] {record.level_name}: {record.message}'
26 | TWITTER_ACCESS_TOKEN_URL = 'https://twitter.com/oauth/access_token'
27 | NEW_TWEET_URL = 'https://api.twitter.com/1/statuses/update.json'
28 |
29 |
30 | class TwitterFormatter(StringFormatter):
31 |     """Works like the standard string formatter and is used by the
32 |     :class:`TwitterHandler` unless changed.
33 |     """
34 |     max_length = 140
35 |
36 |     def format_exception(self, record):
37 |         return u'%s: %s' % (record.exception_shortname,
38 |                             record.exception_message)
39 |
40 |     def __call__(self, record, handler):
41 |         formatted = StringFormatter.__call__(self, record, handler)
42 |         rv = []
43 |         length = 0
44 |         for piece in _ws_re.split(formatted):
45 |             length += len(piece)
46 |             if length > self.max_length:
47 |                 if length - len(piece) < self.max_length:
48 |                     rv.append(u'…')
49 |                 break
50 |             rv.append(piece)
51 |         return u''.join(rv)
52 |
53 |
54 | class TaggingLogger(RecordDispatcher):
55 |     """A logger that attaches a tag to each record.  This is an alternative
56 |     record dispatcher that does not use levels but tags to keep log
57 |     records apart.  It is constructed with a descriptive name and at least
58 |     one tag.
The tags are up for you to define:: 59 | 60 | logger = TaggingLogger('My Logger', ['info', 'warning']) 61 | 62 | For each tag defined that way, a method appears on the logger with 63 | that name:: 64 | 65 | logger.info('This is a info message') 66 | 67 | To dispatch to different handlers based on tags you can use the 68 | :class:`TaggingHandler`. 69 | 70 | The tags themselves are stored as list named ``'tags'`` in the 71 | :attr:`~logbook.LogRecord.extra` dictionary. 72 | """ 73 | 74 | def __init__(self, name=None, tags=None): 75 | RecordDispatcher.__init__(self, name) 76 | # create a method for each tag named 77 | list(setattr(self, tag, lambda msg, *args, **kwargs: 78 | self.log(tag, msg, *args, **kwargs)) for tag in (tags or ())) 79 | 80 | def log(self, tags, msg, *args, **kwargs): 81 | if isinstance(tags, basestring): 82 | tags = [tags] 83 | exc_info = kwargs.pop('exc_info', None) 84 | extra = kwargs.pop('extra', {}) 85 | extra['tags'] = list(tags) 86 | return self.make_record_and_handle(NOTSET, msg, args, kwargs, 87 | exc_info, extra) 88 | 89 | 90 | class TaggingHandler(Handler): 91 | """A handler that logs for tags and dispatches based on those. 92 | 93 | Example:: 94 | 95 | import logbook 96 | from logbook.more import TaggingHandler 97 | 98 | handler = TaggingHandler(dict( 99 | info=OneHandler(), 100 | warning=AnotherHandler() 101 | )) 102 | """ 103 | 104 | def __init__(self, handlers, filter=None, bubble=False): 105 | Handler.__init__(self, NOTSET, filter, bubble) 106 | assert isinstance(handlers, dict) 107 | self._handlers = dict( 108 | (tag, isinstance(handler, Handler) and [handler] or handler) 109 | for (tag, handler) in handlers.iteritems()) 110 | 111 | def emit(self, record): 112 | for tag in record.extra.get('tags', ()): 113 | for handler in self._handlers.get(tag, ()): 114 | handler.handle(record) 115 | 116 | 117 | class TwitterHandler(Handler, StringFormatterHandlerMixin): 118 | """A handler that logs to twitter. Requires that you sign up an 119 | application on twitter and request xauth support. Furthermore the 120 | oauth2 library has to be installed. 121 | 122 | If you don't want to register your own application and request xauth 123 | credentials, there are a couple of leaked consumer key and secret 124 | pairs from application explicitly whitelisted at Twitter 125 | (`leaked secrets `_). 
126 | """ 127 | default_format_string = TWITTER_FORMAT_STRING 128 | formatter_class = TwitterFormatter 129 | 130 | def __init__(self, consumer_key, consumer_secret, username, 131 | password, level=NOTSET, format_string=None, filter=None, 132 | bubble=False): 133 | Handler.__init__(self, level, filter, bubble) 134 | StringFormatterHandlerMixin.__init__(self, format_string) 135 | self.consumer_key = consumer_key 136 | self.consumer_secret = consumer_secret 137 | self.username = username 138 | self.password = password 139 | 140 | try: 141 | import oauth2 142 | except ImportError: 143 | raise RuntimeError('The python-oauth2 library is required for ' 144 | 'the TwitterHandler.') 145 | 146 | self._oauth = oauth2 147 | self._oauth_token = None 148 | self._oauth_token_secret = None 149 | self._consumer = oauth2.Consumer(consumer_key, 150 | consumer_secret) 151 | self._client = oauth2.Client(self._consumer) 152 | 153 | def get_oauth_token(self): 154 | """Returns the oauth access token.""" 155 | if self._oauth_token is None: 156 | resp, content = self._client.request( 157 | TWITTER_ACCESS_TOKEN_URL + '?', 'POST', 158 | body=urlencode({ 159 | 'x_auth_username': self.username.encode('utf-8'), 160 | 'x_auth_password': self.password.encode('utf-8'), 161 | 'x_auth_mode': 'client_auth' 162 | }), 163 | headers={'Content-Type': 'application/x-www-form-urlencoded'} 164 | ) 165 | if resp['status'] != '200': 166 | raise RuntimeError('unable to login to Twitter') 167 | data = dict(parse_qsl(content)) 168 | self._oauth_token = data['oauth_token'] 169 | self._oauth_token_secret = data['oauth_token_secret'] 170 | return self._oauth.Token(self._oauth_token, 171 | self._oauth_token_secret) 172 | 173 | def make_client(self): 174 | """Creates a new oauth client auth a new access token.""" 175 | return self._oauth.Client(self._consumer, self.get_oauth_token()) 176 | 177 | def tweet(self, status): 178 | """Tweets a given status. Status must not exceed 140 chars.""" 179 | client = self.make_client() 180 | resp, content = client.request(NEW_TWEET_URL, 'POST', 181 | body=urlencode({'status': status.encode('utf-8')}), 182 | headers={'Content-Type': 'application/x-www-form-urlencoded'}) 183 | return resp['status'] == '200' 184 | 185 | def emit(self, record): 186 | self.tweet(self.format(record)) 187 | 188 | 189 | class JinjaFormatter(object): 190 | """A formatter object that makes it easy to format using a Jinja 2 191 | template instead of a format string. 192 | """ 193 | 194 | def __init__(self, template): 195 | try: 196 | from jinja2 import Template 197 | except ImportError: 198 | raise RuntimeError('The jinja2 library is required for ' 199 | 'the JinjaFormatter.') 200 | self.template = Template(template) 201 | 202 | def __call__(self, record, handler): 203 | return self.template.render(record=record, handler=handler) 204 | 205 | 206 | class ExternalApplicationHandler(Handler): 207 | """This handler invokes an external application to send parts of 208 | the log record to. The constructor takes a list of arguments that 209 | are passed to another application where each of the arguments is a 210 | format string, and optionally a format string for data that is 211 | passed to stdin. 
212 | 213 | For example it can be used to invoke the ``say`` command on OS X:: 214 | 215 | from logbook.more import ExternalApplicationHandler 216 | say_handler = ExternalApplicationHandler(['say', '{record.message}']) 217 | 218 | Note that the above example is blocking until ``say`` finished, so it's 219 | recommended to combine this handler with the 220 | :class:`logbook.ThreadedWrapperHandler` to move the execution into 221 | a background thread. 222 | 223 | .. versionadded:: 0.3 224 | """ 225 | 226 | def __init__(self, arguments, stdin_format=None, 227 | encoding='utf-8', level=NOTSET, filter=None, 228 | bubble=False): 229 | Handler.__init__(self, level, filter, bubble) 230 | self.encoding = encoding 231 | self._arguments = [F(arg) for arg in arguments] 232 | if stdin_format is not None: 233 | stdin_format = F(stdin_format) 234 | self._stdin_format = stdin_format 235 | import subprocess 236 | self._subprocess = subprocess 237 | 238 | def emit(self, record): 239 | args = [arg.format(record=record).encode(self.encoding) 240 | for arg in self._arguments] 241 | if self._stdin_format is not None: 242 | stdin_data = self._stdin_format.format(record=record) \ 243 | .encode(self.encoding) 244 | stdin = self._subprocess.PIPE 245 | else: 246 | stdin = None 247 | c = self._subprocess.Popen(args, stdin=stdin) 248 | if stdin is not None: 249 | c.communicate(stdin_data) 250 | c.wait() 251 | 252 | 253 | class ColorizingStreamHandlerMixin(object): 254 | """A mixin class that does colorizing. 255 | 256 | .. versionadded:: 0.3 257 | """ 258 | 259 | def should_colorize(self, record): 260 | """Returns `True` if colorizing should be applied to this 261 | record. The default implementation returns `True` if the 262 | stream is a tty and we are not executing on windows. 263 | """ 264 | if os.name == 'nt': 265 | return False 266 | isatty = getattr(self.stream, 'isatty', None) 267 | return isatty and isatty() 268 | 269 | def get_color(self, record): 270 | """Returns the color for this record.""" 271 | if record.level >= ERROR: 272 | return 'red' 273 | elif record.level >= NOTICE: 274 | return 'yellow' 275 | return 'lightgray' 276 | 277 | def format_and_encode(self, record): 278 | rv = super(ColorizingStreamHandlerMixin, self) \ 279 | .format_and_encode(record) 280 | if self.should_colorize(record): 281 | color = self.get_color(record) 282 | if color: 283 | rv = colorize(color, rv) 284 | return rv 285 | 286 | 287 | class ColorizedStderrHandler(ColorizingStreamHandlerMixin, StderrHandler): 288 | """A colorizing stream handler that writes to stderr. It will only 289 | colorize if a terminal was detected. Note that this handler does 290 | not colorize on Windows systems. 291 | 292 | .. versionadded:: 0.3 293 | """ 294 | 295 | 296 | # backwards compat. Should go away in some future releases 297 | from logbook.handlers import FingersCrossedHandler as \ 298 | FingersCrossedHandlerBase 299 | class FingersCrossedHandler(FingersCrossedHandlerBase): 300 | def __init__(self, *args, **kwargs): 301 | FingersCrossedHandlerBase.__init__(self, *args, **kwargs) 302 | from warnings import warn 303 | warn(PendingDeprecationWarning('fingers crossed handler changed ' 304 | 'location. It\'s now a core component of Logbook.')) 305 | 306 | 307 | class ExceptionHandler(Handler, StringFormatterHandlerMixin): 308 | """An exception handler which raises exceptions of the given `exc_type`. 309 | This is especially useful if you set a specific error `level` e.g. 
to treat 310 | warnings as exceptions:: 311 | 312 | from logbook.more import ExceptionHandler 313 | 314 | class ApplicationWarning(Exception): 315 | pass 316 | 317 | exc_handler = ExceptionHandler(ApplicationWarning, level='WARNING') 318 | 319 | .. versionadded:: 0.3 320 | """ 321 | def __init__(self, exc_type, level=NOTSET, format_string=None, 322 | filter=None, bubble=False): 323 | Handler.__init__(self, level, filter, bubble) 324 | StringFormatterHandlerMixin.__init__(self, format_string) 325 | self.exc_type = exc_type 326 | 327 | def handle(self, record): 328 | if self.should_handle(record): 329 | raise self.exc_type(self.format(record)) 330 | return False 331 | -------------------------------------------------------------------------------- /logbook/queues.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | logbook.queues 4 | ~~~~~~~~~~~~~~ 5 | 6 | This module implements queue backends. 7 | 8 | :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. 9 | :license: BSD, see LICENSE for more details. 10 | """ 11 | from threading import Thread 12 | from Queue import Empty, Queue as ThreadQueue 13 | from logbook.base import NOTSET, LogRecord, dispatch_record 14 | from logbook.handlers import Handler, WrapperHandler 15 | from logbook.helpers import json 16 | 17 | 18 | class ZeroMQHandler(Handler): 19 | """A handler that acts as a ZeroMQ publisher, which publishes each record 20 | as json dump. Requires the pyzmq library. 21 | 22 | The queue will be filled with JSON exported log records. To receive such 23 | log records from a queue you can use the :class:`ZeroMQSubscriber`. 24 | 25 | 26 | Example setup:: 27 | 28 | handler = ZeroMQHandler('tcp://127.0.0.1:5000') 29 | """ 30 | 31 | def __init__(self, uri=None, level=NOTSET, filter=None, bubble=False, 32 | context=None): 33 | Handler.__init__(self, level, filter, bubble) 34 | try: 35 | import zmq 36 | except ImportError: 37 | raise RuntimeError('The pyzmq library is required for ' 38 | 'the ZeroMQHandler.') 39 | #: the zero mq context 40 | self.context = context or zmq.Context() 41 | #: the zero mq socket. 42 | self.socket = self.context.socket(zmq.PUB) 43 | if uri is not None: 44 | self.socket.bind(uri) 45 | 46 | def export_record(self, record): 47 | """Exports the record into a dictionary ready for JSON dumping.""" 48 | return record.to_dict(json_safe=True) 49 | 50 | def emit(self, record): 51 | self.socket.send(json.dumps(self.export_record(record))) 52 | 53 | def close(self): 54 | self.socket.close() 55 | 56 | 57 | class ThreadController(object): 58 | """A helper class used by queue subscribers to control the background 59 | thread. This is usually created and started in one go by 60 | :meth:`~logbook.queues.ZeroMQSubscriber.dispatch_in_background` or 61 | a comparable function. 
62 | """ 63 | 64 | def __init__(self, subscriber, setup=None): 65 | self.setup = setup 66 | self.subscriber = subscriber 67 | self.running = False 68 | self._thread = None 69 | 70 | def start(self): 71 | """Starts the task thread.""" 72 | self.running = True 73 | self._thread = Thread(target=self._target) 74 | self._thread.setDaemon(True) 75 | self._thread.start() 76 | 77 | def stop(self): 78 | """Stops the task thread.""" 79 | if self.running: 80 | self.running = False 81 | self._thread.join() 82 | self._thread = None 83 | 84 | def _target(self): 85 | if self.setup is not None: 86 | self.setup.push_thread() 87 | try: 88 | while self.running: 89 | self.subscriber.dispatch_once(timeout=0.05) 90 | finally: 91 | if self.setup is not None: 92 | self.setup.pop_thread() 93 | 94 | 95 | class SubscriberBase(object): 96 | """Baseclass for all subscribers.""" 97 | 98 | def recv(self, timeout=None): 99 | """Receives a single record from the socket. Timeout of 0 means nonblocking, 100 | `None` means blocking and otherwise it's a timeout in seconds after which 101 | the function just returns with `None`. 102 | 103 | Subclasses have to override this. 104 | """ 105 | raise NotImplementedError() 106 | 107 | def dispatch_once(self, timeout=None): 108 | """Receives one record from the socket, loads it and dispatches it. Returns 109 | `True` if something was dispatched or `False` if it timed out. 110 | """ 111 | rv = self.recv(timeout) 112 | if rv is not None: 113 | dispatch_record(rv) 114 | return True 115 | return False 116 | 117 | def dispatch_forever(self): 118 | """Starts a loop that dispatches log records forever.""" 119 | while 1: 120 | self.dispatch_once() 121 | 122 | def dispatch_in_background(self, setup=None): 123 | """Starts a new daemonized thread that dispatches in the background. 124 | An optional handler setup can be provided that pushed to the new 125 | thread (can be any :class:`logbook.base.StackedObject`). 126 | 127 | Returns a :class:`ThreadController` object for shutting down 128 | the background thread. The background thread will already be 129 | running when this function returns. 130 | """ 131 | controller = ThreadController(self, setup) 132 | controller.start() 133 | return controller 134 | 135 | 136 | class ZeroMQSubscriber(SubscriberBase): 137 | """A helper that acts as ZeroMQ subscriber and will dispatch received 138 | log records to the active handler setup. There are multiple ways to 139 | use this class. 140 | 141 | It can be used to receive log records from a queue:: 142 | 143 | subscriber = ZeroMQSubscriber('tcp://127.0.0.1:5000') 144 | record = subscriber.recv() 145 | 146 | But it can also be used to receive and dispatch these in one go:: 147 | 148 | with target_handler: 149 | subscriber = ZeroMQSubscriber('tcp://127.0.0.1:5000') 150 | subscriber.dispatch_forever() 151 | 152 | This will take all the log records from that queue and dispatch them 153 | over to `target_handler`. 
If you want you can also do that in the 154 | background:: 155 | 156 | subscriber = ZeroMQSubscriber('tcp://127.0.0.1:5000') 157 | controller = subscriber.dispatch_in_background(target_handler) 158 | 159 | The controller returned can be used to shut down the background 160 | thread:: 161 | 162 | controller.stop() 163 | """ 164 | 165 | def __init__(self, uri=None, context=None): 166 | try: 167 | import zmq 168 | except ImportError: 169 | raise RuntimeError('The pyzmq library is required for ' 170 | 'the ZeroMQSubscriber.') 171 | self._zmq = zmq 172 | 173 | #: the zero mq context 174 | self.context = context or zmq.Context() 175 | #: the zero mq socket. 176 | self.socket = self.context.socket(zmq.SUB) 177 | if uri is not None: 178 | self.socket.connect(uri) 179 | self.socket.setsockopt(zmq.SUBSCRIBE, '') 180 | 181 | def __del__(self): 182 | try: 183 | self.close() 184 | except AttributeError: 185 | # subscriber partially created 186 | pass 187 | 188 | def close(self): 189 | """Closes the zero mq socket.""" 190 | self.socket.close() 191 | 192 | def recv(self, timeout=None): 193 | """Receives a single record from the socket. Timeout of 0 means nonblocking, 194 | `None` means blocking and otherwise it's a timeout in seconds after which 195 | the function just returns with `None`. 196 | """ 197 | if timeout is None: 198 | rv = self.socket.recv() 199 | elif not timeout: 200 | rv = self.socket.recv(self._zmq.NOBLOCK) 201 | if rv is None: 202 | return 203 | else: 204 | if not self._zmq.select([self.socket], [], [], timeout)[0]: 205 | return 206 | rv = self.socket.recv(self._zmq.NOBLOCK) 207 | return LogRecord.from_dict(json.loads(rv)) 208 | 209 | 210 | def _fix_261_mplog(): 211 | """necessary for older python's to disable a broken monkeypatch 212 | in the logging module. See multiprocessing/util.py for the 213 | hasattr() check. At least in Python 2.6.1 the multiprocessing 214 | module is not imported by logging and as such the test in 215 | the util fails. 216 | """ 217 | import logging, multiprocessing 218 | logging.multiprocessing = multiprocessing 219 | 220 | 221 | class MultiProcessingHandler(Handler): 222 | """Implements a handler that dispatches over a queue to a different 223 | process. It is connected to a subscriber with a 224 | :class:`multiprocessing.Queue`:: 225 | 226 | from multiprocessing import Queue 227 | from logbook.queues import MultiProcessingHandler 228 | queue = Queue(-1) 229 | handler = MultiProcessingHandler(queue) 230 | 231 | """ 232 | 233 | def __init__(self, queue, level=NOTSET, filter=None, bubble=False): 234 | Handler.__init__(self, level, filter, bubble) 235 | self.queue = queue 236 | _fix_261_mplog() 237 | 238 | def emit(self, record): 239 | self.queue.put_nowait(record.to_dict(json_safe=True)) 240 | 241 | 242 | class MultiProcessingSubscriber(SubscriberBase): 243 | """Receives log records from the given multiprocessing queue and 244 | dispatches them to the active handler setup. Make sure to use the same 245 | queue for both handler and subscriber. 
Idaelly the queue is set 246 | up with maximum size (``-1``):: 247 | 248 | from multiprocessing import Queue 249 | queue = Queue(-1) 250 | 251 | It can be used to receive log records from a queue:: 252 | 253 | subscriber = MultiProcessingSubscriber(queue) 254 | record = subscriber.recv() 255 | 256 | But it can also be used to receive and dispatch these in one go:: 257 | 258 | with target_handler: 259 | subscriber = MultiProcessingSubscriber(queue) 260 | subscriber.dispatch_forever() 261 | 262 | This will take all the log records from that queue and dispatch them 263 | over to `target_handler`. If you want you can also do that in the 264 | background:: 265 | 266 | subscriber = MultiProcessingSubscriber(queue) 267 | controller = subscriber.dispatch_in_background(target_handler) 268 | 269 | The controller returned can be used to shut down the background 270 | thread:: 271 | 272 | controller.stop() 273 | 274 | If no queue is provided the subscriber will create one. This one can the 275 | be used by handlers:: 276 | 277 | subscriber = MultiProcessingSubscriber() 278 | handler = MultiProcessingHandler(subscriber.queue) 279 | """ 280 | 281 | def __init__(self, queue=None): 282 | if queue is None: 283 | from multiprocessing import Queue 284 | queue = Queue(-1) 285 | self.queue = queue 286 | _fix_261_mplog() 287 | 288 | def recv(self, timeout=None): 289 | if timeout is None: 290 | rv = self.queue.get() 291 | else: 292 | try: 293 | rv = self.queue.get(block=False, timeout=timeout) 294 | except Empty: 295 | return None 296 | return LogRecord.from_dict(rv) 297 | 298 | 299 | class ExecnetChannelHandler(Handler): 300 | """Implements a handler that dispatches over a execnet channel 301 | to a different process. 302 | """ 303 | 304 | def __init__(self, channel, level=NOTSET, filter=None, bubble=False): 305 | Handler.__init__(self, level, filter, bubble) 306 | self.channel = channel 307 | 308 | def emit(self, record): 309 | self.channel.send(record.to_dict(json_safe=True)) 310 | 311 | 312 | class ExecnetChannelSubscriber(SubscriberBase): 313 | """subscribes to a execnet channel""" 314 | 315 | def __init__(self, channel): 316 | self.channel = channel 317 | 318 | def recv(self, timeout=-1): 319 | try: 320 | rv = self.channel.receive(timeout=timeout) 321 | except self.channel.RemoteError: 322 | #XXX: handle 323 | return None 324 | except (self.channel.TimeoutError, EOFError): 325 | return None 326 | else: 327 | return LogRecord.from_dict(rv) 328 | 329 | 330 | class TWHThreadController(object): 331 | """A very basic thread controller that pulls things in from a 332 | queue and sends it to a handler. Both queue and handler are 333 | taken from the passed :class:`ThreadedWrapperHandler`. 
334 | """ 335 | _sentinel = object() 336 | 337 | def __init__(self, wrapper_handler): 338 | self.wrapper_handler = wrapper_handler 339 | self.running = False 340 | self._thread = None 341 | 342 | def start(self): 343 | """Starts the task thread.""" 344 | self.running = True 345 | self._thread = Thread(target=self._target) 346 | self._thread.setDaemon(True) 347 | self._thread.start() 348 | 349 | def stop(self): 350 | """Stops the task thread.""" 351 | if self.running: 352 | self.wrapper_handler.queue.put_nowait(self._sentinel) 353 | self._thread.join() 354 | self._thread = None 355 | 356 | def _target(self): 357 | while 1: 358 | record = self.wrapper_handler.queue.get() 359 | if record is self._sentinel: 360 | self.running = False 361 | break 362 | self.wrapper_handler.handler.emit(record) 363 | 364 | 365 | class ThreadedWrapperHandler(WrapperHandler): 366 | """This handled uses a single background thread to dispatch log records 367 | to a specific other handler using an internal queue. The idea is that if 368 | you are using a handler that requires some time to hand off the log records 369 | (such as the mail handler) and would block your request, you can let 370 | Logbook do that in a background thread. 371 | 372 | The threaded wrapper handler will automatically adopt the methods and 373 | properties of the wrapped handler. All the values will be reflected: 374 | 375 | >>> twh = ThreadedWrapperHandler(TestHandler()) 376 | >>> from logbook import WARNING 377 | >>> twh.level_name = 'WARNING' 378 | >>> twh.handler.level_name 379 | 'WARNING' 380 | """ 381 | _direct_attrs = frozenset(['handler', 'queue', 'controller']) 382 | 383 | def __init__(self, handler): 384 | WrapperHandler.__init__(self, handler) 385 | self.queue = ThreadQueue(-1) 386 | self.controller = TWHThreadController(self) 387 | self.controller.start() 388 | 389 | def close(self): 390 | self.controller.stop() 391 | self.handler.close() 392 | 393 | def emit(self, record): 394 | self.queue.put_nowait(record) 395 | 396 | 397 | class GroupMember(ThreadController): 398 | def __init__(self, subscriber, queue): 399 | ThreadController.__init__(self, subscriber, None) 400 | self.queue = queue 401 | 402 | def _target(self): 403 | if self.setup is not None: 404 | self.setup.push_thread() 405 | try: 406 | while self.running: 407 | record = self.subscriber.recv() 408 | if record: 409 | try: 410 | self.queue.put(record, timeout=0.05) 411 | except Queue.Full: 412 | pass 413 | finally: 414 | if self.setup is not None: 415 | self.setup.pop_thread() 416 | 417 | 418 | class SubscriberGroup(SubscriberBase): 419 | """This is a subscriber which represents a group of subscribers. 420 | 421 | This is helpful if you are writing a server-like application which has 422 | "slaves". 
This way a user is easily able to view every log record which
423 |     happened somewhere in the entire system without having to check every
424 |     single slave::
425 |
426 |         subscribers = SubscriberGroup([
427 |             MultiProcessingSubscriber(queue),
428 |             ZeroMQSubscriber('tcp://localhost:5000')
429 |         ])
430 |         with target_handler:
431 |             subscribers.dispatch_forever()
432 |     """
433 |     def __init__(self, subscribers=None, queue_limit=10):
434 |         self.members = []
435 |         self.queue = ThreadQueue(queue_limit)
436 |         for subscriber in subscribers or []:
437 |             self.add(subscriber)
438 |
439 |     def add(self, subscriber):
440 |         """Adds the given `subscriber` to the group."""
441 |         member = GroupMember(subscriber, self.queue)
442 |         member.start()
443 |         self.members.append(member)
444 |
445 |     def recv(self, timeout=None):
446 |         try:
447 |             return self.queue.get(timeout=timeout)
448 |         except Empty:
449 |             return
450 |
451 |     def stop(self):
452 |         """Stops the group from internally receiving any more messages; once
453 |         the internal queue is exhausted, :meth:`recv` will always return `None`.
454 |         """
455 |         for member in self.members:
456 |             member.stop()
457 |
-------------------------------------------------------------------------------- /logbook/ticketing.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 |     logbook.ticketing
4 |     ~~~~~~~~~~~~~~~~~
5 |
6 |     Implements log handlers that write to remote data stores and assign
7 |     each logging message a ticket id.
8 |
9 |     :copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
10 |     :license: BSD, see LICENSE for more details.
11 | """
12 | from time import time
13 | from logbook.base import NOTSET, level_name_property, LogRecord
14 | from logbook.handlers import Handler, HashingHandlerMixin
15 | from logbook.helpers import json, cached_property, b
16 |
17 |
18 | class Ticket(object):
19 |     """Represents a ticket from the database."""
20 |
21 |     level_name = level_name_property()
22 |
23 |     def __init__(self, db, row):
24 |         self.db = db
25 |         self.__dict__.update(row)
26 |
27 |     @cached_property
28 |     def last_occurrence(self):
29 |         """The last occurrence."""
30 |         rv = self.get_occurrences(limit=1)
31 |         if rv:
32 |             return rv[0]
33 |
34 |     def get_occurrences(self, order_by='-time', limit=50, offset=0):
35 |         """Returns the occurrences for this ticket."""
36 |         return self.db.get_occurrences(self.ticket_id, order_by, limit, offset)
37 |
38 |     def solve(self):
39 |         """Marks this ticket as solved."""
40 |         self.db.solve_ticket(self.ticket_id)
41 |         self.solved = True
42 |
43 |     def delete(self):
44 |         """Deletes the ticket from the database."""
45 |         self.db.delete_ticket(self.ticket_id)
46 |
47 |     # Silence DeprecationWarning
48 |     __hash__ = None
49 |
50 |     def __eq__(self, other):
51 |         equal = True
52 |         for key in self.__dict__.keys():
53 |             if getattr(self, key) != getattr(other, key):
54 |                 equal = False
55 |                 break
56 |         return equal
57 |
58 |     def __ne__(self, other):
59 |         return not self.__eq__(other)
60 |
61 |
62 | class Occurrence(LogRecord):
63 |     """Represents an occurrence of a ticket."""
64 |
65 |     def __init__(self, db, row):
66 |         self.update_from_dict(json.loads(row['data']))
67 |         self.db = db
68 |         self.time = row['time']
69 |         self.ticket_id = row['ticket_id']
70 |         self.occurrence_id = row['occurrence_id']
71 |
72 |
73 | class BackendBase(object):
74 |     """Provides an abstract interface to various databases."""
75 |
76 |     def __init__(self, **options):
77 |         self.options = options
78 |         self.setup_backend()
79
| 80 | def setup_backend(self): 81 | """Setup the database backend.""" 82 | raise NotImplementedError() 83 | 84 | def record_ticket(self, record, data, hash, app_id): 85 | """Records a log record as ticket.""" 86 | raise NotImplementedError() 87 | 88 | def count_tickets(self): 89 | """Returns the number of tickets.""" 90 | raise NotImplementedError() 91 | 92 | def get_tickets(self, order_by='-last_occurrence_time', limit=50, offset=0): 93 | """Selects tickets from the database.""" 94 | raise NotImplementedError() 95 | 96 | def solve_ticket(self, ticket_id): 97 | """Marks a ticket as solved.""" 98 | raise NotImplementedError() 99 | 100 | def delete_ticket(self, ticket_id): 101 | """Deletes a ticket from the database.""" 102 | raise NotImplementedError() 103 | 104 | def get_ticket(self, ticket_id): 105 | """Return a single ticket with all occurrences.""" 106 | raise NotImplementedError() 107 | 108 | def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0): 109 | """Selects occurrences from the database for a ticket.""" 110 | raise NotImplementedError() 111 | 112 | 113 | class SQLAlchemyBackend(BackendBase): 114 | """Implements a backend that is writing into a database SQLAlchemy can 115 | interface. 116 | 117 | This backend takes some additional options: 118 | 119 | `table_prefix` 120 | an optional table prefix for all tables created by 121 | the logbook ticketing handler. 122 | 123 | `metadata` 124 | an optional SQLAlchemy metadata object for the table creation. 125 | 126 | `autocreate_tables` 127 | can be set to `False` to disable the automatic 128 | creation of the logbook tables. 129 | 130 | """ 131 | 132 | def setup_backend(self): 133 | from sqlalchemy import create_engine, MetaData 134 | engine_or_uri = self.options.pop('uri', None) 135 | metadata = self.options.pop('metadata', None) 136 | table_prefix = self.options.pop('table_prefix', 'logbook_') 137 | 138 | if hasattr(engine_or_uri, 'execute'): 139 | self.engine = engine_or_uri 140 | else: 141 | self.engine = create_engine(engine_or_uri, convert_unicode=True) 142 | if metadata is None: 143 | metadata = MetaData() 144 | self.table_prefix = table_prefix 145 | self.metadata = metadata 146 | self.create_tables() 147 | if self.options.get('autocreate_tables', True): 148 | self.metadata.create_all(bind=self.engine) 149 | 150 | def create_tables(self): 151 | """Creates the tables required for the handler on the class and 152 | metadata. 
153 | """ 154 | import sqlalchemy as db 155 | def table(name, *args, **kwargs): 156 | return db.Table(self.table_prefix + name, self.metadata, 157 | *args, **kwargs) 158 | self.tickets = table('tickets', 159 | db.Column('ticket_id', db.Integer, primary_key=True), 160 | db.Column('record_hash', db.String(40), unique=True), 161 | db.Column('level', db.Integer), 162 | db.Column('channel', db.String(120)), 163 | db.Column('location', db.String(512)), 164 | db.Column('module', db.String(256)), 165 | db.Column('last_occurrence_time', db.DateTime), 166 | db.Column('occurrence_count', db.Integer), 167 | db.Column('solved', db.Boolean), 168 | db.Column('app_id', db.String(80)) 169 | ) 170 | self.occurrences = table('occurrences', 171 | db.Column('occurrence_id', db.Integer, primary_key=True), 172 | db.Column('ticket_id', db.Integer, 173 | db.ForeignKey(self.table_prefix + 'tickets.ticket_id')), 174 | db.Column('time', db.DateTime), 175 | db.Column('data', db.Text), 176 | db.Column('app_id', db.String(80)) 177 | ) 178 | 179 | def _order(self, q, table, order_by): 180 | if order_by[0] == '-': 181 | return q.order_by(table.c[order_by[1:]].desc()) 182 | return q.order_by(table.c[order_by]) 183 | 184 | def record_ticket(self, record, data, hash, app_id): 185 | """Records a log record as ticket.""" 186 | cnx = self.engine.connect() 187 | trans = cnx.begin() 188 | try: 189 | q = self.tickets.select(self.tickets.c.record_hash == hash) 190 | row = cnx.execute(q).fetchone() 191 | if row is None: 192 | row = cnx.execute(self.tickets.insert().values( 193 | record_hash=hash, 194 | level=record.level, 195 | channel=record.channel or u'', 196 | location=u'%s:%d' % (record.filename, record.lineno), 197 | module=record.module or u'', 198 | occurrence_count=0, 199 | solved=False, 200 | app_id=app_id 201 | )) 202 | ticket_id = row.inserted_primary_key[0] 203 | else: 204 | ticket_id = row['ticket_id'] 205 | cnx.execute(self.occurrences.insert() 206 | .values(ticket_id=ticket_id, 207 | time=record.time, 208 | app_id=app_id, 209 | data=json.dumps(data))) 210 | cnx.execute(self.tickets.update() 211 | .where(self.tickets.c.ticket_id == ticket_id) 212 | .values(occurrence_count=self.tickets.c.occurrence_count + 1, 213 | last_occurrence_time=record.time, 214 | solved=False)) 215 | trans.commit() 216 | except Exception: 217 | trans.rollback() 218 | raise 219 | cnx.close() 220 | 221 | def count_tickets(self): 222 | """Returns the number of tickets.""" 223 | return self.engine.execute(self.tickets.count()).fetchone()[0] 224 | 225 | def get_tickets(self, order_by='-last_occurrence_time', limit=50, offset=0): 226 | """Selects tickets from the database.""" 227 | return [Ticket(self, row) for row in self.engine.execute( 228 | self._order(self.tickets.select(), self.tickets, order_by) 229 | .limit(limit).offset(offset)).fetchall()] 230 | 231 | def solve_ticket(self, ticket_id): 232 | """Marks a ticket as solved.""" 233 | self.engine.execute(self.tickets.update() 234 | .where(self.tickets.c.ticket_id == ticket_id) 235 | .values(solved=True)) 236 | 237 | def delete_ticket(self, ticket_id): 238 | """Deletes a ticket from the database.""" 239 | self.engine.execute(self.occurrences.delete() 240 | .where(self.occurrences.c.ticket_id == ticket_id)) 241 | self.engine.execute(self.tickets.delete() 242 | .where(self.tickets.c.ticket_id == ticket_id)) 243 | 244 | def get_ticket(self, ticket_id): 245 | """Return a single ticket with all occurrences.""" 246 | row = self.engine.execute(self.tickets.select().where( 247 | self.tickets.c.ticket_id 
== ticket_id)).fetchone() 248 | if row is not None: 249 | return Ticket(self, row) 250 | 251 | def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0): 252 | """Selects occurrences from the database for a ticket.""" 253 | return [Occurrence(self, row) for row in 254 | self.engine.execute(self._order(self.occurrences.select() 255 | .where(self.occurrences.c.ticket_id == ticket), 256 | self.occurrences, order_by) 257 | .limit(limit).offset(offset)).fetchall()] 258 | 259 | 260 | class MongoDBBackend(BackendBase): 261 | """Implements a backend that writes into a MongoDB database.""" 262 | 263 | class _FixedTicketClass(Ticket): 264 | @property 265 | def ticket_id(self): 266 | return self._id 267 | 268 | class _FixedOccurrenceClass(Occurrence): 269 | def __init__(self, db, row): 270 | self.update_from_dict(json.loads(row['data'])) 271 | self.db = db 272 | self.time = row['time'] 273 | self.ticket_id = row['ticket_id'] 274 | self.occurrence_id = row['_id'] 275 | 276 | #TODO: Update connection setup once PYTHON-160 is solved. 277 | def setup_backend(self): 278 | from pymongo import ASCENDING, DESCENDING 279 | from pymongo.connection import Connection, _parse_uri 280 | from pymongo.errors import AutoReconnect 281 | 282 | _connection = None 283 | uri = self.options.pop('uri', u'') 284 | _connection_attempts = 0 285 | 286 | hosts, database, user, password = _parse_uri(uri, Connection.PORT) 287 | 288 | # Handle auto reconnect signals properly 289 | while _connection_attempts < 5: 290 | try: 291 | if _connection is None: 292 | _connection = Connection(uri) 293 | database = _connection[database] 294 | break 295 | except AutoReconnect: 296 | _connection_attempts += 1 297 | time.sleep(0.1) 298 | 299 | self.database = database 300 | 301 | # setup correct indexes 302 | database.tickets.ensure_index([('record_hash', ASCENDING)], unique=True) 303 | database.tickets.ensure_index([('solved', ASCENDING), ('level', ASCENDING)]) 304 | database.occurrences.ensure_index([('time', DESCENDING)]) 305 | 306 | def _order(self, q, order_by): 307 | from pymongo import ASCENDING, DESCENDING 308 | col = '%s' % (order_by[0] == '-' and order_by[1:] or order_by) 309 | if order_by[0] == '-': 310 | return q.sort(col, DESCENDING) 311 | return q.sort(col, ASCENDING) 312 | 313 | def _oid(self, ticket_id): 314 | from pymongo.objectid import ObjectId 315 | return ObjectId(ticket_id) 316 | 317 | def record_ticket(self, record, data, hash, app_id): 318 | """Records a log record as ticket.""" 319 | db = self.database 320 | ticket = db.tickets.find_one({'record_hash': hash}) 321 | if not ticket: 322 | doc = { 323 | 'record_hash': hash, 324 | 'level': record.level, 325 | 'channel': record.channel or u'', 326 | 'location': u'%s:%d' % (record.filename, record.lineno), 327 | 'module': record.module or u'', 328 | 'occurrence_count': 0, 329 | 'solved': False, 330 | 'app_id': app_id, 331 | } 332 | ticket_id = db.tickets.insert(doc) 333 | else: 334 | ticket_id = ticket['_id'] 335 | 336 | db.tickets.update({'_id': ticket_id}, { 337 | '$inc': { 338 | 'occurrence_count': 1 339 | }, 340 | '$set': { 341 | 'last_occurrence_time': record.time, 342 | 'solved': False 343 | } 344 | }) 345 | # We store occurrences in a seperate collection so that 346 | # we can make it a capped collection optionally. 
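        # (Creating the capped collection itself is left to the application;
        #  with pymongo that would look roughly like
        #  db.create_collection('occurrences', capped=True, size=...), which
        #  is a deployment choice, not something this handler does.)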
347 | db.occurrences.insert({ 348 | 'ticket_id': self._oid(ticket_id), 349 | 'app_id': app_id, 350 | 'time': record.time, 351 | 'data': json.dumps(data), 352 | }) 353 | 354 | def count_tickets(self): 355 | """Returns the number of tickets.""" 356 | return self.database.tickets.count() 357 | 358 | def get_tickets(self, order_by='-last_occurrence_time', limit=50, offset=0): 359 | """Selects tickets from the database.""" 360 | query = self._order(self.database.tickets.find(), order_by) \ 361 | .limit(limit).skip(offset) 362 | return [self._FixedTicketClass(self, obj) for obj in query] 363 | 364 | 365 | def solve_ticket(self, ticket_id): 366 | """Marks a ticket as solved.""" 367 | self.database.tickets.update({'_id': self._oid(ticket_id)}, 368 | {'solved': True}) 369 | 370 | def delete_ticket(self, ticket_id): 371 | """Deletes a ticket from the database.""" 372 | self.database.occurrences.remove({'ticket_id': self._oid(ticket_id)}) 373 | self.database.tickets.remove({'_id': self._oid(ticket_id)}) 374 | 375 | def get_ticket(self, ticket_id): 376 | """Return a single ticket with all occurrences.""" 377 | ticket = self.database.tickets.find_one({'_id': self._oid(ticket_id)}) 378 | if ticket: 379 | return Ticket(self, ticket) 380 | 381 | def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0): 382 | """Selects occurrences from the database for a ticket.""" 383 | collection = self.database.occurrences 384 | occurrences = self._order(collection.find( 385 | {'ticket_id': self._oid(ticket)} 386 | ), order_by).limit(limit).skip(offset) 387 | return [self._FixedOccurrenceClass(self, obj) for obj in occurrences] 388 | 389 | 390 | class TicketingBaseHandler(Handler, HashingHandlerMixin): 391 | """Baseclass for ticketing handlers. This can be used to interface 392 | ticketing systems that do not necessarily provide an interface that 393 | would be compatible with the :class:`BackendBase` interface. 394 | """ 395 | 396 | def __init__(self, hash_salt, level=NOTSET, filter=None, bubble=False): 397 | Handler.__init__(self, level, filter, bubble) 398 | self.hash_salt = hash_salt 399 | 400 | def hash_record_raw(self, record): 401 | """Returns the unique hash of a record.""" 402 | hash = HashingHandlerMixin.hash_record_raw(self, record) 403 | if self.hash_salt is not None: 404 | hash_salt = self.hash_salt 405 | if isinstance(hash_salt, unicode): 406 | hash_salt = hash_salt.encode('utf-8') 407 | hash.update(b('\x00') + hash_salt) 408 | return hash 409 | 410 | 411 | class TicketingHandler(TicketingBaseHandler): 412 | """A handler that writes log records into a remote database. This 413 | database can be connected to from different dispatchers which makes 414 | this a nice setup for web applications:: 415 | 416 | from logbook.ticketing import TicketingHandler 417 | handler = TicketingHandler('sqlite:////tmp/myapp-logs.db') 418 | 419 | :param uri: a backend specific string or object to decide where to log to. 420 | :param app_id: a string with an optional ID for an application. Can be 421 | used to keep multiple application setups apart when logging 422 | into the same database. 423 | :param hash_salt: an optional salt (binary string) for the hashes. 424 | :param backend: A backend class that implements the proper database handling. 425 | Backends available are: :class:`SQLAlchemyBackend`, 426 | :class:`MongoDBBackend`. 427 | """ 428 | 429 | #: The default backend that is being used when no backend is specified. 
430 | #: Unless overriden by a subclass this will be the 431 | #: :class:`SQLAlchemyBackend`. 432 | default_backend = SQLAlchemyBackend 433 | 434 | def __init__(self, uri, app_id='generic', level=NOTSET, 435 | filter=None, bubble=False, hash_salt=None, backend=None, 436 | **db_options): 437 | if hash_salt is None: 438 | hash_salt = u'apphash-' + app_id 439 | TicketingBaseHandler.__init__(self, hash_salt, level, filter, bubble) 440 | if backend is None: 441 | backend = self.default_backend 442 | db_options['uri'] = uri 443 | self.set_backend(backend, **db_options) 444 | self.app_id = app_id 445 | 446 | def set_backend(self, cls, **options): 447 | self.db = cls(**options) 448 | 449 | def process_record(self, record, hash): 450 | """Subclasses can override this to tamper with the data dict that 451 | is sent to the database as JSON. 452 | """ 453 | return record.to_dict(json_safe=True) 454 | 455 | def record_ticket(self, record, data, hash): 456 | """Record either a new ticket or a new occurrence for a 457 | ticket based on the hash. 458 | """ 459 | self.db.record_ticket(record, data, hash, self.app_id) 460 | 461 | def emit(self, record): 462 | """Emits a single record and writes it to the database.""" 463 | hash = self.hash_record(record) 464 | data = self.process_record(record, hash) 465 | self.record_ticket(record, data, hash) 466 | --------------------------------------------------------------------------------
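The ``process_record`` hook shown above is the intended place to tamper with
the data dict before it is written to the backend.  A minimal sketch of a
subclass that strips a sensitive field (the ``'password'`` key is purely
illustrative) could look like this::

    from logbook.ticketing import TicketingHandler

    class ScrubbingTicketingHandler(TicketingHandler):
        def process_record(self, record, hash):
            # start from the default JSON-safe dict ...
            data = TicketingHandler.process_record(self, record, hash)
            # ... and drop anything that should not reach the data store
            data.pop('password', None)
            return data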