├── .gitignore ├── .travis.yml ├── AUTHORS.txt ├── CHANGELOG ├── LICENSE.txt ├── README.rst ├── django_kaneda ├── __init__.py └── settings.py ├── docs ├── Makefile ├── backends.rst ├── changelog.rst ├── conf.py ├── django.rst ├── index.rst ├── metrics.rst ├── queues.rst ├── settings.rst └── usage.rst ├── kaneda ├── __init__.py ├── backends │ ├── __init__.py │ ├── base.py │ ├── elasticsearch.py │ ├── influxdb.py │ ├── logger.py │ ├── mongodb.py │ └── rethink.py ├── base.py ├── exceptions.py ├── queues │ ├── __init__.py │ ├── base.py │ ├── celery.py │ ├── rq.py │ └── zmq.py ├── tasks │ ├── __init__.py │ ├── celery.py │ ├── rq.py │ └── zmq.py └── utils.py ├── setup.py ├── tests ├── __init__.py ├── conftest.py ├── integration │ ├── __init__.py │ ├── benchmarks │ │ ├── __init__.py │ │ ├── test_backends.py │ │ └── test_queues.py │ ├── conftest.py │ ├── django │ │ ├── __init__.py │ │ ├── conftest.py │ │ └── test_django.py │ ├── test_backends.py │ ├── test_metrics.py │ └── test_queues.py └── unit │ ├── __init__.py │ ├── conftest.py │ ├── test_backends.py │ ├── test_metrics.py │ └── test_utils.py └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | 26 | # PyInstaller 27 | # Usually these files are written by a python script from a template 28 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *,cover 45 | 46 | # Django stuff: 47 | *.log 48 | 49 | # Sphinx documentation 50 | docs/_build/ 51 | 52 | # PyBuilder 53 | target/ 54 | 55 | # PyCharm 56 | .idea -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | services: 3 | - mongodb 4 | - redis-server 5 | - elasticsearch 6 | matrix: 7 | include: 8 | - env: TOXENV=py27-dj110 9 | python: 2.7 10 | - env: TOXENV=pypy-dj110 11 | python: pypy 12 | - env: TOXENV=pypy3-dj110 13 | python: pypy3 14 | - env: TOXENV=py34-dj110 15 | python: 3.4 16 | - env: TOXENV=py35-dj110 17 | python: 3.5 18 | - env: TOXENV=py36-dj110 19 | python: 3.6 20 | - env: TOXENV=py27-dj111 21 | python: 2.7 22 | - env: TOXENV=pypy-dj111 23 | python: pypy 24 | - env: TOXENV=pypy3-dj111 25 | python: pypy3 26 | - env: TOXENV=py34-dj111 27 | python: 3.4 28 | - env: TOXENV=py35-dj111 29 | python: 3.5 30 | - env: TOXENV=py36-dj111 31 | python: 3.6 32 | - env: TOXENV=pypy3-dj20 33 | python: pypy3 34 | - env: TOXENV=py34-dj20 35 | python: 3.4 36 | - env: TOXENV=py35-dj20 37 | python: 3.5 38 | - env: TOXENV=py36-dj20 39 | python: 3.6 40 | - env: TOXENV=flake8 41 | python: 3.6 42 | before_install: 43 | # RethinkDB 44 | - source /etc/lsb-release && echo "deb http://download.rethinkdb.com/apt $DISTRIB_CODENAME main" | sudo tee /etc/apt/sources.list.d/rethinkdb.list 45 | - wget -qO- https://download.rethinkdb.com/apt/pubkey.gpg | sudo apt-key add - 46 | # InfluxDB 47 | - curl 
-sL https://repos.influxdata.com/influxdb.key | sudo apt-key add -
48 | - source /etc/lsb-release
49 | - echo "deb https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
50 | # Install both
51 | - sudo apt-get update
52 | - sudo apt-get install influxdb rethinkdb -y
53 | install:
54 | - pip install tox
55 | before_script:
56 | - rethinkdb --daemon
57 | - sudo service influxdb start
58 | script:
59 | - tox
60 |
--------------------------------------------------------------------------------
/AUTHORS.txt:
--------------------------------------------------------------------------------
1 | Marc Tudurí
2 |
--------------------------------------------------------------------------------
/CHANGELOG:
--------------------------------------------------------------------------------
1 | Changelog
2 | =========
3 |
4 | 1.0 (2018-05-20)
5 | ~~~~~~~~~~~~~~~~
6 | * Add support for Django 2.0.
7 | * Fix issues with InfluxDB backend.
8 | * Fix issues with RethinkDB backend.
9 |
10 | 0.5 (2016-05-25)
11 | ~~~~~~~~~~~~~~~~
12 | * Add InfluxDB reporting backend.
13 | * Add RethinkDB reporting backend.
14 | * Add ZMQ queue class and task for asynchronous reporting.
15 |
16 | 0.4 (2016-04-28)
17 | ~~~~~~~~~~~~~~~~
18 | Add support to run Kaneda in asynchronous mode.
19 | -----------------------------------------------
20 | * Add Celery and RQ queue classes and tasks with backend reporting.
21 | * Add the option to configure Kaneda with a settings file.
22 | * Add utils.py file to handle the settings.
23 | * Add asynchronous reporting, settings file and queue classes documentation.
24 | * Add integration and benchmark testing. Improve unit tests.
25 |
26 | Add Django support.
27 | -------------------
28 | * Move from django-kaneda project.
29 | * Add integration tests.
30 | * Improve Django documentation.
31 |
32 | 0.3.1 (2016-03-29)
33 | ~~~~~~~~~~~~~~~~~~
34 | * ElasticBackend can now be configured by passing a list of 'hosts' or a list of 'connection_urls'.
35 |
36 | 0.3 (2016-03-28)
37 | ~~~~~~~~~~~~~~~~
38 | * Add LoggerBackend.
39 | * Add exception handling on backend report methods.
40 | * Add connection timeout for storage backends.
41 | * Storage backends now support passing a client or a connection url as a parameter instead of all connection parameters.
42 | * Add support to use django-kaneda in debug mode.
43 |
44 | 0.2 (2016-02-17)
45 | ~~~~~~~~~~~~~~~~
46 | * Refactor backend payload build method to unpack the value field when it is a dict.
47 |
48 | 0.1 (2016-01-13)
49 | ~~~~~~~~~~~~~~~~
50 | * Initial release.
51 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2016 APSL
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining
4 | a copy of this software and associated documentation files (the
5 | "Software"), to deal in the Software without restriction, including
6 | without limitation the rights to use, copy, modify, merge, publish,
7 | distribute, sublicense, and/or sell copies of the Software, and to
8 | permit persons to whom the Software is furnished to do so, subject to
9 | the following conditions:
10 |
11 | The above copyright notice and this permission notice shall be
12 | included in all copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Kaneda
2 | ======
3 |
4 | .. image:: https://travis-ci.org/APSL/kaneda.svg?branch=master
5 | :target: https://travis-ci.org/APSL/kaneda
6 |
7 | .. image:: https://readthedocs.org/projects/kaneda/badge/?version=latest
8 | :target: https://readthedocs.org/projects/kaneda/?badge=latest
9 |
10 | Kaneda is a Python library that allows you to report events and metrics from your applications.
11 | It provides several builtin `metrics `_ methods in order to store any amount of data that you want, ready for later
12 | analysis or performance studies.
13 |
14 | Usage
15 | ~~~~~
16 |
17 | First of all, you need to install the `Kaneda` package::
18 |
19 | pip install kaneda
20 |
21 | Then you need a backend in order to keep the data in persistent storage.
22 | The following example shows how to send metrics with Elasticsearch as the backend:
23 |
24 | .. code-block:: python
25 |
26 | from kaneda.backends import ElasticsearchBackend
27 | from kaneda import Metrics
28 |
29 | backend = ElasticsearchBackend(index_name='myindex', app_name='myapp', host='localhost',
30 | port=9200, user='kaneda', password='kaneda')
31 | metrics = Metrics(backend=backend)
32 | metrics.gauge('answer_of_life', 42)
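The same `Metrics` instance exposes the rest of the builtin reporting methods. For example, counters and the `timed` decorator (the metric names below are illustrative):

.. code-block:: python

    metrics.increment('user.profile.views')

    @metrics.timed('user.profile.load_time')
    def load_profile():
        pass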
33 |
34 | Features
35 | ~~~~~~~~
36 | * Builtin `metrics `_ functions and custom metric reports.
37 | * Configurable reporting `backends `_ classes and `asynchronous `_ queue classes.
38 | * Builtin Elasticsearch, MongoDB, InfluxDB and RethinkDB backends.
39 | * Builtin Celery, RQ and ZMQ asynchronous queue classes.
40 | * Django support.
41 |
42 | Documentation
43 | ~~~~~~~~~~~~~
44 | Visit the `documentation `_ for an in-depth look at Kaneda.
45 |
--------------------------------------------------------------------------------
/django_kaneda/__init__.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from django.utils.functional import LazyObject
4 |
5 |
6 | class LazyMetrics(LazyObject):
7 |
8 | def _setup(self):
9 | from kaneda import Metrics
10 | from kaneda.utils import import_class, get_object_from_settings
11 | from kaneda.exceptions import UnexistingKanedaClass, SettingsError
12 | from . import settings
13 |
14 | if settings.DEBUG:
15 | backend_class = import_class('kaneda.backends.LoggerBackend')
16 | if settings.LOGGER:
17 | backend = backend_class(logger=logging.getLogger(settings.LOGGER))
18 | elif settings.LOGGER_FILENAME:
19 | backend = backend_class(filename=settings.LOGGER_FILENAME)
20 | else:
21 | backend = backend_class()
22 | _metrics = Metrics(backend=backend)
23 | else:
24 | if not settings.BACKEND and not settings.QUEUE:
25 | raise SettingsError('You need to set KANEDA_BACKEND or KANEDA_QUEUE in settings.py to use django_kaneda')
26 | if settings.BACKEND:
27 | try:
28 | backend = get_object_from_settings(settings.BACKEND, settings)
29 | _metrics = Metrics(backend=backend)
30 | except UnexistingKanedaClass:
31 | raise UnexistingKanedaClass('The selected KANEDA_BACKEND class does not exist.')
32 | if settings.QUEUE:
33 | try:
34 | queue = get_object_from_settings(settings.QUEUE, settings)
35 | _metrics = Metrics(queue=queue)
36 | except UnexistingKanedaClass:
37 | raise UnexistingKanedaClass('The selected KANEDA_QUEUE class does not exist.')
38 | self._wrapped = _metrics
39 |
40 | metrics = LazyMetrics()
41 |
--------------------------------------------------------------------------------
/django_kaneda/settings.py:
--------------------------------------------------------------------------------
1 | from django.conf import settings
2 |
3 | BACKEND = getattr(settings, 'KANEDA_BACKEND', None)
4 | QUEUE = getattr(settings, 'KANEDA_QUEUE', None)
5 |
6 | # Elasticsearch backend settings
7 | ELASTIC_INDEX_NAME = getattr(settings, 'KANEDA_ELASTIC_INDEX_NAME', 'kaneda')
8 | ELASTIC_APP_NAME = getattr(settings, 'KANEDA_ELASTIC_APP_NAME', 'default')
9 | ELASTIC_CONNECTION_URL = getattr(settings, 'KANEDA_ELASTIC_CONNECTION_URL', None)
10 | ELASTIC_HOST = getattr(settings, 'KANEDA_ELASTIC_HOST', None)
11 | ELASTIC_PORT = getattr(settings, 'KANEDA_ELASTIC_PORT', None)
12 | ELASTIC_USER = getattr(settings, 'KANEDA_ELASTIC_USER', None)
13 | ELASTIC_PASSWORD = getattr(settings, 'KANEDA_ELASTIC_PASSWORD', None)
14 | ELASTIC_TIMEOUT = getattr(settings, 'KANEDA_ELASTIC_TIMEOUT', 0.3)
15 |
16 | # MongoDB backend settings
17 | MONGO_DB_NAME = getattr(settings, 'KANEDA_MONGO_DB_NAME', 'kaneda')
18 | MONGO_COLLECTION_NAME = getattr(settings, 'KANEDA_MONGO_COLLECTION_NAME', 'default')
19 | MONGO_CONNECTION_URL = getattr(settings, 'KANEDA_MONGO_CONNECTION_URL', None)
20 | MONGO_HOST = getattr(settings, 'KANEDA_MONGO_HOST', None)
21 | MONGO_PORT = getattr(settings, 'KANEDA_MONGO_PORT', None)
22 | MONGO_TIMEOUT = getattr(settings, 'KANEDA_MONGO_TIMEOUT', 300)
23 |
24 | # RethinkDB backend settings
25 | RETHINK_DB = getattr(settings, 'KANEDA_RETHINK_DB', 'kaneda')
26 | RETHINK_TABLE_NAME = getattr(settings, 'KANEDA_RETHINK_TABLE_NAME', None)
27 | RETHINK_HOST = getattr(settings, 'KANEDA_RETHINK_HOST', None)
28 | RETHINK_PORT = getattr(settings, 'KANEDA_RETHINK_PORT', None)
29 | RETHINK_USER = getattr(settings, 'KANEDA_RETHINK_USER', None)
30 | RETHINK_PASSWORD = getattr(settings, 'KANEDA_RETHINK_PASSWORD', None)
31 | RETHINK_TIMEOUT = getattr(settings, 'KANEDA_RETHINK_TIMEOUT', 0.3)  # seconds, matching the documented default
32 |
33 | # InfluxDB backend settings
34 | INFLUX_DATABASE = getattr(settings, 'KANEDA_INFLUX_DATABASE', 'kaneda')
35 | INFLUX_CONNECTION_URL = getattr(settings, 'KANEDA_INFLUX_CONNECTION_URL', None)
36 | INFLUX_HOST = getattr(settings, 'KANEDA_INFLUX_HOST', None)
37 | INFLUX_PORT = getattr(settings, 'KANEDA_INFLUX_PORT', None)
38 | INFLUX_USERNAME = getattr(settings, 'KANEDA_INFLUX_USERNAME', None)
39 | INFLUX_PASSWORD = getattr(settings, 'KANEDA_INFLUX_PASSWORD', None)
40 | INFLUX_TIMEOUT = getattr(settings, 'KANEDA_INFLUX_TIMEOUT', 0.3)  # seconds, matching the documented default
41 |
42 | # Debug backend mode settings
43 | DEBUG = getattr(settings, 'KANEDA_DEBUG', False)
44 | LOGGER = getattr(settings, 'KANEDA_LOGGER', None)
45 | LOGGER_FILENAME = getattr(settings, 'KANEDA_LOGGER_FILENAME', None)
46 |
47 | # Celery queue settings
48 | CELERY_BROKER = getattr(settings, 'KANEDA_CELERY_BROKER', '')
49 | CELERY_QUEUE_NAME = getattr(settings, 'KANEDA_CELERY_QUEUE_NAME', '')
50 |
51 | # RQ queue settings
52 | RQ_REDIS_URL = getattr(settings, 'KANEDA_RQ_REDIS_URL', '')
53 | RQ_QUEUE_NAME = getattr(settings, 'KANEDA_RQ_QUEUE_NAME', 'kaneda')
54 |
55 | # ZMQ queue settings
56 | ZMQ_CONNECTION_URL = getattr(settings, 'KANEDA_ZMQ_CONNECTION_URL', '')
57 | ZMQ_TIMEOUT = getattr(settings, 'KANEDA_ZMQ_TIMEOUT', 300)
58 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # User-friendly check for sphinx-build
11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
13 | endif
14 |
15 | # Internal variables.
16 | PAPEROPT_a4 = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/kaneda.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/kaneda.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/kaneda" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/kaneda" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
105 |
106 | latex:
107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
108 | @echo
109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
111 | "(use \`make latexpdf' here to do that automatically)."
112 |
113 | latexpdf:
114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
115 | @echo "Running LaTeX files through pdflatex..."
116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
118 |
119 | latexpdfja:
120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
121 | @echo "Running LaTeX files through platex and dvipdfmx..."
122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
124 |
125 | text:
126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
127 | @echo
128 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
129 |
130 | man:
131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
132 | @echo
133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
134 |
135 | texinfo:
136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
137 | @echo
138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
139 | @echo "Run \`make' in that directory to run these through makeinfo" \
140 | "(use \`make info' here to do that automatically)."
141 |
142 | info:
143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
144 | @echo "Running Texinfo files through makeinfo..."
145 | make -C $(BUILDDIR)/texinfo info
146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
147 |
148 | gettext:
149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
150 | @echo
151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
152 |
153 | changes:
154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
155 | @echo
156 | @echo "The overview file is in $(BUILDDIR)/changes."
157 |
158 | linkcheck:
159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
160 | @echo
161 | @echo "Link check complete; look for any errors in the above output " \
162 | "or in $(BUILDDIR)/linkcheck/output.txt."
163 |
164 | doctest:
165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
166 | @echo "Testing of doctests in the sources finished, look at the " \
167 | "results in $(BUILDDIR)/doctest/output.txt."
168 |
169 | xml:
170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
171 | @echo
172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
173 |
174 | pseudoxml:
175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
176 | @echo
177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
178 |
--------------------------------------------------------------------------------
/docs/backends.rst:
--------------------------------------------------------------------------------
1 | Backends
2 | ========
3 |
4 | Kaneda provides builtin backends to store metrics and events in persistent storage. If you want to use your own
5 | custom backend you need to subclass :code:`BaseBackend` and implement your custom :code:`report` method, which
6 | is responsible for storing the metrics data.
7 |
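For example, a minimal custom backend that just prints every payload (an illustrative sketch, not a builtin class) could look like this::

    from kaneda.backends import BaseBackend

    class PrintBackend(BaseBackend):
        """Toy backend that writes each metric payload to stdout."""

        def report(self, name, metric, value, tags, id_):
            # _get_payload builds the common payload (host, name, value and tags)
            print(metric, self._get_payload(name, value, tags))
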
8 | Elasticsearch
9 | ~~~~~~~~~~~~~
10 |
11 | Elasticsearch is a search-based NoSQL database that works very well with metrics data. It provides powerful tools to analyze data and build
12 | real-time dashboards easily with `Kibana `_.
13 |
14 | .. note::
15 |
16 | Before using Elasticsearch as a backend you need to install the Elasticsearch Python client::
17 |
18 | pip install elasticsearch
19 |
20 |
21 | .. autoclass:: kaneda.backends.ElasticsearchBackend
22 | :members:
23 |
24 | MongoDB
25 | ~~~~~~~
26 |
27 | MongoDB is a document-oriented NoSQL database. It is a great tool for storing metrics, as it provides a powerful aggregation framework
28 | to perform data analysis.
29 |
30 | .. note::
31 |
32 | Before using MongoDB as a backend you need to install the MongoDB Python client::
33 |
34 | pip install pymongo
35 |
36 |
37 | .. autoclass:: kaneda.backends.MongoBackend
38 | :members:
39 |
40 | RethinkDB
41 | ~~~~~~~~~
42 |
43 | RethinkDB is an open source, scalable, distributed NoSQL database built for realtime applications.
44 |
45 | .. note::
46 |
47 | Before using RethinkDB as a backend you need to install the RethinkDB Python client::
48 |
49 | pip install rethinkdb
50 |
51 |
52 | .. autoclass:: kaneda.backends.RethinkBackend
53 | :members:
54 |
55 |
56 | InfluxDB
57 | ~~~~~~~~
58 |
59 | InfluxDB is an open source time series database with no external dependencies. It's useful for recording metrics,
60 | events, and performing analytics.
61 |
62 | .. note::
63 |
64 | Before using InfluxDB as a backend you need to install the InfluxDB Python client::
65 |
66 | pip install influxdb
67 |
68 | .. warning::
69 |
70 | InfluxDB can store other types of data besides time series. However, it has some restrictions:
71 |
72 | * The metrics *tags* field can't be a :code:`list`, only a :code:`dict`::
73 |
74 | # bad
75 | metrics.timing('user.profile_load_time', 230, tags=['login', 'edit_profile'])
76 |
77 | # good
78 | metrics.timing('user.profile_load_time', 230, tags={'from': 'login', 'to': 'edit_profile'})
79 |
80 | * The :any:`Custom ` metric *value* field can't be a :code:`list` nor a nested :code:`dict`::
81 |
82 | # bad
83 | metrics.custom('zone.search', metric='query_time', value={'times': [120, 230]})
84 | metrics.custom('zone.search', metric='query_time', value={'times': {'start': 120, 'end': 230}})
85 |
86 | # good
87 | metrics.custom('zone.search', metric='query_time', value={'start_time': 120, 'end_time': 230})
88 |
89 | .. autoclass:: kaneda.backends.InfluxBackend
90 | :members:
91 |
92 | Logger
93 | ~~~~~~
94 |
95 | You can use a logger instance from the logging library of the Python standard lib. Useful for debugging.
96 |
97 | .. autoclass:: kaneda.backends.LoggerBackend
98 | :members:
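
For a quick smoke test, the logger backend can be wired up like any other backend (a minimal sketch; the filename is illustrative)::

    from kaneda.backends import LoggerBackend
    from kaneda import Metrics

    backend = LoggerBackend(filename='/tmp/kaneda-debug.log')
    metrics = Metrics(backend=backend)
    metrics.gauge('answer_of_life', 42)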
--------------------------------------------------------------------------------
/docs/changelog.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../CHANGELOG
2 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import sys
4 | import os
5 | from os.path import abspath
6 |
7 | sys.path.insert(0, abspath('..'))
8 | import kaneda
9 | sys.path.pop(0)
10 |
11 | extensions = [
12 | 'sphinx.ext.autodoc',
13 | 'sphinx.ext.intersphinx',
14 | 'sphinx.ext.viewcode',
15 | 'sphinx.ext.doctest',
16 | ]
17 |
18 | templates_path = ['_templates']
19 | source_suffix = '.rst'
20 | master_doc = 'index'
21 |
22 | project = u'kaneda'
23 | copyright = u'2016, APSL'
24 |
25 | version = '1.0'
26 | release = '1.0'
27 | exclude_patterns = ['_build']
28 | pygments_style = 'sphinx'
29 |
30 | import sphinx_rtd_theme
31 | html_theme = "sphinx_rtd_theme"
32 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
33 |
34 | html_static_path = ['_static']
35 | htmlhelp_basename = 'kanedadoc'
36 |
--------------------------------------------------------------------------------
/docs/django.rst:
--------------------------------------------------------------------------------
1 | .. _django:
2 |
3 | Django Setup
4 | ============
5 |
6 | Kaneda can be used with Django as a mechanism for reporting metrics and events.
7 |
8 | 1. Add :code:`django_kaneda` to :code:`INSTALLED_APPS` in :file:`settings.py`.
9 |
10 | 2. Set :code:`KANEDA_BACKEND` and the proper configuration of your selected backend in :file:`settings.py`. If you want to use Elasticsearch, your configuration will be something like this::
11 |
12 | KANEDA_BACKEND = 'kaneda.backends.ElasticsearchBackend'
13 | KANEDA_ELASTIC_INDEX_NAME = 'kaneda'
14 | KANEDA_ELASTIC_APP_NAME = 'YourProject'
15 | KANEDA_ELASTIC_HOST = 'localhost'
16 | KANEDA_ELASTIC_PORT = 9200
17 | KANEDA_ELASTIC_USER = 'user'
18 | KANEDA_ELASTIC_PASSWORD = 'pass'
19 |
20 | Alternatively you can set :code:`KANEDA_QUEUE` to specify a :ref:`queue ` configuration to use Kaneda in :ref:`async mode `::
21 |
22 | KANEDA_QUEUE = 'kaneda.queues.CeleryQueue'
23 | KANEDA_CELERY_BROKER = 'redis://localhost:6379/0'
24 |
25 | With this, you can use Kaneda everywhere in your Django project::
26 |
27 | from django_kaneda import metrics
28 |
29 |
30 | class UserProfileView(TemplateView):
31 | template_name = 'user/profile.html'
32 |
33 | @metrics.timed('user_profile.time')
34 | def get(self, request, *args, **kwargs):
35 | metrics.increment('user_profile.views')
36 | return super(UserProfileView, self).get(request, *args, **kwargs)
37 |
38 | Debug mode
39 | ~~~~~~~~~~
40 | You can use Kaneda in debug mode with a logger as a backend. Simply set :code:`KANEDA_DEBUG` to `True` to report everything
41 | to a logger instead of a persistent backend.
Furthermore, you can set a previously defined logger in :file:`settings.py` and use it as
42 | your debug logger::
43 |
44 | LOGGING = {
45 | 'version': 1,
46 | 'disable_existing_loggers': False,
47 | 'formatters': {
48 | 'with_timestamp': {
49 | 'format': '%(asctime)s - %(name)s - %(message)s'
50 | }
51 | },
52 | 'handlers': {
53 | 'file': {
54 | 'level': 'INFO',
55 | 'class': 'logging.FileHandler',
56 | 'filename': '/tmp/kaneda-demo.log',
57 | 'formatter': 'with_timestamp'
58 | },
59 | },
60 | 'loggers': {
61 | 'kaneda.demo': {
62 | 'handlers': ['file'],
63 | 'level': 'INFO',
64 | 'propagate': True,
65 | },
66 | },
67 | }
68 |
69 | KANEDA_DEBUG = True
70 | KANEDA_LOGGER = 'kaneda.demo'
71 |
72 | Alternatively you can set :code:`KANEDA_LOGGER_FILENAME` instead of :code:`KANEDA_LOGGER` to store the reporting results
73 | in a specific file.
74 |
75 | Available settings
76 | ~~~~~~~~~~~~~~~~~~
77 | Elasticsearch
78 | -------------
79 | KANEDA_ELASTIC_INDEX_NAME (='kaneda')
80 | Name of the Elasticsearch index used to store metrics data. Default name format will be index_name-YYYY.MM.DD.
81 |
82 | KANEDA_ELASTIC_APP_NAME (='default')
83 | Name of the app/project where metrics are used.
84 |
85 | KANEDA_ELASTIC_CONNECTION_URL (=None)
86 | Elasticsearch connection url (https://user:secret@localhost:9200).
87 |
88 | KANEDA_ELASTIC_HOST (=None)
89 | Server host.
90 |
91 | KANEDA_ELASTIC_PORT (=None)
92 | Server port.
93 |
94 | KANEDA_ELASTIC_USER (=None)
95 | HTTP auth username.
96 |
97 | KANEDA_ELASTIC_PASSWORD (=None)
98 | HTTP auth password.
99 |
100 | KANEDA_ELASTIC_TIMEOUT (=0.3)
101 | Elasticsearch connection timeout (seconds).
102 |
103 | MongoDB
104 | -------
105 | KANEDA_MONGO_DB_NAME (='kaneda')
106 | Name of the MongoDB database.
107 |
108 | KANEDA_MONGO_COLLECTION_NAME (='default')
109 | Name of the MongoDB collection used to store metric data.
110 |
111 | KANEDA_MONGO_CONNECTION_URL (=None)
112 | Mongo connection url (mongodb://localhost:27017/).
113 |
114 | KANEDA_MONGO_HOST (=None)
115 | Server host.
116 |
117 | KANEDA_MONGO_PORT (=None)
118 | Server port.
119 |
120 | KANEDA_MONGO_TIMEOUT (=300)
121 | MongoDB connection timeout (milliseconds).
122 |
123 | RethinkDB
124 | ---------
125 | KANEDA_RETHINK_DB (='kaneda')
126 | Name of the RethinkDB database.
127 |
128 | KANEDA_RETHINK_TABLE_NAME (=None)
129 | Name of the RethinkDB table. If this is not provided, the name of the metric will be used.
130 |
131 | KANEDA_RETHINK_HOST (=None)
132 | Server host.
133 |
134 | KANEDA_RETHINK_PORT (=None)
135 | Server port.
136 |
137 | KANEDA_RETHINK_USER (=None)
138 | Auth username.
139 |
140 | KANEDA_RETHINK_PASSWORD (=None)
141 | Auth password.
142 |
143 | KANEDA_RETHINK_TIMEOUT (=0.3)
144 | RethinkDB connection timeout (seconds).
145 |
146 | InfluxDB
147 | --------
148 | KANEDA_INFLUX_DATABASE (='kaneda')
149 | Name of the InfluxDB database.
150 |
151 | KANEDA_INFLUX_CONNECTION_URL (=None)
152 | InfluxDB connection url (influxdb://username:password@localhost:8086/databasename).
153 |
154 | KANEDA_INFLUX_HOST (=None)
155 | Server host.
156 |
157 | KANEDA_INFLUX_PORT (=None)
158 | Server port.
159 |
160 | KANEDA_INFLUX_USERNAME (=None)
161 | Auth username.
162 |
163 | KANEDA_INFLUX_PASSWORD (=None)
164 | Auth password.
165 |
166 | KANEDA_INFLUX_TIMEOUT (=0.3)
167 | InfluxDB connection timeout (seconds).
168 |
169 | Celery
170 | ------
171 | KANEDA_CELERY_BROKER (='')
172 | Broker connection url.
173 |
174 | KANEDA_CELERY_QUEUE_NAME (='')
175 | Name of the Celery queue.
176 |
177 | RQ
178 | --
179 | KANEDA_RQ_REDIS_URL (='')
180 | Redis connection url.
181 |
182 | KANEDA_RQ_QUEUE_NAME (='kaneda')
183 | Name of the RQ queue.
184 |
185 | ZMQ
186 | ---
187 | KANEDA_ZMQ_CONNECTION_URL (='')
188 | ZMQ connection url.
189 |
190 | KANEDA_ZMQ_TIMEOUT (=300)
191 | ZMQ socket timeout (milliseconds).
192 |
193 | Debug
194 | -----
195 | KANEDA_DEBUG (=False)
196 | Use Kaneda in debug mode.
197 |
198 | KANEDA_LOGGER (=None)
199 | Name of a previously defined logger, to use in debug mode.
200 |
201 | KANEDA_LOGGER_FILENAME (=None)
202 | Name of the file where the logger will store the metrics, to use in debug mode.
203 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | Kaneda
2 | ======
3 |
4 | Kaneda is a Python library that allows you to report events and metrics from your applications.
5 | It provides several builtin :doc:`metrics` methods in order to store any amount of data that you want, ready for later
6 | analysis or performance studies.
7 |
8 | Example::
9 |
10 | from kaneda import Metrics
11 | metrics = Metrics(...)
12 | metrics.event('welcome', 'Kaneda is cool')
13 | metrics.gauge('answer_of_life', 42)
14 |
15 | .. toctree::
16 | :hidden:
17 |
18 | usage
19 | metrics
20 | backends
21 | queues
22 | settings
23 | django
24 | changelog
25 |
--------------------------------------------------------------------------------
/docs/metrics.rst:
--------------------------------------------------------------------------------
1 | .. _metrics:
2 |
3 | Metrics
4 | =======
5 |
6 | .. autoclass:: kaneda.base.Metrics
7 | :members:
8 |
--------------------------------------------------------------------------------
/docs/queues.rst:
--------------------------------------------------------------------------------
1 | .. _queues:
2 |
3 | Queues
4 | ======
5 |
6 | Kaneda provides builtin queues to store metrics and events to perform :ref:`asynchronous reporting `. If you want to use your
7 | custom asynchronous queue system you need to subclass :code:`BaseQueue` and implement your custom :code:`report` method,
8 | which is responsible for passing the metrics data to a job queue.
9 |
10 | .. _celery:
11 |
12 | Celery
13 | ~~~~~~
14 |
15 | Celery is a simple, flexible and reliable distributed system to process vast amounts of messages. It can be configured
16 | using various broker systems such as Redis or RabbitMQ.
17 |
18 | .. note::
19 |
20 | Before using Celery as an async queue you need to install the Celery library::
21 |
22 | pip install celery
23 |
24 |
25 | .. autoclass:: kaneda.queues.CeleryQueue
26 | :members:
27 |
28 | To run the worker execute this command::
29 |
30 | celery -A kaneda.tasks.celery worker
31 |
32 | .. _rq:
33 |
34 | RQ
35 | ~~
36 |
37 | RQ (Redis Queue) is a simple Python library for queueing jobs and processing them in the background with workers. It uses
38 | Redis as its main broker system.
39 |
40 | .. note::
41 |
42 | Before using RQ as an async queue you need to install the RQ and Redis libraries::
43 |
44 | pip install redis
45 | pip install rq
46 |
47 | To run the worker execute this command::
48 |
49 | rqworker [queue]
50 |
51 | The default queue is "kaneda".
52 |
53 | .. autoclass:: kaneda.queues.RQQueue
54 | :members:
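
A queue plugs into :code:`Metrics` exactly like a backend does. A minimal sketch with RQ (the Redis url is illustrative)::

    from kaneda.queues import RQQueue
    from kaneda import Metrics

    queue = RQQueue(redis_url='redis://localhost:6379/0')
    metrics = Metrics(queue=queue)
    metrics.gauge('answer_of_life', 42)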
55 |
56 | ZMQ
57 | ~~~
58 |
59 | ZMQ (or ZeroMQ) is a library which extends the standard socket interfaces with features traditionally provided by
60 | specialised messaging middleware products. ZeroMQ sockets provide an abstraction of asynchronous message queues and much
61 | more.
62 |
63 | .. note::
64 |
65 | Before using ZMQ as an async queue you need to install the ZMQ library::
66 |
67 | pip install pyzmq
68 |
69 | To run the worker execute this command::
70 |
71 | zmqworker --connection_url=<connection_url>
72 |
73 | or define :ref:`zmq_settings` settings in :file:`kanedasettings.py` and simply execute the worker command with::
74 |
75 | zmqworker
76 |
77 | .. autoclass:: kaneda.queues.ZMQQueue
78 | :members:
--------------------------------------------------------------------------------
/docs/settings.rst:
--------------------------------------------------------------------------------
1 | Settings
2 | ========
3 |
4 | Kaneda can be used with a settings file, in the same way as with :ref:`Django `. Simply define a
5 | :file:`kanedasettings.py` file with the backend or queue settings. Alternatively you can define the environment variable
6 | `DEFAULT_SETTINGS_ENVAR` pointing to the desired settings filename.
7 |
8 | With this you will be able to use the :ref:`Metrics` class without passing parameters::
9 |
10 | from kaneda import Metrics
11 |
12 | metrics = Metrics()
13 | metrics.gauge('answer_of_life', 42)
14 |
15 | Backends settings
16 | ~~~~~~~~~~~~~~~~~
17 |
18 | General
19 | -------
20 | BACKEND
21 | Class name of the backend. Available options are:
22 |
23 | * :code:`kaneda.backends.ElasticsearchBackend`
24 | * :code:`kaneda.backends.MongoBackend`
25 | * :code:`kaneda.backends.LoggerBackend`
26 | * :code:`kaneda.backends.RethinkBackend`
27 | * :code:`kaneda.backends.InfluxBackend`
28 |
29 | Elasticsearch
30 | -------------
31 | ELASTIC_INDEX_NAME
32 | Name of the Elasticsearch index used to store metrics data. Default name format will be index_name-YYYY.MM.DD.
33 |
34 | ELASTIC_APP_NAME
35 | Name of the app/project where metrics are used.
36 |
37 | ELASTIC_CONNECTION_URL
38 | Elasticsearch connection url (https://user:secret@localhost:9200).
39 |
40 | ELASTIC_HOST
41 | Server host.
42 |
43 | ELASTIC_PORT
44 | Server port.
45 |
46 | ELASTIC_USER
47 | HTTP auth username.
48 |
49 | ELASTIC_PASSWORD
50 | HTTP auth password.
51 |
52 | ELASTIC_TIMEOUT
53 | Elasticsearch connection timeout (seconds).
54 |
55 | MongoDB
56 | -------
57 | MONGO_DB_NAME
58 | Name of the MongoDB database.
59 |
60 | MONGO_COLLECTION_NAME
61 | Name of the MongoDB collection used to store metric data.
62 |
63 | MONGO_CONNECTION_URL
64 | Mongo connection url (mongodb://localhost:27017/).
65 |
66 | MONGO_HOST
67 | Server host.
68 |
69 | MONGO_PORT
70 | Server port.
71 |
72 | MONGO_TIMEOUT
73 | MongoDB connection timeout (milliseconds).
74 |
75 | RethinkDB
76 | ---------
77 | RETHINK_DB
78 | Name of the RethinkDB database.
79 |
80 | RETHINK_TABLE_NAME
81 | Name of the RethinkDB table. If this is not provided, the name of the metric will be used.
82 |
83 | RETHINK_HOST
84 | Server host.
85 |
86 | RETHINK_PORT
87 | Server port.
88 |
89 | RETHINK_USER
90 | Auth username.
91 |
92 | RETHINK_PASSWORD
93 | Auth password.
94 |
95 | RETHINK_TIMEOUT
96 | RethinkDB connection timeout (seconds).
97 |
98 | InfluxDB
99 | --------
100 | INFLUX_DATABASE
101 | Name of the InfluxDB database.
102 |
103 | INFLUX_CONNECTION_URL
104 | InfluxDB connection url (influxdb://username:password@localhost:8086/databasename).
105 |
106 | INFLUX_HOST
107 | Server host.
108 |
109 | INFLUX_PORT
110 | Server port.
111 |
112 | INFLUX_USERNAME
113 | Auth username.
114 |
115 | INFLUX_PASSWORD
116 | Auth password.
117 |
118 | INFLUX_TIMEOUT
119 | InfluxDB connection timeout (seconds).
120 |
121 | Logger
122 | ------
123 | LOGGER_FILENAME
124 | Name of the file where the logger will store the metrics.
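
Putting the backend settings together, a complete :file:`kanedasettings.py` for the Elasticsearch backend could look like this (all values are illustrative)::

    BACKEND = 'kaneda.backends.ElasticsearchBackend'
    ELASTIC_INDEX_NAME = 'myindex'
    ELASTIC_APP_NAME = 'myapp'
    ELASTIC_HOST = 'localhost'
    ELASTIC_PORT = 9200
    ELASTIC_USER = 'kaneda'
    ELASTIC_PASSWORD = 'kaneda'
    ELASTIC_TIMEOUT = 0.3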
125 |
126 | Queues settings
127 | ~~~~~~~~~~~~~~~
128 |
129 | General
130 | -------
131 | QUEUE
132 | Class name of the queue. Available options are:
133 |
134 | * :code:`kaneda.queues.CeleryQueue`
135 | * :code:`kaneda.queues.RQQueue`
136 | * :code:`kaneda.queues.ZMQQueue`
137 |
138 | Celery
139 | ------
140 | CELERY_BROKER
141 | Broker connection url.
142 |
143 | CELERY_QUEUE_NAME
144 | Name of the Celery queue.
145 |
146 | RQ
147 | --
148 | RQ_REDIS_URL
149 | Redis connection url.
150 |
151 | RQ_QUEUE_NAME
152 | Name of the RQ queue.
153 |
154 | .. _zmq_settings:
155 |
156 | ZMQ
157 | ---
158 | ZMQ_CONNECTION_URL
159 | ZMQ connection url.
160 |
161 | ZMQ_TIMEOUT
162 | ZMQ socket timeout (milliseconds).
--------------------------------------------------------------------------------
/docs/usage.rst:
--------------------------------------------------------------------------------
1 | Usage
2 | =====
3 |
4 | You need to install the `Kaneda` package::
5 |
6 | pip install kaneda
7 |
8 |
9 | Backend reporting
10 | ~~~~~~~~~~~~~~~~~
11 |
12 | You need a backend in order to keep data in persistent storage. You can use the builtin :doc:`backends`
13 | or define your custom backend by subclassing the :code:`BaseBackend` class.
14 |
15 | The following example shows how to send metrics with Elasticsearch as the backend::
16 |
17 | from kaneda.backends import ElasticsearchBackend
18 | from kaneda import Metrics
19 |
20 | backend = ElasticsearchBackend(index_name='myindex', app_name='myapp', host='localhost',
21 | port=9200, user='kaneda', password='kaneda')
22 | metrics = Metrics(backend=backend)
23 | metrics.gauge('answer_of_life', 42)
24 |
25 | A backend class can also be instantiated passing a previously defined connection client. This is especially useful when
26 | you want to use a tuned connection::
27 |
28 | client = Elasticsearch(['localhost'], port=9200, http_auth=('kaneda', 'kaneda'), timeout=0.3)
29 | backend = ElasticsearchBackend(index_name='myindex', app_name='myapp', client=client)
30 |
31 | .. _async:
32 |
33 | Asynchronous reporting
34 | ~~~~~~~~~~~~~~~~~~~~~~
35 |
36 | Depending on the selected backend, the process of reporting metrics could be "slow" if the response time of your
37 | application is critical (e.g. a website). Furthermore, if your application doesn't need to see the reported metrics
38 | in real time, you should consider using asynchronous reporting. With this system you can send a
39 | metric report in the background without adding too much overhead.
40 |
41 | To use this system you need to install a queue system and use one of the builtin Kaneda :ref:`queues` classes.
42 | To set up Kaneda in async mode follow these steps.
43 |
44 | 1. Install and configure your queue system (e.g. :ref:`rq`).
45 |
46 | .. code-block:: shell
47 |
48 | pip install rq
49 |
50 | 2. Setup your backend configuration in a new file named :file:`kanedasettings.py`.
51 |
52 | .. code-block:: python
53 |
54 | BACKEND = 'kaneda.backends.ElasticsearchBackend'
55 | ELASTIC_INDEX_NAME = 'myindex'
56 | ELASTIC_APP_NAME = 'myapp'
57 | ELASTIC_HOST = 'localhost'
58 | ELASTIC_PORT = 9200
59 | ELASTIC_USER = 'kaneda'
60 | ELASTIC_PASSWORD = 'kaneda'
61 |
62 | 3. Run the worker
63 |
64 | .. code-block:: shell
65 |
66 | rqworker
67 |
68 | Now you can use Kaneda with the same :ref:`metrics` API::
69 |
70 | from kaneda.queues import RQQueue
71 | from kaneda import Metrics
72 |
73 | queue = RQQueue(redis_url='redis://localhost:6379/0')
74 | metrics = Metrics(queue=queue)
75 | metrics.gauge('answer_of_life', 42)
76 |
77 | As in the backend example, it can be instantiated passing a queue client::
78 |
79 | q = Queue(queue_name, connection=Redis())
80 | queue = RQQueue(queue=q)
81 |
82 | You can also specify a Redis connection url (or a broker url if you use :ref:`Celery`). Note that this allows you
83 | to run the worker on a different server.
--------------------------------------------------------------------------------
/kaneda/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | __author__ = 'Marc Tudurí'
3 | __email__ = 'mtuduri@apsl.net'
4 | __version__ = '1.0'
5 |
6 | import logging  # NOQA
7 |
8 | from .base import Metrics  # NOQA
9 |
10 | try:  # Python 2.7+
11 | from logging import NullHandler
12 | except ImportError:
13 | class NullHandler(logging.Handler):
14 | def emit(self, record):
15 | pass
16 | logging.getLogger(__name__).addHandler(NullHandler())
17 |
--------------------------------------------------------------------------------
/kaneda/backends/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import BaseBackend  # NOQA
2 | from .elasticsearch import ElasticsearchBackend  # NOQA
3 | from .mongodb import MongoBackend  # NOQA
4 | from .rethink import RethinkBackend  # NOQA
5 | from .influxdb import InfluxBackend  # NOQA
6 | from .logger import LoggerBackend  # NOQA
7 |
--------------------------------------------------------------------------------
/kaneda/backends/base.py:
--------------------------------------------------------------------------------
1 | import socket
2 |
3 |
4 | class BaseBackend(object):
5 | """
6 | Base class for backend reporting storage.
7 |
8 | settings_namespace is a class attribute that will be used to get the needed
9 | parameters to create a new backend instance from a settings file.
10 | """
11 | settings_namespace = None
12 |
13 | def report(self, name, metric, value, tags, id_):
14 | raise NotImplementedError()
15 |
16 | def _get_host_name(self):
17 | return socket.gethostname()
18 |
19 | def _get_payload(self, name, value, tags):
20 | payload = {'host': self._get_host_name(), 'name': name}
21 | if isinstance(value, dict):
22 | payload.update(value)
23 | else:
24 | payload['value'] = value
25 | if tags:
26 | payload['tags'] = tags
27 | return payload
28 |
--------------------------------------------------------------------------------
/kaneda/backends/elasticsearch.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import logging
4 | from datetime import datetime
5 |
6 | try:
7 | from elasticsearch import Elasticsearch
8 | except ImportError:
9 | Elasticsearch = None
10 |
11 | from kaneda.exceptions import ImproperlyConfigured
12 |
13 | from .base import BaseBackend
14 |
15 |
16 | class ElasticsearchBackend(BaseBackend):
17 | """
18 | Elasticsearch backend.
19 |
20 | :param index_name: name of the Elasticsearch index used to store metrics data. Default name format will be \
21 | index_name-YYYY.MM.DD.
22 | :param app_name: name of the app/project where metrics are used.
23 | :param client: client instance of Elasticsearch class.
24 | :param connection_url: Elasticsearch connection url (https://user:secret@localhost:9200). \ 25 | It can be used passing a single connection_url (a string) or passing multiple connection_urls (a list). 26 | :param host: server host. It can be used passing a single host (a string) or passing multiple hosts (a list). 27 | :param port: server port. 28 | :param user: HTTP auth username. 29 | :param password: HTTP auth password. 30 | :param timeout: Elasticsearch connection timeout (seconds). 31 | """ 32 | settings_namespace = 'ELASTIC' 33 | 34 | def __init__(self, index_name, app_name, client=None, connection_url=None, host=None, port=None, 35 | user=None, password=None, timeout=0.3): 36 | if not Elasticsearch: 37 | raise ImproperlyConfigured( 38 | 'You need to install the elasticsearch library to use the Elasticsearch backend.') 39 | if client: 40 | if not isinstance(client, Elasticsearch): 41 | raise ImproperlyConfigured('"client" parameter is not an instance of Elasticsearch client') 42 | self.client = client 43 | elif connection_url: 44 | if not isinstance(connection_url, list): 45 | connection_url = [connection_url] 46 | self.client = Elasticsearch(connection_url, timeout=timeout) 47 | else: 48 | if not isinstance(host, list): 49 | host = [host] 50 | self.client = Elasticsearch(host, port=port, http_auth=(user, password), timeout=timeout) 51 | self.index_name = index_name 52 | self.app_name = app_name 53 | 54 | def _get_payload(self, name, value, tags): 55 | payload = super(ElasticsearchBackend, self)._get_payload(name, value, tags) 56 | payload['app_name'] = self.app_name 57 | payload['@timestamp'] = datetime.utcnow() 58 | return payload 59 | 60 | def _get_index_name(self): 61 | return '{}-{}'.format(self.index_name, datetime.utcnow().strftime('%Y.%m.%d')) 62 | 63 | def report(self, name, metric, value, tags, id_): 64 | payload = self._get_payload(name, value, tags) 65 | try: 66 | return self.client.index(index=self._get_index_name(), doc_type=metric, id=id_, body=payload) 67 | except Exception as e: 68 | logger = logging.getLogger(__name__) 69 | logger.exception(e) 70 | -------------------------------------------------------------------------------- /kaneda/backends/influxdb.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import logging 4 | from datetime import datetime 5 | 6 | try: 7 | from influxdb import InfluxDBClient 8 | except ImportError: 9 | InfluxDBClient = None 10 | 11 | from kaneda.exceptions import ImproperlyConfigured 12 | 13 | from .base import BaseBackend 14 | 15 | 16 | class InfluxBackend(BaseBackend): 17 | """ 18 | InfluxDB backend. 19 | 20 | :param database: name of the InfluxDB database. 21 | :param client: client instance of InfluxDBClient class. 22 | :param connection_url: InfluxDB connection url (influxdb://username:password@localhost:8086/databasename). 23 | :param host: server host. 24 | :param port: server port. 25 | :param username: auth username. 26 | :param password: auth password. 27 | :param timeout: InfluxDB connection timeout (seconds). 
28 | """ 29 | settings_namespace = 'INFLUX' 30 | 31 | def __init__(self, database, client=None, connection_url=None, host=None, port=None, username=None, password=None, 32 | timeout=0.3): 33 | if not InfluxDBClient: 34 | raise ImproperlyConfigured('You need to install the influxdb library to use the InfluxDB backend.') 35 | if client: 36 | if not isinstance(client, InfluxDBClient): 37 | raise ImproperlyConfigured('"client" parameter is not an instance of InfluxDBClient client.') 38 | self.client = client 39 | elif connection_url: 40 | self.client = InfluxDBClient.from_dsn(connection_url, timeout=timeout) 41 | else: 42 | self.client = InfluxDBClient(host=host, port=port, username=username, password=password, 43 | database=database, timeout=timeout) 44 | self.client.create_database(database) 45 | 46 | def _get_payload(self, name, value, metric, tags, id_): 47 | if tags: 48 | tags['host'] = self._get_host_name() 49 | else: 50 | tags = {'host': self._get_host_name()} 51 | if isinstance(value, dict): 52 | fields = value 53 | fields['name'] = name 54 | else: 55 | fields = {'name': name, 'value': value} 56 | return [{'measurement': metric, 'time': datetime.utcnow(), 'tags': tags, 'fields': fields}] 57 | 58 | def report(self, name, metric, value, tags, id_): 59 | try: 60 | payload = self._get_payload(name, value, metric, tags, id_) 61 | return self.client.write_points(payload) 62 | except Exception as e: 63 | logger = logging.getLogger(__name__) 64 | logger.exception(e) 65 | -------------------------------------------------------------------------------- /kaneda/backends/logger.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import logging 4 | 5 | from .base import BaseBackend 6 | 7 | 8 | class LoggerBackend(BaseBackend): 9 | """ 10 | Logger backend. 11 | 12 | :param logger: logging instance. 13 | :param filename: name of the file where logger will store the metrics. 14 | """ 15 | 16 | def __init__(self, logger=None, filename=''): 17 | if logger: 18 | self.logger = logger 19 | else: 20 | if filename: 21 | handler = logging.FileHandler(filename) 22 | else: 23 | handler = logging.StreamHandler() 24 | self.logger = logging.getLogger(__name__) 25 | self.logger.setLevel(logging.INFO) 26 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s') 27 | handler.setFormatter(formatter) 28 | self.logger.addHandler(handler) 29 | 30 | def _get_payload(self, name, value, metric, tags, id_): 31 | payload = super(LoggerBackend, self)._get_payload(name, value, tags) 32 | payload['metric'] = metric 33 | if id_: 34 | payload['_id'] = id_ 35 | return payload 36 | 37 | def report(self, name, metric, value, tags, id_): 38 | payload = self._get_payload(name, value, metric, tags, id_) 39 | return self.logger.info(payload) 40 | -------------------------------------------------------------------------------- /kaneda/backends/mongodb.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import logging 4 | from datetime import datetime 5 | 6 | try: 7 | from pymongo import MongoClient 8 | except ImportError: 9 | MongoClient = None 10 | 11 | from kaneda.exceptions import ImproperlyConfigured 12 | 13 | from .base import BaseBackend 14 | 15 | 16 | class MongoBackend(BaseBackend): 17 | """ 18 | MongoDB backend. 19 | 20 | :param db_name: name of the MongoDB database. 
21 | :param collection_name: name of the MongoDB collection used to store metric data. 22 | :param client: client instance of MongoClient class. 23 | :param connection_url: Mongo connection url (mongodb://localhost:27017/). 24 | :param host: server host. 25 | :param port: server port. 26 | :param timeout: MongoDB connection timeout (milliseconds). 27 | """ 28 | settings_namespace = 'MONGO' 29 | 30 | def __init__(self, db_name, collection_name, client=None, connection_url=None, host=None, port=None, timeout=300): 31 | if not MongoClient: 32 | raise ImproperlyConfigured('You need to install the pymongo library to use the MongoDB backend.') 33 | if client: 34 | if not isinstance(client, MongoClient): 35 | raise ImproperlyConfigured('"client" parameter is not an instance of MongoClient client.') 36 | self.client = client 37 | elif connection_url: 38 | self.client = MongoClient(connection_url, serverSelectionTimeoutMS=timeout) 39 | else: 40 | self.client = MongoClient(host=host, port=port, serverSelectionTimeoutMS=timeout) 41 | db = self.client[db_name] 42 | self.collection = db[collection_name] 43 | 44 | def _get_payload(self, name, value, metric, tags, id_): 45 | payload = super(MongoBackend, self)._get_payload(name, value, tags) 46 | payload['timestamp'] = datetime.utcnow() 47 | payload['metric'] = metric 48 | if id_: 49 | payload['_id'] = id_ 50 | return payload 51 | 52 | def report(self, name, metric, value, tags, id_): 53 | payload = self._get_payload(name, value, metric, tags, id_) 54 | try: 55 | return self.collection.insert_one(payload) 56 | except Exception as e: 57 | logger = logging.getLogger(__name__) 58 | logger.exception(e) 59 | -------------------------------------------------------------------------------- /kaneda/backends/rethink.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import logging 4 | 5 | try: 6 | import rethinkdb as r 7 | except ImportError: 8 | r = None 9 | 10 | from kaneda.exceptions import ImproperlyConfigured 11 | 12 | from .base import BaseBackend 13 | 14 | 15 | class RethinkBackend(BaseBackend): 16 | """ 17 | RethinkDB backend. 18 | 19 | :param db: name of the RethinkDB database. 20 | :param table_name: name of the RethinkDB table. If this is not provided, it will be used the name of the metric. 21 | :param host: server host. 22 | :param port: server port. 23 | :param user: auth username. 24 | :param password: auth password. 25 | :param timeout: RethinkDB connection timeout (seconds). 
26 | """
27 | settings_namespace = 'RETHINK'
28 |
29 | def __init__(self, db, table_name=None, connection=None, host=None, port=None, user=None, password=None,
30 | timeout=0.3):
31 | if not r:
32 | raise ImproperlyConfigured('You need to install the rethinkdb library to use the RethinkDB backend.')
33 | # accept an existing connection or build one from the given parameters
34 | self.connection = connection
35 | if self.connection is None and host and port:
36 | if user and password:
37 | self.connection = r.connect(host=host, port=port, db=db, user=user, password=password, timeout=timeout)
38 | else:
39 | self.connection = r.connect(host=host, port=port, db=db, timeout=timeout)
40 | self.db = db
41 | self.table_name = table_name
42 | if self.connection is None:
43 | self.connection = r.connect(db=db, timeout=timeout)
44 | self._create_database()
45 |
46 | def _get_payload(self, name, value, tags, id_):
47 | payload = super(RethinkBackend, self)._get_payload(name, value, tags)
48 | payload['timestamp'] = r.now()
49 | if id_:
50 | payload['id'] = id_
51 | return payload
52 |
53 | def _create_database(self):
54 | if self.db not in r.db_list().run(self.connection):
55 | r.db_create(self.db).run(self.connection)
56 |
57 | def _create_table(self, metric):
58 | table_name = self._get_table_name(metric)
59 | if table_name not in r.db(self.db).table_list().run(self.connection):
60 | r.db(self.db).table_create(table_name).run(self.connection)
61 |
62 | def _get_table_name(self, metric):
63 | return self.table_name or metric
64 |
65 | def report(self, name, metric, value, tags, id_):
66 | try:
67 | table_name = self._get_table_name(metric)
68 | self._create_table(metric)
69 | payload = self._get_payload(name, value, tags, id_)
70 | return r.table(table_name).insert(payload).run(self.connection)
71 | except Exception as e:
72 | logger = logging.getLogger(__name__)
73 | logger.exception(e)
74 |
--------------------------------------------------------------------------------
/kaneda/base.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from time import time
4 | from functools import wraps
5 |
6 | from kaneda.utils import get_kaneda_objects
7 |
8 |
9 | class Metrics(object):
10 | """
11 | Metrics reporting class.
12 |
13 | :param backend: instance of kaneda.backends. It is responsible for storing the reported data.
14 | :param queue: instance of kaneda.queues. It is responsible for storing the reported data asynchronously.
15 |
16 | If neither parameter is passed, it tries to get the backend from the Kaneda settings file.
17 | """
18 | def __init__(self, backend=None, queue=None):
19 | self.backend = backend
20 | self.queue = queue
21 | if not self.backend and not self.queue:
22 | self.backend, self.queue = get_kaneda_objects()
23 |
24 | def gauge(self, name, value, tags=None):
25 | """
26 | Record the value of a gauge.
27 |
28 | >>> metrics.gauge('users.notifications', 13, tags=['new_message', 'follow_request'])
29 | """
30 | return self._report(name, 'gauge', value, tags)
31 |
32 | def increment(self, name, tags=None):
33 | """
34 | Increment a counter.
35 |
36 | >>> metrics.increment('user.profile.views')
37 | """
38 | self._report(name, 'counter', 1, tags)
39 |
40 | def decrement(self, name, tags=None):
41 | """
42 | Decrement a counter.
43 |
44 | >>> metrics.decrement('hotel.occupation')
45 | """
46 | self._report(name, 'counter', -1, tags)
47 |
48 | def timing(self, name, value, tags=None):
49 | """
50 | Record a timing.
-------------------------------------------------------------------------------- /kaneda/base.py: --------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | 
3 | from time import time
4 | from functools import wraps
5 | 
6 | from kaneda.utils import get_kaneda_objects
7 | 
8 | 
9 | class Metrics(object):
10 |     """
11 |     Metrics reporting class.
12 | 
13 |     :param backend: instance of kaneda.backends, responsible for storing the reported data.
14 |     :param queue: instance of kaneda.queues, responsible for storing the reported data asynchronously.
15 | 
16 |     If neither parameter is passed, the backend or queue is loaded from the Kaneda settings file.
17 |     """
18 |     def __init__(self, backend=None, queue=None):
19 |         self.backend = backend
20 |         self.queue = queue
21 |         if not self.backend and not self.queue:
22 |             self.backend, self.queue = get_kaneda_objects()
23 | 
24 |     def gauge(self, name, value, tags=None):
25 |         """
26 |         Record the value of a gauge.
27 | 
28 |         >>> metrics.gauge('users.notifications', 13, tags=['new_message', 'follow_request'])
29 |         """
30 |         return self._report(name, 'gauge', value, tags)
31 | 
32 |     def increment(self, name, tags=None):
33 |         """
34 |         Increment a counter.
35 | 
36 |         >>> metrics.increment('user.profile.views')
37 |         """
38 |         self._report(name, 'counter', 1, tags)
39 | 
40 |     def decrement(self, name, tags=None):
41 |         """
42 |         Decrement a counter.
43 | 
44 |         >>> metrics.decrement('hotel.occupation')
45 |         """
46 |         self._report(name, 'counter', -1, tags)
47 | 
48 |     def timing(self, name, value, tags=None):
49 |         """
50 |         Record a timing.
51 | 
52 |         >>> metrics.timing('hotel.availability.request_time', 4)
53 |         """
54 |         self._report(name, 'timing', value, tags)
55 | 
56 |     def event(self, name, text, tags=None):
57 |         """
58 |         Record an event.
59 | 
60 |         >>> metrics.event('user.signup', 'New user registered')
61 |         """
62 |         self._report(name, 'event', text, tags)
63 | 
64 |     def custom(self, name, metric, value, tags=None, id_=None):
65 |         """
66 |         Send a custom metric report.
67 | 
68 |         >>> metrics.custom('hotel.response_data', metric='xml', value={'status': 'ok', 'xml': ...}, id_='2B75D750')
69 |         """
70 |         self._report(name, metric, value, tags, id_)
71 | 
72 |     class _TimedContextManagerDecorator(object):
73 |         """
74 |         Class that implements the context manager and the decorator for the "timed" method.
75 |         """
76 | 
77 |         def __init__(self, metrics, name=None, tags=None, use_ms=None):
78 |             self.metrics = metrics
79 |             self.name = name
80 |             self.tags = tags
81 |             self.use_ms = use_ms
82 | 
83 |         def __call__(self, func):
84 |             """
85 |             Decorator which reports the elapsed time of the function call.
86 |             """
87 |             if not self.name:
88 |                 self.name = u'{0:s}.{1:s}'.format(func.__module__, func.__name__)
89 | 
90 |             @wraps(func)
91 |             def wrapped(*args, **kwargs):
92 |                 with self:
93 |                     return func(*args, **kwargs)
94 |             return wrapped
95 | 
96 |         def __enter__(self):
97 |             self.start = time()
98 | 
99 |         def __exit__(self, exc_type, exc_value, exc_traceback):
100 |             elapsed = time() - self.start
101 |             elapsed = int(round(1000 * elapsed)) if self.use_ms else elapsed
102 |             self.metrics.timing(self.name, elapsed, self.tags)
103 | 
104 |     def timed(self, name=None, tags=None, use_ms=None):
105 |         """
106 |         Measure the amount of time of a function (using a decorator) or a piece of
107 |         code (using a context manager). If no name is provided when using the decorator,
108 |         the module and function names are used.
109 |         ::
110 | 
111 |             # With decorator
112 |             @metrics.timed('request.response_time')
113 |             def perform_request(params):
114 |                 pass
115 | 
116 |             # With context manager
117 |             with metrics.timed('request.response_time'):
118 |                 pass
119 |         """
120 |         return self._TimedContextManagerDecorator(self, name, tags, use_ms)
121 | 
122 |     def _report(self, name, metric, value, tags, id_=None):
123 |         if self.backend:
124 |             return self.backend.report(name, metric, value, tags, id_)
125 |         elif self.queue:
126 |             return self.queue.report(name, metric, value, tags, id_)
127 | 
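A short end-to-end sketch of the Metrics class above, using the LoggerBackend since it needs no external service (the log filename and metric name are assumptions):

    from kaneda import Metrics
    from kaneda.backends import LoggerBackend

    metrics = Metrics(backend=LoggerBackend(filename='/tmp/kaneda.log'))

    @metrics.timed('user.lookup.time', use_ms=True)
    def get_user():
        pass

    get_user()  # reports a 'timing' metric with the elapsed milliseconds
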
-------------------------------------------------------------------------------- /kaneda/exceptions.py: --------------------------------------------------------------------------------
1 | __all__ = ['ImproperlyConfigured', 'UnexistingKanedaClass', 'SettingsError']
2 | 
3 | 
4 | class ImproperlyConfigured(ImportError):
5 |     """
6 |     Kaneda is improperly configured.
7 |     """
8 | 
9 | 
10 | class UnexistingKanedaClass(ImportError):
11 |     """
12 |     Kaneda is configured with a nonexistent backend/queue class.
13 |     """
14 | 
15 | 
16 | class SettingsError(Exception):
17 |     """
18 |     Kaneda is configured without a settings file or is missing a required settings variable.
19 |     """
20 | 
-------------------------------------------------------------------------------- /kaneda/queues/__init__.py: --------------------------------------------------------------------------------
1 | from .base import BaseQueue  # NOQA
2 | from .celery import CeleryQueue  # NOQA
3 | from .rq import RQQueue  # NOQA
4 | from .zmq import ZMQQueue  # NOQA
5 | 
-------------------------------------------------------------------------------- /kaneda/queues/base.py: --------------------------------------------------------------------------------
1 | 
2 | 
3 | class BaseQueue(object):
4 |     """
5 |     Base class for queues.
6 | 
7 |     settings_namespace is a class attribute that will be used to get the needed
8 |     parameters to create a new queue instance from a settings file.
9 |     """
10 |     settings_namespace = None
11 | 
12 |     def report(self, name, metric, value, tags, id_):
13 |         raise NotImplementedError
14 | 
-------------------------------------------------------------------------------- /kaneda/queues/celery.py: --------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | 
3 | import logging
4 | 
5 | try:
6 |     from celery import Celery
7 | except ImportError:
8 |     Celery = None
9 | 
10 | from kaneda.exceptions import ImproperlyConfigured
11 | 
12 | from .base import BaseQueue
13 | 
14 | 
15 | class CeleryQueue(BaseQueue):
16 |     """
17 |     Celery queue.
18 | 
19 |     :param app: app instance of the Celery class.
20 |     :param broker: broker connection url where Celery will handle the async reporting requests.
21 |     :param queue_name: name of the queue being used by the Celery worker process.
22 |     """
23 |     settings_namespace = 'CELERY'
24 | 
25 |     def __init__(self, app=None, broker=None, queue_name=''):
26 |         if not Celery:
27 |             raise ImproperlyConfigured('You need to install the celery library to use the Celery queue.')
28 |         if app:
29 |             if not isinstance(app, Celery):
30 |                 raise ImproperlyConfigured('"app" parameter is not an instance of the Celery class.')
31 |             self.app = app
32 |         else:
33 |             self.app = Celery(broker=broker)
34 |         self.queue_name = queue_name
35 | 
36 |     def report(self, name, metric, value, tags, id_):
37 |         try:
38 |             return self.app.send_task('kaneda.tasks.celery.report', args=(name, metric, value, tags, id_),
39 |                                       queue=self.queue_name)
40 |         except Exception as e:
41 |             logger = logging.getLogger(__name__)
42 |             logger.exception(e)
43 | 
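A minimal sketch of asynchronous reporting through the queue above (the Redis broker URL mirrors the test settings; it is not mandated by the source):

    from kaneda import Metrics
    from kaneda.queues import CeleryQueue

    # Reports are enqueued as 'kaneda.tasks.celery.report' tasks and stored by the worker.
    metrics = Metrics(queue=CeleryQueue(broker='redis://localhost:6379/1'))
    metrics.gauge('users.online', 123)
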
-------------------------------------------------------------------------------- /kaneda/queues/rq.py: --------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | 
3 | import logging
4 | 
5 | try:
6 |     from redis import Redis
7 |     from rq import Queue
8 | except ImportError:
9 |     Redis = None
10 |     Queue = None
11 | 
12 | from kaneda.exceptions import ImproperlyConfigured
13 | 
14 | from .base import BaseQueue
15 | 
16 | 
17 | class RQQueue(BaseQueue):
18 |     """
19 |     RQ queue.
20 | 
21 |     :param queue: queue instance of RQ class.
22 |     :param redis_url: Redis connection url where RQ will handle the async reporting requests.
23 |     :param queue_name: name of the queue being used by the RQ worker process.
24 |     """
25 |     settings_namespace = 'RQ'
26 | 
27 |     def __init__(self, queue=None, redis_url=None, queue_name='kaneda'):
28 |         if not Redis:
29 |             raise ImproperlyConfigured('You need to install the redis library to use the RQ queue.')
30 |         if not Queue:
31 |             raise ImproperlyConfigured('You need to install the rq library to use the RQ queue.')
32 |         if queue:
33 |             if not isinstance(queue, Queue):
34 |                 raise ImproperlyConfigured('"queue" parameter is not an instance of the RQ Queue class.')
35 |             self.queue = queue
36 |         elif redis_url:
37 |             self.queue = Queue(queue_name, connection=Redis.from_url(redis_url))
38 |         else:
39 |             self.queue = Queue(queue_name, connection=Redis())
40 | 
41 |     def report(self, name, metric, value, tags, id_):
42 |         try:
43 |             return self.queue.enqueue('kaneda.tasks.rq.report', name, metric, value, tags, id_)
44 |         except Exception as e:
45 |             logger = logging.getLogger(__name__)
46 |             logger.exception(e)
47 | 
-------------------------------------------------------------------------------- /kaneda/queues/zmq.py: --------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | 
3 | import logging
4 | 
5 | try:
6 |     import zmq
7 | except ImportError:
8 |     zmq = None
9 | 
10 | from kaneda.exceptions import ImproperlyConfigured
11 | 
12 | from .base import BaseQueue
13 | 
14 | 
15 | class ZMQQueue(BaseQueue):
16 |     """
17 |     ZeroMQ queue.
18 | 
19 |     :param connection_url: ZMQ connection url (tcp://127.0.0.1:5555).
20 |     :param timeout: ZMQ socket timeout (milliseconds).
21 |     """
22 |     settings_namespace = 'ZMQ'
23 | 
24 |     def __init__(self, connection_url, timeout=300):
25 |         if not zmq:
26 |             raise ImproperlyConfigured('You need to install pyzmq to use the ZMQ queue.')
27 |         context = zmq.Context()
28 |         self.socket = context.socket(zmq.PUSH)
29 |         self.socket.SNDTIMEO = timeout
30 |         self.socket.bind(connection_url)
31 | 
32 |     def report(self, name, metric, value, tags, id_):
33 |         # Capture the report arguments as a dict and drop the instance reference.
34 |         payload = locals()
35 |         del payload['self']
36 |         try:
37 |             return self.socket.send_json(payload)
38 |         except Exception as e:
39 |             logger = logging.getLogger(__name__)
40 |             logger.exception(e)
41 | 
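Equivalent sketches for the RQ and ZMQ queues above (the connection URLs mirror the test settings; they are assumptions otherwise):

    from kaneda import Metrics
    from kaneda.queues import RQQueue, ZMQQueue

    metrics = Metrics(queue=RQQueue(redis_url='redis://localhost:6379/1'))
    # or, pushing JSON payloads to a ZMQ PULL worker:
    metrics = Metrics(queue=ZMQQueue(connection_url='tcp://127.0.0.1:5555'))
    metrics.increment('page.views')
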
-------------------------------------------------------------------------------- /kaneda/tasks/__init__.py: --------------------------------------------------------------------------------
1 | from .zmq import zmq_task  # NOQA
2 | 
-------------------------------------------------------------------------------- /kaneda/tasks/celery.py: --------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | 
3 | from celery import Celery
4 | 
5 | from kaneda.utils import get_backend
6 | 
7 | backend = get_backend()
8 | 
9 | app = Celery()
10 | app.config_from_object('celeryconfig')
11 | 
12 | 
13 | @app.task()
14 | def report(name, metric, value, tags, id_):
15 |     """
16 |     Celery task to report metrics to the configured backend in kanedasettings.py
17 | 
18 |     To run the worker execute this command:
19 |     celery -A kaneda.tasks.celery worker
20 |     """
21 |     return backend.report(name, metric, value, tags, id_)
22 | 
-------------------------------------------------------------------------------- /kaneda/tasks/rq.py: --------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | 
3 | from redis import Redis
4 | from rq.decorators import job
5 | 
6 | from kaneda.utils import get_backend
7 | 
8 | backend = get_backend()
9 | 
10 | 
11 | @job(queue='kaneda', connection=Redis())
12 | def report(name, metric, value, tags, id_):
13 |     """
14 |     RQ job to report metrics to the configured backend in kanedasettings.py
15 | 
16 |     To run the worker execute this command:
17 |     rqworker [queue]
18 |     """
19 |     return backend.report(name, metric, value, tags, id_)
20 | 
-------------------------------------------------------------------------------- /kaneda/tasks/zmq.py: --------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | 
3 | from datetime import datetime
4 | 
5 | import click
6 | import zmq
7 | 
8 | from kaneda.exceptions import SettingsError
9 | from kaneda.utils import get_backend, get_settings
10 | 
11 | 
12 | @click.command()
13 | @click.option('--connection_url', '-u', help='ZMQ connection url, e.g.: tcp://127.0.0.1:5555')
14 | def zmq_task(connection_url):
15 |     """
16 |     ZMQ job to report metrics to the configured backend in kanedasettings.py
17 | 
18 |     To run the worker execute this command:
19 |     zmqworker --connection_url=<connection_url>
20 |     """
21 |     if not connection_url:
22 |         try:
23 |             settings = get_settings()
24 |             connection_url = settings.ZMQ_CONNECTION_URL
25 |         except ImportError:
26 |             raise SettingsError("Pass the --connection_url option or define ZMQ_CONNECTION_URL in the Kaneda settings "
27 |                                 "file before using the ZMQ task processor.")
28 |     backend = get_backend()
29 |     context = zmq.Context()
30 |     socket = context.socket(zmq.PULL)
31 |     socket.connect(connection_url)
32 |     poller = zmq.Poller()
33 |     poller.register(socket)
34 |     click.secho('Running ZMQ worker - listening at {}.'.format(connection_url), fg='blue')
35 |     click.secho('Using {}.'.format(backend.__class__.__name__), fg='blue')
36 |     click.echo('\n')
37 |     while True:
38 |         events = dict(poller.poll(0))
39 |         if socket in events:
40 |             payload = socket.recv_json()
41 |             click.secho('[{}: Received data] {}'.format(datetime.utcnow(), payload), fg='green')
42 |             backend.report(**payload)
43 | 
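All three workers above resolve their backend from a settings module, so a minimal kanedasettings.py sketch may help (the MONGO_* values mirror the test settings; the namespace-to-kwarg mapping is described in utils.py below):

    # kanedasettings.py -- found via the KANEDA_SETTINGS_MODULE env var or by module name.
    BACKEND = 'kaneda.backends.MongoBackend'
    MONGO_DB_NAME = 'kaneda'
    MONGO_COLLECTION_NAME = 'metrics'
    MONGO_HOST = 'localhost'
    MONGO_PORT = 27017
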
-------------------------------------------------------------------------------- /kaneda/utils.py: --------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | 
3 | import os
4 | from importlib import import_module
5 | from six import string_types
6 | 
7 | from kaneda.exceptions import SettingsError, UnexistingKanedaClass
8 | 
9 | DEFAULT_SETTINGS_ENVAR = 'KANEDA_SETTINGS_MODULE'
10 | DEFAULT_SETTINGS_MODULE = 'kanedasettings'
11 | 
12 | 
13 | def import_class(path_or_callable):
14 |     if hasattr(path_or_callable, '__call__'):
15 |         return path_or_callable
16 |     else:
17 |         assert isinstance(path_or_callable, string_types)
18 |         package, attr = path_or_callable.rsplit('.', 1)
19 |         return getattr(import_module(package), attr)
20 | 
21 | 
22 | def get_settings():
23 |     """
24 |     Get settings from the DEFAULT_SETTINGS_MODULE file or from the previously defined
25 |     DEFAULT_SETTINGS_ENVAR environment variable pointing to the desired settings module.
26 |     """
27 |     settings_module = os.environ.get(DEFAULT_SETTINGS_ENVAR, DEFAULT_SETTINGS_MODULE)
28 |     return import_module(settings_module)
29 | 
30 | 
31 | def get_object_from_settings(class_path, settings):
32 |     """
33 |     Get a backend/queue object from a settings file. It converts the relevant settings
34 |     definitions into a parameter dict that can be passed to the class
35 |     specified by settings.BACKEND or settings.QUEUE.
36 | 
37 |     e.g.:
38 |     If your backend has the class attribute "settings_namespace" set to "MY_BACKEND" and
39 |     your settings file has the variables MY_BACKEND_HOST and MY_BACKEND_PORT,
40 |     it will strip the namespace and create a dict with the keys "host" and "port" as backend parameters.
41 |     """
42 |     try:
43 |         kaneda_class = import_class(class_path)
44 |         namespace = kaneda_class.settings_namespace + '_'
45 |         params = {k.replace(namespace, '').lower(): v for k, v in settings.__dict__.items() if k.startswith(namespace)}
46 |         return kaneda_class(**params)
47 |     except (ImportError, AttributeError):
48 |         raise UnexistingKanedaClass('The selected BACKEND or QUEUE class does not exist.')
49 | 
50 | 
51 | def get_backend():
52 |     """
53 |     Wraps the backend retrieval, checking that a settings file is
54 |     defined and that the defined settings are correct.
55 |     """
56 |     try:
57 |         settings = get_settings()
58 |         if not hasattr(settings, 'BACKEND'):
59 |             raise SettingsError('You need to set BACKEND in Kaneda settings file to import a backend instance.')
60 |         return get_object_from_settings(settings.BACKEND, settings)
61 |     except ImportError:
62 |         raise SettingsError('Define backend settings in {}.py or set the "{}" environment variable '
63 |                             'with your settings module.'.format(DEFAULT_SETTINGS_MODULE, DEFAULT_SETTINGS_ENVAR))
64 | 
65 | 
66 | def get_kaneda_objects():
67 |     """
68 |     Returns a backend object or a queue object.
69 |     """
70 |     try:
71 |         settings = get_settings()
72 |         if hasattr(settings, 'BACKEND'):
73 |             return get_object_from_settings(settings.BACKEND, settings), None
74 |         if hasattr(settings, 'QUEUE'):
75 |             return None, get_object_from_settings(settings.QUEUE, settings)
76 |         else:
77 |             raise SettingsError('You need to set BACKEND or QUEUE to use Kaneda with a settings file.')
78 |     except ImportError:
79 |         raise SettingsError('Define backend or queue settings in {}.py or set the "{}" environment variable '
80 |                             'with your settings module.'.format(DEFAULT_SETTINGS_MODULE, DEFAULT_SETTINGS_ENVAR))
81 | 
-------------------------------------------------------------------------------- /setup.py: --------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import codecs
4 | 
5 | try:
6 |     from setuptools import setup, find_packages
7 | except ImportError:
8 |     from distutils.core import setup
9 | 
10 | 
11 | def get_version(package):
12 |     """
13 |     Return package version as listed in `__version__` in `__init__.py`.
14 |     """
15 |     init_py = codecs.open(os.path.join(package, '__init__.py'), encoding='utf-8').read()
16 |     return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
17 | 
18 | 
19 | def get_author(package):
20 |     """
21 |     Return package author as listed in `__author__` in `__init__.py`.
22 |     """
23 |     init_py = codecs.open(os.path.join(package, '__init__.py'), encoding='utf-8').read()
24 |     return re.search("^__author__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
25 | 
26 | 
27 | def get_email(package):
28 |     """
29 |     Return package email as listed in `__email__` in `__init__.py`.
30 | """ 31 | init_py = codecs.open(os.path.join(package, '__init__.py'), encoding='utf-8').read() 32 | return re.search("^__email__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1) 33 | 34 | setup( 35 | name='kaneda', 36 | version=get_version('kaneda'), 37 | packages=find_packages(), 38 | include_package_data=True, 39 | description='Configurable Python library for metrics and events reporting', 40 | long_description=codecs.open(os.path.join(os.path.dirname(__file__), 'README.rst'), encoding='utf-8').read(), 41 | author=get_author('kaneda'), 42 | author_email=get_email('kaneda'), 43 | entry_points={ 44 | 'console_scripts': [ 45 | 'zmqworker = kaneda.tasks:zmq_task', 46 | ], 47 | }, 48 | install_requires=[ 49 | 'click==6.6', 50 | ], 51 | classifiers=[ 52 | 'Intended Audience :: Developers', 53 | 'Programming Language :: Python', 54 | 'Programming Language :: Python :: 2', 55 | 'Programming Language :: Python :: 2.7', 56 | 'Programming Language :: Python :: 3', 57 | 'Programming Language :: Python :: 3.3', 58 | 'Programming Language :: Python :: 3.4', 59 | 'Operating System :: OS Independent', 60 | 'Topic :: Software Development' 61 | ], 62 | ) 63 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/APSL/kaneda/739db48588d2237dd7710b16f23921d489182868/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | class KanedaSettings: 5 | """ 6 | Backend and queues settings for Kaneda. 7 | """ 8 | class elastic: 9 | BACKEND = 'kaneda.backends.ElasticsearchBackend' 10 | ELASTIC_INDEX_NAME = 'test' 11 | ELASTIC_APP_NAME = 'test' 12 | ELASTIC_CONNECTION_URL = 'http://test:test@localhost:9200' 13 | ELASTIC_HOST = 'localhost' 14 | ELASTIC_PORT = 9200 15 | ELASTIC_USER = 'test' 16 | ELASTIC_PASSWORD = 'test' 17 | ELASTIC_TIMEOUT = 10 18 | 19 | class mongo: 20 | BACKEND = 'kaneda.backends.MongoBackend' 21 | MONGO_DB_NAME = 'test' 22 | MONGO_COLLECTION_NAME = 'test' 23 | MONGO_CONNECTION_URL = 'mongodb://localhost:27017' 24 | MONGO_HOST = 'localhost' 25 | MONGO_PORT = 27017 26 | MONGO_TIMEOUT = 300 27 | 28 | class rethink: 29 | BACKEND = 'kaneda.backends.RethinkBackend' 30 | RETHINK_DB = 'kaneda_test' 31 | RETHINK_HOST = 'localhost' 32 | RETHINK_PORT = 28015 33 | RETHINK_TIMEOUT = 0.3 34 | 35 | class influx: 36 | BACKEND = 'kaneda.backends.InfluxBackend' 37 | INFLUX_DATABASE = 'test' 38 | INFLUX_CONNECTION_URL = 'influxdb://root:root@localhost:8086/test' 39 | INFLUX_HOST = 'localhost' 40 | INFLUX_PORT = 8086 41 | INFLUX_USERNAME = 'root' 42 | INFLUX_PASSWORD = 'root' 43 | INFLUX_TIMEOUT = 300 44 | 45 | class rq: 46 | QUEUE = 'kaneda.queues.RQQueue' 47 | RQ_REDIS_URL = 'redis://localhost:6379/1' 48 | RQ_QUEUE_NAME = '' 49 | 50 | class celery: 51 | QUEUE = 'kaneda.queues.CeleryQueue' 52 | CELERY_BROKER = 'redis://localhost:6379/1' 53 | CELERY_QUEUE_NAME = '' 54 | 55 | class zmq: 56 | QUEUE = 'kaneda.queues.ZMQQueue' 57 | ZMQ_CONNECTION_URL = 'tcp://127.0.0.1:5555' 58 | ZMQ_TIMEOUT = 300 59 | 60 | 61 | @pytest.fixture 62 | def kaneda_settings(): 63 | return KanedaSettings 64 | 65 | 66 | @pytest.fixture 67 | def elastic_settings(): 68 | return KanedaSettings.elastic 69 | 70 | 71 | @pytest.fixture 72 | def mongo_settings(): 73 | return KanedaSettings.mongo 74 | 75 | 76 | 
@pytest.fixture
77 | def rethink_settings():
78 |     return KanedaSettings.rethink
79 | 
80 | 
81 | @pytest.fixture
82 | def influx_settings():
83 |     return KanedaSettings.influx
84 | 
85 | 
86 | @pytest.fixture
87 | def celery_settings():
88 |     return KanedaSettings.celery
89 | 
90 | 
91 | @pytest.fixture
92 | def rq_settings():
93 |     return KanedaSettings.rq
94 | 
95 | 
96 | @pytest.fixture
97 | def zmq_settings():
98 |     return KanedaSettings.zmq
99 | 
100 | 
101 | def pytest_addoption(parser):
102 |     parser.addoption("--run-benchmark", action="store_true", help="run benchmark tests")
103 | 
-------------------------------------------------------------------------------- /tests/integration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/APSL/kaneda/739db48588d2237dd7710b16f23921d489182868/tests/integration/__init__.py --------------------------------------------------------------------------------
/tests/integration/benchmarks/__init__.py: --------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | mark_benchmark = pytest.mark.skipif(not pytest.config.getoption("--run-benchmark"),
4 |                                     reason="need --run-benchmark option to run")
5 | 
-------------------------------------------------------------------------------- /tests/integration/benchmarks/test_backends.py: --------------------------------------------------------------------------------
1 | from kaneda import Metrics
2 | 
3 | from . import mark_benchmark
4 | 
5 | 
6 | @mark_benchmark
7 | class TestBenchmarksBackends(object):
8 | 
9 |     def test_benchmark_elasticsearch(self, elasticsearch_backend, benchmark):
10 |         metrics = Metrics(backend=elasticsearch_backend)
11 |         benchmark(metrics.gauge, 'benchmark_elasticsearch', 1)
12 | 
13 |     def test_benchmark_mongo(self, mongo_backend, benchmark):
14 |         metrics = Metrics(backend=mongo_backend)
15 |         benchmark(metrics.gauge, 'benchmark_mongo', 1)
16 | 
17 |     def test_benchmark_rethink(self, rethink_backend, benchmark):
18 |         metrics = Metrics(backend=rethink_backend)
19 |         benchmark(metrics.gauge, 'benchmark_rethink', 1)
-------------------------------------------------------------------------------- /tests/integration/benchmarks/test_queues.py: --------------------------------------------------------------------------------
1 | from kaneda import Metrics
2 | 
3 | from . 
import mark_benchmark 4 | 5 | 6 | @mark_benchmark 7 | class TestQueues(object): 8 | 9 | def test_benchmark_celery(self, celery_queue, benchmark): 10 | metrics = Metrics(queue=celery_queue) 11 | benchmark(metrics.gauge, 'benchmark_celery', 1) 12 | 13 | def test_benchmark_rq(self, rq_queue, benchmark): 14 | metrics = Metrics(queue=rq_queue) 15 | benchmark(metrics.gauge, 'benchmark_rq', 1) 16 | 17 | def test_benchmark_zmq(self, zmq_queue, benchmark): 18 | metrics = Metrics(queue=zmq_queue) 19 | benchmark(metrics.gauge, 'benchmark_zmq', 1) 20 | -------------------------------------------------------------------------------- /tests/integration/conftest.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | import pytest 4 | from elasticsearch import Elasticsearch 5 | from pymongo import MongoClient 6 | import rethinkdb as r 7 | from influxdb import InfluxDBClient 8 | 9 | from kaneda.backends import ElasticsearchBackend, LoggerBackend, MongoBackend, RethinkBackend, InfluxBackend 10 | from kaneda.queues import CeleryQueue, RQQueue, ZMQQueue 11 | 12 | 13 | @pytest.fixture 14 | def elasticsearch_backend(elastic_settings): 15 | return ElasticsearchBackend(index_name=elastic_settings.ELASTIC_INDEX_NAME, 16 | app_name=elastic_settings.ELASTIC_APP_NAME, host=elastic_settings.ELASTIC_HOST, 17 | port=elastic_settings.ELASTIC_PORT, user=elastic_settings.ELASTIC_USER, 18 | password=elastic_settings.ELASTIC_PASSWORD, timeout=elastic_settings.ELASTIC_TIMEOUT) 19 | 20 | 21 | @pytest.fixture 22 | def elasticsearch_backend_client(elastic_settings): 23 | client = Elasticsearch([elastic_settings.ELASTIC_HOST], port=elastic_settings.ELASTIC_PORT, 24 | http_auth=(elastic_settings.ELASTIC_USER, elastic_settings.ELASTIC_PASSWORD), 25 | timeout=elastic_settings.ELASTIC_TIMEOUT) 26 | return ElasticsearchBackend(index_name=elastic_settings.ELASTIC_INDEX_NAME, 27 | app_name=elastic_settings.ELASTIC_APP_NAME, client=client) 28 | 29 | 30 | @pytest.fixture 31 | def elasticsearch_backend_url(elastic_settings): 32 | return ElasticsearchBackend(index_name=elastic_settings.ELASTIC_INDEX_NAME, 33 | app_name=elastic_settings.ELASTIC_APP_NAME, 34 | connection_url=elastic_settings.ELASTIC_CONNECTION_URL) 35 | 36 | 37 | def elasticsearch_clients(): 38 | from tests.conftest import elastic_settings 39 | return [elasticsearch_backend(elastic_settings()).client, elasticsearch_backend_client(elastic_settings()).client, 40 | elasticsearch_backend_url(elastic_settings()).client] 41 | 42 | 43 | @pytest.fixture 44 | def mongo_backend(mongo_settings): 45 | return MongoBackend(db_name=mongo_settings.MONGO_DB_NAME, collection_name=mongo_settings.MONGO_COLLECTION_NAME, 46 | host=mongo_settings.MONGO_HOST, port=mongo_settings.MONGO_PORT) 47 | 48 | 49 | @pytest.fixture 50 | def mongo_backend_client(mongo_settings): 51 | client = MongoClient(host=mongo_settings.MONGO_HOST, port=mongo_settings.MONGO_PORT, 52 | serverSelectionTimeoutMS=mongo_settings.MONGO_TIMEOUT) 53 | return MongoBackend(db_name=mongo_settings.MONGO_DB_NAME, collection_name=mongo_settings.MONGO_COLLECTION_NAME, 54 | client=client) 55 | 56 | 57 | @pytest.fixture 58 | def mongo_backend_url(mongo_settings): 59 | return MongoBackend(db_name=mongo_settings.MONGO_DB_NAME, collection_name=mongo_settings.MONGO_COLLECTION_NAME, 60 | connection_url=mongo_settings.MONGO_CONNECTION_URL) 61 | 62 | 63 | def mongo_clients(): 64 | from tests.conftest import mongo_settings 65 | return [mongo_backend(mongo_settings()).client, 
mongo_backend_client(mongo_settings()).client, 66 | mongo_backend_url(mongo_settings()).client] 67 | 68 | 69 | @pytest.fixture 70 | def rethink_backend(rethink_settings): 71 | return RethinkBackend(db=rethink_settings.RETHINK_DB, host=rethink_settings.RETHINK_HOST, 72 | port=rethink_settings.RETHINK_PORT, timeout=rethink_settings.RETHINK_TIMEOUT) 73 | 74 | 75 | @pytest.fixture 76 | def rethink_backend_connection(rethink_settings): 77 | connection = r.connect(host=rethink_settings.RETHINK_HOST, port=rethink_settings.RETHINK_PORT, 78 | db=rethink_settings.RETHINK_DB, timeout=rethink_settings.RETHINK_TIMEOUT) 79 | return RethinkBackend(db=rethink_settings.RETHINK_DB, connection=connection) 80 | 81 | 82 | def rethink_clients(): 83 | from tests.conftest import rethink_settings 84 | return [rethink_backend(rethink_settings()).connection, rethink_backend_connection(rethink_settings()).connection] 85 | 86 | 87 | @pytest.fixture 88 | def influx_backend(influx_settings): 89 | return InfluxBackend(database=influx_settings.INFLUX_DATABASE, host=influx_settings.INFLUX_HOST, 90 | port=influx_settings.INFLUX_PORT, username=influx_settings.INFLUX_USERNAME, 91 | password=influx_settings.INFLUX_PASSWORD) 92 | 93 | 94 | @pytest.fixture 95 | def influx_backend_client(influx_settings): 96 | client = InfluxDBClient(host=influx_settings.INFLUX_HOST, port=influx_settings.INFLUX_PORT, 97 | username=influx_settings.INFLUX_USERNAME, password=influx_settings.INFLUX_PASSWORD, 98 | database=influx_settings.INFLUX_DATABASE) 99 | return InfluxBackend(database=influx_settings.INFLUX_DATABASE, client=client) 100 | 101 | 102 | @pytest.fixture 103 | def influx_backend_url(influx_settings): 104 | return InfluxBackend(database=influx_settings.INFLUX_DATABASE, connection_url=influx_settings.INFLUX_CONNECTION_URL) 105 | 106 | 107 | def influx_clients(): 108 | from tests.conftest import influx_settings 109 | return [influx_backend(influx_settings()).client, influx_backend_client(influx_settings()).client, 110 | influx_backend_url(influx_settings()).client] 111 | 112 | 113 | @pytest.fixture 114 | def logger_filename(): 115 | return '/tmp/kaneda-{}.log'.format(datetime.utcnow().strftime('%Y%m%d')) 116 | 117 | 118 | @pytest.fixture 119 | def logger_backend(logger_filename): 120 | return LoggerBackend(filename=logger_filename) 121 | 122 | 123 | @pytest.fixture 124 | def celery_queue(celery_settings): 125 | return CeleryQueue(broker=celery_settings.CELERY_BROKER) 126 | 127 | 128 | @pytest.fixture 129 | def rq_queue(rq_settings): 130 | return RQQueue(redis_url=rq_settings.RQ_REDIS_URL) 131 | 132 | 133 | @pytest.fixture 134 | def zmq_queue(zmq_settings): 135 | return ZMQQueue(connection_url=zmq_settings.ZMQ_CONNECTION_URL) 136 | -------------------------------------------------------------------------------- /tests/integration/django/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/APSL/kaneda/739db48588d2237dd7710b16f23921d489182868/tests/integration/django/__init__.py -------------------------------------------------------------------------------- /tests/integration/django/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture 5 | def django_settings_backend(elastic_settings): 6 | elastic_settings.DEBUG = False 7 | elastic_settings.QUEUE = None 8 | return elastic_settings 9 | 10 | 11 | @pytest.fixture 12 | def django_settings_debug(): 13 | class Settings: 14 | DEBUG = True 
15 |         LOGGER = None
16 |         LOGGER_FILENAME = None
17 | 
18 |     return Settings
19 | 
20 | 
21 | @pytest.fixture
22 | def django_settings_queue(celery_settings):
23 |     celery_settings.DEBUG = False
24 |     celery_settings.BACKEND = None
25 |     return celery_settings
26 | 
27 | 
28 | def pytest_configure():
29 |     from django.conf import settings
30 |     settings.configure()
31 | 
-------------------------------------------------------------------------------- /tests/integration/django/test_django.py: --------------------------------------------------------------------------------
1 | from kaneda.backends import LoggerBackend, ElasticsearchBackend
2 | from kaneda.queues import CeleryQueue
3 | from django_kaneda import settings  # NOQA
4 | 
5 | 
6 | class TestDjango(object):
7 | 
8 |     def test_django_kaneda_with_backend(self, mocker, django_settings_backend):
9 |         mocker.patch('django_kaneda.settings', django_settings_backend)
10 |         from django_kaneda import LazyMetrics
11 |         metrics = LazyMetrics()
12 |         assert isinstance(metrics.backend, ElasticsearchBackend)
13 |         result = metrics.gauge('test_gauge', 42)
14 |         assert result
15 | 
16 |     def test_django_kaneda_with_debug(self, mocker, django_settings_debug):
17 |         mocker.patch('django_kaneda.settings', django_settings_debug)
18 |         from django_kaneda import LazyMetrics
19 |         metrics = LazyMetrics()
20 |         metrics.gauge('test_gauge', 42)
21 |         assert isinstance(metrics.backend, LoggerBackend)
22 | 
23 |     def test_django_kaneda_with_queue(self, mocker, django_settings_queue):
24 |         mocker.patch('django_kaneda.settings', django_settings_queue)
25 |         from django_kaneda import LazyMetrics
26 |         metrics = LazyMetrics()
27 |         assert isinstance(metrics.queue, CeleryQueue)
28 |         result = metrics.gauge('test_gauge', 42)
29 |         assert result
30 | 
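The tests above drive django_kaneda's LazyMetrics exactly as application code would; a minimal sketch (the view function and metric name are illustrative assumptions):

    from django_kaneda import LazyMetrics

    metrics = LazyMetrics()  # backend/queue resolved lazily from the Django-level Kaneda settings

    def profile_view(request):
        metrics.increment('user.profile.views')
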
-------------------------------------------------------------------------------- /tests/integration/test_backends.py: --------------------------------------------------------------------------------
1 | import pytest
2 | # ConnectionError is not a Python 2 builtin; requests is a dependency of the influxdb client.
3 | from requests.exceptions import ConnectionError
4 | 
5 | from .conftest import elasticsearch_clients, mongo_clients, rethink_clients, influx_clients
6 | 
7 | 
8 | class TestBackends(object):
9 | 
10 |     @pytest.mark.parametrize('client', elasticsearch_clients())
11 |     def test_elasticsearch_connection(self, client):
12 |         assert client.ping()
13 | 
14 |     @pytest.mark.parametrize('client', mongo_clients())
15 |     def test_mongo_connection(self, client):
16 |         assert client.server_info()
17 | 
18 |     @pytest.mark.parametrize('connection', rethink_clients())
19 |     def test_rethink_connection(self, connection):
20 |         assert connection.is_open()
21 | 
22 |     @pytest.mark.parametrize('client', influx_clients())
23 |     def test_influx_connection(self, client):
24 |         try:
25 |             assert client.get_list_database()
26 |         except ConnectionError:
27 |             assert False
28 | 
-------------------------------------------------------------------------------- /tests/integration/test_metrics.py: --------------------------------------------------------------------------------
1 | from kaneda import Metrics
2 | 
3 | 
4 | class TestMetrics(object):
5 | 
6 |     def test_elasticsearch_metric(self, elasticsearch_backend):
7 |         metrics = Metrics(backend=elasticsearch_backend)
8 |         result = metrics.gauge('test_gauge', 42)
9 |         assert result
10 |         assert result['_id']
11 | 
12 |     def test_mongo_metric(self, mongo_backend):
13 |         metrics = Metrics(backend=mongo_backend)
14 |         result = metrics.gauge('test_gauge', 42)
15 |         assert result
16 |         assert result.inserted_id
17 | 
18 |     def test_rethink_metric(self, rethink_backend):
19 |         metrics = Metrics(backend=rethink_backend)
20 |         result = metrics.gauge('test_gauge', 42)
21 |         assert result
22 |         assert result['inserted'] == 1
23 | 
24 |     def test_influx_metric(self, influx_backend):
25 |         metrics = Metrics(backend=influx_backend)
26 |         result = metrics.gauge('test_gauge', 42)
27 |         assert result
28 | 
29 |     def test_logger_metric(self, logger_backend, logger_filename):
30 |         metrics = Metrics(backend=logger_backend)
31 |         metrics.gauge('test_gauge', 42)
32 |         with open(logger_filename) as f:
33 |             lines = f.readlines()
34 |         assert lines
35 |         result = lines[-1].split(' - ')[2]
36 |         assert result
37 |         assert 'test_gauge' in result
38 | 
-------------------------------------------------------------------------------- /tests/integration/test_queues.py: --------------------------------------------------------------------------------
1 | from kaneda import Metrics
2 | 
3 | 
4 | class TestQueues(object):
5 | 
6 |     def test_celery(self, celery_queue):
7 |         metrics = Metrics(queue=celery_queue)
8 |         result = metrics.gauge('test_gauge_celery', 1)
9 |         assert result
10 | 
11 |     def test_rq(self, rq_queue):
12 |         metrics = Metrics(queue=rq_queue)
13 |         result = metrics.gauge('test_gauge_rq', 1)
14 |         assert result
15 | 
16 |     def test_zmq(self, zmq_queue):
17 |         metrics = Metrics(queue=zmq_queue)
18 |         metrics.gauge('test_gauge_zmq', 1)
19 |         zmq_queue.socket.close()
20 | 
-------------------------------------------------------------------------------- /tests/unit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/APSL/kaneda/739db48588d2237dd7710b16f23921d489182868/tests/unit/__init__.py --------------------------------------------------------------------------------
/tests/unit/conftest.py: --------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from kaneda.backends import BaseBackend
4 | 
5 | 
6 | class DummyBackend(BaseBackend):
7 |     reported_data = {}
8 | 
9 |     def report(self, name, metric, value, tags, id_=None):
10 |         payload = self._get_payload(name, value, tags)
11 |         payload['metric'] = metric
12 |         self.reported_data[name] = payload
13 | 
14 | 
15 | @pytest.fixture
16 | def dummy_backend():
17 |     return DummyBackend()
18 | 
19 | 
20 | @pytest.fixture
21 | def empty_settings():
22 |     class Settings:
23 |         pass
24 | 
25 |     return Settings
26 | 
27 | 
28 | @pytest.fixture
29 | def unexisting_backend_settings():
30 |     class Settings:
31 |         BACKEND = 'kaneda.backends.UnexistingBackend'
32 | 
33 |     return Settings
34 | 
-------------------------------------------------------------------------------- /tests/unit/test_backends.py: --------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | 
4 | class TestBackends(object):
5 | 
6 |     @pytest.fixture
7 |     def simple_payload(self):
8 |         return {'name': 'test.simple', 'metric': 'simple_payload', 'value': 1, 'host': 'test'}
9 | 
10 |     @pytest.fixture
11 |     def structured_payload(self):
12 |         return {'name': 'test.structured', 'metric': 'structured_payload', 'val0': 1, 'val2': 'str', 'val3': [1, 2],
13 |                 'host': 'test', 'tags': ['tag1', 'tags2']}
14 | 
15 |     def test_base_backend_simple_payload(self, mocker, dummy_backend, simple_payload):
16 |         mock_gethostname = mocker.patch('socket.gethostname')
17 |         mock_gethostname.return_value = 'test'
18 |         dummy_backend.report(name='test.simple', metric='simple_payload', value=1, tags=None)
19 |         reported_data = dummy_backend.reported_data['test.simple']
20 |         assert reported_data == simple_payload
21 | 
22 |     def 
test_base_backend_structured_payload(self, mocker, dummy_backend, structured_payload): 23 | mock_gethostname = mocker.patch('socket.gethostname') 24 | mock_gethostname.return_value = 'test' 25 | dummy_backend.report(name='test.structured', metric='structured_payload', 26 | value={'val0': 1, 'val2': 'str', 'val3': [1, 2]}, tags=['tag1', 'tags2']) 27 | reported_data = dummy_backend.reported_data['test.structured'] 28 | assert reported_data == structured_payload 29 | -------------------------------------------------------------------------------- /tests/unit/test_metrics.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | 3 | import pytest 4 | 5 | from kaneda import Metrics 6 | 7 | 8 | class TestMetrics(object): 9 | 10 | @pytest.fixture 11 | def metrics(self, dummy_backend): 12 | return Metrics(backend=dummy_backend) 13 | 14 | def assert_reported_data(self, dummy_backend, metric, name, value): 15 | assert dummy_backend.reported_data[name]['name'] == name 16 | assert dummy_backend.reported_data[name]['metric'] == metric 17 | if 'value' in dummy_backend.reported_data[name]: 18 | if isinstance(dummy_backend.reported_data[name]['value'], float): 19 | assert round(dummy_backend.reported_data[name]['value'], 2) == value 20 | else: 21 | assert dummy_backend.reported_data[name]['value'] == value 22 | 23 | def test_gauge(self, metrics, dummy_backend): 24 | value = 123 25 | metrics.gauge('users.online', value) 26 | self.assert_reported_data(dummy_backend, 'gauge', 'users.online', value) 27 | 28 | def test_increment(self, metrics, dummy_backend): 29 | metrics.increment('page.views') 30 | self.assert_reported_data(dummy_backend, 'counter', 'page.views', 1) 31 | 32 | def test_decrement(self, metrics, dummy_backend): 33 | metrics.decrement('credit.usage') 34 | self.assert_reported_data(dummy_backend, 'counter', 'credit.usage', -1) 35 | 36 | def test_timing(self, metrics, dummy_backend): 37 | value = 260 38 | metrics.timing('query.response.time', value) 39 | self.assert_reported_data(dummy_backend, 'timing', 'query.response.time', value) 40 | 41 | def test_event(self, metrics, dummy_backend): 42 | value = 'Too much requests' 43 | metrics.event('server.status', value) 44 | self.assert_reported_data(dummy_backend, 'event', 'server.status', value) 45 | 46 | def test_custom(self, metrics, dummy_backend): 47 | value = {'status': 'ok', 'xml': ''} 48 | metrics.custom(name='availability.request', metric='xml_response', id_='2B75D750', value=value, tags=['test']) 49 | self.assert_reported_data(dummy_backend, 'xml_response', 'availability.request', value) 50 | 51 | def test_timed_context_manager(self, metrics, dummy_backend): 52 | value = 100 53 | with metrics.timed('user.query.time', use_ms=True): 54 | sleep(value / 1000.0) # in ms 55 | self.assert_reported_data(dummy_backend, 'timing', 'user.query.time', value) 56 | 57 | def test_timed_decorator(self, metrics, dummy_backend): 58 | value = 0.1 59 | 60 | @metrics.timed(use_ms=False) 61 | def get_user(): 62 | sleep(value) 63 | get_user() 64 | self.assert_reported_data(dummy_backend, 'timing', 'tests.unit.test_metrics.get_user', value) 65 | -------------------------------------------------------------------------------- /tests/unit/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kaneda.exceptions import SettingsError, UnexistingKanedaClass 4 | from kaneda.queues import RQQueue, CeleryQueue, ZMQQueue 5 | from kaneda.utils import 
import_class, get_object_from_settings, get_kaneda_objects, get_backend 6 | from kaneda.backends import ElasticsearchBackend, MongoBackend, LoggerBackend, RethinkBackend, InfluxBackend 7 | 8 | from .conftest import empty_settings 9 | 10 | 11 | class TestUtils(object): 12 | 13 | @pytest.mark.parametrize('backend_path_module, backend_class', [ 14 | ('kaneda.backends.ElasticsearchBackend', ElasticsearchBackend), 15 | ('kaneda.backends.MongoBackend', MongoBackend), 16 | ('kaneda.backends.LoggerBackend', LoggerBackend), 17 | ('kaneda.backends.RethinkBackend', RethinkBackend), 18 | ('kaneda.backends.InfluxBackend', InfluxBackend) 19 | ]) 20 | def test_import_backend_class(self, backend_path_module, backend_class): 21 | assert import_class(backend_path_module) is backend_class 22 | 23 | @pytest.mark.parametrize('queue_path_module, queue_class', [ 24 | ('kaneda.queues.RQQueue', RQQueue), 25 | ('kaneda.queues.CeleryQueue', CeleryQueue), 26 | ('kaneda.queues.ZMQQueue', ZMQQueue), 27 | ]) 28 | def test_import_queue_class(self, queue_path_module, queue_class): 29 | assert import_class(queue_path_module) is queue_class 30 | 31 | @pytest.mark.parametrize('backend_name, backend_class', [ 32 | ('elastic', ElasticsearchBackend), 33 | ('mongo', MongoBackend), 34 | ('rethink', RethinkBackend), 35 | ('influx', InfluxBackend) 36 | ]) 37 | def test_get_backend_from_settings(self, kaneda_settings, backend_name, backend_class): 38 | backend_settings = getattr(kaneda_settings, backend_name) 39 | assert isinstance(get_object_from_settings(backend_settings.BACKEND, backend_settings), backend_class) 40 | 41 | @pytest.mark.parametrize('queue_name, queue_class', [ 42 | ('rq', RQQueue), 43 | ('celery', CeleryQueue), 44 | ('zmq', ZMQQueue), 45 | ]) 46 | def test_get_queue_from_settings(self, kaneda_settings, queue_name, queue_class): 47 | queue_settings = getattr(kaneda_settings, queue_name) 48 | assert isinstance(get_object_from_settings(queue_settings.QUEUE, queue_settings), queue_class) 49 | 50 | def test_get_object_from_settings_with_error(self, unexisting_backend_settings): 51 | with pytest.raises(UnexistingKanedaClass): 52 | get_object_from_settings(unexisting_backend_settings.BACKEND, unexisting_backend_settings) 53 | 54 | @pytest.mark.parametrize('config_object, has_backend, has_queue', [ 55 | ('elastic', True, False), 56 | ('rq', False, True), 57 | ]) 58 | def test_get_kaneda_objects(self, mocker, kaneda_settings, config_object, has_backend, has_queue): 59 | mock_get_settings = mocker.patch('kaneda.utils.get_settings') 60 | mock_get_settings.return_value = getattr(kaneda_settings, config_object) 61 | backend, queue = get_kaneda_objects() 62 | assert bool(backend) == has_backend 63 | assert bool(queue) == has_queue 64 | 65 | @pytest.mark.parametrize('retrieval_function', [get_backend, get_kaneda_objects]) 66 | @pytest.mark.parametrize('settings, error_first_word', [ 67 | (None, 'Define'), 68 | (empty_settings(), 'You'), 69 | ]) 70 | def test_retrieval_functions_with_errors(self, mocker, retrieval_function, settings, error_first_word): 71 | if settings: 72 | mock_get_settings = mocker.patch('kaneda.utils.get_settings') 73 | mock_get_settings.return_value = settings 74 | with pytest.raises(SettingsError) as error: 75 | retrieval_function() 76 | assert str(error.value).startswith(error_first_word) 77 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = 
py{27,34,35,36,py,py3}-dj{110,111,20}, flake8 3 | 4 | [flake8] 5 | max-line-length = 120 6 | 7 | [testenv] 8 | commands = 9 | py.test 10 | 11 | basepython = 12 | py27: python2.7 13 | py34: python3.4 14 | py35: python3.5 15 | py36: python3.6 16 | pypy: pypy 17 | pypy3: pypy3 18 | 19 | deps = 20 | pytest==2.9.1 21 | pytest-mock==0.11.0 22 | pytest-sugar==0.7.1 23 | elasticsearch 24 | pymongo 25 | rethinkdb 26 | influxdb 27 | celery 28 | rq 29 | pyzmq 30 | redis 31 | six==1.10.0 32 | 33 | dj110: Django>=1.10,<1.11 34 | dj111: Django>=1.11,<2.0 35 | dj20: Django>=2.0,<2.1 36 | 37 | [testenv:flake8] 38 | basepython = python3.6 39 | deps = flake8==2.5.4 40 | commands = flake8 kaneda django_kaneda tests 41 | --------------------------------------------------------------------------------