├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── create_deb ├── docs ├── Makefile ├── api │ ├── logs.rst │ ├── outputs.rst │ ├── protocol.rst │ ├── sources.rst │ └── tensor.rst ├── conf.py ├── examples.rst ├── images │ ├── grafana-addmetric.png │ ├── grafana-editgraph.png │ ├── grafana-eth0.png │ ├── grafana-iface_metrics.png │ └── grafana-start.png ├── index.rst ├── outputs.rst ├── sources.rst └── start.rst ├── puppet ├── .gitignore ├── .puppet-lint.rc ├── .rubocop.yml ├── Gemfile ├── README.md ├── Rakefile ├── manifests │ ├── init.pp │ ├── output.pp │ ├── puppetping.pp │ └── source.pp ├── metadata.json ├── spec │ ├── classes │ │ ├── coverage_spec.rb │ │ └── init_spec.rb │ ├── fixtures │ │ └── modules │ │ │ └── tensor │ │ │ ├── manifests │ │ │ └── templates │ └── spec_helper.rb ├── templates │ ├── puppet_pings.yml.erb │ ├── tensor-output.yml.erb │ ├── tensor-source.yml.erb │ └── tensor.yml.erb └── tests │ └── init.pp ├── requirements.txt ├── scripts ├── post-install.sh └── tensor ├── setup.py ├── tensor.yml ├── tensor ├── __init__.py ├── aggregators.py ├── ihateprotobuf │ ├── __init__.py │ ├── proto.proto │ └── proto_pb2.py ├── interfaces.py ├── logs │ ├── __init__.py │ ├── follower.py │ └── parsers.py ├── objects.py ├── outputs │ ├── __init__.py │ ├── elasticsearch.py │ └── riemann.py ├── protocol │ ├── __init__.py │ ├── elasticsearch.py │ ├── icmp.py │ ├── riemann.py │ ├── sflow │ │ ├── __init__.py │ │ ├── protocol │ │ │ ├── __init__.py │ │ │ ├── counters.py │ │ │ ├── flows.py │ │ │ ├── protocol.py │ │ │ └── utils.py │ │ └── server.py │ └── ssh.py ├── service.py ├── sources │ ├── __init__.py │ ├── database │ │ ├── __init__.py │ │ ├── elasticsearch.py │ │ ├── memcache.py │ │ └── postgresql.py │ ├── docker.py │ ├── generator.py │ ├── haproxy.py │ ├── linux │ │ ├── __init__.py │ │ ├── basic.py │ │ ├── ipsec.py │ │ ├── process.py │ │ └── sensors.py │ ├── media │ │ ├── __init__.py │ │ └── libav.py │ ├── munin.py │ ├── network.py │ ├── nginx.py │ ├── python │ │ ├── __init__.py │ │ └── uwsgi.py │ ├── rabbitmq.py │ ├── redis.py │ ├── riak.py │ ├── riemann.py │ ├── sflow.py │ ├── snmp.py │ └── unbound.py ├── tests │ ├── __init__.py │ ├── test_logs.py │ ├── test_service.py │ ├── test_sflow.py │ ├── test_sources.py │ ├── test_ssh.py │ ├── test_tensor.py │ └── test_utils.py └── utils.py └── twisted └── plugins └── tensor_plugin.py /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.pyc 3 | *.swp 4 | ve/ 5 | _trial_temp/ 6 | dropin.cache 7 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - 2.7 4 | - 3.4 5 | - 3.5 6 | install: 7 | - pip install . 
8 | before_install: 9 | - wget http://aphyr.com/riemann/riemann_0.2.6_all.deb 10 | - sudo apt-get update 11 | - sudo apt-get install openjdk-7-jre 12 | - sudo dpkg -i riemann_0.2.6_all.deb 13 | - sudo /etc/init.d/riemann start 14 | script: 15 | - trial tensor 16 | 17 | after_script: 18 | - sudo /etc/init.d/riemann stop 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Colin Alston 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Tensor is now known as [Duct](https://github.com/ducted/duct) 2 | -------------------------------------------------------------------------------- /create_deb: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | fpm -s python -t deb --no-python-dependencies --after-install scripts/post-install.sh --deb-init scripts/tensor -d python-twisted -d python-protobuf -d python-yaml -d python-openssl -a amd64 -n tensor setup.py 4 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make <target>' where <target> is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Tensor.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Tensor.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Tensor" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Tensor" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 178 | -------------------------------------------------------------------------------- /docs/api/logs.rst: -------------------------------------------------------------------------------- 1 | tensor.logs 2 | *********** 3 | 4 | tensor.logs.follower 5 | ==================== 6 | 7 | .. automodule:: tensor.logs.follower 8 | :members: 9 | :show-inheritance: 10 | 11 | tensor.logs.parsers 12 | =================== 13 | 14 | .. 
automodule:: tensor.logs.parsers 15 | :members: 16 | :show-inheritance: 17 | 18 | -------------------------------------------------------------------------------- /docs/api/outputs.rst: -------------------------------------------------------------------------------- 1 | tensor.outputs 2 | ************** 3 | 4 | tensor.outputs.riemann 5 | ====================== 6 | 7 | .. automodule:: tensor.outputs.riemann 8 | :members: 9 | :show-inheritance: 10 | 11 | tensor.outputs.elasticsearch 12 | ============================ 13 | 14 | .. automodule:: tensor.outputs.elasticsearch 15 | :members: 16 | :show-inheritance: 17 | 18 | -------------------------------------------------------------------------------- /docs/api/protocol.rst: -------------------------------------------------------------------------------- 1 | tensor.protocol 2 | *************** 3 | 4 | tensor.protocol.elasticsearch 5 | ============================= 6 | 7 | .. automodule:: tensor.protocol.elasticsearch 8 | :members: 9 | :show-inheritance: 10 | 11 | tensor.protocol.icmp 12 | ==================== 13 | 14 | .. automodule:: tensor.protocol.icmp 15 | :members: 16 | :show-inheritance: 17 | 18 | tensor.protocol.riemann 19 | ======================= 20 | 21 | .. automodule:: tensor.protocol.riemann 22 | :members: 23 | :show-inheritance: 24 | 25 | tensor.protocol.ssh 26 | ===================== 27 | 28 | .. automodule:: tensor.protocol.ssh 29 | :members: 30 | :show-inheritance: 31 | 32 | tensor.protocol.sflow 33 | ===================== 34 | 35 | tensor.protocol.sflow.server 36 | ------------------------------ 37 | 38 | .. automodule:: tensor.protocol.sflow.server 39 | :members: 40 | :show-inheritance: 41 | 42 | tensor.protocol.sflow.protocol 43 | ------------------------------ 44 | 45 | .. automodule:: tensor.protocol.sflow.protocol 46 | :members: 47 | :show-inheritance: 48 | 49 | 50 | -------------------------------------------------------------------------------- /docs/api/sources.rst: -------------------------------------------------------------------------------- 1 | tensor.sources 2 | ************** 3 | 4 | tensor.sources.database.postgresql 5 | ================================== 6 | 7 | .. automodule:: tensor.sources.database.postgresql 8 | :members: 9 | :show-inheritance: 10 | 11 | tensor.sources.database.elasticsearch 12 | ===================================== 13 | 14 | .. automodule:: tensor.sources.database.elasticsearch 15 | :members: 16 | :show-inheritance: 17 | 18 | tensor.sources.database.memcache 19 | ================================ 20 | 21 | .. automodule:: tensor.sources.database.memcache 22 | :members: 23 | :show-inheritance: 24 | 25 | tensor.sources.docker 26 | ===================== 27 | 28 | .. automodule:: tensor.sources.docker 29 | :members: 30 | :show-inheritance: 31 | 32 | tensor.sources.haproxy 33 | ====================== 34 | 35 | .. automodule:: tensor.sources.haproxy 36 | :members: 37 | :show-inheritance: 38 | 39 | tensor.sources.generator 40 | ======================== 41 | 42 | .. automodule:: tensor.sources.generator 43 | :members: 44 | :show-inheritance: 45 | 46 | tensor.sources.linux 47 | ==================== 48 | 49 | tensor.sources.linux.basic 50 | -------------------------- 51 | 52 | .. automodule:: tensor.sources.linux.basic 53 | :members: 54 | :show-inheritance: 55 | 56 | tensor.sources.linux.process 57 | ---------------------------- 58 | 59 | .. 
automodule:: tensor.sources.linux.process 60 | :members: 61 | :show-inheritance: 62 | 63 | tensor.sources.linux.sensors 64 | ---------------------------- 65 | 66 | .. automodule:: tensor.sources.linux.sensors 67 | :members: 68 | :show-inheritance: 69 | 70 | tensor.sources.media 71 | ==================== 72 | 73 | tensor.sources.media.libav 74 | -------------------------- 75 | .. automodule:: tensor.sources.media.libav 76 | :members: 77 | :show-inheritance: 78 | 79 | tensor.sources.munin 80 | ==================== 81 | 82 | .. automodule:: tensor.sources.munin 83 | :members: 84 | :show-inheritance: 85 | 86 | tensor.sources.network 87 | ====================== 88 | 89 | .. automodule:: tensor.sources.network 90 | :members: 91 | :show-inheritance: 92 | 93 | tensor.sources.nginx 94 | ==================== 95 | 96 | .. automodule:: tensor.sources.nginx 97 | :members: 98 | :show-inheritance: 99 | 100 | tensor.sources.python 101 | ===================== 102 | 103 | tensor.sources.python.uwsgi 104 | --------------------------- 105 | 106 | .. automodule:: tensor.sources.python.uwsgi 107 | :members: 108 | :show-inheritance: 109 | 110 | tensor.sources.rabbitmq 111 | ======================= 112 | 113 | .. automodule:: tensor.sources.rabbitmq 114 | :members: 115 | :show-inheritance: 116 | 117 | tensor.sources.redis 118 | ======================= 119 | 120 | .. automodule:: tensor.sources.redis 121 | :members: 122 | :show-inheritance: 123 | 124 | tensor.sources.riak 125 | =================== 126 | 127 | .. automodule:: tensor.sources.riak 128 | :members: 129 | :show-inheritance: 130 | 131 | tensor.sources.riemann 132 | ====================== 133 | 134 | .. automodule:: tensor.sources.riemann 135 | :members: 136 | :show-inheritance: 137 | 138 | tensor.sources.sflow 139 | ==================== 140 | 141 | .. automodule:: tensor.sources.sflow 142 | :members: 143 | :show-inheritance: 144 | 145 | tensor.sources.snmp 146 | ==================== 147 | 148 | .. automodule:: tensor.sources.snmp 149 | :members: 150 | :show-inheritance: 151 | 152 | -------------------------------------------------------------------------------- /docs/api/tensor.rst: -------------------------------------------------------------------------------- 1 | tensor 2 | ****** 3 | 4 | tensor.aggregators 5 | ================== 6 | 7 | .. automodule:: tensor.aggregators 8 | :members: 9 | :show-inheritance: 10 | 11 | tensor.interfaces 12 | ================= 13 | 14 | .. automodule:: tensor.interfaces 15 | :members: 16 | :show-inheritance: 17 | 18 | tensor.objects 19 | ============== 20 | 21 | .. automodule:: tensor.objects 22 | :members: 23 | :show-inheritance: 24 | 25 | tensor.service 26 | ============== 27 | 28 | .. automodule:: tensor.service 29 | :members: 30 | :show-inheritance: 31 | 32 | tensor.utils 33 | ============ 34 | 35 | .. automodule:: tensor.utils 36 | :members: 37 | :show-inheritance: 38 | 39 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Tensor documentation build configuration file, created by 4 | # sphinx-quickstart on Fri Oct 17 21:58:31 2014. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 
14 | 15 | import sys 16 | import os 17 | 18 | # If extensions (or modules to document with autodoc) are in another directory, 19 | # add these directories to sys.path here. If the directory is relative to the 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. 21 | sys.path.insert(0, os.path.abspath('../')) 22 | 23 | # -- General configuration ------------------------------------------------ 24 | 25 | # If your documentation needs a minimal Sphinx version, state it here. 26 | #needs_sphinx = '1.0' 27 | 28 | # Add any Sphinx extension module names here, as strings. They can be 29 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 30 | # ones. 31 | extensions = [ 32 | 'sphinx.ext.autodoc', 33 | 'sphinx.ext.viewcode', 34 | ] 35 | 36 | # Add any paths that contain templates here, relative to this directory. 37 | templates_path = ['_templates'] 38 | 39 | # The suffix of source filenames. 40 | source_suffix = '.rst' 41 | 42 | # The encoding of source files. 43 | #source_encoding = 'utf-8-sig' 44 | 45 | # The master toctree document. 46 | master_doc = 'index' 47 | 48 | # General information about the project. 49 | project = u'Tensor' 50 | copyright = u'2014, Colin Alston' 51 | 52 | # The version info for the project you're documenting, acts as replacement for 53 | # |version| and |release|, also used in various other places throughout the 54 | # built documents. 55 | # 56 | # The short X.Y version. 57 | version = '1.0' 58 | # The full version, including alpha/beta/rc tags. 59 | release = '1.0' 60 | 61 | # The language for content autogenerated by Sphinx. Refer to documentation 62 | # for a list of supported languages. 63 | #language = None 64 | 65 | # There are two options for replacing |today|: either, you set today to some 66 | # non-false value, then it is used: 67 | #today = '' 68 | # Else, today_fmt is used as the format for a strftime call. 69 | #today_fmt = '%B %d, %Y' 70 | 71 | # List of patterns, relative to source directory, that match files and 72 | # directories to ignore when looking for source files. 73 | exclude_patterns = ['_build'] 74 | 75 | # The reST default role (used for this markup: `text`) to use for all 76 | # documents. 77 | #default_role = None 78 | 79 | # If true, '()' will be appended to :func: etc. cross-reference text. 80 | #add_function_parentheses = True 81 | 82 | # If true, the current module name will be prepended to all description 83 | # unit titles (such as .. function::). 84 | #add_module_names = True 85 | 86 | # If true, sectionauthor and moduleauthor directives will be shown in the 87 | # output. They are ignored by default. 88 | #show_authors = False 89 | 90 | # The name of the Pygments (syntax highlighting) style to use. 91 | pygments_style = 'sphinx' 92 | 93 | # A list of ignored prefixes for module index sorting. 94 | #modindex_common_prefix = [] 95 | 96 | # If true, keep warnings as "system message" paragraphs in the built documents. 97 | #keep_warnings = False 98 | 99 | 100 | # -- Options for HTML output ---------------------------------------------- 101 | 102 | # The theme to use for HTML and HTML Help pages. See the documentation for 103 | # a list of builtin themes. 104 | html_theme = 'default' 105 | 106 | # Theme options are theme-specific and customize the look and feel of a theme 107 | # further. For a list of options available for each theme, see the 108 | # documentation. 109 | #html_theme_options = {} 110 | 111 | # Add any paths that contain custom themes here, relative to this directory. 
112 | #html_theme_path = [] 113 | 114 | # The name for this set of Sphinx documents. If None, it defaults to 115 | # "<project> v<release> documentation". 116 | #html_title = None 117 | 118 | # A shorter title for the navigation bar. Default is the same as html_title. 119 | #html_short_title = None 120 | 121 | # The name of an image file (relative to this directory) to place at the top 122 | # of the sidebar. 123 | #html_logo = None 124 | 125 | # The name of an image file (within the static path) to use as favicon of the 126 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 127 | # pixels large. 128 | #html_favicon = None 129 | 130 | # Add any paths that contain custom static files (such as style sheets) here, 131 | # relative to this directory. They are copied after the builtin static files, 132 | # so a file named "default.css" will overwrite the builtin "default.css". 133 | html_static_path = ['_static'] 134 | 135 | # Add any extra paths that contain custom files (such as robots.txt or 136 | # .htaccess) here, relative to this directory. These files are copied 137 | # directly to the root of the documentation. 138 | #html_extra_path = [] 139 | 140 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 141 | # using the given strftime format. 142 | #html_last_updated_fmt = '%b %d, %Y' 143 | 144 | # If true, SmartyPants will be used to convert quotes and dashes to 145 | # typographically correct entities. 146 | #html_use_smartypants = True 147 | 148 | # Custom sidebar templates, maps document names to template names. 149 | #html_sidebars = {} 150 | 151 | # Additional templates that should be rendered to pages, maps page names to 152 | # template names. 153 | #html_additional_pages = {} 154 | 155 | # If false, no module index is generated. 156 | #html_domain_indices = True 157 | 158 | # If false, no index is generated. 159 | #html_use_index = True 160 | 161 | # If true, the index is split into individual pages for each letter. 162 | #html_split_index = False 163 | 164 | # If true, links to the reST sources are added to the pages. 165 | #html_show_sourcelink = True 166 | 167 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 168 | #html_show_sphinx = True 169 | 170 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 171 | #html_show_copyright = True 172 | 173 | # If true, an OpenSearch description file will be output, and all pages will 174 | # contain a <link> tag referring to it. The value of this option must be the 175 | # base URL from which the finished HTML is served. 176 | #html_use_opensearch = '' 177 | 178 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 179 | #html_file_suffix = None 180 | 181 | # Output file base name for HTML help builder. 182 | htmlhelp_basename = 'Tensordoc' 183 | 184 | 185 | # -- Options for LaTeX output --------------------------------------------- 186 | 187 | latex_elements = { 188 | # The paper size ('letterpaper' or 'a4paper'). 189 | #'papersize': 'letterpaper', 190 | 191 | # The font size ('10pt', '11pt' or '12pt'). 192 | #'pointsize': '10pt', 193 | 194 | # Additional stuff for the LaTeX preamble. 195 | #'preamble': '', 196 | } 197 | 198 | # Grouping the document tree into LaTeX files. List of tuples 199 | # (source start file, target name, title, 200 | # author, documentclass [howto, manual, or own class]). 
201 | latex_documents = [ 202 | ('index', 'Tensor.tex', u'Tensor Documentation', 203 | u'Colin Alston', 'manual'), 204 | ] 205 | 206 | # The name of an image file (relative to this directory) to place at the top of 207 | # the title page. 208 | #latex_logo = None 209 | 210 | # For "manual" documents, if this is true, then toplevel headings are parts, 211 | # not chapters. 212 | #latex_use_parts = False 213 | 214 | # If true, show page references after internal links. 215 | #latex_show_pagerefs = False 216 | 217 | # If true, show URL addresses after external links. 218 | #latex_show_urls = False 219 | 220 | # Documents to append as an appendix to all manuals. 221 | #latex_appendices = [] 222 | 223 | # If false, no module index is generated. 224 | #latex_domain_indices = True 225 | 226 | 227 | # -- Options for manual page output --------------------------------------- 228 | 229 | # One entry per manual page. List of tuples 230 | # (source start file, name, description, authors, manual section). 231 | man_pages = [ 232 | ('index', 'tensor', u'Tensor Documentation', 233 | [u'Colin Alston'], 1) 234 | ] 235 | 236 | # If true, show URL addresses after external links. 237 | #man_show_urls = False 238 | 239 | 240 | # -- Options for Texinfo output ------------------------------------------- 241 | 242 | # Grouping the document tree into Texinfo files. List of tuples 243 | # (source start file, target name, title, author, 244 | # dir menu entry, description, category) 245 | texinfo_documents = [ 246 | ('index', 'Tensor', u'Tensor Documentation', 247 | u'Colin Alston', 'Tensor', 'One line description of project.', 248 | 'Miscellaneous'), 249 | ] 250 | 251 | # Documents to append as an appendix to all manuals. 252 | #texinfo_appendices = [] 253 | 254 | # If false, no module index is generated. 255 | #texinfo_domain_indices = True 256 | 257 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 258 | #texinfo_show_urls = 'footnote' 259 | 260 | # If true, do not generate a @detailmenu in the "Top" node's menu. 261 | #texinfo_no_detailmenu = False 262 | -------------------------------------------------------------------------------- /docs/examples.rst: -------------------------------------------------------------------------------- 1 | Example configurations 2 | ********************** 3 | 4 | Replacing Munin 5 | =============== 6 | 7 | The first step is to create a TRIG stack (Tensor Riemann InfluxDB Grafana). 8 | 9 | Step 1: Install Riemann 10 | ----------------------- 11 | :: 12 | 13 | $ wget http://aphyr.com/riemann/riemann_0.2.6_all.deb 14 | $ aptitude install openjdk-7-jre 15 | $ dpkg -i riemann_0.2.6_all.deb 16 | 17 | Step 2: Install InfluxDB 18 | ------------------------ 19 | :: 20 | 21 | $ wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb 22 | $ sudo dpkg -i influxdb_latest_amd64.deb 23 | 24 | Start InfluxDB, then quickly change the root/root default password 25 | because it also defaults to listening on all interfaces and apparently 26 | this is not important enough for them to fix. 
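How you change it depends on your InfluxDB release; on the 0.8-era HTTP API used below, something like this should do it (the `cluster_admins` endpoint and payload are an assumption from that API generation, so verify against your version's documentation) ::

    $ curl -X POST 'http://localhost:8086/cluster_admins/root?u=root&p=root' \
        -d '{"password": "sup3rs3cret"}'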
27 | 28 | Create `riemann` and `grafana` databases, and some users for them 29 | 30 | :: 31 | 32 | $ curl -X POST 'http://localhost:8086/db?u=root&p=root' \ 33 | -d '{"name": "riemann"}' 34 | $ curl -X POST 'http://localhost:8086/db?u=root&p=root' \ 35 | -d '{"name": "grafana"}' 36 | $ curl -X POST 'http://localhost:8086/db/riemann/users?u=root&p=root' \ 37 | -d '{"name": "riemann", "password": "riemann"}' 38 | $ curl -X POST 'http://localhost:8086/db/grafana/users?u=root&p=root' \ 39 | -d '{"name": "grafana", "password": "grafana"}' 40 | 41 | NB. InfluxDB is easy to get running but is not production ready or stable, 42 | so your data can very easily be lost. 43 | 44 | Step 3: Install Grafana 45 | ----------------------- 46 | :: 47 | 48 | $ aptitude install nginx 49 | $ mkdir /var/www 50 | $ cd /var/www 51 | $ wget http://grafanarel.s3.amazonaws.com/grafana-1.8.1.tar.gz 52 | $ tar -zxf grafana-1.8.1.tar.gz 53 | $ mv grafana-1.8.1 grafana 54 | 55 | Now we must create an nginx configuration in `/etc/nginx/sites-enabled`. 56 | 57 | You can use something like this 58 | :: 59 | 60 | server { 61 | listen 80; 62 | server_name <your grafana hostname>; 63 | access_log /var/log/nginx/grafana-access.log; 64 | error_log /var/log/nginx/grafana-error.log; 65 | 66 | location / { 67 | alias /var/www/grafana/; 68 | index index.html; 69 | try_files $uri $uri/ /index.html; 70 | } 71 | } 72 | 73 | Next we need a configuration file for Grafana. Open `/var/www/grafana/config.js` 74 | and use the following configuration :: 75 | 76 | define(['settings'], 77 | function (Settings) { 78 | return new Settings({ 79 | datasources: { 80 | influxdb: { 81 | type: 'influxdb', 82 | url: "http://<influxdb host>:8086/db/riemann", 83 | username: 'riemann', 84 | password: 'riemann', 85 | }, 86 | grafana: { 87 | type: 'influxdb', 88 | url: "http://<influxdb host>:8086/db/grafana", 89 | username: 'grafana', 90 | password: 'grafana', 91 | grafanaDB: true 92 | }, 93 | }, 94 | search: { 95 | max_results: 20 96 | }, 97 | default_route: '/dashboard/file/default.json', 98 | unsaved_changes_warning: true, 99 | playlist_timespan: "1m", 100 | admin: { 101 | password: '' 102 | }, 103 | window_title_prefix: 'Grafana - ', 104 | plugins: { 105 | panels: [], 106 | dependencies: [], 107 | } 108 | }); 109 | }); 110 | 111 | Step 4: Glue things together 112 | ---------------------------- 113 | 114 | Let's start by configuring Riemann to talk to InfluxDB. This is the 115 | full /etc/riemann/riemann.config file. :: 116 | 117 | ; -*- mode: clojure; -*- 118 | ; vim: filetype=clojure 119 | (require 'capacitor.core) 120 | (require 'capacitor.async) 121 | (require 'clojure.core.async) 122 | 123 | (defn make-async-influxdb-client [opts] 124 | (let [client (capacitor.core/make-client opts) 125 | events-in (capacitor.async/make-chan) 126 | resp-out (capacitor.async/make-chan)] 127 | (capacitor.async/run! events-in resp-out client 100 10000) 128 | (fn [series payload] 129 | (let [p (merge payload { 130 | :series series 131 | :time (* 1000 (:time payload)) ;; s → ms 132 | })] 133 | (clojure.core.async/put! 
events-in p))))) 134 | 135 | (def influx (make-async-influxdb-client { 136 | :host "localhost" 137 | :port 8086 138 | :username "riemann" 139 | :password "riemann" 140 | :db "riemann" 141 | })) 142 | 143 | (logging/init {:file "/var/log/riemann/riemann.log"}) 144 | 145 | ; Listen on the local interface over TCP (5555), UDP (5555), and websockets 146 | ; (5556) 147 | (let [host "0.0.0.0"] 148 | (tcp-server {:host host}) 149 | (udp-server {:host host}) 150 | (ws-server {:host host})) 151 | 152 | (periodically-expire 60) 153 | 154 | (let [index (index)] 155 | (streams 156 | index 157 | 158 | (fn [event] 159 | (let [series (format "%s.%s" (:host event) (:service event))] 160 | (influx series { 161 | :time (:time event) 162 | :value (:metric event) 163 | }))))) 164 | 165 | You're pretty much done at this point, and should see the metrics from the 166 | Riemann server process if you open up Grafana and look through the query 167 | builder. 168 | 169 | Step 5: Using Tensor to retrieve stats from munin-node 170 | ------------------------------------------------------ 171 | 172 | First of all, install Tensor :: 173 | 174 | $ pip install tensor 175 | 176 | Next create /etc/tensor and a `tensor.yml` file in that directory. 177 | 178 | The `tensor.yml` config file should look like this :: 179 | 180 | ttl: 60.0 181 | interval: 1.0 182 | 183 | outputs: 184 | - output: tensor.outputs.riemann.RiemannTCP 185 | port: 5555 186 | server: <riemann host> 187 | 188 | # Sources 189 | sources: 190 | - service: mymunin 191 | source: tensor.sources.munin.MuninNode 192 | interval: 60.0 193 | ttl: 120.0 194 | critical: { 195 | mymunin.system.load.load: "> 2" 196 | } 197 | 198 | This configures Tensor to connect to the munin-node on the local machine and 199 | retrieve all configured plugin values. You can create critical alert levels 200 | by setting the dot-separated prefix for the service name and munin plugin. 201 | 202 | You can now start Tensor :: 203 | 204 | $ twistd -n tensor -c /etc/tensor/tensor.yml 205 | 2014-10-22 13:30:38+0200 [-] Log opened. 206 | 2014-10-22 13:30:38+0200 [-] twistd 14.0.2 (/home/colin/riemann-tensor/ve/bin/python 2.7.6) starting up. 207 | 2014-10-22 13:30:38+0200 [-] reactor class: twisted.internet.epollreactor.EPollReactor. 208 | 2014-10-22 13:30:38+0200 [-] Starting factory 209 | 210 | This pretty much indicates everything is alright, or else we'd quickly 211 | see some errors. 212 | 213 | Next we will add some graphs to Grafana. 214 | 215 | Step 6: Creating graphs in Grafana 216 | ---------------------------------- 217 | 218 | .. image:: images/grafana-start.png 219 | 220 | Click on the green row tag on the left, and delete all but the last row. 221 | This will leave you with an empty graph. 222 | 223 | Click the title of the graph, then click `Edit`. 224 | 225 | .. image:: images/grafana-editgraph.png 226 | 227 | In the edit screen the Metrics tab will be open already. Now we can add our 228 | munin metrics. If you start typing in the `series` field you should see your 229 | hosts and metrics autocomplete. 230 | 231 | .. image:: images/grafana-addmetric.png 232 | 233 | Many Munin metrics are `counter` types, which are usually converted to a rate 234 | by the RRD aggregation in Munin's graphs. 235 | 236 | Handily the :class:`tensor.sources.munin.MuninNode` source takes care of this 237 | by caching the metric between run intervals when that type is used. 
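Conceptually, the conversion just divides the difference between two successive counter readings by the time elapsed between them. A minimal Python sketch of that idea (an illustration only, not Tensor's actual implementation)::

    import time

    class CounterToRate(object):
        """Convert a monotonically increasing counter to a per-second rate."""

        def __init__(self):
            self.last = None  # (timestamp, value) of the previous reading

        def update(self, value):
            now = time.time()
            if self.last is None:
                # First reading: nothing to compare against yet
                self.last = (now, value)
                return None
            then, previous = self.last
            self.last = (now, value)
            return (value - previous) / (now - then)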
238 | 239 | If we want to graph our network interface, all we need to do is convert it to a 240 | slightly more useful unit by multiplying the byte/sec metric by 8, since Grafana 241 | provides a bit/sec legend format. 242 | 243 | To do this, start by clicking the gear icon on the metric query, then select 244 | `Raw query mode`. 245 | 246 | Use the following query :: 247 | 248 | select value * 8 from "<hostname>.munin.network.if_eth0.down" where $timeFilter group by time($interval) order asc 249 | 250 | And choose an alias of "RX". Do the same for if_eth0.up and alias that "TX". 251 | You should end up with something like this 252 | 253 | .. image:: images/grafana-iface_metrics.png 254 | 255 | Click on `General` to edit the title, and then on `Axes & Grid` change the 256 | Format to `bps`. Under `Display Styles` you can stack the data or play around 257 | with the look of the graph. Click `Back to dashboard` and you should end up 258 | with something as follows 259 | 260 | .. image:: images/grafana-eth0.png 261 | -------------------------------------------------------------------------------- /docs/images/grafana-addmetric.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/docs/images/grafana-addmetric.png -------------------------------------------------------------------------------- /docs/images/grafana-editgraph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/docs/images/grafana-editgraph.png -------------------------------------------------------------------------------- /docs/images/grafana-eth0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/docs/images/grafana-eth0.png -------------------------------------------------------------------------------- /docs/images/grafana-iface_metrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/docs/images/grafana-iface_metrics.png -------------------------------------------------------------------------------- /docs/images/grafana-start.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/docs/images/grafana-start.png -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. Tensor documentation master file, created by 2 | sphinx-quickstart on Fri Oct 17 21:58:31 2014. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Tensor's documentation! 7 | ================================== 8 | 9 | Tensor is a modular gateway and event router built using the 10 | Twisted framework. It can be used as a monitoring agent and ETL 11 | framework for a wide range of applications, and provides a simple 12 | yet powerful plugin mechanism to expand its capabilities. 13 | 14 | Contents: 15 | 16 | .. toctree:: 17 | :maxdepth: 2 18 | 19 | start 20 | sources 21 | outputs 22 | examples 23 | 24 | 25 | API Documentation: 26 | 27 | .. 
toctree:: 28 | :maxdepth: 2 29 | 30 | api/tensor.rst 31 | api/protocol.rst 32 | api/logs.rst 33 | api/sources.rst 34 | api/outputs.rst 35 | 36 | Indices and tables 37 | ================== 38 | 39 | * :ref:`genindex` 40 | * :ref:`modindex` 41 | * :ref:`search` 42 | 43 | -------------------------------------------------------------------------------- /docs/outputs.rst: -------------------------------------------------------------------------------- 1 | Outputs 2 | ******* 3 | 4 | Introduction 5 | ============ 6 | 7 | Outputs are Python objects which subclass :class:`tensor.objects.Output`. They 8 | are constructed with a dictionary parsed from the YAML configuration block 9 | which defines them, and as such can read any attributes from it, whether 10 | optional or mandatory. 11 | 12 | Since outputs are constructed at startup time they can retain any required 13 | state. A copy of the queue is passed to all 14 | :meth:`tensor.objects.Output.eventsReceived` calls, which happen at each 15 | queue `interval` config setting as the queue is emptied. This list of 16 | :class:`tensor.objects.Event` objects must not be altered by the output. 17 | 18 | The `output` configuration option is passed a string representing an object 19 | in the same way as `sources` configurations are. For example, this outputs events 20 | to Riemann over TCP:: 21 | 22 | outputs: 23 | - output: tensor.outputs.riemann.RiemannTCP 24 | server: 127.0.0.1 25 | port: 5555 26 | 27 | Using TLS with Riemann 28 | ====================== 29 | 30 | The RiemannTCP output also supports TLS, which can make use of Puppet certs for 31 | convenience :: 32 | 33 | outputs: 34 | - output: tensor.outputs.riemann.RiemannTCP 35 | server: 127.0.0.1 36 | port: 5554 37 | tls: true 38 | cert: /var/lib/puppet/ssl/certs/test.acme.com.pem 39 | key: /var/lib/puppet/ssl/private_keys/test.acme.com.pem 40 | 41 | Writing your own outputs 42 | ======================== 43 | 44 | An output class should subclass :class:`tensor.objects.Output`. 45 | 46 | The output can implement a `createClient` method which starts the output in 47 | whatever way necessary and can return a deferred. The output must also have an 48 | `eventsReceived` method which takes a list of :class:`tensor.objects.Event` 49 | objects and processes them accordingly; it can also return a deferred. 
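If your output needs to establish a connection or do other setup first, `createClient` is the place for it. A minimal sketch of the shape this takes (the one-second `deferLater` here simply stands in for real asynchronous setup work)::

    from twisted.internet import reactor, task

    from tensor.objects import Output

    class ConnectedOutput(Output):
        def createClient(self):
            # Called at startup; may return a deferred that fires once
            # the output is ready to receive events. We simulate setup.
            return task.deferLater(reactor, 1.0, lambda: None)

        def eventsReceived(self, events):
            # Called with the list of queued events at each interval
            pass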
50 | 51 | An example logging output:: 52 | 53 | from twisted.internet import reactor, defer 54 | from twisted.python import log 55 | 56 | from tensor.objects import Output 57 | 58 | class Logger(Output): 59 | def eventsReceived(self, events): 60 | log.msg("Events dequeued: %s" % len(events)) 61 | 62 | If you save this as `test.py`, the basic configuration you need is simply :: 63 | 64 | outputs: 65 | - output: tensor.outputs.riemann.RiemannUDP 66 | server: localhost 67 | port: 5555 68 | 69 | - output: test.Logger 70 | 71 | You should now see how many events are dequeued in the Tensor log file :: 72 | 73 | 2014-10-24 15:35:27+0200 [-] Starting protocol 74 | 2014-10-24 15:35:28+0200 [-] Events dequeued: 7 75 | 2014-10-24 15:35:29+0200 [-] Events dequeued: 2 76 | 2014-10-24 15:35:30+0200 [-] Events dequeued: 3 77 | 78 | Events can be routed to outputs in different ways; see the Getting started 79 | guide for more details. 80 | -------------------------------------------------------------------------------- /docs/sources.rst: -------------------------------------------------------------------------------- 1 | Sources 2 | ******* 3 | 4 | Introduction 5 | ============ 6 | 7 | Sources are Python objects which subclass :class:`tensor.objects.Source`. They 8 | are constructed with a dictionary parsed from the YAML configuration block 9 | which defines them, and as such can read any attributes from it, whether 10 | optional or mandatory. 11 | 12 | Since sources are constructed at startup time they can retain any required 13 | state, for example the last metric value to report rates of change or for 14 | any other purpose. However, since a Tensor process might be running many checks, 15 | a source should not use an excessive amount of memory. 16 | 17 | The `source` configuration option is passed a string representing an object 18 | in much the same way as you would import it in a Python module. The final 19 | class name is split from this string. For example specifying:: 20 | 21 | source: tensor.sources.network.Ping 22 | 23 | is equivalent to:: 24 | 25 | from tensor.sources.network import Ping 26 | 27 | Writing your own sources 28 | ======================== 29 | 30 | A source class must subclass :class:`tensor.objects.Source` and also 31 | implement the interface :class:`tensor.interfaces.ITensorSource`. 32 | 33 | The source must have a `get` method which returns a :class:`tensor.objects.Event` 34 | object. The Source parent class provides a helper method `createEvent` which 35 | performs the metric-level checking (evaluating the simple logical statement in 36 | the configuration), sets the correct service name and handles prefixing service 37 | names. 38 | 39 | A "Hello world" source:: 40 | 41 | from zope.interface import implementer 42 | 43 | from tensor.interfaces import ITensorSource 44 | from tensor.objects import Source 45 | 46 | @implementer(ITensorSource) 47 | class HelloWorld(Source): 48 | 49 | def get(self): 50 | return self.createEvent('ok', 'Hello world!', 0) 51 | 52 | To hold some state, you can re-implement the `__init__` method, as long as the 53 | arguments remain the same. 
54 | 55 | Extending the above example to create a simple flip-flop metric event:: 56 | 57 | from zope.interface import implementer 58 | 59 | from tensor.interfaces import ITensorSource 60 | from tensor.objects import Source 61 | 62 | @implementer(ITensorSource) 63 | class HelloWorld(Source): 64 | def __init__(self, *a): 65 | Source.__init__(self, *a) 66 | self.bit = False 67 | 68 | def get(self): 69 | self.bit = not self.bit 70 | return self.createEvent('ok', 'Hello world!', self.bit and 0.0 or 1.0) 71 | 72 | You could then place this in a Python module like `hello.py`, and as long as it's 73 | in the Python path for Tensor it can be used as a source with `hello.HelloWorld`. 74 | 75 | A list of events can also be returned, but be careful of overwhelming the output 76 | buffer. If you need to produce lots of metrics it may be worthwhile to 77 | return nothing from `get` and call `self.queueBack` as needed. 78 | 79 | Using custom sources 80 | ==================== 81 | 82 | When a source is specified, e.g. :: 83 | 84 | source: tensor.sources.network.Ping 85 | 86 | Tensor will import and instantiate the `Ping` class from `tensor.sources.network`. 87 | Consequently a source can come from any installed Python module. 88 | 89 | For the sake of convenience, however, Tensor also appends `/var/lib/tensor` to the 90 | Python path. This means you can easily create, test and distribute sources in that 91 | directory. 92 | 93 | For example, create the above `hello.py` file and place it in `/var/lib/tensor`, then 94 | use the configuration :: 95 | 96 | source: hello.HelloWorld 97 | 98 | You can also always submit a GitHub pull request with sources to have them added to 99 | Tensor for others to benefit from! 100 | 101 | Handling asynchronous tasks 102 | =========================== 103 | 104 | Since Tensor is written using the Twisted asynchronous framework, sources can 105 | (and in most cases *must*) make full use of it to implement network checks, or 106 | execute other processes. 107 | 108 | The simplest example of a source which executes an external process is the 109 | ProcessCount check:: 110 | 111 | from zope.interface import implementer 112 | 113 | from twisted.internet import defer 114 | 115 | from tensor.interfaces import ITensorSource 116 | from tensor.objects import Source 117 | from tensor.utils import fork 118 | 119 | @implementer(ITensorSource) 120 | class ProcessCount(Source): 121 | @defer.inlineCallbacks 122 | def get(self): 123 | out, err, code = yield fork('/bin/ps', args=('-e',)) 124 | 125 | count = len(out.strip('\n').split('\n')) 126 | 127 | defer.returnValue( 128 | self.createEvent('ok', 'Process count %s' % (count), count) 129 | ) 130 | 131 | For more information please read the Twisted documentation at https://twistedmatrix.com/trac/wiki/Documentation 132 | 133 | The :py:func:`tensor.utils.fork` function returns a deferred which can time out 134 | after a specified time. 135 | 136 | Thinking outside the box 137 | ======================== 138 | 139 | Historically, monitoring systems have been poorly architected and terribly 140 | inflexible. To demonstrate how Tensor offers a different concept 141 | to the boring status quo, it's interesting to note that there is nothing 142 | preventing you from starting a listening service directly within a source, which 143 | processes and relays events to Riemann using whatever protocol it implements. 
144 | 145 | Here is an example of a source which listens for TCP connections to port 146 | 8000, accepting any number on a line and passing that to the event queue:: 147 | 148 | from twisted.internet.protocol import Factory 149 | from twisted.protocols.basic import LineReceiver 150 | from twisted.internet import reactor 151 | 152 | from zope.interface import implementer 153 | 154 | from tensor.interfaces import ITensorSource 155 | from tensor.objects import Source 156 | 157 | class Numbers(LineReceiver): 158 | def __init__(self, source): 159 | self.source = source 160 | 161 | def lineReceived(self, line): 162 | """ 163 | Send any numbers received back to the Tensor queue 164 | """ 165 | print(repr(line)) 166 | try: 167 | num = float(line) 168 | self.source.queueBack( 169 | self.source.createEvent('ok', 'Number: %s' % num, num) 170 | ) 171 | except (TypeError, ValueError): 172 | pass 173 | 174 | class NumbersFactory(Factory): 175 | def __init__(self, source): 176 | self.source = source 177 | 178 | def buildProtocol(self, addr): 179 | return Numbers(self.source) 180 | 181 | @implementer(ITensorSource) 182 | class NumberProxy(Source): 183 | def startTimer(self): 184 | # Override starting the source timer, we don't need it 185 | f = NumbersFactory(self) 186 | reactor.listenTCP(8000, f) 187 | 188 | def get(self): 189 | # Implement the get method, but we can ignore it 190 | pass 191 | -------------------------------------------------------------------------------- /puppet/.gitignore: -------------------------------------------------------------------------------- 1 | # Bundler 2 | /.bundle/ 3 | /vendor/bundle/ 4 | Gemfile.lock 5 | 6 | # librarian-puppet 7 | /.librarian/ 8 | /.tmp/ 9 | /vendor/puppet/ 10 | /modules/ 11 | Puppetfile.lock 12 | 13 | # Fixture modules loaded by librarian-puppet 14 | /spec/fixtures/modules/ 15 | !/spec/fixtures/modules/tensor 16 | 17 | # Puppet 18 | /pkg/ 19 | -------------------------------------------------------------------------------- /puppet/.puppet-lint.rc: -------------------------------------------------------------------------------- 1 | --relative 2 | --no-class_inherits_from_params_class 3 | --no-80chars 4 | -------------------------------------------------------------------------------- /puppet/.rubocop.yml: -------------------------------------------------------------------------------- 1 | AllCops: 2 | TargetRubyVersion: 1.9 3 | Exclude: 4 | - 'modules/**/*' 5 | - 'pkg/**/*' 6 | - 'spec/fixtures/**/*' 7 | - 'vendor/**/*' 8 | 9 | Style/HashSyntax: 10 | EnforcedStyle: hash_rockets 11 | 12 | Style/RegexpLiteral: 13 | # EnforcedStyle: slashes # Doesn't seem to work :'( 14 | Enabled: false 15 | 16 | Style/TrailingCommaInLiteral: 17 | Enabled: false 18 | 19 | Style/SpaceInsideHashLiteralBraces: 20 | EnforcedStyle: no_space 21 | 22 | Style/WordArray: 23 | Enabled: false 24 | -------------------------------------------------------------------------------- /puppet/Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | group :test do 4 | gem 'rake' 5 | 6 | puppetversion = ENV['PUPPET_VERSION'] || ['>= 3.4.0'] 7 | gem 'puppet', puppetversion 8 | 9 | gem 'librarian-puppet' 10 | gem 'metadata-json-lint' 11 | gem 'puppetlabs_spec_helper', '>= 0.8.2' 12 | gem 'puppet-lint', '>= 1.0.0' 13 | gem 'facter', '>= 1.7.0' 14 | gem 'rspec-puppet-facts' 15 | end 16 | -------------------------------------------------------------------------------- /puppet/README.md: 
-------------------------------------------------------------------------------- 1 | # tensor 2 | 3 | -------------------------------------------------------------------------------- /puppet/Rakefile: -------------------------------------------------------------------------------- 1 | require 'puppetlabs_spec_helper/rake_tasks' 2 | require 'metadata-json-lint/rake_task' 3 | require 'puppet-lint/tasks/puppet-lint' 4 | 5 | task :librarian_spec_prep do 6 | sh 'librarian-puppet install --path=spec/fixtures/modules/' 7 | end 8 | task :spec_prep => :librarian_spec_prep 9 | 10 | Rake::Task[:lint].clear 11 | PuppetLint::RakeTask.new(:lint) do |config| 12 | config.fail_on_warnings = true 13 | config.ignore_paths = [ 14 | 'modules/**/*.pp', 15 | 'pkg/**/*.pp', 16 | 'spec/**/*.pp', 17 | 'vendor/**/*.pp', 18 | ] 19 | end 20 | 21 | desc 'Validate manifests, templates, and ruby files' 22 | task :validate do 23 | Dir['manifests/**/*.pp'].each do |manifest| 24 | sh "puppet parser validate --noop #{manifest}" 25 | end 26 | Dir['spec/**/*.rb', 'lib/**/*.rb'].each do |ruby_file| 27 | sh "ruby -c #{ruby_file}" unless ruby_file =~ /spec\/fixtures/ 28 | end 29 | Dir['templates/**/*.erb'].each do |template| 30 | sh "erb -P -x -T '-' #{template} | ruby -c" 31 | end 32 | end 33 | 34 | desc 'Run syntax, lint, metadata and spec tests.' 35 | task :test => [ 36 | :syntax, 37 | :lint, 38 | :metadata_lint, 39 | :spec, 40 | ] 41 | -------------------------------------------------------------------------------- /puppet/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # TODO: Document 2 | class tensor( 3 | $interval=1.0, 4 | $default_ttl=60.0, 5 | $outputs={}, 6 | $sources={} 7 | ) { 8 | 9 | if $::operatingsystem == 'Ubuntu' { 10 | apt::source {'tensor': 11 | location => 'https://calston.github.io/tensor/ubuntu', 12 | repos => 'main', 13 | key => 'B70AAA23106FEDF92AD79F3D6FC4C33F2B2A5480', 14 | key_server => 'keyserver.ubuntu.com' 15 | } 16 | } 17 | if $::operatingsystem == 'Debian' { 18 | apt::source {'tensor': 19 | location => 'https://calston.github.io/tensor/debian', 20 | repos => 'main', 21 | key => 'B70AAA23106FEDF92AD79F3D6FC4C33F2B2A5480', 22 | key_server => 'keyserver.ubuntu.com' 23 | } 24 | } 25 | 26 | package{'tensor': 27 | ensure => latest, 28 | require => Apt::Source['tensor'] 29 | } 30 | 31 | service{'tensor': 32 | ensure => running, 33 | require => Package['tensor'] 34 | } 35 | 36 | file{'/etc/tensor/conf.d': 37 | ensure => directory, 38 | require => Package['tensor'] 39 | } 40 | 41 | file{'/etc/tensor/tensor.yml': 42 | ensure => present, 43 | content => template('tensor/tensor.yml.erb'), 44 | notify => Service['tensor'], 45 | require => File['/etc/tensor/conf.d'], 46 | } 47 | 48 | create_resources(tensor::output, $outputs) 49 | 50 | create_resources(tensor::source, $sources) 51 | } 52 | 53 | -------------------------------------------------------------------------------- /puppet/manifests/output.pp: -------------------------------------------------------------------------------- 1 | # TODO: Document 2 | define tensor::output($output, $config=false) { 3 | file {"/etc/tensor/conf.d/output_${title}.yml": 4 | ensure => present, 5 | content => template('tensor/tensor-output.yml.erb'), 6 | owner => root, 7 | mode => '0644', 8 | notify => Service['tensor'], 9 | require => File['/etc/tensor/conf.d'], 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /puppet/manifests/puppetping.pp: 
-------------------------------------------------------------------------------- 1 | # TODO: Document 2 | define tensor::puppetping($service='ping', $route=false, $query='kernel="Linux"') { 3 | $orghosts = query_nodes($query) 4 | 5 | file { '/etc/tensor/conf.d/puppet_pings.yml': 6 | ensure => present, 7 | content => template('tensor/puppet_pings.yml.erb'), 8 | owner => root, 9 | mode => '0644', 10 | notify => Service['tensor'], 11 | require => File['/etc/tensor/conf.d'], 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /puppet/manifests/source.pp: -------------------------------------------------------------------------------- 1 | # TODO: Document 2 | define tensor::source($source, $interval='60.0', $config=false, 3 | $critical=false, $warning=false, $service_name=false, $tags=false 4 | ) { 5 | $service = $title 6 | 7 | file {"/etc/tensor/conf.d/${service}.yml": 8 | ensure => present, 9 | content => template('tensor/tensor-source.yml.erb'), 10 | notify => Service['tensor'], 11 | require => File['/etc/tensor/conf.d'], 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /puppet/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "calston-tensor", 3 | "version": "0.1.1", 4 | "author": "calston", 5 | "summary": "Tensor agent module", 6 | "license": "MIT", 7 | "source": "https://github.com/calston/tensor/", 8 | "project_page": "https://tensor.readthedocs.org/en/stable/", 9 | "issues_url": "https://github.com/calston/tensor/issues", 10 | "dependencies": [ 11 | { 12 | "name": "puppetlabs/stdlib", 13 | "version_requirement": ">= 1.0.0" 14 | }, 15 | { 16 | "name": "puppetlabs/apt" 17 | } 18 | ], 19 | "data_provider": null, 20 | "operatingsystem_support": [ 21 | { 22 | "operatingsystem": "Ubuntu", 23 | "operatingsystemrelease": [ 24 | "14.04" 25 | ] 26 | }, 27 | { 28 | "operatingsystem": "Debian", 29 | "operatingsystemrelease": [ 30 | "8.5" 31 | ] 32 | } 33 | 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /puppet/spec/classes/coverage_spec.rb: -------------------------------------------------------------------------------- 1 | at_exit { RSpec::Puppet::Coverage.report! 
} 2 | -------------------------------------------------------------------------------- /puppet/spec/classes/init_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | 3 | describe 'tensor' do 4 | on_supported_os.each do |os, facts| 5 | context "on #{os}" do 6 | let(:facts) { facts } 7 | 8 | describe 'with defaults for all parameters' do 9 | it { is_expected.to contain_class('tensor') } 10 | end 11 | end 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /puppet/spec/fixtures/modules/tensor/manifests: -------------------------------------------------------------------------------- 1 | ../../../../manifests -------------------------------------------------------------------------------- /puppet/spec/fixtures/modules/tensor/templates: -------------------------------------------------------------------------------- 1 | ../../../../templates -------------------------------------------------------------------------------- /puppet/spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | require 'puppetlabs_spec_helper/module_spec_helper' 2 | 3 | require 'rspec-puppet-facts' 4 | include RspecPuppetFacts 5 | -------------------------------------------------------------------------------- /puppet/templates/puppet_pings.yml.erb: -------------------------------------------------------------------------------- 1 | sources: 2 | <%- @orghosts.each do |host| -%> 3 | - service: <%= @service %> 4 | source: tensor.sources.network.Ping 5 | <%- if @route -%> 6 | route: <%= @route %> 7 | <%- end -%> 8 | interval: 30.0 9 | hostname: <%= host %> 10 | tags: alert 11 | critical: { 12 | ping.latency: "> 600", 13 | ping.loss: "> 50" 14 | } 15 | warning: { 16 | ping.loss: "> 0" 17 | } 18 | 19 | <%- end -%> 20 | -------------------------------------------------------------------------------- /puppet/templates/tensor-output.yml.erb: -------------------------------------------------------------------------------- 1 | outputs: 2 | - output: <%= @output %> 3 | <%- if @config -%> 4 | <%- @config.keys.sort.each do |key| -%> 5 | <%= key %>: <%= @config[key] %> 6 | <%- end -%> 7 | <%- end -%> 8 | -------------------------------------------------------------------------------- /puppet/templates/tensor-source.yml.erb: -------------------------------------------------------------------------------- 1 | sources: 2 | <%- if @service_name -%> 3 | - service: <%= @service_name %> 4 | <%- else -%> 5 | - service: <%= @service %> 6 | <%- end -%> 7 | source: <%= @source %> 8 | interval: <%= @interval %> 9 | <%- if @tags -%> 10 | tags: <%= @tags %> 11 | <%- end -%> 12 | <%- if @config -%> 13 | <%- @config.keys.sort.each do |key| -%> 14 | <%= key %>: <%= @config[key] %> 15 | <%- end -%> 16 | <%- end -%> 17 | <%- if @critical -%> 18 | critical: { 19 | <%- @critical.keys.sort.each do |key| -%> 20 | <%= key %>: "<%= @critical[key] %>", 21 | <%- end -%> 22 | } 23 | <%- end -%> 24 | <%- if @warning -%> 25 | warning: { 26 | <%- @warning.keys.sort.each do |key| -%> 27 | <%= key %>: "<%= @warning[key] %>", 28 | <%- end -%> 29 | } 30 | <%- end -%> 31 | -------------------------------------------------------------------------------- /puppet/templates/tensor.yml.erb: -------------------------------------------------------------------------------- 1 | ttl: <%= @default_ttl %> 2 | interval: <%= @interval %> 3 | 4 | include_path: /etc/tensor/conf.d/ 5 |
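The templates above render per-output and per-source YAML fragments into /etc/tensor/conf.d/, and tensor.yml.erb points include_path at that directory so the agent picks the fragments up. The sketch below illustrates one way such fragment merging can work; it is illustrative only -- the function name load_config is hypothetical, and Tensor's actual loader (presumably in tensor/service.py) may merge differently.

import os
import yaml

def load_config(path='/etc/tensor/tensor.yml'):
    # Start from the base file rendered by tensor.yml.erb
    with open(path) as f:
        config = yaml.safe_load(f) or {}
    include = config.get('include_path')
    if include and os.path.isdir(include):
        for name in sorted(os.listdir(include)):
            if not name.endswith('.yml'):
                continue
            with open(os.path.join(include, name)) as f:
                fragment = yaml.safe_load(f) or {}
            for key, value in fragment.items():
                if isinstance(value, list):
                    # List keys such as 'sources' and 'outputs' accumulate
                    config.setdefault(key, []).extend(value)
                else:
                    # Scalar keys in a fragment override the base file
                    config[key] = value
    return config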
-------------------------------------------------------------------------------- /puppet/tests/init.pp: -------------------------------------------------------------------------------- 1 | # The baseline for module testing used by Puppet Labs is that each manifest 2 | # should have a corresponding test manifest that declares that class or defined 3 | # type. 4 | # 5 | # Tests are then run by using puppet apply --noop (to check for compilation 6 | # errors and view a log of events) or by fully applying the test in a virtual 7 | # environment (to compare the resulting system state to the desired state). 8 | # 9 | # Learn more about module testing here: 10 | # http://docs.puppetlabs.com/guides/tests_smoke.html 11 | # 12 | include tensor 13 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Twisted 2 | PyYaml 3 | protobuf 4 | construct<2.6 5 | pysnmp==4.2.5 6 | cryptography 7 | -------------------------------------------------------------------------------- /scripts/post-install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ! -d /etc/tensor ]; 4 | then 5 | mkdir -p /etc/tensor 6 | cat >/etc/tensor/tensor.yml <<EOL 7 | ttl: 60.0 8 | interval: 1.0 9 | 10 | outputs: 11 | - output: tensor.outputs.riemann.RiemannUDP 12 | server: 127.0.0.1 13 | port: 5555 14 | 15 | sources: 16 | - service: load 17 | source: tensor.sources.linux.basic.LoadAverage 18 | interval: 2.0 19 | 20 | - service: cpu 21 | source: tensor.sources.linux.basic.CPU 22 | interval: 2.0 23 | critical: { 24 | cpu: "> 0.8" 25 | } 26 | 27 | - service: memory 28 | source: tensor.sources.linux.basic.Memory 29 | interval: 2.0 30 | EOL 31 | fi 32 | 33 | if [ ! -d /var/lib/tensor ]; 34 | then 35 | mkdir -p /var/lib/tensor 36 | fi 37 | 38 | update-rc.d tensor defaults 39 | service tensor status >/dev/null 2>&1 40 | 41 | if [ "$?" -gt "0" ]; 42 | then 43 | service tensor start 2>&1 44 | else 45 | service tensor restart 2>&1 46 | fi 47 | 48 | exit 0 49 | -------------------------------------------------------------------------------- /scripts/tensor: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | ### BEGIN INIT INFO 3 | # Provides: tensor 4 | # Required-Start: $remote_fs $network $named 5 | # Required-Stop: $remote_fs $network $named 6 | # Default-Start: 2 3 4 5 7 | # Default-Stop: 0 1 6 8 | # Short-Description: Start/stop tensor 9 | # Description: Start/stop tensor daemon 10 | ### END INIT INFO 11 | 12 | 13 | PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin 14 | DAEMON=`which twistd` 15 | NAME=tensor 16 | DESC=tensor 17 | FDHACK=4096 18 | 19 | test -x $DAEMON || exit 0 20 | 21 | LOGDIR=/var/log 22 | PIDFILE=/var/run/$NAME.pid 23 | DODTIME=2 24 | DAEMON_OPTS="--pidfile=${PIDFILE} --logfile=${LOGDIR}/tensor.log tensor -c /etc/tensor/tensor.yml" 25 | 26 | set -e 27 | 28 | ulimit -n $FDHACK 29 | 30 | running_pid() 31 | { 32 | # Check if a given process pid's cmdline matches a given name 33 | pid=$1 34 | name=$2 35 | [ -z "$pid" ] && return 1 36 | [ ! -d /proc/$pid ] && return 1 37 | (cat /proc/$pid/cmdline | tr "\000" "\n"|grep -q $name) || return 1 38 | return 0 39 | } 40 | 41 | running() 42 | { 43 | # Check if the process is running looking at /proc 44 | # (works for all users) 45 | 46 | # No pidfile, probably no daemon present 47 | [ ! -f "$PIDFILE" ] && return 1 48 | # Obtain the pid and check it against the binary name 49 | pid=`cat $PIDFILE` 50 | running_pid $pid $DAEMON || return 1 51 | return 0 52 | } 53 | 54 | force_stop() { 55 | # Forcefully kill the process 56 | [ ! -f "$PIDFILE" ] && return 57 | if running ; then 58 | kill -15 $pid 59 | # Is it really dead?
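# Escalation: the TERM sent above gets DODTIME seconds to land before
# force_stop falls back to KILL below, then waits once more to confirm
# the process actually died before giving up.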
60 | [ -n "$DODTIME" ] && sleep "$DODTIME"s 61 | if running ; then 62 | kill -9 $pid 63 | [ -n "$DODTIME" ] && sleep "$DODTIME"s 64 | if running ; then 65 | echo "Cannot kill $NAME (pid=$pid)!" 66 | exit 1 67 | fi 68 | fi 69 | fi 70 | rm -f $PIDFILE 71 | return 0 72 | } 73 | 74 | case "$1" in 75 | start) 76 | echo -n "Starting $NAME: " 77 | start-stop-daemon --start --quiet --pidfile $PIDFILE \ 78 | --exec $DAEMON -- $DAEMON_OPTS 79 | test -f $PIDFILE || sleep 1 80 | if running ; then 81 | echo "$NAME." 82 | else 83 | echo " ERROR." 84 | fi 85 | ;; 86 | stop) 87 | echo -n "Stopping $NAME: " 88 | start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE 89 | echo "$NAME." 90 | ;; 91 | force-stop) 92 | echo -n "Forcefully stopping $NAME: " 93 | force_stop 94 | if ! running ; then 95 | echo "$NAME." 96 | else 97 | echo " ERROR." 98 | fi 99 | ;; 100 | force-reload) 101 | start-stop-daemon --stop --test --quiet --pidfile $PIDFILE --exec $DAEMON && $0 restart || exit 0 102 | ;; 103 | restart) 104 | echo -n "Restarting $NAME: " 105 | 106 | start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE 107 | [ -n "$DODTIME" ] && sleep $DODTIME 108 | 109 | start-stop-daemon --start --quiet --pidfile $PIDFILE \ 110 | --exec $DAEMON -- $DAEMON_OPTS 111 | echo "$NAME." 112 | ;; 113 | status) 114 | echo -n "$NAME is " 115 | if running ; then 116 | echo "running" 117 | else 118 | echo "not running." 119 | exit 1 120 | fi 121 | ;; 122 | *) 123 | N=/etc/init.d/$NAME 124 | echo "Usage: $N {start|stop|restart|force-reload|status|force-stop}" >&2 125 | exit 1 126 | ;; 127 | esac 128 | 129 | exit 0 130 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | 4 | setup( 5 | name="tensor", 6 | version='1.0.0', 7 | url='http://github.com/calston/tensor', 8 | license='MIT', 9 | description="A Twisted based monitoring agent for Riemann", 10 | author='Colin Alston', 11 | author_email='colin.alston@gmail.com', 12 | packages=find_packages() + [ 13 | "twisted.plugins", 14 | ], 15 | package_data={ 16 | 'twisted.plugins': ['twisted/plugins/tensor_plugin.py'] 17 | }, 18 | include_package_data=True, 19 | install_requires=[ 20 | 'Twisted', 21 | 'PyYaml', 22 | 'protobuf', 23 | 'construct<2.6', 24 | 'pysnmp==4.2.5', 25 | 'cryptography', 26 | ], 27 | classifiers=[ 28 | 'Development Status :: 4 - Beta', 29 | 'Intended Audience :: System Administrators', 30 | 'License :: OSI Approved :: MIT License', 31 | 'Operating System :: POSIX', 32 | 'Programming Language :: Python', 33 | 'Topic :: System :: Monitoring', 34 | ], 35 | ) 36 | -------------------------------------------------------------------------------- /tensor.yml: -------------------------------------------------------------------------------- 1 | # A YAML (sorry) config file for Tensor 2 | 3 | # Output details 4 | outputs: 5 | - output: tensor.outputs.riemann.RiemannUDP 6 | server: 127.0.0.1 7 | port: 5555 8 | 9 | # Default TTL for events (default: 60 seconds) 10 | # It should be noted that if Tensor loses its connection to Riemann it will purge 11 | # queued events if their age exceeds the TTL. 12 | # Sources can also have a custom TTL 13 | ttl: 60.0 14 | 15 | # Dequeue events and send to Riemann at this rate in seconds. This basically 16 | # decides how "realtimey" the events are.
If this is longer than your source 17 | # intervals, you'll get 'bursty' events, but they will retain the right 18 | # timestamps. 19 | interval: 1.0 20 | 21 | # Sources 22 | sources: 23 | - service: load 24 | source: tensor.sources.linux.basic.LoadAverage 25 | interval: 2.0 26 | 27 | - service: cpu 28 | source: tensor.sources.linux.basic.CPU 29 | interval: 2.0 30 | critical: { 31 | cpu: "> 0.1" 32 | } 33 | 34 | - service: memory 35 | source: tensor.sources.linux.basic.Memory 36 | interval: 2.0 37 | -------------------------------------------------------------------------------- /tensor/__init__.py: -------------------------------------------------------------------------------- 1 | """Tensor - A monitoring client for Riemann 2 | 3 | .. moduleauthor:: Colin Alston 4 | 5 | """ 6 | 7 | from tensor import service 8 | 9 | def makeService(config): 10 | # Create TensorService 11 | return service.TensorService(config) 12 | -------------------------------------------------------------------------------- /tensor/aggregators.py: -------------------------------------------------------------------------------- 1 | def Counter32(a, b, delta): 2 | """32bit counter aggregator with wrapping 3 | """ 4 | if b < a: 5 | c = 4294967295 - a 6 | return (c + b) / float(delta) 7 | 8 | return (b - a) / float(delta) 9 | 10 | def Counter64(a, b, delta): 11 | """64bit counter aggregator with wrapping 12 | """ 13 | if b < a: 14 | c = 18446744073709551615 - a 15 | return (c + b) / float(delta) 16 | 17 | return (b - a) / float(delta) 18 | 19 | def Counter(a, b, delta): 20 | """Counter derivative 21 | """ 22 | if b < a: 23 | return None 24 | 25 | return (b - a) / float(delta) 26 | -------------------------------------------------------------------------------- /tensor/ihateprotobuf/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/ihateprotobuf/__init__.py -------------------------------------------------------------------------------- /tensor/ihateprotobuf/proto.proto: -------------------------------------------------------------------------------- 1 | option java_package = "com.aphyr.riemann"; 2 | option java_outer_classname = "Proto"; 3 | 4 | message State { 5 | optional int64 time = 1; 6 | optional string state = 2; 7 | optional string service = 3; 8 | optional string host = 4; 9 | optional string description = 5; 10 | optional bool once = 6; 11 | repeated string tags = 7; 12 | optional float ttl = 8; 13 | } 14 | 15 | message Event { 16 | optional int64 time = 1; 17 | optional string state = 2; 18 | optional string service = 3; 19 | optional string host = 4; 20 | optional string description = 5; 21 | repeated string tags = 7; 22 | optional float ttl = 8; 23 | repeated Attribute attributes = 9; 24 | 25 | optional sint64 metric_sint64 = 13; 26 | optional double metric_d = 14; 27 | optional float metric_f = 15; 28 | } 29 | 30 | message Query { 31 | optional string string = 1; 32 | } 33 | 34 | message Msg { 35 | optional bool ok = 2; 36 | optional string error = 3; 37 | repeated State states = 4; 38 | optional Query query = 5; 39 | repeated Event events = 6; 40 | } 41 | 42 | message Attribute { 43 | required string key = 1; 44 | optional string value = 2; 45 | } 46 | -------------------------------------------------------------------------------- /tensor/interfaces.py: -------------------------------------------------------------------------------- 1 | from zope.interface import 
Interface 2 | 3 | 4 | class ITensorProtocol(Interface): 5 | """Interface for Tensor client protocols""" 6 | 7 | def sendEvent(self, event): 8 | """Sends an event to this client""" 9 | pass 10 | 11 | class ITensorSource(Interface): 12 | """Interface for Tensor metric sources""" 13 | 14 | def get(self): 15 | """Return this source data""" 16 | pass 17 | -------------------------------------------------------------------------------- /tensor/logs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/logs/__init__.py -------------------------------------------------------------------------------- /tensor/logs/follower.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | class LogFollower(object): 4 | """Provides a class for following log files between runs 5 | 6 | :param logfile: Full path to logfile 7 | :type logfile: str 8 | :param parser: Optional parser callable applied to each log line 9 | :type parser: callable 10 | """ 11 | 12 | def __init__(self, logfile, parser=None, tmp_path="/var/lib/tensor/", history=False): 13 | self.logfile = logfile 14 | self.tmp = os.path.join(tmp_path, 15 | '%s.lf' % self.logfile.lstrip('/').replace('/','-')) 16 | 17 | self.history = history 18 | 19 | self.readLast() 20 | 21 | self.parser = parser 22 | 23 | def cleanStore(self): 24 | os.unlink(self.tmp) 25 | 26 | def storeLast(self): 27 | fi = open(self.tmp, 'wt') 28 | fi.write('%s:%s' % (self.lastSize, self.lastInode)) 29 | fi.close() 30 | 31 | def readLast(self): 32 | if os.path.exists(self.tmp): 33 | fi = open(self.tmp, 'rt') 34 | ls, li = fi.read().split(':') 35 | self.lastSize = int(ls) 36 | self.lastInode = int(li) 37 | else: 38 | if self.history: 39 | self.lastSize = 0 40 | self.lastInode = 0 41 | else: 42 | # Don't re-read the entire file 43 | stat = os.stat(self.logfile) 44 | self.lastSize = stat.st_size 45 | self.lastInode = stat.st_ino 46 | 47 | def get_fn(self, fn, max_lines=None): 48 | """Passes each parsed log line to `fn` 49 | This is a better idea than storing a giant log file in memory 50 | """ 51 | stat = os.stat(self.logfile) 52 | 53 | if (stat.st_ino == self.lastInode) and (stat.st_size == self.lastSize): 54 | # Nothing new 55 | return [] 56 | 57 | # Handle rollover and rotations vaguely 58 | if (stat.st_ino != self.lastInode) or (stat.st_size < self.lastSize): 59 | self.lastSize = 0 60 | 61 | fi = open(self.logfile, 'rt') 62 | fi.seek(self.lastSize) 63 | 64 | self.lastInode = stat.st_ino 65 | 66 | lines = 0 67 | 68 | for i in fi: 69 | lines += 1 70 | if max_lines and (lines > max_lines): 71 | self.storeLast() 72 | fi.close() 73 | return 74 | 75 | if '\n' in i: 76 | self.lastSize += len(i) 77 | if self.parser: 78 | line = self.parser(i.strip('\n')) 79 | else: 80 | line = i.strip('\n') 81 | 82 | fn(line) 83 | 84 | self.storeLast() 85 | 86 | fi.close() 87 | 88 | def get(self, max_lines=None): 89 | """Returns a big list of all log lines since the last run 90 | """ 91 | rows = [] 92 | 93 | self.get_fn(lambda row: rows.append(row), max_lines=max_lines) 94 | 95 | return rows 96 | -------------------------------------------------------------------------------- /tensor/logs/parsers.py: -------------------------------------------------------------------------------- 1 | import re 2 | from datetime import datetime 3 | 4 | class ApacheLogParserError(Exception): 5 | pass 6 | 7 | class ApacheLogParser: 8 | """Parses Apache log format
9 | 10 | Adapted from http://code.google.com/p/apachelog 11 | 12 | :param format: Apache log format definition eg 13 | r'%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"' 14 | or one of 'common', 'vhcommon' or 'combined' 15 | :type format: str 16 | """ 17 | def __init__(self, format): 18 | formats = { 19 | # Common Log Format (CLF) 20 | 'common': r'%h %l %u %t \"%r\" %>s %b', 21 | 22 | # Common Log Format with Virtual Host 23 | 'vhcommon': r'%v %h %l %u %t \"%r\" %>s %b', 24 | 25 | # NCSA extended/combined log format 26 | 'combined': r'%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"', 27 | } 28 | 29 | self._names = [] 30 | self._types = [] 31 | self._regex = None 32 | self._pattern = '' 33 | 34 | self.types = { 35 | '%h': ('client', str), 36 | '%a': ('client-ip', str), 37 | '%b': ('bytes', int), 38 | '%B': ('bytes', int), 39 | '%D': ('request-time', int), 40 | '%T': ('request-time', float), 41 | '%f': ('filename', str), 42 | '%l': ('logname', str), 43 | '%u': ('user', str), 44 | '%t': ('time', self._parse_date), 45 | '%r': ('request', str), 46 | '%>s': ('status', int), 47 | '%v': ('vhost', str), 48 | } 49 | 50 | if format in formats: 51 | self._parse_format(formats[format]) 52 | else: 53 | self._parse_format(format) 54 | 55 | def _parse_date(self, date): 56 | date = date.split()[0][1:] 57 | return datetime.strptime(date, "%d/%b/%Y:%H:%M:%S") 58 | 59 | def alias(self, field): 60 | if field in self.types: 61 | return self.types[field][0] 62 | else: 63 | return field 64 | 65 | def _parse_format(self, format): 66 | """ 67 | Converts the input format to a regular 68 | expression, as well as extracting fields 69 | 70 | Raises an exception if it couldn't compile 71 | the generated regex. 72 | """ 73 | format = format.strip() 74 | format = re.sub('[ \t]+',' ',format) 75 | 76 | subpatterns = [] 77 | 78 | findquotes = re.compile(r'^\\"') 79 | findreferreragent = re.compile('Referer|User-Agent', re.IGNORECASE) 80 | findpercent = re.compile('^%.*t$') 81 | lstripquotes = re.compile(r'^\\"') 82 | rstripquotes = re.compile(r'\\"$') 83 | header = re.compile(r'.*%\{([^\}]+)\}i') 84 | 85 | for element in format.split(' '): 86 | 87 | hasquotes = 0 88 | if findquotes.search(element): hasquotes = 1 89 | 90 | if hasquotes: 91 | element = lstripquotes.sub('', element) 92 | element = rstripquotes.sub('', element) 93 | 94 | head = header.match(element) 95 | if head: 96 | self._names.append(head.groups()[0].lower()) 97 | self._types.append(str) 98 | else: 99 | self._names.append(self.alias(element)) 100 | self._types.append(self.types.get(element, [None, str])[1]) 101 | 102 | subpattern = r'(\S*)' 103 | 104 | if hasquotes: 105 | if element == '%r' or findreferreragent.search(element): 106 | subpattern = r'\"([^"\\]*(?:\\.[^"\\]*)*)\"' 107 | else: 108 | subpattern = r'\"([^\"]*)\"' 109 | 110 | elif findpercent.search(element): 111 | subpattern = r'(\[[^\]]+\])' 112 | 113 | elif element == '%U': 114 | subpattern = '(.+?)' 115 | 116 | subpatterns.append(subpattern) 117 | 118 | self._pattern = '^' + ' '.join(subpatterns) + '$' 119 | try: 120 | self._regex = re.compile(self._pattern) 121 | except Exception as e: 122 | raise ApacheLogParserError(e) 123 | 124 | def parse(self, line): 125 | """ 126 | Parses a single line from the log file and returns 127 | a dictionary of its contents.
128 | 129 | Raises an exception if it couldn't parse the line 130 | """ 131 | line = line.strip() 132 | match = self._regex.match(line) 133 | 134 | if match: 135 | data = {} 136 | for i, e in enumerate(match.groups()): 137 | if e == "-": 138 | k, v = self._names[i], None 139 | else: 140 | k, v = self._names[i], self._types[i](e) 141 | data[k] = v 142 | return data 143 | 144 | raise ApacheLogParserError("Unable to parse: %s" % line) 145 | 146 | def pattern(self): 147 | """ 148 | Returns the compound regular expression the parser extracted 149 | from the input format (a string) 150 | """ 151 | return self._pattern 152 | 153 | def names(self): 154 | """ 155 | Returns the field names the parser extracted from the 156 | input format (a list) 157 | """ 158 | return self._names 159 | 160 | -------------------------------------------------------------------------------- /tensor/outputs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/outputs/__init__.py -------------------------------------------------------------------------------- /tensor/outputs/elasticsearch.py: -------------------------------------------------------------------------------- 1 | import time 2 | import json 3 | import datetime 4 | 5 | from twisted.internet import reactor, defer, task 6 | from twisted.python import log 7 | 8 | try: 9 | from OpenSSL import SSL 10 | from twisted.internet import ssl 11 | except: 12 | SSL=None 13 | 14 | from tensor.protocol import elasticsearch 15 | 16 | from tensor.objects import Output 17 | 18 | class ElasticSearch(Output): 19 | """ElasticSearch HTTP API output 20 | 21 | **Configuration arguments:** 22 | 23 | :param url: Elasticsearch URL (default: http://localhost:9200) 24 | :type url: str 25 | :param maxsize: Maximum queue backlog size (default: 250000, 0 disables) 26 | :type maxsize: int 27 | :param maxrate: Maximum rate of documents added to index (default: 100) 28 | :type maxrate: int 29 | :param interval: Queue check interval in seconds (default: 1.0) 30 | :type interval: float 31 | :param user: Optional basic auth username 32 | :type user: str 33 | :param password: Optional basic auth password 34 | :type password: str 35 | :param index: Index name format to store documents in Elastic 36 | (default: tensor-%Y.%m.%d) 37 | :type index: str 38 | """ 39 | def __init__(self, *a): 40 | Output.__init__(self, *a) 41 | self.events = [] 42 | self.t = task.LoopingCall(self.tick) 43 | 44 | self.inter = float(self.config.get('interval', 1.0)) # tick interval 45 | self.maxsize = int(self.config.get('maxsize', 250000)) 46 | 47 | self.user = self.config.get('user') 48 | self.password = self.config.get('password') 49 | 50 | self.url = self.config.get('url', 'http://localhost:9200') 51 | 52 | maxrate = int(self.config.get('maxrate', 100)) 53 | 54 | self.index = self.config.get('index', 'tensor-%Y.%m.%d') 55 | 56 | if maxrate > 0: 57 | self.queueDepth = int(maxrate * self.inter) 58 | else: 59 | self.queueDepth = None 60 | 61 | def createClient(self): 62 | """Sets up HTTP connector and starts queue timer 63 | """ 64 | 65 | server = self.config.get('server', 'localhost') 66 | port = int(self.config.get('port', 9200)) 67 | 68 | self.client = elasticsearch.ElasticSearch(self.url, self.user, 69 | self.password, self.index) 70 | 71 | self.t.start(self.inter) 72 | 73 | def stop(self): 74 | """Stop this client.
75 | """ 76 | self.t.stop() 77 | 78 | def transformEvent(self, e): 79 | d = dict(e) 80 | t = datetime.datetime.utcfromtimestamp(e.time) 81 | d['@timestamp'] = t.isoformat() 82 | 83 | if 'ttl' in d: 84 | # Useless field to Elasticsearch 85 | del d['ttl'] 86 | 87 | return d 88 | 89 | def sendEvents(self, events): 90 | return self.client.bulkIndex([self.transformEvent(e) for e in events]) 91 | 92 | @defer.inlineCallbacks 93 | def tick(self): 94 | """Clock tick called every self.inter 95 | """ 96 | if self.events: 97 | if self.queueDepth and (len(self.events) > self.queueDepth): 98 | # Remove maximum of self.queueDepth items from queue 99 | events = self.events[:self.queueDepth] 100 | self.events = self.events[self.queueDepth:] 101 | else: 102 | events = self.events 103 | self.events = [] 104 | 105 | try: 106 | result = yield self.sendEvents(events) 107 | if result.get('errors', False): 108 | log.msg(repr(result)) 109 | self.events.extend(events) 110 | 111 | except Exception as e: 112 | log.msg('Could not connect to elasticsearch ' + str(e)) 113 | self.events.extend(events) 114 | 115 | def eventsReceived(self, events): 116 | """Receives a list of events and queues them 117 | 118 | Arguments: 119 | events -- list of `tensor.objects.Event` 120 | """ 121 | # Make sure queue isn't oversized 122 | if (self.maxsize < 1) or (len(self.events) < self.maxsize): 123 | self.events.extend(events) 124 | 125 | # Backward compatibility stub 126 | ElasticSearchLog = ElasticSearch 127 | -------------------------------------------------------------------------------- /tensor/outputs/riemann.py: -------------------------------------------------------------------------------- 1 | import time 2 | import random 3 | 4 | from twisted.internet import reactor, defer, task 5 | from twisted.python import log 6 | 7 | try: 8 | from OpenSSL import SSL 9 | from twisted.internet import ssl 10 | except: 11 | SSL=None 12 | 13 | from tensor.protocol import riemann 14 | 15 | from tensor.objects import Output 16 | 17 | if SSL: 18 | class ClientTLSContext(ssl.ClientContextFactory): 19 | def __init__(self, key, cert): 20 | self.key = key 21 | self.cert = cert 22 | 23 | def getContext(self): 24 | self.method = SSL.TLSv1_METHOD 25 | ctx = ssl.ClientContextFactory.getContext(self) 26 | ctx.use_certificate_file(self.cert) 27 | ctx.use_privatekey_file(self.key) 28 | 29 | return ctx 30 | 31 | class RiemannTCP(Output): 32 | """Riemann TCP output 33 | 34 | **Configuration arguments:** 35 | 36 | :param server: Riemann server hostname (default: localhost) 37 | :type server: str. 38 | :param port: Riemann server port (default: 5555) 39 | :type port: int. 40 | :param failover: Enable server failover, in which case `server` may be a list 41 | :type failover: bool. 42 | :param maxrate: Maximum de-queue rate (0 is no limit) 43 | :type maxrate: int. 44 | :param maxsize: Maximum queue size (0 is no limit, default is 250000) 45 | :type maxsize: int. 46 | :param interval: De-queue interval in seconds (default: 1.0) 47 | :type interval: float. 48 | :param pressure: Maximum backpressure (-1 is no limit) 49 | :type pressure: int. 50 | :param tls: Use TLS (default false) 51 | :type tls: bool. 52 | :param cert: Host certificate path 53 | :type cert: str. 54 | :param key: Host private key path 55 | :type key: str. 
56 | :param allow_nan: Send events with None metric value (default true) 57 | :type allow_nan: bool 58 | """ 59 | def __init__(self, *a): 60 | Output.__init__(self, *a) 61 | self.events = [] 62 | self.t = task.LoopingCall(self.tick) 63 | 64 | self.inter = float(self.config.get('interval', 1.0)) # tick interval 65 | self.pressure = int(self.config.get('pressure', -1)) 66 | self.maxsize = int(self.config.get('maxsize', 250000)) 67 | self.expire = self.config.get('expire', False) 68 | self.allow_nan = self.config.get('allow_nan', True) 69 | 70 | maxrate = int(self.config.get('maxrate', 0)) 71 | 72 | if maxrate > 0: 73 | self.queueDepth = int(maxrate * self.inter) 74 | else: 75 | self.queueDepth = None 76 | 77 | self.tls = self.config.get('tls', False) 78 | 79 | if self.tls: 80 | self.cert = self.config['cert'] 81 | self.key = self.config['key'] 82 | 83 | def createClient(self): 84 | """Create a TCP connection to Riemann with automatic reconnection 85 | """ 86 | 87 | server = self.config.get('server', 'localhost') 88 | port = self.config.get('port', 5555) 89 | failover = self.config.get('failover', False) 90 | 91 | self.factory = riemann.RiemannClientFactory(server, failover=failover) 92 | 93 | if failover: 94 | initial = random.choice(server) 95 | else: 96 | initial = server 97 | 98 | log.msg('Connecting to Riemann on %s:%s' % (initial, port)) 99 | 100 | if self.tls: 101 | if SSL: 102 | self.connector = reactor.connectSSL(initial, port, self.factory, 103 | ClientTLSContext(self.key, self.cert)) 104 | else: 105 | log.msg('[FATAL] SSL support not available!' \ 106 | ' Please install PyOpenSSL. Exiting now') 107 | reactor.stop() 108 | else: 109 | self.connector = reactor.connectTCP(initial, port, self.factory) 110 | 111 | d = defer.Deferred() 112 | 113 | def cb(): 114 | # Wait until we have a useful proto object 115 | if hasattr(self.factory, 'proto') and self.factory.proto: 116 | self.t.start(self.inter) 117 | d.callback(None) 118 | else: 119 | reactor.callLater(0.01, cb) 120 | 121 | cb() 122 | 123 | return d 124 | 125 | def stop(self): 126 | """Stop this client. 
127 | """ 128 | self.t.stop() 129 | self.factory.stopTrying() 130 | self.connector.disconnect() 131 | 132 | def tick(self): 133 | """Clock tick called every self.inter 134 | """ 135 | if self.factory.proto: 136 | # Check backpressure 137 | if (self.pressure < 0) or (self.factory.proto.pressure <= self.pressure): 138 | self.emptyQueue() 139 | elif self.expire: 140 | # Expire stale events without mutating the list while iterating 141 | now = time.time() 142 | self.events = [e for e in self.events 143 | if (now - e.time) <= e.ttl] 144 | 145 | def emptyQueue(self): 146 | """Remove all or self.queueDepth events from the queue 147 | """ 148 | if self.events: 149 | if self.queueDepth and (len(self.events) > self.queueDepth): 150 | # Remove maximum of self.queueDepth items from queue 151 | events = self.events[:self.queueDepth] 152 | self.events = self.events[self.queueDepth:] 153 | else: 154 | events = self.events 155 | self.events = [] 156 | 157 | if self.allow_nan: 158 | self.factory.proto.sendEvents(events) 159 | else: 160 | self.factory.proto.sendEvents([e for e in events if e.metric is not None]) 161 | 162 | def eventsReceived(self, events): 163 | """Receives a list of events and transmits them to Riemann 164 | 165 | Arguments: 166 | events -- list of `tensor.objects.Event` 167 | """ 168 | # Make sure queue isn't oversized 169 | if (self.maxsize < 1) or (len(self.events) < self.maxsize): 170 | self.events.extend(events) 171 | 172 | class RiemannUDP(Output): 173 | """Riemann UDP output (spray-and-pray mode) 174 | 175 | **Configuration arguments:** 176 | 177 | :param server: Riemann server IP address (default: 127.0.0.1) 178 | :type server: str. 179 | :param port: Riemann server port (default: 5555) 180 | :type port: int. 181 | """ 182 | 183 | def __init__(self, *a): 184 | Output.__init__(self, *a) 185 | self.protocol = None 186 | 187 | def createClient(self): 188 | """Create a UDP connection to Riemann""" 189 | server = self.config.get('server', '127.0.0.1') 190 | port = self.config.get('port', 5555) 191 | 192 | def connect(ip): 193 | self.protocol = riemann.RiemannUDP(ip, port) 194 | self.endpoint = reactor.listenUDP(0, self.protocol) 195 | 196 | d = reactor.resolve(server) 197 | d.addCallback(connect) 198 | return d 199 | 200 | def eventsReceived(self, events): 201 | """Receives a list of events and transmits them to Riemann 202 | 203 | Arguments: 204 | events -- list of `tensor.objects.Event` 205 | """ 206 | if self.protocol: 207 | self.protocol.sendEvents(events) 208 | 209 | -------------------------------------------------------------------------------- /tensor/protocol/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/protocol/__init__.py -------------------------------------------------------------------------------- /tensor/protocol/elasticsearch.py: -------------------------------------------------------------------------------- 1 | import time 2 | import uuid 3 | import json 4 | from base64 import b64encode 5 | 6 | from tensor import utils 7 | 8 | class ElasticSearch(object): 9 | """Twisted ElasticSearch API 10 | """ 11 | def __init__(self, url='http://localhost:9200', user=None, password=None, 12 | index='tensor-%Y.%m.%d'): 13 | self.url = url.rstrip('/') 14 | self.index = index 15 | self.user = user 16 | self.password = password 17 | 18 | def _get_index(self): 19 | return time.strftime(self.index) 20 | 21 | def _request(self, path, data=None, method='GET'):
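# The method body below builds an optional HTTP Basic Authorization header
# from user/password and delegates the call to tensor.utils.HTTPRequest;
# data, when given, is the serialized JSON (or newline-delimited bulk) body.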
22 | headers = {} 23 | if self.user: 24 | authorization = b64encode(('%s:%s' % (self.user, self.password)).encode()).decode() 25 | headers['Authorization'] = ['Basic ' + authorization] 26 | 27 | return utils.HTTPRequest().getJson( 28 | self.url + path, method, headers=headers, data=data.encode() if data else None) 29 | 30 | def _gen_id(self): 31 | return b64encode(uuid.uuid4().bytes).decode().rstrip('=') 32 | 33 | def stats(self): 34 | return self._request('/_cluster/stats') 35 | 36 | def node_stats(self): 37 | return self._request('/_nodes/stats') 38 | 39 | def insertIndex(self, type, data): 40 | return self._request('/%s/%s/%s' % ( 41 | self._get_index(), type, self._gen_id() 42 | ), json.dumps(data), 'PUT') 43 | 44 | def bulkIndex(self, data): 45 | serdata = "" 46 | 47 | for row in data: 48 | if '_id' in row: 49 | id = row['_id'] 50 | del row['_id'] 51 | else: 52 | id = self._gen_id() 53 | 54 | d = { 55 | "index": { 56 | "_index": self._get_index(), 57 | "_type": row.get('type', 'event'), 58 | "_id": id, 59 | } 60 | } 61 | 62 | serdata += json.dumps(d) + '\n' 63 | serdata += json.dumps(row) + '\n' 64 | 65 | return self._request('/_bulk', serdata, 'PUT') 66 | -------------------------------------------------------------------------------- /tensor/protocol/icmp.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import time 3 | import fcntl 4 | import random 5 | import struct 6 | 7 | from zope.interface import implementer 8 | 9 | from twisted.internet import task, defer, reactor, udp 10 | from twisted.internet.protocol import DatagramProtocol 11 | from twisted.internet.interfaces import ISystemHandle 12 | 13 | # OMG SHUT UP 14 | class STFU(object): 15 | msg = lambda x, y: None 16 | udp.log = STFU() 17 | 18 | class IP(object): 19 | """IP header decoder 20 | """ 21 | def __init__(self, packet): 22 | self.readPacket(packet) 23 | 24 | def readPacket(self, packet): 25 | vl = struct.unpack('!b', packet[0])[0] 26 | l = (vl & 0xf) * 4 27 | 28 | head = packet[:l] 29 | self.offset = struct.unpack('!H', packet[6:8]) 30 | 31 | self.payload = packet[l:] 32 | 33 | class EchoPacket(object): 34 | """ICMP Echo packet encoder and decoder 35 | """ 36 | def __init__(self, seq=0, id=None, data=None, packet=None): 37 | if packet: 38 | self.decodePacket(packet) 39 | self.packet = packet 40 | else: 41 | self.id = id 42 | self.seq = seq 43 | self.data = data 44 | self.encodePacket() 45 | 46 | def calculateChecksum(self, buffer): 47 | nleft = len(buffer) 48 | sum = 0 49 | pos = 0 50 | while nleft > 1: 51 | sum = ord(buffer[pos]) * 256 + (ord(buffer[pos + 1]) + sum) 52 | pos = pos + 2 53 | nleft = nleft - 2 54 | if nleft == 1: 55 | sum = sum + ord(buffer[pos]) * 256 56 | 57 | sum = (sum >> 16) + (sum & 0xFFFF) 58 | sum += (sum >> 16) 59 | sum = (~sum & 0xFFFF) 60 | 61 | return sum 62 | 63 | def encodePacket(self): 64 | head = struct.pack('!bb', 8, 0) 65 | 66 | echo = struct.pack('!HH', self.seq, self.id) 67 | 68 | chk = self.calculateChecksum( 69 | head + '\x00\x00' + echo + self.data) 70 | 71 | chk = struct.pack('!H', chk) 72 | 73 | self.packet = head + chk + echo + self.data 74 | 75 | def decodePacket(self, packet): 76 | self.type, self.code, self.chk, self.seq, self.id = struct.unpack( 77 | '!bbHHH', packet[:8]) 78 | 79 | self.data = packet[8:] 80 | 81 | rc = '%s\x00\x00%s' % (packet[:2], packet[4:]) 82 | mychk = self.calculateChecksum(rc) 83 | 84 | if mychk == self.chk: 85 | self.valid = True 86 | else: 87 | self.valid = False 88 | 89 | def __repr__(self): 90 | return "<EchoPacket type=%s code=%s chk=%s seq=%s len=%s valid=%s>" % ( 91 |
self.type, self.code, self.chk, self.seq, len(self.data), self.valid) 92 | 93 | class ICMPPing(DatagramProtocol): 94 | """ICMP Ping implementation 95 | """ 96 | noisy=False 97 | def __init__(self, d, dst, count, inter=0.2, maxwait=1000, size=64): 98 | self.deferred = d 99 | self.dst = dst 100 | self.size = size - 36 101 | self.count = count 102 | self.seq = 0 103 | self.start = 0 104 | self.id_base = random.randint(0, 40000) 105 | self.maxwait = maxwait 106 | self.inter = inter 107 | 108 | self.t = task.LoopingCall(self.ping) 109 | self.recv = [] 110 | 111 | def datagramReceived(self, datagram, address): 112 | now = int(time.time()*1000000) 113 | host, port = address 114 | 115 | packet = IP(datagram) 116 | 117 | icmp = EchoPacket(packet=packet.payload) 118 | 119 | if icmp.valid and icmp.code==0 and icmp.type==0: 120 | # Check ID is from this pinger 121 | if (icmp.id-icmp.seq) == self.id_base: 122 | ts = icmp.data[:8] 123 | data = icmp.data[8:] 124 | delta = (now - struct.unpack('!Q', ts)[0])/1000.0 125 | 126 | self.maxwait = (self.maxwait + delta)/2.0 127 | 128 | self.recv.append((icmp.seq, delta)) 129 | 130 | def createData(self, n): 131 | s = "" 132 | c = 33 133 | for i in range(n): 134 | s += chr(c) 135 | if c < 126: 136 | c += 1 137 | else: 138 | c = 33 139 | return s 140 | 141 | def sendEchoRequest(self): 142 | # Pack the packet with an ascii table 143 | md = self.createData(self.size) 144 | 145 | us = int(time.time()*1000000) 146 | data = '%s%s' % (struct.pack('!Q', us), md) 147 | 148 | pkt = EchoPacket(seq=self.seq, id=self.id_base+self.seq, data=data) 149 | 150 | self.transport.write(pkt.packet) 151 | self.seq += 1 152 | 153 | def ping(self): 154 | if self.seq < self.count: 155 | self.sendEchoRequest() 156 | else: 157 | self.t.stop() 158 | 159 | tdelay = (self.maxwait * self.count)/1000.0 160 | elapsed = time.time() - self.start 161 | remaining = tdelay - elapsed 162 | if remaining < 0.05: 163 | remaining = 0.05 164 | 165 | reactor.callLater(remaining, self.endPing) 166 | 167 | def endPing(self): 168 | r = len(self.recv) 169 | loss = (self.count - r) / float(self.count) 170 | loss = int(100*loss) 171 | if r: 172 | avgLatency = sum([i[1] for i in self.recv]) / float(r) 173 | else: 174 | avgLatency = None 175 | 176 | self.deferred.callback((loss, avgLatency)) 177 | 178 | def startPing(self): 179 | self.transport.connect(self.dst, random.randint(33434, 33534)) 180 | self.start = time.time() 181 | self.t.start(self.inter) 182 | 183 | def startProtocol(self): 184 | self.startPing() 185 | 186 | @implementer(ISystemHandle) 187 | class ICMPPort(udp.Port): 188 | """Raw socket listener for ICMP 189 | """ 190 | maxThroughput = 256 * 1024 191 | 192 | def createInternetSocket(self): 193 | s = socket.socket( 194 | socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) 195 | 196 | s.setblocking(0) 197 | 198 | fd = s.fileno() 199 | 200 | # Set close-on-exec 201 | 202 | flags = fcntl.fcntl(fd, fcntl.F_GETFD) 203 | flags = flags | fcntl.FD_CLOEXEC 204 | fcntl.fcntl(fd, fcntl.F_SETFD, flags) 205 | 206 | return s 207 | 208 | def ping(dst, count, inter=0.2, maxwait=1000, size=64): 209 | """Sends ICMP echo requests to destination `dst` `count` times. 210 | Returns a deferred which fires when responses are finished. 
211 | """ 212 | def _then(result, p): 213 | p.stopListening() 214 | return result 215 | 216 | d = defer.Deferred() 217 | p = ICMPPort(0, ICMPPing(d, dst, count, inter, maxwait, size), "", 8192, reactor) 218 | p.startListening() 219 | 220 | return d.addCallback(_then, p) 221 | -------------------------------------------------------------------------------- /tensor/protocol/riemann.py: -------------------------------------------------------------------------------- 1 | from tensor.ihateprotobuf import proto_pb2 2 | from tensor.interfaces import ITensorProtocol 3 | 4 | from zope.interface import implementer 5 | 6 | from twisted.protocols.basic import Int32StringReceiver 7 | from twisted.internet.protocol import DatagramProtocol 8 | from twisted.internet import protocol 9 | from twisted.python import log 10 | 11 | class RiemannProtobufMixin(object): 12 | def encodeEvent(self, event): 13 | """Adapts an Event object to a Riemann protobuf event Event""" 14 | pbevent = proto_pb2.Event( 15 | time=int(event.time), 16 | state=event.state, 17 | service=event.service, 18 | host=event.hostname, 19 | description=event.description, 20 | tags=event.tags, 21 | ttl=event.ttl, 22 | ) 23 | 24 | if event.metric is not None: 25 | # I have no idea what I'm doing 26 | if isinstance(event.metric, int): 27 | pbevent.metric_sint64 = event.metric 28 | pbevent.metric_f = float(event.metric) 29 | else: 30 | pbevent.metric_d = float(event.metric) 31 | pbevent.metric_f = float(event.metric) 32 | if event.attributes is not None: 33 | for key, value in event.attributes.items(): 34 | attribute = pbevent.attributes.add() 35 | attribute.key, attribute.value = key, value 36 | 37 | return pbevent 38 | 39 | def encodeMessage(self, events): 40 | """Encode a list of Tensor events with protobuf""" 41 | 42 | message = proto_pb2.Msg( 43 | events=[self.encodeEvent(e) for e in events if e._type=='riemann'] 44 | ) 45 | 46 | return message.SerializeToString() 47 | 48 | def decodeMessage(self, data): 49 | """Decode a protobuf message into a list of Tensor events""" 50 | message = proto_pb2.Msg() 51 | message.ParseFromString(data) 52 | 53 | return message 54 | 55 | def sendEvents(self, events): 56 | """Send a Tensor Event to Riemann""" 57 | self.pressure += 1 58 | self.sendString(self.encodeMessage(events)) 59 | 60 | @implementer(ITensorProtocol) 61 | class RiemannProtocol(Int32StringReceiver, RiemannProtobufMixin): 62 | """Riemann protobuf protocol 63 | """ 64 | 65 | def __init__(self): 66 | self.pressure = 0 67 | 68 | def stringReceived(self, string): 69 | self.pressure -= 1 70 | 71 | class RiemannClientFactory(protocol.ReconnectingClientFactory): 72 | """A reconnecting client factory which creates RiemannProtocol instances 73 | """ 74 | maxDelay = 30 75 | initialDelay = 5 76 | factor = 2 77 | jitter = 0 78 | 79 | def __init__(self, hosts, failover=False): 80 | self.failover = failover 81 | 82 | if self.failover: 83 | if isinstance(hosts, list): 84 | self.hosts = hosts 85 | else: 86 | self.hosts = [hosts] 87 | 88 | self.host_index = 0 89 | 90 | def buildProtocol(self, addr): 91 | self.resetDelay() 92 | self.proto = RiemannProtocol() 93 | return self.proto 94 | 95 | def _do_failover(self, connector): 96 | if self.failover: 97 | if self.host_index >= (len(self.hosts)-1): 98 | self.host_index = 0 99 | else: 100 | self.host_index += 1 101 | 102 | connector.host = self.hosts[self.host_index] 103 | 104 | def clientConnectionLost(self, connector, reason): 105 | log.msg('Lost connection. 
Reason:' + str(reason)) 106 | self.proto = None 107 | 108 | self._do_failover(connector) 109 | 110 | log.msg('Reconnecting to Riemann on %s:%s' % (connector.host, connector.port)) 111 | protocol.ReconnectingClientFactory.clientConnectionLost( 112 | self, connector, reason) 113 | 114 | def clientConnectionFailed(self, connector, reason): 115 | log.msg('Connection failed. Reason:' + str(reason)) 116 | self.proto = None 117 | 118 | self._do_failover(connector) 119 | 120 | log.msg('Reconnecting to Riemann on %s:%s' % (connector.host, connector.port)) 121 | protocol.ReconnectingClientFactory.clientConnectionFailed( 122 | self, connector, reason) 123 | 124 | @implementer(ITensorProtocol) 125 | class RiemannUDP(DatagramProtocol, RiemannProtobufMixin): 126 | """UDP datagram protocol for Riemann 127 | """ 128 | 129 | def __init__(self, host, port): 130 | self.host = host 131 | self.port = port 132 | self.pressure = 0 133 | 134 | def sendString(self, string): 135 | self.transport.write(string, (self.host, self.port)) 136 | self.pressure -= 1 137 | -------------------------------------------------------------------------------- /tensor/protocol/sflow/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/protocol/sflow/__init__.py -------------------------------------------------------------------------------- /tensor/protocol/sflow/protocol/__init__.py: -------------------------------------------------------------------------------- 1 | from tensor.protocol.sflow.protocol import protocol 2 | 3 | Sflow = protocol.Sflow 4 | FlowSample = protocol.FlowSample 5 | CounterSample = protocol.CounterSample 6 | -------------------------------------------------------------------------------- /tensor/protocol/sflow/protocol/protocol.py: -------------------------------------------------------------------------------- 1 | import xdrlib 2 | 3 | from tensor.protocol.sflow.protocol import flows, counters 4 | 5 | 6 | class Sflow(object): 7 | def __init__(self, payload, host): 8 | self.host = host 9 | assert isinstance(payload, bytes) 10 | u = xdrlib.Unpacker(payload) 11 | 12 | self.version = u.unpack_uint() 13 | 14 | self.samplers = { 15 | 1: FlowSample, 16 | 2: CounterSample 17 | } 18 | 19 | if self.version == 5: 20 | self.sflow_v5(u) 21 | 22 | def sflow_v5(self, u): 23 | self.addrtype = u.unpack_uint() 24 | 25 | if self.addrtype == 1: 26 | self.address = u.unpack_fstring(4) 27 | 28 | if self.addrtype == 2: 29 | self.address = u.unpack_fstring(16) 30 | 31 | self.sub_agent_id = u.unpack_uint() 32 | self.sequence_number = u.unpack_uint() 33 | self.uptime = u.unpack_uint() 34 | 35 | self.sample_count = u.unpack_uint() 36 | 37 | self.decode_samples(u) 38 | 39 | # Sort samples by sequence number 40 | self.samples.sort(key=lambda x: x.sequence) 41 | 42 | def decode_samples(self, u): 43 | self.samples = [] 44 | 45 | for i in range(self.sample_count): 46 | sample_type = u.unpack_uint() 47 | 48 | self.samples.append(self.samplers[sample_type](u)) 49 | 50 | class FlowSample(object): 51 | def __init__(self, u): 52 | self.size = u.unpack_uint() 53 | 54 | self.sequence = u.unpack_uint() 55 | self.source_id = u.unpack_uint() 56 | self.sample_rate = u.unpack_uint() 57 | self.sample_pool = u.unpack_uint() 58 | self.dropped_packets = u.unpack_uint() 59 | 60 | self.if_inIndex = u.unpack_uint() 61 | self.if_outIndex = u.unpack_uint() 62 | 63 | self.record_count = u.unpack_uint() 64 | 65 | self.flows = 
{} 66 | 67 | for i in range(self.record_count): 68 | flow_format = u.unpack_uint() 69 | flow_head = u.unpack_opaque() 70 | flow_u = xdrlib.Unpacker(flow_head) 71 | 72 | d = flows.getDecoder(flow_format) 73 | if d: 74 | self.flows[flow_format] = d(flow_u) 75 | 76 | class CounterSample(object): 77 | def __init__(self, u): 78 | 79 | self.size = u.unpack_uint() 80 | self.sequence = u.unpack_uint() 81 | 82 | self.source_id = u.unpack_uint() 83 | 84 | self.record_count = u.unpack_uint() 85 | 86 | self.counters = {} 87 | 88 | for i in range(self.record_count): 89 | counter_format = u.unpack_uint() 90 | counter = u.unpack_opaque() 91 | 92 | d = counters.getDecoder(counter_format) 93 | 94 | if d: 95 | self.counters[counter_format] = d(xdrlib.Unpacker(counter)) 96 | else: 97 | print("Unknown format:", counter_format) 98 | -------------------------------------------------------------------------------- /tensor/protocol/sflow/protocol/utils.py: -------------------------------------------------------------------------------- 1 | import struct, socket 2 | 3 | 4 | def unpack_address(u): 5 | addrtype = u.unpack_uint() 6 | address = None 7 | if addrtype == 1: 8 | address = u.unpack_fopaque(4) 9 | 10 | if addrtype == 2: 11 | address = u.unpack_fopaque(16) 12 | 13 | return address 14 | 15 | class IPv4Address(object): 16 | def __init__(self, addr_int): 17 | self.addr_int = addr_int 18 | self.na = struct.pack(b'!L', addr_int) 19 | 20 | def __str__(self): 21 | return socket.inet_ntoa(self.na) 22 | 23 | def asString(self): 24 | return str(self) 25 | 26 | def __repr__(self): 27 | return "<IPv4Address %s>" % str(self) 28 | -------------------------------------------------------------------------------- /tensor/protocol/sflow/server.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from twisted.internet.protocol import DatagramProtocol, ClientCreator 4 | from twisted.internet import reactor, task, defer 5 | from twisted.application import service, internet 6 | 7 | from tensor.protocol.sflow import protocol 8 | from tensor.protocol.sflow.protocol import flows, counters 9 | 10 | class DatagramReceiver(DatagramProtocol): 11 | """DatagramReceiver for sFlow packets 12 | """ 13 | def datagramReceived(self, data, addr): 14 | host, port = addr 15 | sflow = protocol.Sflow(data, host) 16 | 17 | for sample in sflow.samples: 18 | if isinstance(sample, protocol.FlowSample): 19 | self.process_flow_sample(sflow, sample) 20 | 21 | if isinstance(sample, protocol.CounterSample): 22 | self.process_counter_sample(sflow, sample) 23 | 24 | def process_flow_sample(self, sflow, flow): 25 | for k,v in flow.flows.items(): 26 | if isinstance(v, flows.HeaderSample) and v.frame: 27 | reactor.callLater(0, self.receive_flow, flow, v.frame, sflow.host) 28 | 29 | def process_counter_sample(self, sflow, counter): 30 | for k,v in counter.counters.items(): 31 | if isinstance(v, counters.InterfaceCounters): 32 | reactor.callLater(0, self.receive_counter, v, sflow.host) 33 | 34 | elif isinstance(v, counters.HostCounters): 35 | reactor.callLater(0, self.receive_host_counter, v) 36 | 37 | def receive_flow(self, flow, frame, host): 38 | pass 39 | 40 | def receive_counter(self, counter, host): 41 | pass 42 | 43 | def receive_host_counter(self, counter): 44 | pass 45 | -------------------------------------------------------------------------------- /tensor/protocol/ssh.py: -------------------------------------------------------------------------------- 1 | from twisted.conch.ssh.keys
import EncryptedKeyError, Key 2 | from twisted.conch.client.knownhosts import KnownHostsFile 3 | from twisted.conch.endpoints import SSHCommandClientEndpoint 4 | 5 | from twisted.internet import defer, protocol, endpoints, reactor, error 6 | 7 | from twisted.python.compat import nativeString 8 | from twisted.python.filepath import FilePath 9 | from twisted.python import log 10 | import os 11 | # Monkey patch noisy logs 12 | class FakeLog(object): 13 | def msg(self, *a): 14 | pass 15 | 16 | def callWithLogger(self, *a, **kw): 17 | return log.callWithLogger(*a, **kw) 18 | from twisted.conch.ssh import connection, channel 19 | connection.log = FakeLog() 20 | channel.log = FakeLog() 21 | 22 | try: 23 | from io import StringIO 24 | except ImportError: 25 | from StringIO import StringIO 26 | 27 | 28 | class SSHCommandProtocol(protocol.Protocol): 29 | def connectionMade(self): 30 | self.finished = defer.Deferred() 31 | self.stdOut = StringIO() 32 | self.stdErr = StringIO() 33 | 34 | def dataReceived(self, data): 35 | self.stdOut.write(data.decode()) 36 | 37 | def extReceived(self, code, data): 38 | self.stdErr.write(data.decode()) 39 | 40 | def connectionLost(self, reason): 41 | self.stdOut.seek(0) 42 | self.stdErr.seek(0) 43 | if reason.type is error.ConnectionDone: 44 | # Success 45 | code = 0 46 | else: 47 | code = reason.value.exitCode 48 | self.factory.done.callback((self.stdOut, self.stdErr, code)) 49 | 50 | class SSHClient(object): 51 | def __init__(self, hostname, username, port, password=None, 52 | knownhosts=None): 53 | 54 | self.hostname = hostname.encode() 55 | self.username = username.encode() 56 | self.port = int(port) 57 | self.password = None 58 | if password: 59 | self.password = password.encode() 60 | self.connection = None 61 | 62 | if not knownhosts: 63 | knownhosts = '/var/lib/tensor/known_hosts' 64 | 65 | self.knownHosts = KnownHostsFile.fromPath(FilePath(knownhosts.encode())) 66 | self.knownHosts.verifyHostKey = self.verifyHostKey 67 | 68 | self.keys = [] 69 | 70 | def verifyHostKey(self, ui, hostname, ip, key): 71 | hhk = defer.maybeDeferred(self.knownHosts.hasHostKey, hostname, key) 72 | def gotHasKey(result): 73 | if result: 74 | if not self.knownHosts.hasHostKey(ip, key): 75 | log.msg("Added new %s host key for IP address '%s'." % 76 | (key.type(), nativeString(ip))) 77 | self.knownHosts.addHostKey(ip, key) 78 | self.knownHosts.save() 79 | return result 80 | else: 81 | log.msg("Added %s host key for IP address '%s'."
% 82 | (key.type(), nativeString(ip))) 83 | self.knownHosts.addHostKey(hostname, key) 84 | self.knownHosts.addHostKey(ip, key) 85 | self.knownHosts.save() 86 | return True 87 | return hhk.addCallback(gotHasKey) 88 | 89 | def addKeyFile(self, kfile, password=None): 90 | if not os.path.exists(kfile): 91 | raise Exception("Key file not found: %s" % kfile) 92 | 93 | try: 94 | self.keys.append(Key.fromFile(kfile)) 95 | except EncryptedKeyError: 96 | self.keys.append(Key.fromFile(kfile, passphrase=password)) 97 | 98 | def addKeyString(self, kstring, password=None): 99 | try: 100 | self.keys.append(Key.fromString(kstring)) 101 | except EncryptedKeyError: 102 | self.keys.append(Key.fromString(kstring, passphrase=password)) 103 | 104 | def _get_endpoint(self): 105 | """ Creates a generic endpoint connection that doesn't finish 106 | """ 107 | return SSHCommandClientEndpoint.newConnection( 108 | reactor, b'/bin/cat', self.username, self.hostname, 109 | port=self.port, keys=self.keys, password=self.password, 110 | knownHosts = self.knownHosts) 111 | 112 | def connect(self): 113 | log.msg("Opening SSH connection to %s@%s:%s" % ( 114 | self.username, self.hostname, self.port)) 115 | 116 | self.endpoint = self._get_endpoint() 117 | factory = protocol.Factory() 118 | factory.protocol = protocol.Protocol 119 | 120 | def connected(protocol): 121 | log.msg("Established SSH connection to %s" % ( 122 | self.hostname,)) 123 | self.connection = protocol.transport.conn 124 | 125 | d = self.endpoint.connect(factory) 126 | d.addCallback(connected) 127 | 128 | return d 129 | 130 | def fork(self, command, args=(), env={}, path=None, timeout=3600): 131 | if not self.connection: 132 | log.msg("Connection to %s not yet ready" % ( 133 | self.hostname,)) 134 | 135 | return defer.maybeDeferred(lambda: (None, "SSH not ready", 255)) 136 | 137 | if env: 138 | env = ' '.join('%s=%s' % (k, v) for k, v in env.items()) + ' ' 139 | else: 140 | env = '' 141 | 142 | if args: 143 | args = ' ' + ' '.join(args) 144 | else: 145 | args = '' 146 | 147 | e = SSHCommandClientEndpoint.existingConnection(self.connection, 148 | (env + command + args).encode()) 149 | 150 | factory = protocol.Factory() 151 | factory.protocol = SSHCommandProtocol 152 | factory.done = defer.Deferred() 153 | 154 | def finished(result): 155 | stdout, stderr, code = result 156 | return (stdout.read(), stderr.read(), code) 157 | 158 | factory.done.addCallback(finished) 159 | 160 | def connected(connection): 161 | # Be nice if Conch exposed this better... 162 | connection.transport.extReceived = connection.extReceived 163 | return factory.done 164 | 165 | return e.connect(factory).addCallback(connected) 166 | -------------------------------------------------------------------------------- /tensor/sources/__init__.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from zope.interface import implementer 4 | 5 | from twisted.internet import defer 6 | 7 | from tensor.interfaces import ITensorSource 8 | from tensor.objects import Source 9 | 10 | 11 | @implementer(ITensorSource) 12 | class Tensor(Source): 13 | """Reports Tensor information about its event 14 | throughput and running sources.
15 | 16 | **Metrics:** 17 | 18 | :(service name).event rate: Events added to the queue per second 19 | :(service name).sources: Number of sources running 20 | """ 21 | 22 | def __init__(self, *a): 23 | Source.__init__(self, *a) 24 | 25 | self.events = self.tensor.eventCounter 26 | self.rtime = time.time() 27 | 28 | def get(self): 29 | events = [] 30 | 31 | sources = len(self.tensor.sources) 32 | 33 | t_delta = time.time() - self.rtime 34 | 35 | erate = (self.tensor.eventCounter - self.events)/t_delta 36 | 37 | self.events = self.tensor.eventCounter 38 | 39 | self.rtime = time.time() 40 | 41 | return [ 42 | self.createEvent('ok', 'Event rate', erate, prefix="event rate"), 43 | self.createEvent('ok', 'Sources', sources, prefix="sources"), 44 | ] 45 | -------------------------------------------------------------------------------- /tensor/sources/database/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/sources/database/__init__.py -------------------------------------------------------------------------------- /tensor/sources/database/elasticsearch.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: elasticsearch 3 | :platform: Unix 4 | :synopsis: A source module for elasticsearch stats 5 | 6 | .. moduleauthor:: Colin Alston 7 | """ 8 | 9 | from twisted.internet import defer 10 | from twisted.python import log 11 | 12 | from zope.interface import implementer 13 | 14 | from tensor.interfaces import ITensorSource 15 | from tensor.objects import Source 16 | 17 | from tensor.aggregators import Counter64 18 | from tensor.protocol import elasticsearch 19 | 20 | 21 | @implementer(ITensorSource) 22 | class ElasticSearch(Source): 23 | """Reads elasticsearch metrics 24 | 25 | **Configuration arguments:** 26 | 27 | :param url: Elasticsearch base URL (default: http://localhost:9200) 28 | :type url: str. 29 | :param user: Basic auth username 30 | :type user: str. 31 | :param password: Password 32 | :type password: str.
33 | 34 | **Metrics:** 35 | 36 | :(service name).cluster.status: Cluster status (Red=0, Yellow=1, Green=2) 37 | :(service name).cluster.nodes: Cluster node count 38 | :(service name).indices: Total indices in cluster 39 | :(service name).shards.total: Total number of shards 40 | :(service name).shards.primary: Number of primary shards 41 | :(service name).documents.total: Total documents 42 | :(service name).documents.rate: Documents per second 43 | :(service name).documents.size: Size of document store in bytes 44 | """ 45 | 46 | def __init__(self, *a, **kw): 47 | Source.__init__(self, *a, **kw) 48 | self.url = self.config.get('url', 'http://localhost:9200').rstrip('\n') 49 | user = self.config.get('user') 50 | passwd = self.config.get('password') 51 | 52 | self.client = elasticsearch.ElasticSearch(self.url, user, passwd) 53 | 54 | @defer.inlineCallbacks 55 | def get(self): 56 | stats = yield self.client.stats() 57 | node_stats = yield self.client.node_stats() 58 | 59 | status = {'green': 2, 'yellow': 1, 'red': 0}[stats['status']] 60 | 61 | nodes = stats['nodes']['count']['total'] 62 | index_count = stats['indices']['count'] 63 | shards = stats['indices']['shards']['total'] 64 | shards_primary = stats['indices']['shards']['primaries'] 65 | 66 | docs = stats['indices']['docs']['count'] 67 | store = stats['indices']['store']['size_in_bytes'] 68 | 69 | events = [ 70 | self.createEvent('ok', 'Status', status, prefix='cluster.status'), 71 | self.createEvent('ok', 'Nodes', nodes, prefix='cluster.nodes'), 72 | self.createEvent('ok', 'Indices', index_count, prefix='indices'), 73 | self.createEvent('ok', 'Shards', shards, prefix='shards.total'), 74 | self.createEvent('ok', 'Primary shards', shards_primary, prefix='shards.primary'), 75 | self.createEvent('ok', 'Documents', docs, prefix='documents.total'), 76 | self.createEvent('ok', 'Document rate', docs, prefix='documents.rate', aggregation=Counter64), 77 | self.createEvent('ok', 'Store size', store, prefix='documents.size'), 78 | ] 79 | 80 | nodes = {} 81 | 82 | for k, v in node_stats['nodes'].items(): 83 | node_name = v['host'] 84 | 85 | if v.get('attributes', {}).get('client', 'false') == 'true': 86 | continue 87 | 88 | if node_name not in nodes: 89 | nodes[node_name] = { 90 | 'search': v['indices']['search']['query_total'], 91 | 'delete': v['indices']['indexing']['delete_total'], 92 | 'index': v['indices']['indexing']['index_total'], 93 | 'get': v['indices']['get']['total'] 94 | } 95 | 96 | for node, ms in nodes.items(): 97 | for mname, m in ms.items(): 98 | events.append(self.createEvent('ok', mname, m, 99 | prefix='nodes.%s.%s' % (node, mname), 100 | aggregation=Counter64)) 101 | 102 | defer.returnValue(events) 103 | 104 | -------------------------------------------------------------------------------- /tensor/sources/database/memcache.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: memcache 3 | :platform: Unix 4 | :synopsis: A source module for memcache stats 5 | 6 | ..
moduleauthor:: Colin Alston 7 | """ 8 | 9 | import time 10 | 11 | 12 | from twisted.internet import defer 13 | from twisted.internet import reactor, protocol 14 | from twisted.protocols.memcache import MemCacheProtocol 15 | from twisted.python import log 16 | 17 | from zope.interface import implementer 18 | 19 | from tensor.interfaces import ITensorSource 20 | from tensor.objects import Source 21 | 22 | from tensor.aggregators import Counter64 23 | 24 | 25 | @implementer(ITensorSource) 26 | class Memcache(Source): 27 | """Reads memcache metrics 28 | 29 | **Configuration arguments:** 30 | 31 | :param host: Database host (default localhost) 32 | :type host: str. 33 | :param port: Database port (default 11211) 34 | :type port: int. 35 | 36 | **Metrics:** 37 | 38 | :(service name).(metrics): Metrics from memcached 39 | """ 40 | 41 | def __init__(self, *a, **kw): 42 | Source.__init__(self, *a, **kw) 43 | self.host = self.config.get('host', '127.0.0.1') 44 | self.port = self.config.get('port', 11211) 45 | 46 | @defer.inlineCallbacks 47 | def get(self): 48 | events = [] 49 | try: 50 | memcache = yield protocol.ClientCreator(reactor, MemCacheProtocol 51 | ).connectTCP(self.host, self.port) 52 | events.append(self.createEvent('ok', 'Connection', 1, 53 | prefix='state')) 54 | except Exception: 55 | memcache = None 56 | events.append(self.createEvent('critical', 'Connection refused', 0, 57 | prefix='state')) 58 | 59 | if memcache: 60 | stats = yield memcache.stats() 61 | 62 | yield memcache.transport.loseConnection() 63 | 64 | counters = [ 65 | 'reclaimed', 'evictions', 'total_items', 66 | 'touch_hits', 'touch_misses', 67 | 'delete_misses', 'delete_hits', 68 | 'incr_hits', 'incr_misses', 69 | 'cas_hits', 'cas_misses', 'cas_badval', 70 | 'get_misses', 'get_hits', 71 | 'decr_misses', 'decr_hits', 72 | 'cmd_set', 'cmd_flush', 'cmd_touch', 'cmd_get', 73 | 'bytes_written', 'bytes_read', 74 | ] 75 | 76 | vals = ['curr_connections', 'curr_items', 'hash_bytes', 'bytes'] 77 | 78 | for key in counters: 79 | d = key.capitalize().replace('_', ' ') 80 | s = key.replace('_', '.') 81 | events.append(self.createEvent('ok', 82 | d, int(stats[key]), prefix=s, aggregation=Counter64)) 83 | 84 | for key in vals: 85 | d = key.capitalize().replace('_', ' ') 86 | s = key.replace('_', '.') 87 | 88 | events.append(self.createEvent('ok', d, int(stats[key]), 89 | prefix=s)) 90 | 91 | defer.returnValue(events) 92 | -------------------------------------------------------------------------------- /tensor/sources/database/postgresql.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: postgresql 3 | :platform: Unix 4 | :synopsis: A source module for postgres stats 5 | 6 | .. moduleauthor:: Colin Alston 7 | """ 8 | 9 | import time 10 | 11 | 12 | from twisted.internet import defer 13 | from twisted.enterprise import adbapi 14 | from twisted.python import log 15 | 16 | from zope.interface import implementer 17 | 18 | from tensor.interfaces import ITensorSource 19 | from tensor.objects import Source 20 | 21 | from tensor.aggregators import Counter64 22 | 23 | 24 | @implementer(ITensorSource) 25 | class PostgreSQL(Source): 26 | """Reads PostgreSQL metrics 27 | 28 | **Configuration arguments:** 29 | 30 | :param host: Database host 31 | :type host: str. 32 | :param port: Database port 33 | :type port: int. 34 | :param user: Username 35 | :type user: str. 36 | :param password: Password 37 | :type password: str.
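    For example, a hypothetical ``tensor.yml`` source entry (the
    credentials are illustrative)::

        service: postgresql
        source: tensor.sources.database.postgresql.PostgreSQL
        interval: 60.0
        host: 127.0.0.1
        port: 5432
        user: postgres
        password: secret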
38 | 39 | **Metrics:** 40 | 41 | :(service name).(database name).(metrics): Metrics from pg_stat_database 42 | """ 43 | 44 | def __init__(self, *a, **kw): 45 | Source.__init__(self, *a, **kw) 46 | self.user = self.config.get('user', 'postgres') 47 | self.password = self.config.get('password', '') 48 | self.port = self.config.get('port', 5432) 49 | self.host = self.config.get('host', '127.0.0.1') 50 | 51 | @defer.inlineCallbacks 52 | def get(self): 53 | try: 54 | p = adbapi.ConnectionPool('psycopg2', 55 | database='postgres', 56 | host=self.host, 57 | port=self.port, 58 | user=self.user, 59 | password=self.password) 60 | 61 | cols = ( 62 | ('xact_commit', 'commits'), 63 | ('xact_rollback', 'rollbacks'), 64 | ('blks_read', 'disk.read'), 65 | ('blks_hit', 'disk.cache'), 66 | ('tup_returned', 'returned'), 67 | ('tup_fetched', 'selects'), 68 | ('tup_inserted', 'inserts'), 69 | ('tup_updated', 'updates'), 70 | ('tup_deleted', 'deletes'), 71 | ('deadlocks', 'deadlocks') 72 | ) 73 | 74 | keys, names = zip(*cols) 75 | 76 | q = yield p.runQuery( 77 | 'SELECT datname,numbackends,%s FROM pg_stat_database' % ( 78 | ','.join(keys)) 79 | ) 80 | 81 | for row in q: 82 | db = row[0] 83 | threads = row[1] 84 | if db not in ('template0', 'template1'): 85 | self.queueBack(self.createEvent('ok', 86 | 'threads: %s' % threads, 87 | threads, 88 | prefix='%s.threads' % db) 89 | ) 90 | 91 | for i, col in enumerate(row[2:]): 92 | self.queueBack(self.createEvent('ok', 93 | '%s: %s' % (names[i], col), 94 | col, 95 | prefix='%s.%s' % (db, names[i]), 96 | aggregation=Counter64) 97 | ) 98 | 99 | yield p.close() 100 | 101 | defer.returnValue(self.createEvent('ok', 'Connection ok', 1, 102 | prefix='state')) 103 | 104 | except ImportError: 105 | log.msg('tensor.sources.database.postgresql.PostgreSQL' 106 | ' requires psycopg2') 107 | defer.returnValue(None) 108 | except Exception as e: 109 | defer.returnValue(self.createEvent('critical', 110 | 'Connection error: %s' % str(e).replace('\n',' '), 111 | 0, prefix='state') 112 | ) 113 | 114 | -------------------------------------------------------------------------------- /tensor/sources/docker.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: docker 3 | :platform: Any 4 | :synopsis: A source module for Docker container metrics 5 | 6 | .. moduleauthor:: Colin Alston 7 | """ 8 | 9 | import json 10 | 11 | from twisted.internet import defer, reactor 12 | 13 | from zope.interface import implementer 14 | 15 | from tensor.interfaces import ITensorSource 16 | from tensor.objects import Source 17 | 18 | from tensor.utils import HTTPRequest, PersistentCache 19 | from tensor.aggregators import Counter64 20 | 21 | 22 | @implementer(ITensorSource) 23 | class ContainerStats(Source): 24 | """Returns stats for Docker containers on this host 25 | 26 | **Configuration arguments:** 27 | 28 | :param url: Docker stats URL 29 | :type url: str.
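    For example, a hypothetical ``tensor.yml`` source entry (the service
    name and interval are illustrative; the URL shown is the default)::

        service: docker
        source: tensor.sources.docker.ContainerStats
        interval: 10.0
        url: unix:/var/run/docker.sock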
30 | 31 | **Metrics:** 32 | 33 | :(service name).(container name).mem_limit: Maximum memory for container 34 | :(service name).(container name).mem_used: Memory used by container 35 | :(service name).(container name).cpu: Percentage of system CPU in use 36 | :(service name).(container name).io_read: IO reads per second 37 | :(service name).(container name).io_write: IO writes per second 38 | :(service name).(container name).io_sync: IO synchronous op/s 39 | :(service name).(container name).io_async: IO asynchronous op/s 40 | :(service name).(container name).io_total: Total IOPS 41 | 42 | Note: if a MARATHON_APP_ID environment variable exists on the container 43 | then it is used as the container name instead. 44 | 45 | """ 46 | 47 | def __init__(self, *a, **kw): 48 | Source.__init__(self, *a, **kw) 49 | 50 | self.url = self.config.get('url', 'unix:/var/run/docker.sock') 51 | 52 | self.cache = PersistentCache(location='/tmp/dockerstats.cache') 53 | 54 | @defer.inlineCallbacks 55 | def _get_stats_from_node(self): 56 | if self.url.startswith('unix:'): 57 | sock = self.url 58 | pref = '' 59 | else: 60 | sock = None 61 | pref = self.url 62 | 63 | containers = yield HTTPRequest().getJson( 64 | '%s/containers/json' % pref, socket=sock) 65 | 66 | allStats = {} 67 | 68 | for container in containers: 69 | name = container.get('Names', [None])[0].lstrip('/').encode('ascii') 70 | 71 | stats = yield HTTPRequest().getJson( 72 | '%s/containers/%s/stats?stream=false' % (pref, name), socket=sock) 73 | 74 | detail = yield HTTPRequest().getJson( 75 | '%s/containers/%s/json' % (pref, name), socket=sock) 76 | 77 | env = detail['Config']['Env'] 78 | 79 | 80 | if env: 81 | for var in env: 82 | if var.startswith('MARATHON_APP_ID='): 83 | name = var.split('=', 1)[-1].lstrip('/').encode('ascii') 84 | 85 | allStats[name] = { 86 | 'mem_limit': stats['memory_stats']['limit'], 87 | 'mem_used': stats['memory_stats']['usage'] 88 | } 89 | io_stats = stats['blkio_stats']['io_service_bytes_recursive'] 90 | 91 | for item in io_stats: 92 | allStats[name]['io_' + item['op'].lower()] = item['value'] 93 | 94 | sysCpu = stats['cpu_stats']['system_cpu_usage'] 95 | dockCpu = stats['cpu_stats']['cpu_usage']['total_usage'] 96 | 97 | if self.cache.contains(name): 98 | lastTime, lastStats = self.cache.get(name) 99 | 100 | sysDelta = sysCpu - lastStats[0] 101 | dockDelta = dockCpu - lastStats[1] 102 | 103 | if sysDelta > 0: 104 | usage = int((dockDelta / sysDelta) * 100) 105 | 106 | allStats[name]['cpu'] = usage 107 | 108 | self.cache.set(name, [sysCpu, dockCpu]) 109 | 110 | defer.returnValue(allStats) 111 | 112 | @defer.inlineCallbacks 113 | def get(self): 114 | stats = yield self._get_stats_from_node() 115 | 116 | events = [] 117 | for name, container in stats.items(): 118 | for pref, val in container.items(): 119 | if pref.startswith('io_'): 120 | events.append(self.createEvent('ok', '', val, 121 | prefix='%s.%s' % (name, pref), 122 | aggregation=Counter64) 123 | ) 124 | else: 125 | events.append(self.createEvent( 126 | 'ok', '', val, prefix='%s.%s' % (name, pref))) 127 | 128 | defer.returnValue(events) 129 | -------------------------------------------------------------------------------- /tensor/sources/generator.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: generator 3 | :platform: Any 4 | :synopsis: A function generator source module 5 | 6 | ..
moduleauthor:: Colin Alston 7 | """ 8 | 9 | import math 10 | 11 | from twisted.internet import defer, reactor 12 | 13 | from zope.interface import implementer 14 | 15 | from tensor.interfaces import ITensorSource 16 | from tensor.objects import Source 17 | 18 | 19 | @implementer(ITensorSource) 20 | class Function(Source): 21 | """Produces the value of an arbitrary function at each interval 22 | 23 | Expressions may use the functions sin, cos, sinh, cosh, tan, tanh, asin, 24 | acos, atan, asinh, acosh, atanh, log(n, [base|e]) and abs, 25 | 26 | as well as the constants e and pi, and the variable x 27 | 28 | **Configuration arguments:** 29 | 30 | :param dx: Resolution with time (steps of x) 31 | :type dx: float. 32 | :param function: Function to produce 33 | :type function: string. 34 | """ 35 | 36 | x = 0 37 | 38 | def get(self): 39 | self.x += self.config.get('dx', 0.1) 40 | 41 | val = eval(self.config.get('function', 'sin(x)'), { 42 | 'sin': math.sin, 43 | 'sinh': math.sinh, 44 | 'cos': math.cos, 45 | 'cosh': math.cosh, 46 | 'tan': math.tan, 47 | 'tanh': math.tanh, 48 | 'asin': math.asin, 49 | 'acos': math.acos, 50 | 'atan': math.atan, 51 | 'asinh': math.asinh, 52 | 'acosh': math.acosh, 53 | 'atanh': math.atanh, 54 | 'log': math.log, 55 | 'abs': abs, 56 | 'e': math.e, 57 | 'pi': math.pi, 58 | 'x': self.x 59 | }) 60 | 61 | return self.createEvent('ok', 'Function value', val) 62 | -------------------------------------------------------------------------------- /tensor/sources/haproxy.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: haproxy 3 | :platform: Unix 4 | :synopsis: A source module for haproxy stats 5 | 6 | .. moduleauthor:: Colin Alston 7 | """ 8 | 9 | import time 10 | import csv 11 | from base64 import b64encode 12 | 13 | from twisted.internet import defer 14 | 15 | from zope.interface import implementer 16 | 17 | from tensor.interfaces import ITensorSource 18 | from tensor.objects import Source 19 | 20 | from tensor.utils import HTTPRequest 21 | from tensor.aggregators import Counter 22 | 23 | 24 | @implementer(ITensorSource) 25 | class HAProxy(Source): 26 | """Reads HAProxy statistics from the CSV stats interface 27 | 28 | **Configuration arguments:** 29 | 30 | :param url: URL to fetch stats from 31 | :type url: str. 32 | :param user: Username 33 | :type user: str. 34 | :param password: Password 35 | :type password: str.
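    For example, a hypothetical ``tensor.yml`` source entry (the URL and
    credentials shown are the source defaults)::

        service: haproxy
        source: tensor.sources.haproxy.HAProxy
        interval: 10.0
        url: http://localhost/haproxy?stats;csv
        user: haproxy
        password: stats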
36 | 37 | **Metrics:** 38 | 39 | :(service name).(backend|frontend|nodes).(stats): Various statistics 40 | """ 41 | 42 | def __init__(self, *a, **kw): 43 | Source.__init__(self, *a, **kw) 44 | 45 | self.url = self.config.get('url', 'http://localhost/haproxy?stats;csv') 46 | self.user = self.config.get('user', 'haproxy') 47 | self.password = self.config.get('password', 'stats') 48 | 49 | def _ev(self, val, desc, pref, aggr=True): 50 | if val: 51 | val = int(val) 52 | if aggr: 53 | aggr = Counter 54 | else: 55 | aggr = None 56 | 57 | return self.createEvent('ok', 58 | '%s: %s' % (desc, val), val, prefix=pref, aggregation=aggr) 59 | 60 | @defer.inlineCallbacks 61 | def get(self): 62 | url = self.config.get('url', self.config.get('stats_url')) 63 | 64 | authorization = b64encode('%s:%s' % (self.user, self.password)) 65 | 66 | events = [] 67 | 68 | try: 69 | body = yield HTTPRequest().getBody(self.url, 70 | headers={ 71 | 'User-Agent': ['Tensor'], 72 | 'Authorization': ['Basic ' + authorization] 73 | } 74 | ) 75 | 76 | body = body.lstrip('# ').split('\n') 77 | 78 | events.append(self.createEvent('ok', 79 | 'Connection ok', 1, prefix='state')) 80 | except Exception as e: 81 | defer.returnValue(self.createEvent('critical', 82 | 'Connection failed: %s' % (str(e)), 0, prefix='state')) 83 | 84 | c = csv.DictReader(body, delimiter=',') 85 | for row in c: 86 | if row['svname'] == 'BACKEND': 87 | p = 'backends.%s' % row['pxname'] 88 | 89 | events.append(self._ev(row['act'], 'Active servers', 90 | '%s.active' % p)) 91 | 92 | elif row['svname'] == 'FRONTEND': 93 | p = 'frontends.%s' % row['pxname'] 94 | 95 | else: 96 | p = 'nodes.%s' % row['pxname'] 97 | 98 | events.append(self._ev(row['chkfail'], 'Check failures', 99 | '%s.checks_failed' % p)) 100 | 101 | # Sessions 102 | events.extend([ 103 | self._ev(row['scur'], 'Sessions', 104 | '%s.sessions' % p, False), 105 | self._ev(row['stot'], 'Session rate', 106 | '%s.session_rate' % p), 107 | self._ev(row['ereq'], 'Request errors', 108 | '%s.errors_req' % p), 109 | self._ev(row['econ'], 'Backend connection errors', 110 | '%s.errors_con' % p), 111 | self._ev(row['eresp'], 'Response errors', 112 | '%s.errors_resp' % p), 113 | self._ev(row['wretr'], 'Retries', 114 | '%s.retries' % p), 115 | self._ev(row['wredis'], 'Switches', 116 | '%s.switches' % p), 117 | self._ev(int(row['bin'])*8, 'Bytes in', 118 | '%s.bytes_in' % p), 119 | self._ev(int(row['bout'])*8, 'Bytes out', 120 | '%s.bytes_out' % p), 121 | self._ev(row['hrsp_1xx'], '1xx codes', 122 | '%s.code_1xx' % p), 123 | self._ev(row['hrsp_2xx'], '2xx codes', 124 | '%s.code_2xx' % p), 125 | self._ev(row['hrsp_3xx'], '3xx codes', 126 | '%s.code_3xx' % p), 127 | self._ev(row['hrsp_4xx'], '4xx codes', 128 | '%s.code_4xx' % p), 129 | self._ev(row['hrsp_5xx'], '5xx codes', 130 | '%s.code_5xx' % p), 131 | ]) 132 | 133 | defer.returnValue([e for e in events if e]) 134 | 135 | -------------------------------------------------------------------------------- /tensor/sources/linux/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/sources/linux/__init__.py -------------------------------------------------------------------------------- /tensor/sources/linux/ipsec.py: -------------------------------------------------------------------------------- 1 | from zope.interface import implementer 2 | 3 | from twisted.internet import defer 4 | 5 | from tensor.interfaces import ITensorSource 6 
| from tensor.objects import Source 7 | 8 | 9 | @implementer(ITensorSource) 10 | class StrongSwan(Source): 11 | """Returns the status of strongSwan IPSec tunnels 12 | 13 | **Metrics:** 14 | 15 | :(service name).(peer name): Tunnel status 16 | """ 17 | ssh = True 18 | 19 | @defer.inlineCallbacks 20 | def get(self): 21 | out, err, code = yield self.fork('/usr/bin/sudo', args=( 22 | 'ipsec', 'statusall')) 23 | 24 | connections = {} 25 | 26 | s = 0 27 | 28 | for l in out.strip('\n').split('\n'): 29 | if l == "Connections:": 30 | s = 1 31 | continue 32 | elif l == "Routed Connections:": 33 | s = 2 34 | elif "Security Associations" in l: 35 | s = 3 36 | elif l.startswith(' ') and ':' in l: 37 | if s == 1: 38 | con, detail = l.strip().split(': ', 1) 39 | detail = detail.strip() 40 | 41 | if con not in connections: 42 | connections[con] = { 43 | 'source': detail.split('...')[0], 44 | 'destination': detail.split('...')[1].split()[0], 45 | 'up': False 46 | } 47 | elif s == 3: 48 | con, detail = l.strip().split(': ', 1) 49 | detail = detail.strip() 50 | if '[' in con: 51 | con = con.split('[')[0] 52 | else: 53 | con = con.split('{')[0] 54 | 55 | if 'ESTABLISHED' in detail and con in connections: 56 | connections[con]['up'] = True 57 | 58 | events = [] 59 | for k, v in connections.items(): 60 | if v['up']: 61 | events.append(self.createEvent('ok', 'IPSec tunnel %s up' % k, 62 | 1, prefix=k)) 63 | else: 64 | events.append(self.createEvent('critical', 65 | 'IPSec tunnel %s down' % k, 0, prefix=k)) 66 | 67 | defer.returnValue(events) 68 | -------------------------------------------------------------------------------- /tensor/sources/linux/process.py: -------------------------------------------------------------------------------- 1 | from zope.interface import implementer 2 | 3 | from twisted.internet import defer 4 | 5 | from tensor.interfaces import ITensorSource 6 | from tensor.objects import Source 7 | 8 | @implementer(ITensorSource) 9 | class ProcessCount(Source): 10 | """Returns the number of processes running on the system 11 | 12 | **Metrics:** 13 | 14 | :(service name): Number of processes 15 | """ 16 | 17 | ssh = True 18 | 19 | @defer.inlineCallbacks 20 | def get(self): 21 | out, err, code = yield self.fork('/bin/ps', args=('-e',)) 22 | 23 | count = len(out.strip('\n').split('\n')) - 1 24 | 25 | defer.returnValue( 26 | self.createEvent('ok', 'Process count %s' % (count), count) 27 | ) 28 | 29 | @implementer(ITensorSource) 30 | class ProcessStats(Source): 31 | """Returns memory used by each active parent process 32 | 33 | **Metrics:** 34 | 35 | :(service name).proc.(process name).cpu: Per process CPU usage 36 | :(service name).proc.(process name).memory: Per process memory use 37 | :(service name).proc.(process name).age: Per process age 38 | :(service name).user.(user name).cpu: Per user CPU usage 39 | :(service name).user.(user name).memory: Per user memory use 40 | """ 41 | 42 | ssh = True 43 | 44 | @defer.inlineCallbacks 45 | def get(self): 46 | out, err, code = yield self.fork('/bin/ps', args=( 47 | '-eo','pid,user:50,etime,rss,pcpu,comm:50,cmd:255')) 48 | 49 | lines = out.strip('\n').split('\n') 50 | 51 | cols = lines[0].split() 52 | 53 | procs = {} 54 | users = {} 55 | 56 | vals = [] 57 | 58 | for l in lines[1:]: 59 | parts = l.split(None, len(cols) - 1) 60 | 61 | proc = {} 62 | for i, e in enumerate(parts): 63 | proc[cols[i]] = e.strip() 64 | 65 | parts = None 66 | 67 | elapsed = proc['ELAPSED'] 68 | if '-' in elapsed: 69 | days = int(elapsed.split('-')[0]) 70 | hours, minutes, seconds = [ 71 | int(i) for i in
elapsed.split('-')[1].split(':')] 72 | age = (days*24*60*60) + (hours*60*60) + (minutes*60) 73 | age += seconds 74 | 75 | elif elapsed.count(':')==2: 76 | hours, minutes, seconds = [ 77 | int(i) for i in elapsed.split(':')] 78 | age = (hours*60*60) + (minutes*60) + seconds 79 | 80 | else: 81 | minutes, seconds = [ 82 | int(i) for i in elapsed.split(':')] 83 | age = (minutes*60) + seconds 84 | 85 | # Ignore kernel threads and tasks that just started (usually this ps itself) 86 | if (proc['CMD'][0] != '[') and (age>0): 87 | binary = proc['CMD'].split()[0].split('/')[-1].strip(':').strip('-') 88 | pid = proc['PID'] 89 | cmd = proc['CMD'] 90 | comm = proc['COMMAND'] 91 | user = proc['USER'].lower().replace('+', '').strip('-') 92 | 93 | mem = int(proc['RSS'])/1024.0 94 | cpu = float(proc['%CPU']) 95 | 96 | if user in users: 97 | users[user]['cpu'] += cpu 98 | users[user]['mem'] += mem 99 | else: 100 | users[user] = { 101 | 'cpu': cpu, 'mem': mem 102 | } 103 | 104 | if binary != comm: 105 | key = "%s.%s" % (binary,comm) 106 | else: 107 | key = comm 108 | 109 | key = key.strip('.').replace('+', '').strip('-').replace( 110 | '(', '').replace(')', '').lower().split('.')[0] 111 | 112 | if key in procs: 113 | procs[key]['cpu'] += cpu 114 | procs[key]['mem'] += mem 115 | procs[key]['age'] += age 116 | else: 117 | procs[key] = { 118 | 'cpu': cpu, 'mem': mem, 'age': age 119 | } 120 | 121 | events = [] 122 | 123 | for k,v in users.items(): 124 | events.append(self.createEvent('ok', 'User memory %s: %0.2fMB' % ( 125 | k, v['mem']), v['mem'], prefix="user.%s.mem" % k)) 126 | events.append(self.createEvent('ok', 'User CPU usage %s: %s%%' % ( 127 | k, int(v['cpu'])), v['cpu'], prefix="user.%s.cpu" % k)) 128 | 129 | for k,v in procs.items(): 130 | events.append(self.createEvent('ok', 'Process age %s: %ss' % ( 131 | k, v['age']), v['age'], prefix="proc.%s.age" % k)) 132 | events.append(self.createEvent('ok', 'Process memory %s: %0.2fMB' % ( 133 | k, v['mem']), v['mem'], prefix="proc.%s.mem" % k)) 134 | events.append( 135 | self.createEvent('ok', 'Process CPU usage %s: %s%%' % ( 136 | k, int(v['cpu'])), v['cpu'], 137 | prefix="proc.%s.cpu" % k 138 | ) 139 | ) 140 | 141 | defer.returnValue(events) 142 | -------------------------------------------------------------------------------- /tensor/sources/linux/sensors.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from zope.interface import implementer 4 | 5 | from twisted.internet import defer 6 | 7 | from tensor.interfaces import ITensorSource 8 | from tensor.objects import Source 9 | 10 | 11 | @implementer(ITensorSource) 12 | class Sensors(Source): 13 | """Returns lm-sensors output 14 | 15 | NB: this is largely untested across different configurations and versions. Please 16 | report any issues with the output of the `sensors` command to help 17 | improve it.
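    For example, a minimal hypothetical ``tensor.yml`` source entry (this
    source takes no arguments beyond the common keys)::

        service: sensors
        source: tensor.sources.linux.sensors.Sensors
        interval: 60.0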
18 | 19 | **Metrics:** 20 | 21 | :(service name).(adapter).(sensor): Sensor value 22 | """ 23 | ssh = True 24 | 25 | @defer.inlineCallbacks 26 | def _get_sensors(self): 27 | out, err, code = yield self.fork('/usr/bin/sensors') 28 | if code == 0: 29 | defer.returnValue(out.strip('\n').split('\n')) 30 | else: 31 | defer.returnValue([]) 32 | 33 | def _parse_sensors(self, sensors): 34 | adapters = {} 35 | adapter = None 36 | for i in sensors: 37 | l = i.strip() 38 | if not l: 39 | continue 40 | 41 | if ':' in l: 42 | n, v = l.split(':') 43 | vals = v.strip().split() 44 | 45 | if n == 'Adapter': 46 | continue 47 | 48 | if '\xc2\xb0' in vals[0]: 49 | val = vals[0].split('\xc2\xb0')[0] 50 | unit = vals[0][-1] 51 | elif len(vals) > 1: 52 | val = vals[0] 53 | unit = vals[1] 54 | else: 55 | continue 56 | 57 | val = float(val) 58 | 59 | adapters[adapter][n] = val 60 | 61 | else: 62 | adapter = l 63 | adapters[adapter] = {} 64 | 65 | return adapters 66 | 67 | @defer.inlineCallbacks 68 | def get(self): 69 | sensors = yield self._get_sensors() 70 | adapters = self._parse_sensors(sensors) 71 | 72 | events = [] 73 | 74 | for adapter, v in adapters.items(): 75 | for sensor, val in v.items(): 76 | events.append( 77 | self.createEvent('ok', 78 | 'Sensor %s:%s - %s' % (adapter, sensor, val), 79 | val, 80 | prefix='%s.%s' % (adapter, sensor,))) 81 | 82 | defer.returnValue(events) 83 | 84 | @implementer(ITensorSource) 85 | class SMART(Source): 86 | """Returns SMART output for all disks 87 | 88 | **Metrics:** 89 | 90 | :(service name).(disk).(sensor): Sensor value 91 | """ 92 | 93 | ssh = True 94 | 95 | def __init__(self, *a, **kw): 96 | Source.__init__(self, *a, **kw) 97 | 98 | self.devices = [] 99 | 100 | @defer.inlineCallbacks 101 | def _get_disks(self): 102 | out, err, code = yield self.fork('/usr/sbin/smartctl', 103 | args=('--scan',)) 104 | 105 | if code != 0: 106 | defer.returnValue([]) 107 | 108 | out = out.strip('\n').split('\n') 109 | devices = [] 110 | for ln in out: 111 | if '/dev' in ln: 112 | devices.append(ln.split()[0]) 113 | 114 | defer.returnValue(devices) 115 | 116 | @defer.inlineCallbacks 117 | def _get_smart(self, device): 118 | out, err, code = yield self.fork('/usr/sbin/smartctl', 119 | args=('-A', device)) 120 | 121 | if code == 0: 122 | defer.returnValue(out.strip('\n').split('\n')) 123 | else: 124 | defer.returnValue([]) 125 | 126 | def _parse_smart(self, smart): 127 | mark = False 128 | attributes = {} 129 | for l in smart: 130 | ln = l.strip('\n').strip() 131 | if not ln: 132 | continue 133 | 134 | if mark: 135 | (attr_id, attribute, flag, val, worst, thresh, attr_type, u, wf, raw 136 | ) = ln.split(None, 9) 137 | 138 | try: 139 | raw = int(raw.split()[0]) 140 | attributes[attribute.replace('_', ' ')] = raw 141 | except (ValueError, IndexError): 142 | pass 143 | 144 | if ln[:3] == 'ID#': 145 | mark = True 146 | 147 | return attributes 148 | 149 | @defer.inlineCallbacks 150 | def get(self): 151 | 152 | 153 | if not self.devices: 154 | self.devices = yield self._get_disks() 155 | 156 | events = [] 157 | 158 | for disk in self.devices: 159 | smart = yield self._get_smart(disk) 160 | stats = self._parse_smart(smart) 161 | 162 | for sensor, val in stats.items(): 163 | events.append( 164 | self.createEvent('ok', 165 | 'Attribute %s:%s - %s' % (disk, sensor, val), 166 | val, 167 | prefix='%s.%s' % (disk, sensor,))) 168 | 169 | defer.returnValue(events) 170 | -------------------------------------------------------------------------------- /tensor/sources/media/__init__.py:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/sources/media/__init__.py -------------------------------------------------------------------------------- /tensor/sources/media/libav.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from twisted.internet import defer 4 | 5 | from zope.interface import implementer 6 | 7 | from tensor.interfaces import ITensorSource 8 | from tensor.objects import Source 9 | from tensor.utils import fork 10 | 11 | 12 | @implementer(ITensorSource) 13 | class DarwinRTSP(Source): 14 | """Makes avprobe requests of a Darwin RTSP sample stream 15 | (sample_100kbit.mp4) 16 | 17 | **Configuration arguments:** 18 | 19 | :param destination: Host name or IP address to check 20 | :type destination: str. 21 | 22 | **Metrics:** 23 | :(service name): Time to complete request 24 | 25 | You can also override the `hostname` argument to make it match 26 | metrics from that host. 27 | """ 28 | 29 | @defer.inlineCallbacks 30 | def get(self): 31 | host = self.config.get('destination', self.hostname) 32 | 33 | t0 = time.time() 34 | try: 35 | out, err, code = yield fork('/usr/bin/avprobe', 36 | args=('rtsp://%s/sample_100kbit.mp4' % host, ), timeout=30.0) 37 | except Exception: 38 | code = 1 39 | err = None 40 | 41 | t_delta = (time.time() - t0) * 1000 42 | 43 | if code == 0: 44 | e = self.createEvent('ok', 'RTSP Request time to %s' % host, 45 | t_delta) 46 | else: 47 | if err: 48 | try: 49 | error = err.strip('\n').split('\n')[-2] 50 | except IndexError: 51 | error = err.replace('\n', '-') 52 | else: 53 | error = "Execution error" 54 | 55 | e = self.createEvent('critical', 56 | 'Unable to stream %s: %s' % (host, error), 57 | t_delta) 58 | 59 | defer.returnValue(e) 60 | 61 | -------------------------------------------------------------------------------- /tensor/sources/munin.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: munin 3 | :platform: Any 4 | :synopsis: Provides MuninNode source which can get events from the 5 | munin-node protocol. 6 | 7 | ..
moduleauthor:: Colin Alston 8 | """ 9 | 10 | import time 11 | 12 | from twisted.internet import defer, reactor 13 | from twisted.protocols.basic import LineReceiver 14 | from twisted.internet.protocol import ClientCreator 15 | 16 | from zope.interface import implementer 17 | 18 | from tensor.interfaces import ITensorSource 19 | from tensor.objects import Source 20 | from tensor.aggregators import Counter, Counter32 21 | 22 | class MuninProtocol(LineReceiver): 23 | """MuninProtocol - provides a line receiver protocol for making requests 24 | to munin-node 25 | 26 | Requests must be made sequentially 27 | """ 28 | delimiter = '\n' 29 | def __init__(self): 30 | self.ready = False 31 | self.buffer = [] 32 | self.d = None 33 | 34 | def lineReceived(self, line): 35 | if not line or line.startswith('#'): 36 | return 37 | 38 | if self.d and (not self.d.called): 39 | if self.list: 40 | if line == '.': 41 | buffer = self.buffer 42 | self.buffer = [] 43 | self.d.callback(buffer) 44 | else: 45 | self.buffer.append(line) 46 | else: 47 | self.d.callback(line) 48 | 49 | def disconnect(self): 50 | return self.transport.loseConnection() 51 | 52 | def sendCommand(self, command, as_list=False): 53 | self.d = defer.Deferred() 54 | self.list = as_list 55 | self.sendLine(command) 56 | return self.d 57 | 58 | 59 | @implementer(ITensorSource) 60 | class MuninNode(Source): 61 | """Connects to munin-node and retrieves all metrics 62 | 63 | **Configuration arguments:** 64 | 65 | :param host: munin-node hostname (probably localhost) 66 | :type host: str. 67 | :param port: munin-node port (probably 4949) 68 | :type port: int. 69 | 70 | **Metrics:** 71 | 72 | :(service name).(plugin name).(keys...): A dot separated tree of 73 | munin plugin keys 74 | """ 75 | 76 | @defer.inlineCallbacks 77 | def get(self): 78 | host = self.config.get('host', 'localhost') 79 | port = int(self.config.get('port', 4949)) 80 | 81 | creator = ClientCreator(reactor, MuninProtocol) 82 | proto = yield creator.connectTCP(host, port) 83 | 84 | # Announce our capabilities 85 | yield proto.sendCommand('cap multigraph') 86 | 87 | listout = yield proto.sendCommand('list') 88 | plug_list = listout.split() 89 | events = [] 90 | 91 | for plug in plug_list: 92 | # Retrieve the configuration for this plugin 93 | config = yield proto.sendCommand('config %s' % plug, True) 94 | plugin_config = {} 95 | for r in config: 96 | name, val = r.split(' ', 1) 97 | if '.'
in name: 98 | metric, key = name.split('.') 99 | 100 | if key in ['type', 'label', 'min', 'info']: 101 | plugin_config['%s.%s.%s' % (plug, metric, key)] = val 102 | 103 | else: 104 | if name == 'graph_category': 105 | plugin_config['%s.category' % plug] = val 106 | 107 | category = plugin_config.get('%s.category' % plug, 'system') 108 | 109 | # Retrieve the metrics 110 | metrics = yield proto.sendCommand('fetch %s' % plug, True) 111 | for m in metrics: 112 | name, val = m.split(' ', 1) 113 | if name != 'multigraph': 114 | metric, key = name.split('.') 115 | 116 | base = '%s.%s' % (plug, metric) 117 | 118 | m_type = plugin_config.get('%s.type' % base, 'GAUGE') 119 | 120 | try: 121 | val = float(val) 122 | except ValueError: 123 | continue 124 | 125 | 126 | info = plugin_config.get('%s.info' % base, base) 127 | prefix = '%s.%s' % (category, base) 128 | 129 | if m_type == 'COUNTER': 130 | events.append(self.createEvent('ok', info, val, 131 | prefix=prefix, aggregation=Counter32)) 132 | elif m_type == 'DERIVE': 133 | events.append(self.createEvent('ok', info, val, 134 | prefix=prefix, aggregation=Counter)) 135 | else: 136 | events.append(self.createEvent('ok', info, val, 137 | prefix=prefix)) 138 | 139 | yield proto.disconnect() 140 | 141 | defer.returnValue(events) 142 | -------------------------------------------------------------------------------- /tensor/sources/network.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: network 3 | :platform: Unix 4 | :synopsis: A source module for network checks 5 | 6 | .. moduleauthor:: Colin Alston 7 | """ 8 | 9 | import time 10 | 11 | from twisted.internet import defer, reactor 12 | from twisted.python import log 13 | 14 | from zope.interface import implementer 15 | 16 | from tensor.interfaces import ITensorSource 17 | from tensor.objects import Source 18 | from tensor.protocol import icmp 19 | 20 | from tensor.utils import HTTPRequest, Timeout 21 | 22 | @implementer(ITensorSource) 23 | class HTTP(Source): 24 | """Performs an HTTP request 25 | 26 | **Configuration arguments:** 27 | 28 | :param url: HTTP URL 29 | :type url: str. 30 | :param method: HTTP request method to use (default GET) 31 | :type method: str. 32 | :param match: A string that must appear in the response body for the check to pass 33 | :type match: str. 34 | :param useragent: User-Agent header to use 35 | :type useragent: str. 36 | :param timeout: Timeout for connection (default 60s) 37 | :type timeout: int.
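    For example, a hypothetical ``tensor.yml`` source entry (the URL and
    match string are illustrative)::

        service: web
        source: tensor.sources.network.HTTP
        interval: 30.0
        url: http://www.example.com/
        method: GET
        match: Welcome
        timeout: 30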
38 | 39 | **Metrics:** 40 | 41 | :(service name).latency: Time to complete request 42 | """ 43 | 44 | @defer.inlineCallbacks 45 | def get(self): 46 | 47 | method = self.config.get('method', 'GET') 48 | url = self.config.get('url', 'http://%s/' % self.hostname) 49 | match = self.config.get('match', None) 50 | ua = self.config.get('useragent', 'Tensor HTTP checker') 51 | timeout = self.config.get('timeout', 60) 52 | 53 | t0 = time.time() 54 | 55 | try: 56 | body = yield HTTPRequest(timeout).getBody(url, method, 57 | {'User-Agent': [ua]}, 58 | ) 59 | except Timeout: 60 | log.msg('[%s] Request timeout' % url) 61 | t_delta = (time.time() - t0) * 1000 62 | defer.returnValue( 63 | self.createEvent('critical', '%s - timeout' % url, t_delta, 64 | prefix="latency") 65 | ) 66 | except Exception as e: 67 | log.msg('[%s] Request error %s' % (url, e)) 68 | t_delta = (time.time() - t0) * 1000 69 | defer.returnValue( 70 | self.createEvent('critical', '%s - %s' % (url, e), t_delta, 71 | prefix="latency") 72 | ) 73 | 74 | t_delta = (time.time() - t0) * 1000 75 | 76 | if match: 77 | if match in body: 78 | state = 'ok' 79 | else: 80 | state = 'critical' 81 | else: 82 | state = 'ok' 83 | 84 | defer.returnValue( 85 | self.createEvent(state, 'Latency to %s' % url, t_delta, 86 | prefix="latency") 87 | ) 88 | 89 | @implementer(ITensorSource) 90 | class Ping(Source): 91 | """Performs ping checks against a destination 92 | 93 | **Configuration arguments:** 94 | 95 | :param destination: Host name or IP address to ping 96 | :type destination: str. 97 | 98 | **Metrics:** 99 | 100 | :(service name).latency: Ping latency 101 | :(service name).loss: Packet loss 102 | 103 | You can also override the `hostname` argument to make it match 104 | metrics from that host. 105 | """ 106 | 107 | @defer.inlineCallbacks 108 | def get(self): 109 | host = self.config.get('destination', self.hostname) 110 | 111 | try: 112 | ip = yield reactor.resolve(host) 113 | except Exception: 114 | ip = None 115 | 116 | if ip: 117 | try: 118 | loss, latency = yield icmp.ping(ip, 5) 119 | except Exception: 120 | loss, latency = 100, None 121 | 122 | event = [self.createEvent('ok', '%s%% loss to %s' % (loss, host), loss, 123 | prefix="loss")] 124 | 125 | if latency: 126 | event.append(self.createEvent('ok', 'Latency to %s' % host, latency, 127 | prefix="latency")) 128 | else: 129 | event = [self.createEvent('critical', 'Unable to resolve %s' % host, 100, 130 | prefix="loss")] 131 | 132 | defer.returnValue(event) 133 | -------------------------------------------------------------------------------- /tensor/sources/nginx.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: nginx 3 | :platform: Unix 4 | :synopsis: A source module for nginx stats 5 | 6 | .. moduleauthor:: Colin Alston 7 | """ 8 | 9 | import time 10 | import datetime 11 | 12 | from twisted.internet import defer 13 | 14 | from zope.interface import implementer 15 | 16 | from tensor.interfaces import ITensorSource 17 | from tensor.objects import Source 18 | 19 | from tensor.utils import HTTPRequest, fork 20 | from tensor.aggregators import Counter64 21 | from tensor.logs import parsers, follower 22 | 23 | @implementer(ITensorSource) 24 | class Nginx(Source): 25 | """Reads Nginx stub_status 26 | 27 | **Configuration arguments:** 28 | 29 | :param stats_url: URL to fetch stub_status from 30 | :type stats_url: str.
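    For example, a hypothetical ``tensor.yml`` source entry (the URL is
    illustrative)::

        service: nginx
        source: tensor.sources.nginx.Nginx
        interval: 10.0
        stats_url: http://localhost/nginx_status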
31 | 32 | **Metrics:** 33 | 34 | :(service name).active: Active connections at this time 35 | :(service name).accepts: Accepted connections 36 | :(service name).handled: Handled connections 37 | :(service name).requests: Total client requests 38 | :(service name).reading: Reading requests 39 | :(service name).writing: Writing responses 40 | :(service name).waiting: Waiting connections 41 | """ 42 | 43 | def _parse_nginx_stats(self, stats): 44 | stats = stats.split('\n') 45 | active = stats[0].split(': ')[-1] 46 | 47 | accepts, handled, requests = stats[2].split() 48 | 49 | _, reading, _, writing, _, waiting = stats[3].split() 50 | 51 | metrics = { 52 | 'active': (float(active), None), 53 | 'accepts': (float(accepts), Counter64), 54 | 'requests': (float(requests), Counter64), 55 | 'handled': (float(handled), Counter64), 56 | 'reading': (float(reading), None), 57 | 'writing': (float(writing), None), 58 | 'waiting': (float(waiting), None), 59 | } 60 | 61 | return metrics 62 | 63 | @defer.inlineCallbacks 64 | def get(self): 65 | url = self.config.get('url', self.config.get('stats_url')) 66 | 67 | body = yield HTTPRequest().getBody(url, 68 | headers={'User-Agent': ['Tensor']}, 69 | ) 70 | 71 | events = [] 72 | 73 | if body: 74 | metrics = self._parse_nginx_stats(body) 75 | 76 | for k,v in metrics.items(): 77 | metric, aggr = v 78 | events.append( 79 | self.createEvent('ok', 'Nginx %s' % (k), metric, prefix=k, 80 | aggregation=aggr) 81 | ) 82 | 83 | defer.returnValue(events) 84 | 85 | @implementer(ITensorSource) 86 | class NginxLogMetrics(Source): 87 | """Tails Nginx log files, parses them and returns metrics for data usage 88 | and requests against other fields. 89 | 90 | **Configuration arguments:** 91 | 92 | :param log_format: Log format passed to parser, same as the config 93 | definition 94 | :type log_format: str. 95 | :param file: Log file 96 | :type file: str. 97 | :param max_lines: Maximum number of log lines to read per interval to 98 | prevent overwhelming Tensor when reading large logs 99 | (default 2000) 100 | :type max_lines: int. 101 | :param resolution: Aggregate bucket resolution in seconds (default 10) 102 | :type resolution: int. 103 | :param history: Read the entire file from scratch if we've never seen 104 | it (default false) 105 | :type history: bool. 
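    For example, a hypothetical ``tensor.yml`` source entry (the log path
    is illustrative; the remaining values are the documented defaults)::

        service: nginx_logs
        source: tensor.sources.nginx.NginxLogMetrics
        interval: 10.0
        log_format: combined
        file: /var/log/nginx/access.log
        max_lines: 2000
        resolution: 10
        history: false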
106 | 107 | **Metrics:** 108 | 109 | :(service name).total_rbytes: Bytes total for all requests 110 | :(service name).total_requests: Total request count 111 | :(service name).stats.(code).(requests|rbytes): Metrics by status code 112 | :(service name).user-agent.(agent).(requests|rbytes): Metrics by user agent 113 | :(service name).client.(ip).(requests|rbytes): Metrics by client IP 114 | :(service name).request.(request path).(requests|rbytes): Metrics by request path 115 | """ 116 | 117 | # Don't allow overlapping runs 118 | sync = True 119 | 120 | def __init__(self, *a): 121 | Source.__init__(self, *a) 122 | 123 | parser = parsers.ApacheLogParser(self.config.get('log_format', 'combined')) 124 | 125 | history = self.config.get('history', False) 126 | 127 | self.log = follower.LogFollower(self.config['file'], 128 | parser=parser.parse, history=history) 129 | 130 | self.max_lines = int(self.config.get('max_lines', 2000)) 131 | self.bucket_res = int(self.config.get('resolution', 10)) 132 | 133 | self.bucket = 0 134 | 135 | def _aggregate_fields(self, row, b, field, fil=None): 136 | f = row.get(field, None) 137 | 138 | if f: 139 | if fil: 140 | f = fil(f) 141 | if not (field in self.st): 142 | self.st[field] = {} 143 | 144 | if not (f in self.st[field]): 145 | self.st[field][f] = [b, 1] 146 | 147 | else: 148 | self.st[field][f][0] += b 149 | self.st[field][f][1] += 1 150 | 151 | def dumpEvents(self, ts): 152 | if self.st: 153 | events = [ 154 | self.createEvent('ok', 'Nginx rbytes', self.rbytes, prefix='total_rbytes', 155 | evtime=ts), 156 | self.createEvent('ok', 'Nginx requests', self.requests, 157 | prefix='total_requests', evtime=ts) 158 | ] 159 | 160 | for field, block in self.st.items(): 161 | for key, vals in block.items(): 162 | rbytes, requests = vals 163 | events.extend([ 164 | self.createEvent('ok', 'Nginx %s %s rbytes' % (field, key), rbytes, 165 | prefix='%s.%s.rbytes' % (field, key), evtime=ts), 166 | self.createEvent('ok', 'Nginx %s %s requests' % (field, key), requests, 167 | prefix='%s.%s.requests' % (field, key), evtime=ts) 168 | ]) 169 | 170 | self.st = {} 171 | self.rbytes = 0 172 | self.requests = 0 173 | 174 | self.queueBack(events) 175 | 176 | def got_line(self, line): 177 | b = line.get('rbytes', 0) 178 | if b: 179 | self.rbytes += b 180 | 181 | self.requests += 1 182 | 183 | t = time.mktime(line['time'].timetuple()) 184 | 185 | # Calculate the time bucket for this line 186 | bucket = int(int(t)/self.bucket_res)*self.bucket_res 187 | 188 | if self.bucket: 189 | if (bucket != self.bucket): 190 | self.dumpEvents(float(self.bucket)) 191 | self.bucket = bucket 192 | else: 193 | self.bucket = bucket 194 | 195 | self._aggregate_fields(line, b, 'status') 196 | self._aggregate_fields(line, b, 'client') 197 | self._aggregate_fields(line, b, 'user-agent', 198 | fil=lambda l: l.replace('.',',') 199 | ) 200 | self._aggregate_fields(line, b, 'request', 201 | fil=lambda l: l.split()[1].split('?')[0].replace('.',',') 202 | ) 203 | 204 | def get(self): 205 | self.rbytes = 0 206 | self.requests = 0 207 | self.st = {} 208 | 209 | self.log.get_fn(self.got_line, max_lines=self.max_lines) 210 | 211 | self.dumpEvents(float(self.bucket)) 212 | 213 | @implementer(ITensorSource) 214 | class NginxLog(Source): 215 | """Tails Nginx log files, parses them and returns log events for outputs 216 | which support them. 
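    For example, a hypothetical ``tensor.yml`` source entry (the arguments
    are described below; the log path is illustrative)::

        service: nginx_accesslog
        source: tensor.sources.nginx.NginxLog
        interval: 10.0
        file: /var/log/nginx/access.log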
217 | 218 | **Configuration arguments:** 219 | 220 | :param log_format: Log format passed to parser, same as the config 221 | definition (default: combined) 222 | :type log_format: str. 223 | :param file: Log file 224 | :type file: str. 225 | :param max_lines: Maximum number of log lines to read per interval to 226 | prevent overwhelming Tensor when reading large logs 227 | (default 2000) 228 | :type max_lines: int. 229 | """ 230 | 231 | # Don't allow overlapping runs 232 | sync = True 233 | 234 | def __init__(self, *a): 235 | Source.__init__(self, *a) 236 | 237 | self.parser = parsers.ApacheLogParser(self.config.get('log_format', 'combined')) 238 | 239 | self.log = follower.LogFollower(self.config['file'], 240 | parser=self._parser_proxy, history=False) 241 | 242 | self.max_lines = int(self.config.get('max_lines', 2000)) 243 | 244 | def got_eventlog(self, event): 245 | self.queueBack(event) 246 | 247 | def _parser_proxy(self, line): 248 | """Parses log lines and returns a `log` type Event object 249 | """ 250 | d = self.parser.parse(line) 251 | 252 | t = time.mktime(d['time'].timetuple()) 253 | 254 | d['@timestamp'] = datetime.datetime.utcfromtimestamp(t).isoformat() 255 | d['time'] = str(d['time']) 256 | d['message'] = line 257 | d['logname'] = self.config['file'] 258 | 259 | return self.createLog('nginx', d, t) 260 | 261 | def get(self): 262 | self.log.get_fn(self.got_eventlog, max_lines=self.max_lines) 263 | -------------------------------------------------------------------------------- /tensor/sources/python/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/sources/python/__init__.py -------------------------------------------------------------------------------- /tensor/sources/python/uwsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: uwsgi 3 | :platform: Any 4 | :synopsis: Reads UWSGI stats 5 | 6 | .. moduleauthor:: Colin Alston 7 | """ 8 | 9 | import time 10 | import json 11 | 12 | try: 13 | from StringIO import StringIO 14 | except ImportError: 15 | from io import StringIO 16 | 17 | from twisted.internet import defer, reactor 18 | from twisted.protocols.basic import LineReceiver 19 | from twisted.internet.protocol import ClientCreator, Protocol 20 | 21 | from zope.interface import implementer 22 | 23 | from tensor.interfaces import ITensorSource 24 | from tensor.objects import Source 25 | from tensor.aggregators import Counter, Counter32 26 | 27 | class JSONProtocol(Protocol): 28 | """ 29 | JSON line protocol 30 | """ 31 | delimiter = '\n' 32 | def __init__(self): 33 | self.ready = False 34 | self.buffer = StringIO() 35 | self.d = defer.Deferred() 36 | 37 | def dataReceived(self, data): 38 | self.buffer.write(data) 39 | 40 | def connectionLost(self, why): 41 | self.buffer.seek(0) 42 | self.d.callback(json.load(self.buffer)) 43 | 44 | def disconnect(self): 45 | return self.transport.loseConnection() 46 | 47 | 48 | @implementer(ITensorSource) 49 | class Emperor(Source): 50 | """Connects to UWSGI Emperor stats and creates useful metrics 51 | 52 | **Configuration arguments:** 53 | 54 | :param host: Hostname (default localhost) 55 | :type host: str. 56 | :param port: Port 57 | :type port: int. 
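    For example, a hypothetical ``tensor.yml`` source entry (the host and
    port shown are the source defaults)::

        service: uwsgi
        source: tensor.sources.python.uwsgi.Emperor
        interval: 10.0
        host: localhost
        port: 6001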
58 | 59 | """ 60 | 61 | @defer.inlineCallbacks 62 | def get(self): 63 | host = self.config.get('host', 'localhost') 64 | port = int(self.config.get('port', 6001)) 65 | 66 | proto = yield ClientCreator(reactor, JSONProtocol 67 | ).connectTCP(host, port) 68 | 69 | stats = yield proto.d 70 | 71 | nodes = stats.get('vassals', []) 72 | 73 | events = [] 74 | 75 | active = 0 76 | accepting = 0 77 | respawns = 0 78 | 79 | for node in nodes: 80 | if node['accepting'] > 0: 81 | active += 1 82 | accepting += node['accepting'] 83 | if node['respawns'] > 0: 84 | respawns += 1 85 | 86 | events.extend([ 87 | self.createEvent('ok', 'accepting', node['accepting'], 88 | prefix=node['id'] + '.accepting'), 89 | self.createEvent('ok', 'respawns', node['respawns'], 90 | prefix=node['id'] + '.respawns'), 91 | ]) 92 | 93 | 94 | events.extend([ 95 | self.createEvent('ok', 'active', active, prefix='total.active'), 96 | self.createEvent('ok', 'accepting', accepting, 97 | prefix='total.accepting'), 98 | self.createEvent('ok', 'respawns', respawns, 99 | prefix='total.respawns'), 100 | ]) 101 | 102 | defer.returnValue(events) 103 | -------------------------------------------------------------------------------- /tensor/sources/rabbitmq.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from zope.interface import implementer 4 | 5 | from twisted.internet import defer 6 | from twisted.python import log 7 | 8 | from tensor.interfaces import ITensorSource 9 | from tensor.objects import Source 10 | 11 | 12 | @implementer(ITensorSource) 13 | class Queues(Source): 14 | """Returns Queue information for a particular vhost 15 | 16 | **Configuration arguments:** 17 | 18 | :param vhost: Vhost name 19 | :type vhost: str. 20 | 21 | **Metrics:** 22 | 23 | :(service_name).(queue).ready: Ready messages for queue 24 | :(service_name).(queue).unack: Unacknowledged messages for queue 25 | :(service_name).(queue).ready_rate: Ready rate of change per second 26 | :(service_name).(queue).unack_rate: Unacknowledged rate of change per second 27 | 28 | """ 29 | ssh = True 30 | 31 | def __init__(self, *a, **kw): 32 | Source.__init__(self, *a, **kw) 33 | 34 | self.last_t = None 35 | 36 | self.ready = {} 37 | self.unack = {} 38 | 39 | self.last_ready = 0 40 | self.last_unack = 0 41 | 42 | @defer.inlineCallbacks 43 | def get(self): 44 | vhost = self.config.get('vhost', '/') 45 | 46 | mqctl = self.config.get('rabbitmqctl', '/usr/sbin/rabbitmqctl') 47 | 48 | out, err, code = yield self.fork(mqctl, args=( 49 | 'list_queues', '-p', vhost, 'name', 'messages_ready', 50 | 'messages_unacknowledged' 51 | )) 52 | 53 | if code == 0: 54 | t = time.time() 55 | 56 | total_ready = 0 57 | total_unack = 0 58 | 59 | rows = out.strip('\n').split('\n') 60 | 61 | events = [] 62 | 63 | for row in rows: 64 | if ("..."
in row): 65 | continue 66 | name, ready, unack = row.split() 67 | ready = int(ready) 68 | unack = int(unack) 69 | 70 | total_ready += ready 71 | total_unack += unack 72 | 73 | events.extend([ 74 | self.createEvent('ok', '%s unacknowledged messages: %s' % ( 75 | name, unack), unack, prefix='%s.unack' % name), 76 | self.createEvent('ok', '%s ready messages: %s' % ( 77 | name, ready), ready, prefix='%s.ready' % name) 78 | ]) 79 | 80 | if name in self.ready: 81 | last_ready = self.ready[name] 82 | last_unack = self.unack[name] 83 | 84 | rrate = (ready - last_ready)/float(t - self.last_t) 85 | urate = (unack - last_unack)/float(t - self.last_t) 86 | 87 | events.extend([ 88 | self.createEvent('ok', '%s unacknowledged rate: %0.2f' % ( 89 | name, urate), urate, prefix='%s.unack_rate' % name), 90 | self.createEvent('ok', '%s ready rate: %0.2f' % ( 91 | name, rrate), rrate, prefix='%s.ready_rate' % name) 92 | ]) 93 | 94 | self.ready[name] = ready 95 | self.unack[name] = unack 96 | 97 | if self.last_t: 98 | # Get total rates 99 | rrate = (total_ready - self.last_ready)/float(t - self.last_t) 100 | urate = (total_unack - self.last_unack)/float(t - self.last_t) 101 | 102 | events.extend([ 103 | self.createEvent('ok', 104 | 'Total unacknowledged rate: %0.2f' % urate, 105 | urate, prefix='total.unack_rate'), 106 | self.createEvent('ok', 107 | 'Total ready rate: %0.2f' % rrate, 108 | rrate, prefix='total.ready_rate'), 109 | self.createEvent('ok', 110 | 'Total unacknowledged messages: %s' % total_unack, 111 | total_unack, prefix='total.unack'), 112 | self.createEvent('ok', 113 | 'Total ready messages: %s' % total_ready, 114 | total_ready, prefix='total.ready') 115 | ]) 116 | 117 | self.last_ready = total_ready 118 | self.last_unack = total_unack 119 | 120 | self.last_t = t 121 | 122 | defer.returnValue(events) 123 | else: 124 | log.msg('Error running rabbitmqctl: ' + repr(err)) 125 | -------------------------------------------------------------------------------- /tensor/sources/redis.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from zope.interface import implementer 4 | 5 | from twisted.internet import defer 6 | from twisted.python import log 7 | 8 | from tensor.interfaces import ITensorSource 9 | from tensor.objects import Source 10 | from tensor.aggregators import Counter 11 | 12 | 13 | @implementer(ITensorSource) 14 | class Queues(Source): 15 | """Query llen from redis-cli 16 | 17 | **Configuration arguments:** 18 | 19 | :param queue: Queue name (defaults to 'celery', just because) 20 | :type queue: str. 21 | :param db: DB number 22 | :type db: int. 23 | :param clipath: Path to redis-cli (default: /usr/bin/redis-cli) 24 | :type clipath: str. 
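    For example, a hypothetical ``tensor.yml`` source entry (the values
    shown are the source defaults)::

        service: redis
        source: tensor.sources.redis.Queues
        interval: 10.0
        queue: celery
        db: 0
        clipath: /usr/bin/redis-cli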
25 | 26 | **Metrics:** 27 | 28 | :(service_name): Queue length 29 | :(service_name): Queue rate 30 | """ 31 | ssh = True 32 | 33 | def __init__(self, *a, **kw): 34 | Source.__init__(self, *a, **kw) 35 | 36 | self.queue = self.config.get('queue', 'celery') 37 | self.db = int(self.config.get('db', 0)) 38 | 39 | self.clipath = self.config.get('clipath', '/usr/bin/redis-cli') 40 | 41 | @defer.inlineCallbacks 42 | def get(self): 43 | 44 | out, err, code = yield self.fork(self.clipath, 45 | args=('-n', str(self.db), 'llen', self.queue,) 46 | ) 47 | 48 | events = [] 49 | if code == 0: 50 | val = int(out.strip('\n').split()[-1]) 51 | 52 | defer.returnValue([ 53 | self.createEvent('ok', '%s queue length' % self.queue, val), 54 | 55 | self.createEvent('ok', 'Queue rate', val, prefix='rate', 56 | aggregation=Counter) 57 | ]) 58 | 59 | else: 60 | err = 'Error running %s: %s' % (self.clipath, repr(err)) 61 | log.msg(err) 62 | defer.returnValue( 63 | self.createEvent('critical', err, None) 64 | ) 65 | -------------------------------------------------------------------------------- /tensor/sources/riak.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: riak 3 | :platform: Any 4 | :synopsis: A source module for Riak metrics 5 | 6 | .. moduleauthor:: Jeremy Thurgood 7 | """ 8 | 9 | import json 10 | 11 | from twisted.internet import defer, reactor 12 | from twisted.web.client import Agent 13 | from twisted.web.http_headers import Headers 14 | 15 | from zope.interface import implementer 16 | 17 | from tensor.interfaces import ITensorSource 18 | from tensor.objects import Source 19 | 20 | from tensor.utils import BodyReceiver 21 | 22 | 23 | @implementer(ITensorSource) 24 | class RiakStats(Source): 25 | """Returns GET/PUT rates for a Riak node 26 | 27 | **Configuration arguments:** 28 | 29 | :param url: Riak stats URL 30 | :type url: str. 31 | :param useragent: User-Agent header to use 32 | :type useragent: str. 33 | 34 | **Metrics:** 35 | 36 | :(service name).latency: Time to complete request 37 | """ 38 | 39 | @defer.inlineCallbacks 40 | def _get_stats_from_node(self): 41 | agent = Agent(reactor) 42 | 43 | url = self.config.get('url', 'http://%s:8098/stats' % self.hostname) 44 | ua = self.config.get('useragent', 'Tensor Riak stats checker') 45 | 46 | headers = Headers({'User-Agent': [ua]}) 47 | request = yield agent.request('GET'.encode(), url.encode(), headers) 48 | 49 | if (request.length) and (request.code == 200): 50 | d = defer.Deferred() 51 | request.deliverBody(BodyReceiver(d)) 52 | b = yield d 53 | body = b.read() 54 | else: 55 | body = "{}" 56 | 57 | defer.returnValue(json.loads(body)) 58 | 59 | @defer.inlineCallbacks 60 | def get(self): 61 | stats = yield self._get_stats_from_node() 62 | get_rate = stats['node_gets'] / 60.0 63 | put_rate = stats['node_puts'] / 60.0 64 | 65 | defer.returnValue([ 66 | self.createEvent( 67 | 'ok', 'GETs per second for past minute', get_rate, 68 | prefix="gets_per_second"), 69 | self.createEvent( 70 | 'ok', 'PUTs per second for past minute', put_rate, 71 | prefix="puts_per_second"), 72 | ]) 73 | -------------------------------------------------------------------------------- /tensor/sources/riemann.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: riemann 3 | :platform: Unix 4 | :synopsis: A source module which provides Riemann servers 5 | 6 | .. 
moduleauthor:: Colin Alston 7 | """ 8 | 9 | import time 10 | 11 | from twisted.internet import defer, reactor 12 | from twisted.names import client 13 | from twisted.internet.protocol import Factory 14 | 15 | from zope.interface import implementer 16 | 17 | from tensor.interfaces import ITensorSource 18 | from tensor.objects import Source, Event 19 | from tensor import utils 20 | 21 | from tensor.protocol import riemann 22 | 23 | 24 | class RiemannTCPServer(riemann.RiemannProtocol): 25 | """ 26 | Server implementation of the Riemann protocol 27 | """ 28 | def __init__(self, source): 29 | riemann.RiemannProtocol.__init__(self) 30 | self.source = source 31 | 32 | def stringReceived(self, string): 33 | message = self.decodeMessage(string) 34 | 35 | for event in message.events: 36 | self.source.queueBack( 37 | Event( 38 | event.state, 39 | event.service, 40 | event.description, 41 | event.metric_f, 42 | event.ttl, 43 | hostname=event.host, 44 | evtime=event.time 45 | ) 46 | ) 47 | 48 | class RiemannTCPFactory(Factory): 49 | def __init__(self, source): 50 | self.source = source 51 | 52 | def buildProtocol(self, addr): 53 | return RiemannTCPServer(self.source) 54 | 55 | @implementer(ITensorSource) 56 | class RiemannTCP(Source): 57 | """Provides a listening server which accepts Riemann metrics 58 | and proxies them to our queue. 59 | 60 | **Configuration arguments:** 61 | 62 | :param port: Port to listen on (default 5555) 63 | :type port: int. 64 | 65 | """ 66 | def startTimer(self): 67 | """Creates a Riemann TCP server instead of a timer 68 | """ 69 | reactor.listenTCP(int(self.config.get('port', 5555)), 70 | RiemannTCPFactory(self)) 71 | 72 | def get(self): 73 | pass 74 | -------------------------------------------------------------------------------- /tensor/sources/sflow.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: sflow 3 | :platform: Unix 4 | :synopsis: A source module which provides an sflow collector 5 | 6 | .. moduleauthor:: Colin Alston 7 | """ 8 | 9 | import time 10 | 11 | from twisted.internet import defer, reactor 12 | from twisted.names import client 13 | 14 | from zope.interface import implementer 15 | 16 | from tensor.interfaces import ITensorSource 17 | from tensor.objects import Source 18 | from tensor import utils 19 | 20 | from tensor.protocol.sflow import server 21 | from tensor.protocol.sflow.protocol import flows, counters 22 | 23 | 24 | class sFlowReceiver(server.DatagramReceiver): 25 | """sFlow datagram protocol 26 | """ 27 | def __init__(self, source): 28 | self.source = source 29 | self.lookup = source.config.get('dnslookup', True) 30 | self.counterCache = {} 31 | self.convoQueue = {} 32 | 33 | self.resolver = utils.Resolver() 34 | 35 | def process_convo_queue(self, queue, host, idx, deltaIn, tDelta): 36 | cn_bytes = sum(map(lambda i: i[4], queue)) 37 | 38 | addr = {'dst':{}, 'src': {}} 39 | port = {'dst':{}, 'src': {}} 40 | 41 | btotal = 0 42 | 43 | # Try and aggregate chunks of flow information into something that 44 | # is actually useful in Riemann and InfluxDB. 
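# Each queued conversation is a (src, sport, dst, dport, bytes) tuple.
# Below, a conversation's share of the sampled bytes (bytes / btotal)
# is scaled by the interface octet delta (deltaIn) over the counter
# interval (tDelta) to estimate per-address and per-port octet rates.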
45 | for convo in queue: 46 | src, sport, dst, dport, bytes = convo 47 | 48 | if not src in addr['src']: 49 | addr['src'][src] = 0 50 | 51 | if not dst in addr['dst']: 52 | addr['dst'][dst] = 0 53 | 54 | btotal += bytes 55 | addr['src'][src] += bytes 56 | addr['dst'][dst] += bytes 57 | 58 | if not sport in port['src']: 59 | port['src'][sport] = 0 60 | 61 | if not dport in port['dst']: 62 | port['dst'][dport] = 0 63 | 64 | port['src'][sport] += bytes 65 | port['dst'][dport] += bytes 66 | 67 | for direction, v in addr.items(): 68 | for ip, bytes in v.items(): 69 | m = ((bytes/float(btotal)) * deltaIn)/tDelta 70 | 71 | self.source.queueBack( 72 | self.source.createEvent('ok', 73 | 'sFlow if:%s addr:%s inOctets/sec %0.2f' % ( 74 | idx, ip, m), 75 | m, 76 | prefix='%s.ip.%s.%s' % (idx, ip, direction), 77 | hostname=host 78 | ) 79 | ) 80 | 81 | for direction, v in port.items(): 82 | for port, bytes in v.items(): 83 | m = ((bytes/float(btotal)) * deltaIn)/tDelta 84 | 85 | if port: 86 | self.source.queueBack( 87 | self.source.createEvent('ok', 88 | 'sFlow if:%s port:%s inOctets/sec %0.2f' % ( 89 | idx, port, m), 90 | m, 91 | prefix='%s.port.%s.%s' % (idx, port, direction), 92 | hostname=host 93 | ) 94 | ) 95 | 96 | def receive_flow(self, flow, sample, host): 97 | def queueFlow(host): 98 | if isinstance(sample, flows.IPv4Header): 99 | if sample.ip.proto in ('TCP', 'UDP'): 100 | sport, dport = (sample.ip_sport, sample.ip_dport) 101 | else: 102 | sport, dport = (None, None) 103 | 104 | src, dst = (sample.ip.src.asString(), sample.ip.dst.asString()) 105 | bytes = sample.ip.total_length 106 | 107 | if not host in self.convoQueue: 108 | self.convoQueue[host] = {} 109 | 110 | if not flow.if_inIndex in self.convoQueue[host]: 111 | self.convoQueue[host][flow.if_inIndex] = [] 112 | 113 | self.convoQueue[host][flow.if_inIndex].append( 114 | (src, sport, dst, dport, bytes)) 115 | 116 | if self.lookup: 117 | return self.resolver.reverse(host).addCallback( 118 | queueFlow).addErrback(queueFlow) 119 | else: 120 | return queueFlow(host) 121 | 122 | def receive_counter(self, counter, host): 123 | def _hostcb(host): 124 | idx = counter.if_index 125 | 126 | if not host in self.convoQueue: 127 | self.convoQueue[host] = {} 128 | 129 | if not host in self.counterCache: 130 | self.counterCache[host] = {} 131 | 132 | if idx in self.counterCache[host]: 133 | lastIn, lastOut, lastT = self.counterCache[host][idx] 134 | tDelta = time.time() - lastT 135 | 136 | self.counterCache[host][idx] = ( 137 | counter.if_inOctets, counter.if_outOctets, time.time()) 138 | 139 | deltaOut = counter.if_outOctets - lastOut 140 | deltaIn = counter.if_inOctets - lastIn 141 | 142 | inRate = deltaIn / tDelta 143 | outRate = deltaOut / tDelta 144 | 145 | # Grab the queue for this interface 146 | if idx in self.convoQueue[host]: 147 | queue = self.convoQueue[host][idx] 148 | self.convoQueue[host][idx] = [] 149 | self.process_convo_queue(queue, host, idx, deltaIn, tDelta) 150 | 151 | self.source.queueBack([ 152 | self.source.createEvent('ok', 153 | 'sFlow index %s inOctets/sec %0.2f' % (idx, inRate), 154 | inRate, 155 | prefix='%s.inOctets' % idx, hostname=host), 156 | 157 | self.source.createEvent('ok', 158 | 'sFlow index %s outOctets/sec %0.2f' % (idx, outRate), 159 | outRate, 160 | prefix='%s.outOctets' % idx, hostname=host), 161 | ]) 162 | 163 | else: 164 | self.counterCache[host][idx] = ( 165 | counter.if_inOctets, counter.if_outOctets, time.time()) 166 | 167 | if self.lookup: 168 | return self.resolver.reverse(host).addCallback( 169 | 
_hostcb).addErrback(_hostcb) 170 | else: 171 | return _hostcb(host) 172 | 173 | @implementer(ITensorSource) 174 | class sFlow(Source): 175 | """Provides an sFlow server Source 176 | 177 | **Configuration arguments:** 178 | 179 | :param port: UDP port to listen on 180 | :type port: int. 181 | :param dnslookup: Enable reverse DNS lookup for device IPs (default: True) 182 | :type dnslookup: bool. 183 | 184 | **Metrics:** 185 | 186 | Metrics are published using the key patterns 187 | (device).(service name).(interface).(in|out)Octets 188 | (device).(service name).(interface).ip 189 | (device).(service name).(interface).port 190 | """ 191 | 192 | def get(self): 193 | pass 194 | 195 | def startTimer(self): 196 | """Creates an sFlow datagram server 197 | """ 198 | reactor.listenUDP(int(self.config.get('port', 6343)), sFlowReceiver(self)) 199 | -------------------------------------------------------------------------------- /tensor/sources/snmp.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: snmp 3 | :platform: Unix 4 | :synopsis: A source module for polling SNMP. Requires PySNMP4 5 | 6 | .. moduleauthor:: Colin Alston 7 | """ 8 | 9 | import time 10 | 11 | from twisted.internet import reactor, defer 12 | 13 | from zope.interface import implementer 14 | 15 | from tensor.interfaces import ITensorSource 16 | from tensor.objects import Source 17 | from tensor import aggregators 18 | 19 | from pysnmp.entity import engine, config 20 | from pysnmp.entity.rfc3413.twisted import cmdgen 21 | from pysnmp.carrier.twisted import dispatch 22 | from pysnmp.carrier.twisted.dgram import udp 23 | from pysnmp.proto import rfc1905, rfc1902 24 | 25 | 26 | class SNMPConnection(object): 27 | """A wrapper class for PySNMP functions 28 | 29 | :param host: SNMP agent host 30 | :type host: str. 31 | :param port: SNMP port 32 | :type port: int. 33 | :param community: SNMP read community 34 | :type community: str. 35 | 36 | (This is not a source and you shouldn't try to use it as one) 37 | """ 38 | 39 | def __init__(self, host, port, community): 40 | self.snmp = engine.SnmpEngine() 41 | self.snmp.registerTransportDispatcher(dispatch.TwistedDispatcher()) 42 | 43 | config.addV1System(self.snmp, 'my-area', community) 44 | config.addTargetParams(self.snmp, 45 | 'my-creds', 'my-area', 'noAuthNoPriv', 0) 46 | config.addSocketTransport(self.snmp, 47 | udp.domainName, udp.UdpTwistedTransport().openClientMode() 48 | ) 49 | config.addTargetAddr(self.snmp, 'my-router', udp.domainName, 50 | (host, port), 'my-creds') 51 | 52 | def _walk(self, soid): 53 | # Error/response receiver 54 | result = [] 55 | def cbFun(cbCtx, result, d): 56 | (errorIndication, errorStatus, errorIndex, varBindTable) = cbCtx 57 | if errorIndication: 58 | print(errorIndication) 59 | elif errorStatus and errorStatus != 2: 60 | print('%s at %s' % ( 61 | errorStatus.prettyPrint(), 62 | errorIndex and varBindTable[-1][int(errorIndex)-1][0] or '?' 
63 | ) 64 | ) 65 | else: 66 | for varBindRow in varBindTable: 67 | for oid, val in varBindRow: 68 | if str(oid).startswith(soid): 69 | result.append((oid, val)) 70 | 71 | for oid, val in varBindRow: 72 | if not str(oid).startswith(soid): 73 | d.callback(result) 74 | return 75 | if not val.isSameTypeWith(rfc1905.endOfMibView): 76 | break 77 | else: 78 | d.callback(result) 79 | return 80 | 81 | df = defer.Deferred() 82 | df.addCallback(cbFun, result, d) 83 | return df 84 | 85 | d.callback(result) 86 | 87 | # Prepare request to be sent yielding Twisted deferred object 88 | df = cmdgen.NextCommandGenerator().sendReq(self.snmp, 89 | 'my-router', ((soid, None),)) 90 | 91 | d = defer.Deferred() 92 | df.addCallback(cbFun, result, d) 93 | return d 94 | 95 | def walk(self, oid): 96 | return self._walk(oid) 97 | 98 | @implementer(ITensorSource) 99 | class SNMP(Source): 100 | """Connects to an SNMP agent and retrieves OIDs 101 | 102 | **Configuration arguments:** 103 | 104 | :param ip: SNMP agent host (default: 127.0.0.1) 105 | :type ip: str. 106 | :param port: SNMP port (default: 161) 107 | :type port: int. 108 | :param community: SNMP read community 109 | :type community: str. 110 | """ 111 | 112 | def __init__(self, *a, **kw): 113 | Source.__init__(self, *a, **kw) 114 | 115 | host = self.config.get('ip', '127.0.0.1') 116 | port = int(self.config.get('port', 161)) 117 | 118 | # Must add v3 support 119 | 120 | community = self.config.get('community', None) 121 | self.snmp = SNMPConnection(host, port, community) 122 | 123 | def getCounter(self, soid): 124 | return self.snmp.walk(soid) 125 | 126 | @defer.inlineCallbacks 127 | def getIfMetrics(self): 128 | ifaces = yield self.snmp.walk('1.3.6.1.2.1.2.2.1.2') 129 | 130 | table = [ 131 | ('1.3.6.1.2.1.2.2.1.10', 'ifInOctets'), 132 | ('1.3.6.1.2.1.2.2.1.11', 'ifInUcastPkts'), 133 | ('1.3.6.1.2.1.2.2.1.12', 'ifInNUcastPkts'), 134 | ('1.3.6.1.2.1.2.2.1.14', 'ifInErrors'), 135 | ('1.3.6.1.2.1.2.2.1.13', 'ifInDiscards'), 136 | ('1.3.6.1.2.1.2.2.1.16', 'ifOutOctets'), 137 | ('1.3.6.1.2.1.2.2.1.17', 'ifOutUcastPkts'), 138 | ('1.3.6.1.2.1.2.2.1.18', 'ifOutNUcastPkts'), 139 | ('1.3.6.1.2.1.2.2.1.20', 'ifOutErrors'), 140 | ('1.3.6.1.2.1.2.2.1.19', 'ifOutDiscards'), 141 | ] 142 | 143 | data = {} 144 | for oid, key in table: 145 | d = yield self.getCounter(oid) 146 | for i, v in enumerate(d): 147 | noid, val = v 148 | 149 | if val: 150 | iface = str(ifaces[i][1]) 151 | if not iface in data: 152 | data[iface] = {} 153 | data[iface][key] = val 154 | 155 | events = [] 156 | 157 | for iface, metrics in data.items(): 158 | for key, val in metrics.items(): 159 | aggr = None 160 | 161 | if isinstance(val, rfc1902.Counter32): 162 | aggr = aggregators.Counter32 163 | 164 | if isinstance(val, rfc1902.Counter64): 165 | aggr = aggregators.Counter64 166 | 167 | events.append( 168 | self.createEvent('ok', 169 | "SNMP interface %s %s=%0.2f" % (iface, key, int(val)), 170 | int(val), 171 | prefix="%s.%s" % (iface, key), aggregation=aggr)) 172 | 173 | defer.returnValue(events) 174 | 175 | @defer.inlineCallbacks 176 | def get(self): 177 | events = yield self.getIfMetrics() 178 | defer.returnValue(events) 179 | 180 | class SNMPCisco837(SNMP): 181 | """Connects to a Cisco 837 and makes metrics 182 | 183 | **Configuration arguments:** 184 | 185 | :param ip: SNMP agent host (default: 127.0.0.1) 186 | :type ip: str. 187 | :param port: SNMP port (default: 161) 188 | :type port: int. 189 | :param community: SNMP read community 190 | :type community: str. 
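**Example configuration** (an illustrative sketch: layout and key names follow the source docstrings above; the agent address and community value are hypothetical)::

    sources:
        - service: adsl
          source: tensor.sources.snmp.SNMPCisco837
          interval: 60.0
          ip: 192.168.1.1
          community: public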
191 | """ 192 | 193 | @defer.inlineCallbacks 194 | def get(self): 195 | 196 | events = yield self.getIfMetrics() 197 | 198 | sync_us = (yield self.snmp.walk('1.3.6.1.2.1.10.94.1.1.5'))[0][1] 199 | sync_ds = (yield self.snmp.walk('1.3.6.1.2.1.10.94.1.1.4'))[0][1] 200 | 201 | sync_us = int(sync_us) 202 | sync_ds = int(sync_ds) 203 | 204 | events.append( 205 | self.createEvent('ok', "SNMP ADSL sync downstream %s" % sync_ds, 206 | sync_ds, prefix="adsl.rxrate")) 207 | 208 | events.append( 209 | self.createEvent('ok', "SNMP ADSL sync upstream %s" % sync_us, 210 | sync_us, prefix="adsl.txrate")) 211 | 212 | link = yield self.snmp.walk('1.3.6.1.2.1.10.94.1.1.3') 213 | link = dict([(str(i), j) for i, j in link]) 214 | 215 | output = int(link['1.3.6.1.2.1.10.94.1.1.3.1.7.15'])/10.0 216 | attn = int(link['1.3.6.1.2.1.10.94.1.1.3.1.5.15'])/10.0 217 | margin = int(link['1.3.6.1.2.1.10.94.1.1.3.1.4.15'])/10.0 218 | 219 | events.append( 220 | self.createEvent('ok', "SNMP ADSL output power %0.2fdBm" % output, 221 | output, prefix="adsl.outpwr")) 222 | 223 | events.append( 224 | self.createEvent('ok', "SNMP ADSL attenuation %0.2fdB" % attn, 225 | attn, prefix="adsl.attn")) 226 | 227 | events.append( 228 | self.createEvent('ok', "SNMP ADSL noise margin %0.2fdB" % margin, 229 | margin, prefix="adsl.margin")) 230 | 231 | defer.returnValue(events) 232 | -------------------------------------------------------------------------------- /tensor/sources/unbound.py: -------------------------------------------------------------------------------- 1 | from zope.interface import implementer 2 | 3 | from twisted.internet import defer 4 | from twisted.python import log 5 | 6 | from tensor.interfaces import ITensorSource 7 | from tensor.objects import Source 8 | 9 | 10 | @implementer(ITensorSource) 11 | class Stats(Source): 12 | """Returns stats from unbound-control 13 | 14 | **Configuration arguments:** 15 | 16 | :param executable: Path to unbound-control executable 17 | (default: /usr/sbin/unbound-control) 18 | :type executable: str. 
19 | 20 | """ 21 | ssh = True 22 | 23 | def __init__(self, *a, **kw): 24 | Source.__init__(self, *a, **kw) 25 | 26 | self.uc = self.config.get('executable', '/usr/sbin/unbound-control') 27 | 28 | @defer.inlineCallbacks 29 | def _get_uc_stats(self): 30 | out, err, code = yield self.fork(self.uc, args=('stats', )) 31 | 32 | if code == 0: 33 | defer.returnValue(out.strip('\n').split('\n')) 34 | else: 35 | log.msg('Error running unbound-control: ' + repr(err)) 36 | defer.returnValue([]) 37 | 38 | @defer.inlineCallbacks 39 | def get(self): 40 | events = [] 41 | 42 | stats = yield self._get_uc_stats() 43 | 44 | for row in stats: 45 | key, val = row.split('=') 46 | 47 | try: 48 | val = float(val) 49 | except: 50 | # Not a number 51 | continue 52 | 53 | events.append(self.createEvent('ok', key, val, prefix=key)) 54 | 55 | defer.returnValue(events) 56 | -------------------------------------------------------------------------------- /tensor/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/calston/tensor/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/tests/__init__.py -------------------------------------------------------------------------------- /tensor/tests/test_logs.py: -------------------------------------------------------------------------------- 1 | from twisted.trial import unittest 2 | 3 | from tensor.logs import follower, parsers 4 | 5 | import datetime 6 | import os 7 | 8 | 9 | class TestLogs(unittest.TestCase): 10 | 11 | def test_logfollow(self): 12 | try: 13 | os.unlink('test.log.lf') 14 | os.unlink('test.log') 15 | except: 16 | pass 17 | 18 | log = open('test.log', 'wt') 19 | log.write('foo\nbar\n') 20 | log.flush() 21 | 22 | f = follower.LogFollower('test.log', tmp_path=".", history=True) 23 | 24 | r = f.get() 25 | 26 | log.write('test') 27 | log.flush() 28 | 29 | r2 = f.get() 30 | 31 | log.write('ing\n') 32 | log.flush() 33 | 34 | r3 = f.get() 35 | 36 | self.assertEqual(r[0], 'foo') 37 | self.assertEqual(r[1], 'bar') 38 | 39 | self.assertEqual(r2, []) 40 | self.assertEqual(r3[0], 'testing') 41 | 42 | log.close() 43 | 44 | # Move inode 45 | os.rename('test.log', 'testold.log') 46 | 47 | log = open('test.log', 'wt') 48 | log.write('foo2\nbar2\n') 49 | log.close() 50 | 51 | r = f.get() 52 | 53 | self.assertEqual(r[0], 'foo2') 54 | self.assertEqual(r[1], 'bar2') 55 | 56 | # Go backwards 57 | log = open('test.log', 'wt') 58 | log.write('foo3\n') 59 | log.close() 60 | 61 | r = f.get() 62 | 63 | self.assertEqual(r[0], 'foo3') 64 | 65 | os.unlink('test.log') 66 | os.unlink('testold.log') 67 | 68 | def test_apache_parser(self): 69 | log = parsers.ApacheLogParser('combined') 70 | 71 | line = '192.168.0.102 - - [16/Jan/2015:11:11:45 +0200] "GET / HTTP/1.1" 200 709 "-" "My browser"' 72 | 73 | want = { 74 | 'status': 200, 75 | 'request': 'GET / HTTP/1.1', 76 | 'bytes': 709, 77 | 'user-agent': 'My browser', 78 | 'client': '192.168.0.102', 79 | 'time': datetime.datetime(2015, 1, 16, 11, 11, 45), 80 | 'referer': None, 81 | 'logname': None, 82 | 'user': None, 83 | } 84 | 85 | p = log.parse(line) 86 | 87 | for k,v in want.items(): 88 | self.assertEquals(p[k], v) 89 | -------------------------------------------------------------------------------- /tensor/tests/test_sflow.py: -------------------------------------------------------------------------------- 1 | from twisted.trial import unittest 2 | from twisted.internet import defer 3 | 4 | from tensor.protocol.sflow import protocol 5 | 6 | 7 | # Long strings are the 
devils play things 8 | testPacket = b'\x00\x00\x00\x05\x00\x00\x00\x01\xac\x1e\x00\x05\x00\x00\x00\x00\x00\x00\x02L\x01?lD\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\xc4\x00\x00\x01\xff\x00\x00\x00\x10\x00\x00\x00\x80\x00(\x1d6\x00\x00\x03\x9a\x00\x00\x00\x03\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x84\x00\x00\x00\x01\x00\x00\x00u\x00\x00\x00\x04\x00\x00\x00t\xe4\xce\x8f.\x8a\xd4\xd4\xcam\x97r*\x08\x00Ex\x00cM-\x00\x007\x11kh)\x86\xf4\x89\xac\x1e\x00O\x05\xd8\xe3\xeb\x00O\xe3\x9d\xd8\xd0m\x15B\x81;\xda\x98\xa7\xd0\x00\t\xc7\xbe>K\x9b\xd5\xd3n\x15\xb9U\x1c0\x1e]+i2\xc9\xb5N4\xca"G\x1f\xfe\x86H\x9b\xc0\xa0\xb9\x9d\x97\xb9yHMIzI\xf9\xc8\xcd\x88\xadgtx\x85\x90\xf1\xfa_\x98-\x1a\x00\x00\x00\x00\x00\x03\xe9\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x94\x00\x00\x01\xd4\x00\x00\x00\n\x00\x00\x00\x80\x00fH)\x00\x00\x04\x0b\x00\x00\x00\x03\x00\x00\x00\n\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00T\x00\x00\x00\x01\x00\x00\x00F\x00\x00\x00\x04\x00\x00\x00D(77\x1b\x02\xf4\xd4\xcam\x97r*\x08\x00E\x80\x004\xb6\x13@\x00&\x06\xdf\xa22\x11\xe0\x11\xac\x1e\x00M\x01\xbb\xee\xae\x953w\x93"j\xbb\x1d\x80\x10\x00S\x0f\xb6\x00\x00\x01\x01\x08\n\x83\xe4\x92\x18ASv\x1d\x00\x00\x00\x00\x03\xe9\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x94\x00\x00\x01\xd5\x00\x00\x00\n\x00\x00\x00\x80\x00fH)\x00\x00\x04\x10\x00\x00\x00\n\x00\x00\x00\x03\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00T\x00\x00\x00\x01\x00\x00\x00F\x00\x00\x00\x04\x00\x00\x00D\xd4\xcam\x97r*(\xcf\xe9Z\xd8a\x08\x00E\x00\x004\x13\x89@\x00@\x06~8\xac\x1e\x00ZRK\xaa?\xf1\xfe\xed\xe1"\xe8\x97\xd6c\xbfC\x0e\x80\x11 \x00N\x9c\x00\x00\x01\x01\x08\n;\x829Y\x00\x11\xa8\xc3\x00\x00\x00\x00\x03\xe9\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\xff\xff\xff\xff\x00\x00\x00\x02\x00\x00\x00\xa8\x00\x00\x00!\x00\x00\x00\x0c\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00X\x00\x00\x00\x0c\x00\x00\x00\x06\x00\x00\x00\x00\x00\x98\x96\x80\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x004\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x94\x00\x00\x02\x00\x00\x00\x00\x10\x00\x00\x00\x80\x00(\x1e\x1a\x00\x00\x03\x9b\x00\x00\x00\x10\x00\x00\x00\x03\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00T\x00\x00\x00\x01\x00\x00\x00F\x00\x00\x00\x04\x00\x00\x00D\xd4\xcam\x97r*\\\n[\xe0\xe4\x14\x08\x00E\x00\x004\x04\r@\x00@\x06\x11\xfa\xac\x1e\x00\xae\x17C`\xae\x8f\xe0\x01\xbb\xac ;\xa8\x10\xc0\xab\xd7\x80\x10/\xea\x0e\'\x00\x00\x01\x01\x08\n\x00JuM\xe0\xd3\x87\x87\x00\x00\x00\x00\x03\xe9\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\xff\xff\xff\xff' 9 | 10 | class Test(unittest.TestCase): 11 | def test_decode(self): 12 | 13 | proto = protocol.Sflow(testPacket, '172.30.0.5') 14 | 15 | self.assertTrue(proto.version == 5) 16 | 17 | self.assertTrue(len(proto.samples) == 5) 18 | -------------------------------------------------------------------------------- /tensor/tests/test_ssh.py: -------------------------------------------------------------------------------- 1 | 
from twisted.trial import unittest 2 | 3 | from tensor.sources.linux import basic 4 | 5 | testKey = """-----BEGIN RSA PRIVATE KEY----- 6 | Proc-Type: 4,ENCRYPTED 7 | DEK-Info: AES-128-CBC,A6588464A721D661311DBCE44C76337E 8 | 9 | /bqr0AEIbiWubFiPEcdlNw8WdDrFqELOCZo78ohtDX/2HJhkMCCtAuv46is5UCvj 10 | pweYupJQgZZ9g+6rKLhTo6d0VYwaSOuR6OJWEecIr7quyQBgCPOvun2fVGrx6/7U 11 | D9HiXBdBDVc4vcEUviZu5V+E9xLmP9GteD1OR7TfR1AqFMPzHVvDE9UxrzEacFY4 12 | KPs7KP6x+8so5KvZSJKisczc+JBt+PlZisDwX9BCHJNmAYYFRm2umY7sCmLNmeoc 13 | Y95E6Tmpze4J1559mLM7nuzOpnnFEii4pA5H7unMUCa9AwkLLYLOV7N8iRETgG0R 14 | snvH5uiVRqEB84ypItCZF+Nk5Y0/WPSWPDq/bhwyQeodEPjlIfiHKzDf9GuuT9El 15 | Q4dGxA0mLOKMqPDJGGc7mwTTN5iczj94gsLTfI1me1qzTzxdko/BMqsmPSUbkNXS 16 | wgkofT+48L00HL9zq0quHkgjoTe1Wud8tI4mG0Tl9BTFE9PUtlfdJNoEQ1dk9RcR 17 | UkhjMbuN3h8r9w9EVugAvbp/c7SQILXEJ6QZK2NMzO01SA5Tv7hmDh1J0lcIF1zb 18 | VI+rlxly/riDN6U9w35vOZEzKl3qYbAXrnRteo7MEYvc/BahvxBP0ZEGRXeoNfAj 19 | JLvBrkhBUVy1cH5fGs2SYIwUEKBx5nLL5NeNI1ymRKbsyJ3oTKZU+PQhfarEJD2r 20 | u/dZoDb/AEjxCkaM1EaDG590Bjc6ZxC1ZkF6gSK27iJRP5CCj5XoD7kIpmZFE+gc 21 | KpVNHHe6ef2ptOngkEDUyTmZ7z18lVCeC4sBPzrLPDnWB+cie+19/cJDJpRz0n0j 22 | qMkh7MY+FQ8t0AopFAy7r50nV5FlGt9rG7YaWO8j5Lv3TsPPDOxFk5IoB6AtRpr8 23 | tSQCCyCcdHkD3M1wI/PD9bEjuELaDG8PaVzOuj5rVyh+saZQeD9GmegsuBkDGb4g 24 | COtzWOQ1H0ii478rbQAxwsOEMdR5lxEFOo8mC0p4mnWJti2DzJQorQC/fjbRRv7z 25 | vfJamXvfEuHj3NPP9cumrskBtD+kRz/c2zgVJ8vwRgNPazdfJqGYjmFB0loVVyuu 26 | x+hBHOD5zyMPFrJW9MNDTiTEaQREaje5tUOfNoA1Wa4s2bVLnhHCXdMSWmiDmJQp 27 | HEYAIZI2OJhMe8V431t6dBx+nutApzParWqET5D0DWvlurDWFrHMnazh164RqsGu 28 | E4Dg6ZsRnI+PEJmroia6gYEscUfd5QSUebxIeLhNzo1Kf5JRBW93NNxhAzn9ZJ9O 29 | 2bjvkHOJlADnfON5TsPgroXX95P/9V8DWUT+/ske1Fw43V1pIT+PtraYqrlyvow+ 30 | uJMA2q9sRLzXnFb2vg7JdD1XA4f2eUBwzbtq8wSuQexSErWaTx5uAERDnGAWyaN2 31 | 3xCYl8CTfF70xN7j39hG/pI0ghRLGVBmCJ5NRzNZ80SPBE/nzYy/X6pGV+vsjPoZ 32 | S3dBmvlBV/HzB4ljsO2pI/VjCJVNZdOWDzy18GQ/jt8/xH8R9Ld6/6tuS0HbiefS 33 | ZefHS5wV1KNZBK+vh08HvX/AY9WBHPH+DEbrpymn/9oAKVmhH+f73ADqVOanMPk0 34 | -----END RSA PRIVATE KEY----- 35 | """ 36 | 37 | testKeyPassword = "testtest" 38 | 39 | class FakeTensor(object): 40 | config = { 41 | 'ssh_username': 'test', 42 | 'ssh_key': testKey, 43 | 'ssh_keypass': testKeyPassword, 44 | } 45 | hostConnectorCache = {} 46 | 47 | class Tests(unittest.TestCase): 48 | def _qb(self, result): 49 | pass 50 | 51 | def test_ssh_source_setup(self): 52 | s = basic.LoadAverage({ 53 | 'interval': 1.0, 54 | 'service': 'mem', 55 | 'ttl': 60, 56 | 'use_ssh': True, 57 | }, self._qb, FakeTensor()) 58 | -------------------------------------------------------------------------------- /tensor/tests/test_tensor.py: -------------------------------------------------------------------------------- 1 | from twisted.trial import unittest 2 | 3 | from twisted.internet import defer, reactor, error 4 | from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol 5 | 6 | from tensor.protocol import riemann 7 | 8 | from tensor.objects import Event 9 | 10 | from tensor.utils import fork 11 | 12 | class Tests(unittest.TestCase): 13 | def setUp(self): 14 | self.endpoint = None 15 | 16 | def tearDown(self): 17 | if self.endpoint: 18 | return defer.maybeDeferred(self.endpoint.stopListening) 19 | 20 | def test_riemann_protobuf(self): 21 | proto = riemann.RiemannProtocol() 22 | 23 | event = Event('ok', 'sky', 'Sky has not fallen', 1.0, 60.0) 24 | 25 | # Well, I guess we'll just assume this is right 26 | message = proto.encodeMessage([event]) 27 | 28 | def test_riemann_protobuf_with_attributes(self): 29 | proto = 
riemann.RiemannProtocol() 30 | 31 | event = Event('ok', 'sky', 'Sky has not fallen', 1.0, 60.0, 32 | attributes={"chicken": "little"}) 33 | 34 | e = proto.encodeEvent(event) 35 | attrs = e.attributes 36 | self.assertEqual(len(attrs), 1) 37 | self.assertEqual(attrs[0].key, "chicken") 38 | self.assertEqual(attrs[0].value, "little") 39 | 40 | @defer.inlineCallbacks 41 | def test_tcp_riemann(self): 42 | 43 | event = Event('ok', 'sky', 'Sky has not fallen', 1.0, 60.0) 44 | 45 | end = TCP4ClientEndpoint(reactor, "localhost", 5555) 46 | 47 | p = yield connectProtocol(end, riemann.RiemannProtocol()) 48 | 49 | yield p.sendEvents([event]) 50 | 51 | p.transport.loseConnection() 52 | 53 | @defer.inlineCallbacks 54 | def test_udp_riemann(self): 55 | 56 | event = Event('ok', 'sky', 'Sky has not fallen', 1.0, 60.0) 57 | 58 | protocol = riemann.RiemannUDP('127.0.0.1', 5555) 59 | self.endpoint = reactor.listenUDP(0, protocol) 60 | 61 | yield protocol.sendEvents([event]) 62 | 63 | @defer.inlineCallbacks 64 | def test_utils_fork(self): 65 | o, e, c = yield fork('echo', args=('hi',)) 66 | 67 | self.assertEqual(o, "hi\n") 68 | self.assertEqual(c, 0) 69 | 70 | @defer.inlineCallbacks 71 | def test_utils_fork_timeout(self): 72 | died = False 73 | try: 74 | o, e, c = yield fork('sleep', args=('2',), timeout=0.1) 75 | except error.ProcessTerminated: 76 | died = True 77 | 78 | self.assertTrue(died) 79 | -------------------------------------------------------------------------------- /tensor/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from twisted.trial import unittest 2 | 3 | from twisted.internet import defer, reactor, error 4 | 5 | from tensor import utils 6 | 7 | class Tests(unittest.TestCase): 8 | def test_persistent_cache(self): 9 | pc = utils.PersistentCache(location='test.cache') 10 | 11 | pc.set('foo', 'bar') 12 | pc.set('bar', 'baz') 13 | 14 | pc2 = utils.PersistentCache(location='test.cache') 15 | 16 | self.assertEqual(pc2.get('foo')[1], 'bar') 17 | 18 | pc.set('foo', 'baz') 19 | 20 | self.assertEqual(pc2.get('foo')[1], 'baz') 21 | 22 | pc.delete('foo') 23 | 24 | self.assertFalse(pc.contains('foo')) 25 | self.assertTrue(pc.contains('bar')) 26 | 27 | pc.expire(0) 28 | 29 | self.assertFalse(pc.contains('bar')) 30 | 31 | -------------------------------------------------------------------------------- /twisted/plugins/tensor_plugin.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | 3 | from zope.interface import implementer 4 | 5 | from twisted.python import usage 6 | from twisted.plugin import IPlugin 7 | from twisted.application.service import IServiceMaker 8 | 9 | import tensor 10 | 11 | 12 | class Options(usage.Options): 13 | optParameters = [ 14 | ["config", "c", "tensor.yml", "Config file"], 15 | ] 16 | 17 | 18 | @implementer(IServiceMaker, IPlugin) 19 | class TensorServiceMaker(object): 20 | tapname = "tensor" 21 | description = "A Riemann(.io) event thingy" 22 | options = Options 23 | 24 | def makeService(self, options): 25 | config = yaml.safe_load(open(options['config'])) 26 | return tensor.makeService(config) 27 | 28 | serviceMaker = TensorServiceMaker() 29 | --------------------------------------------------------------------------------
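# Usage sketch: with Tensor installed, twistd discovers the plugin above
# through the twisted/plugins dropin mechanism and exposes "tensor" as a
# subcommand, so the service can be run in the foreground with:
#
#   twistd -n tensor -c tensor.yml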