├── .gitignore ├── .travis.yml ├── LICENSE ├── MANIFEST.in ├── README.md ├── nixstats-example.ini ├── nixstatsagent ├── __init__.py ├── nixstatsagent.py └── plugins │ ├── __init__.py │ ├── apt-updates.py │ ├── asterisk.py │ ├── cloudlinux-dbgov.py │ ├── cloudlinux.py │ ├── cpanel.py │ ├── cpu.py │ ├── cpu_freq.py │ ├── dirsize.py │ ├── diskstatus-nvme.py │ ├── diskstatus.py │ ├── diskusage.py │ ├── docker.py │ ├── elasticsearch.py │ ├── exim.py │ ├── gpu.py │ ├── haproxy.py │ ├── httpd.py │ ├── iostat.py │ ├── janus.py │ ├── kamailio.py │ ├── litespeed.py │ ├── loadavg.py │ ├── mdstat.py │ ├── megacli.py │ ├── memcached.py │ ├── memory.py │ ├── minecraft.py │ ├── mongodb.py │ ├── mysql.py │ ├── network.py │ ├── nginx.py │ ├── openvpn.py │ ├── phpfpm.py │ ├── ping.py │ ├── plugins.py │ ├── powerdns.py │ ├── process.py │ ├── rabbitmq.py │ ├── redis_stat.py │ ├── sleeper.py │ ├── swap.py │ ├── system.py │ ├── temp.py │ ├── unbound.py │ └── vms.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | nixstats.ini 2 | *.pyc 3 | *~ 4 | *.bak 5 | build 6 | debian 7 | dist 8 | nixstatsagent.egg-info 9 | README.html 10 | .idea/ -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | dist: xenial 3 | python: 4 | - "2.7" 5 | - "3.5" 6 | - "3.6" 7 | matrix: 8 | include: 9 | - python: "2.6" 10 | before_install: pip install setuptools==36.2.0 11 | dist: trusty 12 | install: python setup.py install 13 | script: python setup.py sdist bdist_egg bdist_wheel 14 | deploy: 15 | provider: pypi 16 | user: nixstats 17 | skip_existing: true 18 | password: 19 | secure: 
JnBTnS4KSy+TkqQAMXXG8StwxCDwiJ6GcAYb7JtZhQ3NNPMCaUjJ3W3jL0VU3Yen3k6hp4js7kyz1mOO/iXCkPl4FNpNFPlP4LufEmlA8kMV4o9OwHsvPEthxnhddS+K77+5bfv+idi0IWq87TN6sCs/A/rv1IUiOagI1bbIcHqJ4RlKP+5LE+XgJlmVfip4W34RZjqB0rVBDouk3YBOnxuCHKLyo0h/7PUF6/QdMv8aGdUJeuRajzoymJi8ny1cqXd5j0J/XcIBho5AGSDegM7NhHnAlj+QVSWWadi1Gi3piwAfD2nKMx+QjRpdJnm3xjTiGGMbO0yqV/czrRU9RyI10zQZGfl0ALrIQcK+YVIjIdA+pj+BWuVdYN/EwE5vM19Yyx6b0B6yH/Gu/WYIE+nWmNOYGuMCLzybNTDw8BOLEyW7CKyguWMSk+u0bipMZupoVnYFrj1m0FnDbHeqpvKGi066sGnNY9E2Q8o50FRxh1QY1EXkYpE2cFsOMElUhSVIQc6Zlgv3GAWjW8c2fOsI1gKIpp6QavA2cKDCVqg6EuqrFmhTNflHlMbAcmMDICMbZjiFxaBSu0+sQxATEWCpxWF+J5K2Z7I+c6gaRbXe2UO49/UIj4lmv3aGH5v+zlJtwBumPMZj97WnbUMKBdArR8KPtCtThLzx+JRw6gc= 20 | on: 21 | tags: true 22 | distributions: sdist bdist_egg bdist_wheel 23 | repo: NIXStats/nixstatsagent 24 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD Simplified License 2 | 3 | Copyright (c) 2016, NIXStats 4 | 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are met: 9 | 10 | * Redistributions of source code must retain the above copyright notice, 11 | this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | * Neither the name of the copyright holder nor the names of its contributors 16 | may be used to endorse or promote products derived from this software 17 | without specific prior written permission. 
18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include nixstats-example.ini LICENSE README.md 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | NixStats Agent 2 | ============== 3 | 4 | NixStats.com is a web service of monitoring and displaying statistics of 5 | your server performance. 6 | 7 | This software is an OS-agnostic agent compatible with Python 2.4, 2.5, 2.6 and 2.7. 8 | It's been optimized to have a small CPU consumption and comes with an 9 | extendable set of useful plugins. 10 | 11 | [![Build Status](https://travis-ci.org/NIXStats/nixstatsagent.svg?branch=master)](https://travis-ci.org/NIXStats/nixstatsagent) 12 | 13 | Installation 14 | ------------ 15 | 16 | Depending on your platform, many installation options are possible. We 17 | are listing them more or less in the order from the most specific (and 18 | preferred) to the most generic ones. 
19 | 20 | ### Debian GNU/Linux 21 | 22 | Manual installation: 23 | ``` 24 | apt-get install python3-devel python3-setuptools python3-pip 25 | pip3 install nixstatsagent 26 | wget -O /etc/nixstats.ini https://www.nixstats.com/nixstats.ini 27 | ``` 28 | 29 | You can find your USERTOKEN on the settings page (https://nixstats.com/settings/overview). You need this to generate a serverid. 30 | 31 | ``` 32 | nixstatshello USERTOKEN /etc/nixstats-token.ini 33 | ``` 34 | 35 | Create a service for systemd at `/etc/systemd/system/nixstatsagent.service` 36 | ``` 37 | [Unit] 38 | Description=Nixstatsagent 39 | 40 | [Service] 41 | ExecStart=/usr/local/bin/nixstatsagent 42 | User=nixstats 43 | 44 | [Install] 45 | WantedBy=multi-user.target 46 | ``` 47 | Then run: 48 | ``` 49 | chmod 644 /etc/systemd/system/nixstatsagent.service 50 | systemctl daemon-reload 51 | systemctl enable nixstatsagent 52 | systemctl start nixstatsagent 53 | ``` 54 | 55 | ### Fedora / CentOS 56 | 57 | For version 6 or earlier (python 2.6, 2.7): 58 | ``` 59 | yum install python-devel python-setuptools gcc 60 | easy_install nixstatsagent netifaces psutil 61 | ``` 62 | 63 | For version 7 and later (python 3): 64 | ``` 65 | yum install python36-devel python36 gcc 66 | ``` 67 | 68 | ``` 69 | pip3 install nixstatsagent 70 | ``` 71 | 72 | ### Windows 73 | 74 | Download the [windows installer for nixstatsagent](https://nixstats.com/windows/nixstatsagent-setup.exe). When asked for the usertoken, provide the usertoken that is available on the [settings page](https://nixstats.com/settings) at Nixstats. 
75 | 76 | ### Python 2.4 or 2.5 environment 77 | 78 | As the source package is published on [PyPI](https://pypi.python.org/pypi), 79 | provided that you've obtained [setuptools](https://pypi.python.org/pypi/setuptools#installation-instructions), 80 | simply do: 81 | 82 | ``` 83 | easy_install nixstatsagent 84 | ``` 85 | -------------------------------------------------------------------------------- /nixstats-example.ini: -------------------------------------------------------------------------------- 1 | # [DEFAULT] ; Values here *override* the hardcoded defaults (listed) 2 | # max_data_span = 60 ; Collected data span threshold for sending, sec 3 | # max_data_age = 600 ; Collected data age threshold for sending, sec 4 | # logging_level = 30 ; (logging.WARNING) Logging level, defined in logging 5 | # threads = 100 ; Maximun amount of threads 6 | # interval = 60 ; Interval betveen iterations, sec 7 | # plugins = plugins ; Path to plugins 8 | # enabled = no ; Toggle plugin 9 | # subprocess = no ; Run plugin as a subprocess 10 | # ttl = 60 ; Time to live (for a subprocess), sec 11 | # user = '' ; API user name 12 | # server = '' ; API server name 13 | # 14 | # 15 | ###################### Default sections 16 | # [agent] ; Main thread 17 | # 18 | # [execution] ; Plugin execution threads 19 | # ttl = 15 ; Example: plugins will be killed after 15 sec 20 | # 21 | # [data] ; Collected data sening thread 22 | # interval = 600 ; Example: the data collected will be checked every minute for thresholds 23 | # 24 | # 25 | ##################### Plugin sections examples 26 | # [cpu] ; Example: Shortname for 'plugins/cpu.py' plugin 27 | # enabled = yes ; Example: cpu plugin enabled 28 | # ttl = 5 ; Example: cpu plugin time to live 29 | # 30 | # [sleeper] ; Example: Shortname for 'plugins/sleeper.py' plugin 31 | # enabled = yes ; Example: sleeper plugin disabled 32 | 33 | #[redis_stat] 34 | #enabled = yes 35 | #host = 127.0.0.1 36 | #port = 6379 37 | #db = 0 38 | #password = 
pass4redis 39 | 40 | #[rabbitmq] 41 | #enabled = yes 42 | #status_page_url = http://username:password@localhost:15672/api/overview 43 | -------------------------------------------------------------------------------- /nixstatsagent/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NIXStats/nixstatsagent/bfe01d6cf4e05c0674a3103fa26621b31cda48ef/nixstatsagent/__init__.py -------------------------------------------------------------------------------- /nixstatsagent/nixstatsagent.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8; tab-width: 4; indent-tabs: nil; -*- 3 | # by Al Nikolov 4 | from __future__ import print_function 5 | import bz2 6 | import sys 7 | if sys.version_info >= (3,): 8 | try: 9 | from past.builtins import basestring 10 | except ImportError: 11 | basestring = str 12 | import configparser 13 | import http.client 14 | from queue import Queue, Empty 15 | import io 16 | else: 17 | import ConfigParser 18 | import httplib 19 | import StringIO 20 | from Queue import Queue, Empty 21 | 22 | import glob 23 | import imp 24 | try: 25 | import json 26 | except ImportError: 27 | import simplejson as json 28 | import logging 29 | import os 30 | import pickle 31 | import signal 32 | import socket 33 | import subprocess 34 | import threading 35 | import time 36 | import types 37 | import urllib 38 | 39 | try: 40 | from urllib.parse import urlparse, urlencode 41 | from urllib.request import urlopen, Request 42 | from urllib.error import HTTPError 43 | except ImportError: 44 | from urlparse import urlparse 45 | from urllib import urlencode 46 | from urllib2 import urlopen, Request, HTTPError 47 | 48 | __version__ = '1.2.18' 49 | __FILEABSDIRNAME__ = os.path.dirname(os.path.abspath(__file__)) 50 | 51 | ini_files = ( 52 | os.path.join('/etc', 'nixstats.ini'), 53 | os.path.join('/etc', 'nixstats-token.ini'), 
def hello(proto='https'):
    '''
    Register this machine with the NixStats API and write the token file.

    argv[1]: USERTOKEN, or USERTOKEN_SERVERID to skip the API call
    argv[2]: optional token file path (default: nixstats-token.ini next to the package)
    argv[3]: optional unique id forwarded to the API
    '''
    user_id = sys.argv[1]
    agent = Agent(dry_instance=True)
    if len(sys.argv) > 2:
        token_filename = sys.argv[2]
    else:
        token_filename = os.path.join(__FILEABSDIRNAME__, 'nixstats-token.ini')
    if len(sys.argv) > 3:
        unique_id = sys.argv[3]
    else:
        unique_id = ''
    if '_' in user_id:
        # Token already carries the server id: "USER_SERVER"
        server_id = user_id.split('_')[1]
        user_id = user_id.split('_')[0]
    else:
        try:
            hostname = os.uname()[1]
        except AttributeError:
            # os.uname() is unavailable on Windows
            hostname = socket.getfqdn()
        response = urlopen(
            proto + '://' + agent.config.get('data', 'api_host') + '/hello.php',
            data=urlencode({
                'user': user_id,
                'hostname': hostname,
                'unique_id': unique_id
            }).encode("utf-8")
        )
        try:
            server_id = response.read().decode()
        finally:
            # Close explicitly: py2 urllib2 responses have no context manager
            response.close()
    if len(server_id) == 24:
        print('Got server_id: %s' % server_id)
        # with-statement guarantees the token file is flushed and closed
        # (the original used open(...).write(...) and leaked the handle)
        with open(token_filename, 'w') as token_file:
            token_file.write('[DEFAULT]\nuser=%s\nserver=%s\n' % (user_id, server_id))
    else:
        print('Could not retrieve server_id: %s' % server_id)
164 | if fp: 165 | fp.close() 166 | 167 | try: 168 | payload = module.Plugin().run(agent.config) 169 | print(json.dumps(payload, indent=4, sort_keys=True)) 170 | except Exception as e: 171 | print('Execution error:', e) 172 | 173 | 174 | class Agent: 175 | execute = Queue() 176 | metrics = Queue() 177 | data = Queue() 178 | cemetery = Queue() 179 | shutdown = False 180 | 181 | def __init__(self, dry_instance=False): 182 | ''' 183 | Initialize internal strictures 184 | ''' 185 | self._config_init() 186 | 187 | # Cache for plugins so they can store values related to previous checks 188 | self.plugins_cache = {} 189 | 190 | if dry_instance: 191 | return 192 | 193 | self._logging_init() 194 | self._plugins_init() 195 | self._data_worker_init() 196 | self._dump_config() 197 | 198 | def _config_init(self): 199 | ''' 200 | Initialize configuration object 201 | ''' 202 | defaults = { 203 | 'max_data_span': 60, 204 | 'max_data_age': 60 * 10, 205 | 'logging_level': logging.INFO, 206 | 'threads': 100, 207 | 'ttl': 60, 208 | 'interval': 60, 209 | 'plugins': os.path.join(__FILEABSDIRNAME__, 'plugins'), 210 | 'enabled': 'no', 211 | 'subprocess': 'no', 212 | 'user': '', 213 | 'server': '', 214 | 'api_host': 'api.nixstats.com', 215 | 'api_path': '/v2/server/poll', 216 | 'log_file': '/var/log/nixstatsagent.log', 217 | 'log_file_mode': 'a', 218 | 'max_cached_collections': 10, 219 | } 220 | sections = [ 221 | 'agent', 222 | 'execution', 223 | 'data', 224 | ] 225 | if sys.version_info >= (3,): 226 | config = configparser.RawConfigParser(defaults) 227 | else: 228 | config = ConfigParser.RawConfigParser(defaults) 229 | config.read(ini_files) 230 | self.config = config 231 | for section in sections: 232 | self._config_section_create(section) 233 | if section == 'data': 234 | self.config.set(section, 'interval', 1) 235 | if section == 'agent': 236 | self.config.set(section, 'interval', .5) 237 | 238 | def _config_section_create(self, section): 239 | ''' 240 | Create an addition section in 
the configuration object 241 | if it's not exists 242 | ''' 243 | if not self.config.has_section(section): 244 | self.config.add_section(section) 245 | 246 | def _logging_init(self): 247 | ''' 248 | Initialize logging faculty 249 | ''' 250 | level = self.config.getint('agent', 'logging_level') 251 | 252 | log_file = self.config.get('agent', 'log_file') 253 | 254 | log_file_mode = self.config.get('agent', 'log_file_mode') 255 | if log_file_mode in ('w', 'a'): 256 | pass 257 | elif log_file_mode == 'truncate': 258 | log_file_mode = 'w' 259 | elif log_file_mode == 'append': 260 | log_file_mode = 'a' 261 | else: 262 | log_file_mode = 'a' 263 | 264 | if log_file == '-': 265 | logging.basicConfig(level=level) # Log to sys.stderr by default 266 | else: 267 | try: 268 | logging.basicConfig(filename=log_file, filemode=log_file_mode, level=level, format="%(asctime)-15s %(levelname)s %(message)s") 269 | except IOError as e: 270 | logging.basicConfig(level=level) 271 | logging.info('IOError: %s', e) 272 | logging.info('Drop logging to stderr') 273 | 274 | logging.info('Agent logging_level %i', level) 275 | 276 | def _plugins_init(self): 277 | ''' 278 | Discover the plugins 279 | ''' 280 | logging.info('_plugins_init') 281 | plugins_path = self.config.get('agent', 'plugins') 282 | filenames = glob.glob(os.path.join(plugins_path, '*.py')) 283 | if plugins_path not in sys.path: 284 | sys.path.insert(0, plugins_path) 285 | self.schedule = {} 286 | for filename in filenames: 287 | name = _plugin_name(filename) 288 | if name == 'plugins': 289 | continue 290 | self._config_section_create(name) 291 | if self.config.getboolean(name, 'enabled'): 292 | if self.config.getboolean(name, 'subprocess'): 293 | self.schedule[filename] = 0 294 | else: 295 | fp, pathname, description = imp.find_module(name) 296 | try: 297 | module = imp.load_module(name, fp, pathname, description) 298 | except Exception: 299 | module = None 300 | logging.error('import_plugin_exception:%s', str(sys.exc_info()[0])) 
301 | finally: 302 | # Since we may exit via an exception, close fp explicitly. 303 | if fp: 304 | fp.close() 305 | if module: 306 | self.schedule[module] = 0 307 | else: 308 | logging.error('import_plugin:%s', name) 309 | 310 | def _subprocess_execution(self, task): 311 | ''' 312 | Execute /task/ in a subprocess 313 | ''' 314 | process = subprocess.Popen((sys.executable, task), 315 | stdout=subprocess.PIPE, stderr=subprocess.PIPE, 316 | universal_newlines=True) 317 | logging.debug('%s:process:%i', threading.currentThread(), process.pid) 318 | interval = self.config.getint('execution', 'interval') 319 | name = _plugin_name(task) 320 | ttl = self.config.getint(name, 'ttl') 321 | ticks = ttl / interval or 1 322 | process.poll() 323 | while process.returncode is None and ticks > 0: 324 | logging.debug('%s:tick:%i', threading.currentThread(), ticks) 325 | time.sleep(interval) 326 | ticks -= 1 327 | process.poll() 328 | if process.returncode is None: 329 | logging.error('%s:kill:%i', threading.currentThread(), process.pid) 330 | os.kill(process.pid, signal.SIGTERM) 331 | stdout, stderr = process.communicate() 332 | if process.returncode != 0 or stderr: 333 | logging.error('%s:%s:%s:%s', threading.currentThread(), 334 | task, process.returncode, stderr) 335 | if stdout: 336 | ret = pickle.loads(stdout) 337 | else: 338 | ret = None 339 | return ret 340 | 341 | def _execution(self): 342 | ''' 343 | Take queued execution requests, execute plugins and queue the results 344 | ''' 345 | while True: 346 | if self.shutdown: 347 | logging.info('%s:shutdown', threading.currentThread()) 348 | break 349 | logging.debug('%s:exec_queue:%i', threading.currentThread(), self.execute.qsize()) 350 | try: 351 | task = self.execute.get_nowait() 352 | except Empty: 353 | break 354 | logging.debug('%s:task:%s', threading.currentThread(), task) 355 | name = _plugin_name(task) 356 | try: 357 | interval = self.config.get(name, 'interval') 358 | except: 359 | interval = 60 360 | ts = time.time() 
361 | if isinstance(task, basestring): 362 | payload = self._subprocess_execution(task) 363 | else: 364 | try: 365 | # Setup cache for plugin instance 366 | # if name not in self.plugins_cache.iterkeys(): 367 | # self.plugins_cache[name] = [] 368 | self.plugins_cache.update({ 369 | name: self.plugins_cache.get(name, []) 370 | }) 371 | 372 | plugin = task.Plugin(agent_cache=self.plugins_cache[name]) 373 | payload = plugin.run(self.config) 374 | except Exception: 375 | logging.exception('plugin_exception') 376 | payload = {'exception': str(sys.exc_info()[0])} 377 | self.metrics.put({ 378 | 'ts': ts, 379 | 'task': task, 380 | 'name': name, 381 | 'interval': interval, 382 | 'payload': payload, 383 | }) 384 | self.cemetery.put(threading.currentThread()) 385 | self.hire.release() 386 | 387 | 388 | def _data(self): 389 | ''' 390 | Take and collect data, send and clean if needed 391 | ''' 392 | logging.info('%s', threading.currentThread()) 393 | api_host = self.config.get('data', 'api_host') 394 | api_path = self.config.get('data', 'api_path') 395 | max_age = self.config.getint('agent', 'max_data_age') 396 | max_span = self.config.getint('agent', 'max_data_span') 397 | server = self.config.get('agent', 'server') 398 | user = self.config.get('agent', 'user') 399 | interval = self.config.getint('data', 'interval') 400 | max_cached_collections = self.config.get('agent', 'max_cached_collections') 401 | cached_collections = [] 402 | collection = [] 403 | while True: 404 | loop_ts = time.time() 405 | if self.shutdown: 406 | logging.info('%s:shutdown', threading.currentThread()) 407 | break 408 | logging.debug('%s:data_queue:%i:collection:%i', 409 | threading.currentThread(), self.data.qsize(), len(collection)) 410 | while self.data.qsize(): 411 | try: 412 | collection.append(self.data.get_nowait()) 413 | except Exception as e: 414 | logging.error('Data queue error: %s' % e) 415 | if collection: 416 | first_ts = min((e['ts'] for e in collection)) 417 | last_ts = max((e['ts'] for 
e in collection)) 418 | now = time.time() 419 | send = False 420 | if last_ts - first_ts >= max_span: 421 | logging.debug('Max data span') 422 | send = True 423 | clean = False 424 | elif now - first_ts >= max_age: 425 | logging.warning('Max data age') 426 | send = True 427 | clean = True 428 | if send: 429 | headers = { 430 | "Content-type": "application/json", 431 | "Authorization": "ApiKey %s:%s" % (user, server), 432 | } 433 | logging.debug('collection: %s', 434 | json.dumps(collection, indent=2, sort_keys=True)) 435 | if not (server and user): 436 | logging.warning('Empty server or user, nowhere to send.') 437 | clean = True 438 | else: 439 | 440 | try: 441 | if sys.version_info >= (3,): 442 | connection = http.client.HTTPSConnection(api_host, timeout=15) 443 | else: 444 | connection = httplib.HTTPSConnection(api_host, timeout=15) 445 | 446 | # Trying to send cached collections if any 447 | if cached_collections: 448 | logging.info('Sending cached collections: %i', len(cached_collections)) 449 | while cached_collections: 450 | connection.request('PUT', '%s?version=%s' % (api_path, __version__), 451 | cached_collections[0], 452 | headers=headers) 453 | response = connection.getresponse() 454 | response.read() 455 | if response.status == 200: 456 | del cached_collections[0] # Remove just sent collection 457 | logging.debug('Successful response: %s', response.status) 458 | else: 459 | raise ValueError('Unsuccessful response: %s' % response.status) 460 | logging.info('All cached collections sent') 461 | 462 | # Send recent collection (reuse existing connection) 463 | connection.request('PUT', '%s?version=%s' % (api_path, __version__), 464 | bz2.compress(str(json.dumps(collection)+"\n").encode()), 465 | headers=headers) 466 | response = connection.getresponse() 467 | response.read() 468 | 469 | if response.status == 200: 470 | logging.debug('Successful response: %s', response.status) 471 | clean = True 472 | else: 473 | raise ValueError('Unsuccessful response: %s' 
% response.status) 474 | except Exception as e: 475 | logging.error('Failed to submit collection: %s' % e) 476 | 477 | # Store recent collection in cached_collections if send failed 478 | if max_cached_collections > 0: 479 | if len(cached_collections) >= max_cached_collections: 480 | del cached_collections[0] # Remove oldest collection 481 | logging.info('Reach max_cached_collections (%s): oldest cached collection dropped', 482 | max_cached_collections) 483 | logging.info('Cache current collection to resend next time') 484 | cached_collections.append(bz2.compress(str(json.dumps(collection)+"\n").encode())) 485 | collection = [] 486 | finally: 487 | connection.close() 488 | if clean: 489 | collection = [] 490 | sleep_interval = interval - (time.time() - loop_ts) 491 | if sleep_interval > 0: 492 | time.sleep(sleep_interval) 493 | 494 | def _data_worker_init(self): 495 | ''' 496 | Initialize data worker thread 497 | ''' 498 | logging.info('_data_worker_init') 499 | threading.Thread(target=self._data).start() 500 | 501 | def _dump_config(self): 502 | ''' 503 | Dumps configuration object 504 | ''' 505 | if sys.version_info >= (3,): 506 | buf = io.StringIO() 507 | else: 508 | buf = StringIO.StringIO() 509 | 510 | self.config.write(buf) 511 | logging.info('Config: %s', buf.getvalue()) 512 | 513 | def _get_plugins(self, state='enabled'): 514 | ''' 515 | Return list with plugins names 516 | ''' 517 | plugins_path = self.config.get('agent', 'plugins') 518 | plugins = [] 519 | for filename in glob.glob(os.path.join(plugins_path, '*.py')): 520 | plugin_name = _plugin_name(filename) 521 | if plugin_name == 'plugins': 522 | continue 523 | self._config_section_create(plugin_name) 524 | 525 | if state == 'enabled': 526 | if self.config.getboolean(plugin_name, 'enabled'): 527 | plugins.append(plugin_name) 528 | elif state == 'disabled': 529 | if not self.config.getboolean(plugin_name, 'enabled'): 530 | plugins.append(plugin_name) 531 | 532 | return plugins 533 | 534 | 535 | def 
_rip(self): 536 | ''' 537 | Join with dead workers 538 | Workaround for https://bugs.python.org/issue37788 539 | ''' 540 | logging.debug('cemetery:%i', self.cemetery.qsize()) 541 | while True: 542 | try: 543 | thread = self.cemetery.get_nowait() 544 | except Empty: 545 | break 546 | logging.debug('joining:%s', thread) 547 | thread.join() 548 | 549 | 550 | def run(self): 551 | ''' 552 | Start all the worker threads 553 | ''' 554 | logging.info('Agent main loop') 555 | interval = self.config.getfloat('agent', 'interval') 556 | self.hire = threading.Semaphore( 557 | self.config.getint('execution', 'threads')) 558 | try: 559 | while True: 560 | self._rip() 561 | now = time.time() 562 | logging.debug('%i threads', threading.activeCount()) 563 | while self.metrics.qsize(): 564 | metrics = self.metrics.get_nowait() 565 | name = metrics['name'] 566 | logging.debug('metrics:%s', name) 567 | plugin = metrics.get('task') 568 | if plugin: 569 | self.schedule[plugin] = \ 570 | int(now) + self.config.getint(name, 'interval') 571 | if isinstance(plugin, types.ModuleType): 572 | metrics['task'] = plugin.__file__ 573 | self.data.put(metrics) 574 | execute = [ 575 | what 576 | for what, when in self.schedule.items() 577 | if when <= now 578 | ] 579 | for name in execute: 580 | logging.debug('scheduling:%s', name) 581 | del self.schedule[name] 582 | self.execute.put(name) 583 | if self.hire.acquire(False): 584 | try: 585 | thread = threading.Thread(target=self._execution) 586 | thread.start() 587 | logging.debug('new_execution_worker_thread:%s', thread) 588 | except Exception as e: 589 | logging.warning('Can not start new thread: %s', e) 590 | else: 591 | logging.warning('threads_capped') 592 | self.metrics.put({ 593 | 'ts': now, 594 | 'name': 'agent_internal', 595 | 'payload': { 596 | 'threads_capping': 597 | self.config.getint('execution', 'threads')} 598 | }) 599 | sleep_interval = .5-(time.time()-now) 600 | if sleep_interval > 0: 601 | time.sleep(sleep_interval) 602 | else: 603 | 
def main():
    '''
    Command line entry point: dispatch on the first argument,
    or start the agent when no argument is given.
    '''
    if len(sys.argv) <= 1:
        Agent().run()
        return

    if sys.argv[1].startswith('--'):
        # A leading '--' on the option is accepted and stripped
        sys.argv[1] = sys.argv[1][2:]
    command = sys.argv[1]

    if command == 'help':
        print('\n'.join((
            'Run without options to run agent.',
            'Acceptable options (leading -- is optional):',
            '    help, info, version, hello, insecure-hello, test',
        )))
        sys.exit()
    elif command == 'info':
        print(info())
        sys.exit()
    elif command == 'version':
        print(__version__)
        sys.exit()
    elif command == 'hello':
        del sys.argv[1]
        sys.exit(hello())
    elif command == 'insecure-hello':
        del sys.argv[1]
        sys.exit(hello(proto='http'))
    elif command == 'test':
        sys.exit(test_plugins(sys.argv[2:]))
    else:
        print('Invalid option:', command, file=sys.stderr)
        sys.exit(1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import plugins

class Plugin(plugins.BasePlugin):
    __name__ = 'apt-updates'

    def run(self, config):
        '''
        ubuntu/debian updates available from apt-get
        add to /etc/sudoers the following line:
        nixstats ALL=(ALL) NOPASSWD: /usr/bin/apt-get

        test by running:
        sudo -u nixstats nixstatsagent test apt-updates

        Add to /etc/nixstats.ini:
        [apt-updates]
        enabled = yes
        interval = 3600
        '''
        # One apt-get simulation instead of two: the original shelled out
        # twice (grep Inst | grep security, grep Inst | grep -v security)
        # and produced the same counts at double the cost.
        output = os.popen('sudo -n apt-get upgrade -s').read()
        pending = [line for line in output.splitlines() if 'Inst' in line]
        security = sum(1 for line in pending if 'security' in line)
        return {
            'security': security,
            'other': len(pending) - security,
        }

if __name__ == '__main__':
    Plugin().execute()
class Plugin(plugins.BasePlugin):
    __name__ = 'cloudlinux-dbgov'

    def run(self, config):
        '''
        Beta plugin to monitor cloudlinux db governor users.
        Returns a dict of per-user stats keyed by username, or an error
        string when lveinfo output cannot be fetched or parsed.

        Requires sudo access to lveinfo (whereis lveinfo) add to /etc/sudoers:
        nixstats ALL=(ALL) NOPASSWD: /REPLACE/PATH/TO/lveinfo

        To enable add to /etc/nixstats.ini:
        [cloudlinux-dbgov]
        enabled = yes
        '''
        raw = os.popen('sudo lveinfo --dbgov --period 5m -o cpu --limit 20 --json').read()

        try:
            data = json.loads(raw)
        except Exception:
            return "Could not load lveinfo dbgov data"

        # Was 'is not 1': an identity check that only worked by accident via
        # CPython's small-int interning. Compare by value instead.
        if data['success'] != 1:
            return "Failed to load lveinfo dbgov"

        results = {}
        for line in data['data']:
            # Re-key each record by its username; drop the redundant field.
            username = line['USER']
            del line['USER']
            results[username] = line

        return results

if __name__ == '__main__':
    Plugin().execute()
class Plugin(plugins.BasePlugin):
    __name__ = 'cloudlinux'

    def run(self, config):
        '''
        Beta plugin to monitor cloudlinux users.
        Returns a dict of per-user stats keyed by LVE ID, or an error
        string when lveinfo output cannot be fetched or parsed.

        Requires sudo access to lveinfo (whereis lveinfo) add to /etc/sudoers:
        nixstats ALL=(ALL) NOPASSWD: /REPLACE/PATH/TO/lveinfo

        To enable add to /etc/nixstats.ini:
        [cloudlinux]
        enabled = yes
        '''
        raw = os.popen('sudo lveinfo -d --period 5m -o cpu_avg -l 20 --json').read()

        try:
            data = json.loads(raw)
        except Exception:
            return "Could not load lveinfo data"

        # Was 'is not 1': an identity check that only worked by accident via
        # CPython's small-int interning. Compare by value instead.
        if data['success'] != 1:
            return "Failed to load lveinfo"

        results = {}
        for line in data['data']:
            # Re-key each record by its LVE ID; drop the redundant field.
            username = line['ID']
            del line['ID']
            results[username] = line

        return results

if __name__ == '__main__':
    Plugin().execute()
class Plugin(plugins.BasePlugin):
    __name__ = 'cpu'

    def run(self, *unused):
        """Return per-core CPU time usage as percentages since the last check.

        Diffs psutil's cumulative per-CPU time counters against the values
        stored in the agent cache on the previous run. On the very first run
        (no cache yet) a baseline sample is taken and the plugin sleeps 0.5s
        so the first report is not all zeros.
        """
        prev_cache = self.get_agent_cache()  # absolute counters from previous check
        next_cache = {'ts': time.time()}
        results = {}

        # No previous sample: record a baseline now, then wait briefly so the
        # diff below covers a non-zero interval (first run only).
        if 'ts' not in prev_cache:
            prev_cache['ts'] = time.time()
            for cpu_number, cpu in enumerate(psutil.cpu_times(percpu=True)):
                prev_cache[cpu_number] = {key: getattr(cpu, key) for key in cpu._fields}
            time.sleep(0.5)

        for cpu_number, cpu in enumerate(psutil.cpu_times(percpu=True)):
            results[cpu_number] = {}
            # Always cache the fresh absolute counters for the next check.
            next_cache[cpu_number] = {key: getattr(cpu, key) for key in cpu._fields}
            try:
                time_delta = time.time() - prev_cache['ts']
            except (KeyError, TypeError):  # narrowed from a bare 'except:'
                continue
            if time_delta <= 0:
                continue
            # Guard against a changed core count since the previous check;
            # a missing per-core entry used to raise an uncaught KeyError.
            prev_core = prev_cache.get(cpu_number)
            if not isinstance(prev_core, dict):
                continue
            for key in cpu._fields:
                if key not in prev_core:
                    continue
                cpu_time_delta = getattr(cpu, key) - prev_core[key]
                if cpu_time_delta < 0:
                    cpu_time_delta = 0
                # Clamp to the valid 0..100 percentage range.
                pct = cpu_time_delta / time_delta * 100
                results[cpu_number][key] = min(100, max(0, pct))

        self.set_agent_cache(next_cache)
        return results


if __name__ == '__main__':
    Plugin().execute()
class Plugin(plugins.BasePlugin):
    __name__ = 'cpu_freq'

    def run(self, *unused):
        """Return the current frequency (MHz) of each CPU core, keyed by core index."""
        # psutil reports scpufreq(current, min, max) per core; only 'current'
        # is of interest here.
        return {
            index: core.current
            for index, core in enumerate(psutil.cpu_freq(percpu=True))
        }


if __name__ == '__main__':
    Plugin().execute()
class Plugin(plugins.BasePlugin):
    __name__ = 'diskstatus'

    def run(self, config):
        '''
        Monitor nvme or smart disk status.
        For NVME drives use the diskstatus-nvme plugin
        for smart status install smartmontools (apt-get/yum install smartmontools)
        This plugin requires the agent to be run under the root user.
        '''
        results = {}

        try:
            scan_output = subprocess.Popen(
                'smartctl --scan',
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=True,
            ).communicate()[0].decode()
        except Exception:
            return "Could not fetch smartctl status information"

        for line in scan_output.splitlines():
            try:
                device_path = line.split(' ')[0]
                device_name = device_path.split('/')[2]
                report = os.popen('smartctl -A -H {}'.format(device_path)).read().splitlines()
                # Overall health verdict sits on the fifth line of the report.
                healthy = 1 if report[4].split(': ')[1] == 'PASSED' else 0
                attributes = {}
                results[device_name] = attributes  # filled in place below
                in_table = False
                for row in report:
                    if row[0:3] == 'ID#':
                        # Attribute rows follow the 'ID#' header line.
                        in_table = True
                        continue
                    if not in_table:
                        continue
                    fields = re.sub(' +', ' ', row).strip().split(' ')
                    if len(fields) > 9:
                        attributes[fields[1].lower().replace('_celsius', '')] = fields[9]
                attributes["status"] = healthy
            except Exception as e:
                print(e)
        return results


if __name__ == '__main__':
    Plugin().execute()
in ignored_partitions: 22 | if ignore in part.device or ignore in part.mountpoint or ignore in part.fstype: 23 | valid_part = False 24 | if valid_part == False: 25 | continue 26 | 27 | if os.name == 'nt': 28 | if 'cdrom' in part.opts or part.fstype == '': 29 | # skip cd-rom drives with no disk in it; they may raise 30 | # ENOENT, pop-up a Windows GUI error for a non-ready 31 | # partition or just hang. 32 | continue 33 | try: 34 | usage = psutil.disk_usage(part.mountpoint) 35 | diskdata = {} 36 | diskdata['info'] = part 37 | for key in usage._fields: 38 | diskdata[key] = getattr(usage, key) 39 | disk['df-psutil'].append(diskdata) 40 | except: 41 | pass 42 | 43 | try: 44 | force_df = config.get('diskusage', 'force_df') 45 | except: 46 | force_df = 'no' 47 | 48 | if len(disk['df-psutil']) == 0 or force_df == 'yes': 49 | try: 50 | disk['df-psutil'] = [] 51 | df_output_lines = [s.split() for s in os.popen("df -Pl").read().splitlines()] 52 | del df_output_lines[0] 53 | for row in df_output_lines: 54 | if row[0] == 'tmpfs': 55 | continue 56 | disk['df-psutil'].append({'info': [row[0], row[5],'',''], 'total': int(row[1])*1024, 'used': int(row[2])*1024, 'free': int(row[3])*1024, 'percent': row[4][:-1]}) 57 | except: 58 | pass 59 | 60 | try: 61 | zfs_stats = config.get('diskusage', 'zfs') 62 | except: 63 | zfs_stats = 'no' 64 | 65 | if zfs_stats == 'yes': 66 | try: 67 | lines = [s.split(', ') for s in os.popen("zfs list -Hp -t volume").read().splitlines()] 68 | for row in lines: 69 | v = {} 70 | v['vg_name'] = row[0] 71 | v['vg_size'] = int(row[5][:-1]) 72 | v['vg_free'] = int(row[6][:-1]) 73 | v['vg_used'] = int(v['vg_size']-v['vg_free']) 74 | v['vg_percentage'] = (v['vg_used']/float(v['vg_size']))*100 75 | disk['df-psutil'].append({'info': [v['vg_name'], v['vg_name'], 'zfs', False], 'total': v['vg_size'], 'used': v['vg_used'], 'free': v['vg_free'], 'percent': v['vg_percentage']}) 76 | except Exception as e: 77 | return e.message 78 | 79 | try: 80 | lvm_stats = 
class Plugin(plugins.BasePlugin):
    __name__ = 'docker'

    def run(self, config):
        '''
        Docker monitoring, needs sudo access!
        Instructions at:
        https://help.nixstats.com/en/article/monitoring-docker-9st778/

        Returns per-container CPU/memory/network/disk metrics keyed by
        container name, plus a 'containers' total. Counter-style values are
        converted to per-second rates via the agent cache.
        '''
        containers = {}
        last_value = {}
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check
        try:
            # 'MemUsage', 'NetIO' and 'BlockIO' each expand to 'x / y', so the
            # split on ' / ' yields 10 fields per container line.
            lines = [s.split(' / ') for s in os.popen('sudo docker stats --no-stream --no-trunc --format "{{.CPUPerc}} / {{.Name}} / {{.ID}} / {{.MemUsage}} / {{.NetIO}} / {{.BlockIO}} / {{.MemPerc}}"').read().splitlines()]
            for row in lines:
                container = {}
                container['cpu'] = row[0].strip('%')
                name = row[1]
                container_id = row[2]
                container['mem_usage_bytes'] = self.computerReadable(row[3])
                container['mem_total_bytes'] = self.computerReadable(row[4])
                container['net_in_bytes'] = self.absolute_to_per_second('%s_%s' % (name, 'net_in_bytes'), self.computerReadable(row[5]), prev_cache)
                container['net_out_bytes'] = self.absolute_to_per_second('%s_%s' % (name, 'net_out_bytes'), self.computerReadable(row[6]), prev_cache)
                container['disk_in_bytes'] = self.absolute_to_per_second('%s_%s' % (name, 'disk_in_bytes'), self.computerReadable(row[7]), prev_cache)
                container['disk_out_bytes'] = self.absolute_to_per_second('%s_%s' % (name, 'disk_out_bytes'), self.computerReadable(row[8]), prev_cache)
                container['mem_pct'] = row[9].strip('%')

                # Cache the absolute readings for the next rate calculation.
                last_value['%s_%s' % (name, 'mem_usage_bytes')] = self.computerReadable(row[3])
                last_value['%s_%s' % (name, 'net_in_bytes')] = self.computerReadable(row[5])
                last_value['%s_%s' % (name, 'net_out_bytes')] = self.computerReadable(row[6])
                last_value['%s_%s' % (name, 'disk_in_bytes')] = self.computerReadable(row[7])
                last_value['%s_%s' % (name, 'disk_out_bytes')] = self.computerReadable(row[8])
                containers[name] = container
        except Exception as e:
            # Python 3 exceptions have no '.message' attribute; the old
            # 'return e.message' raised AttributeError instead of reporting.
            return str(e)
        containers['containers'] = len(containers)
        last_value['ts'] = time.time()
        self.set_agent_cache(last_value)

        return containers

    def computerReadable(self, value):
        """Convert a docker human-readable size string ('12.5MiB', '3kB', '42B') to bytes.

        Returns None for an unrecognized unit suffix, matching the original
        fall-through behavior.
        """
        # Longer suffixes first so e.g. 'MiB' is not matched by the bare 'B'.
        units = (
            ('KiB', 1024.0),
            ('MiB', 1024.0 ** 2),
            ('GiB', 1024.0 ** 3),
            ('TiB', 1024.0 ** 4),
            ('PiB', 1024.0 ** 5),
            ('kB', 1024.0),
            ('MB', 1024.0 ** 2),
            ('GB', 1024.0 ** 3),
            ('TB', 1024.0 ** 4),
            ('PB', 1024.0 ** 5),
            ('B', 1.0),
        )
        for suffix, factor in units:
            if value.endswith(suffix):
                return float(value[:-len(suffix)]) * factor
        return None

if __name__ == '__main__':
    Plugin().execute()
ascii_encode_dict(data): 35 | ascii_encode = lambda x: x.encode('ascii') if isinstance(x, unicode) else x 36 | return dict(map(ascii_encode, pair) for pair in data.items()) 37 | 38 | results = dict() 39 | next_cache = dict() 40 | request = urllib.request.Request(config.get('elasticsearch', 'status_page_url')) 41 | basic_auth_enabled = config.get('elasticsearch', 'basic_auth') 42 | if bool(basic_auth_enabled): 43 | user = config.get('elasticsearch', 'username') 44 | password = config.get('elasticsearch', 'password') 45 | b64auth = base64.b64encode(bytes("%s:%s" % (user, password), 'ascii')) 46 | request.add_header("Authorization", "Basic %s" % b64auth.decode('utf-8')) 47 | raw_response = urllib.request.urlopen(request) 48 | next_cache['ts'] = time.time() 49 | prev_cache = self.get_agent_cache() # Get absolute values from previous check 50 | def flatten(d, parent_key='', sep='_'): 51 | items = [] 52 | for k, v in d.items(): 53 | new_key = parent_key + sep + k if parent_key else k 54 | if isinstance(v, collections.MutableMapping): 55 | items.extend(flatten(v, new_key, sep=sep).items()) 56 | else: 57 | items.append((new_key, v)) 58 | return dict(items) 59 | try: 60 | j = flatten(json.loads(raw_response.read())['_all']['total']) 61 | except Exception: 62 | return False 63 | 64 | 65 | delta_keys = ( 66 | 'get_time_in_millis', 67 | 'indexing_index_time_in_millis', 68 | 'flush_total_time_in_millis', 69 | 'indexing_delete_time_in_millis', 70 | 'indexing_index_time_in_millis', 71 | 'indexing_throttle_time_in_millis', 72 | 'merges_total_stopped_time_in_millis', 73 | 'merges_total_throttled_time_in_millis', 74 | 'merges_total_time_in_millis', 75 | 'recovery_throttle_time_in_millis', 76 | 'refresh_total_time_in_millis', 77 | 'search_fetch_time_in_millis', 78 | 'search_query_time_in_millis', 79 | 'search_scroll_time_in_millis', 80 | 'search_suggest_time_in_millis', 81 | 'warmer_total_time_in_millis', 82 | 'docs_count', 83 | 'docs_deleted', 84 | 'flush_total', 85 | 
class Plugin(plugins.BasePlugin):
    __name__ = 'exim'

    def run(self, config):
        '''
        exim mail queue monitoring, needs sudo access!
        Instructions at:
        https://help.nixstats.com/en/article/monitoring-exim-mail-queue-size-1vcukxa/
        '''
        # 'exim -bpc' prints the number of messages currently queued.
        queue_size = int(os.popen('sudo exim -bpc').read())
        return {'queue_size': queue_size}

if __name__ == '__main__':
    Plugin().execute()
class Plugin(plugins.BasePlugin):
    __name__ = 'haproxy'

    def run(self, config):
        '''
        Collect metrics from the haproxy CSV status page.

        Gauge-like columns (non_delta) are reported as-is; counter columns
        (delta) are converted to per-second rates using the absolute values
        cached on the previous check. Results are keyed "pxname/svname".
        '''
        results = dict()
        next_cache = dict()
        # Basic auth is optional; fall back to an unauthenticated request.
        try:
            username = config.get('haproxy', 'username')
            password = config.get('haproxy', 'password')
            user_pass = (username, password)
        except Exception:  # narrowed from a bare 'except:'
            user_pass = False
        request = requests.get(config.get('haproxy', 'status_page_url'), auth=user_pass)
        next_cache['ts'] = time.time()
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check
        # Was 'is 200': identity comparison with an int literal, which only
        # worked via CPython small-int caching. Compare by value.
        if request.status_code == 200:
            response = request.text.split("\n")
        else:
            return "Could not load haproxy status page: {}".format(request.text)

        # Columns reported directly (gauges / limits / rates haproxy computes).
        non_delta = (
            'qcur',
            'qmax',
            'scur',
            'smax',
            'slim',
            'stot',
            'weight',
            'qlimit',
            'throttle',
            'lbtot',
            'tracked',
            'rate',
            'rate_lim',
            'rate_max',
            'hanafail',
            'req_rate',
            'req_rate_max',
            'req_tot',
            'conn_rate',
            'conn_rate_max',
            'conn_tot',
        )

        # Cumulative counters converted to per-second deltas.
        delta = (
            'bin',
            'bout',
            'cli_abrt',
            'srv_abrt',
            'intercepted',
            'hrsp_1xx',
            'hrsp_2xx',
            'hrsp_3xx',
            'hrsp_4xx',
            'check_rise',
            'check_fall',
            'check_health',
            'agent_rise',
            'agent_fall',
            'hrsp_5xx',
            'comp_in',
            'comp_out',
            'comp_byp',
            'comp_rsp',
            'hrsp_other',
            'dcon',
            'dreq',
            'dresp',
            'ereq',
            'econ',
            'eresp',
            'wretr',
            'wredis',
            'dses'
        )
        csv_reader = csv.DictReader(response)
        data = dict()
        constructors = [str, float]
        for row in csv_reader:
            # Hoist the repeated "pxname/svname" key construction.
            proxy = row["# pxname"] + "/" + row["svname"]
            results[proxy] = {}
            data[proxy] = {}
            try:
                prev_cache[proxy]['ts'] = prev_cache['ts']
            except KeyError:
                prev_cache[proxy] = {}

            for k, v in row.items():
                # Coerce numeric-looking strings to float; leave others as str.
                for c in constructors:
                    try:
                        v = c(v)
                    except ValueError:
                        pass
                if k in non_delta:
                    results[proxy][k] = v
                elif k in delta and type(v) is not str:
                    results[proxy][k] = self.absolute_to_per_second(k, float(v), prev_cache[proxy])
                    data[proxy][k] = float(v)
                else:
                    pass

        data['ts'] = time.time()
        self.set_agent_cache(data)

        return results


if __name__ == '__main__':
    Plugin().execute()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
    from urllib.parse import urlparse, urlencode
    from urllib.request import urlopen, Request
    from urllib.error import HTTPError
except ImportError:  # Python 2 fallback
    from urlparse import urlparse
    from urllib import urlencode
    from urllib2 import urlopen, Request, HTTPError
import time
import re

import plugins


class Plugin(plugins.BasePlugin):
    __name__ = 'httpd'

    # Status-page keys that are dropped from the report: either derived
    # elsewhere (ReqPerSec is recomputed from Total Accesses) or too noisy
    # to be useful as metrics.  The original code tested these in a long
    # or-chain that listed 'Server Built' twice.
    IGNORED_KEYS = frozenset((
        'IdleWorkers', 'Server Built', 'CurrentTime', 'RestartTime',
        'ServerUptime', 'CPULoad', 'CPUUser', 'CPUSystem',
        'CPUChildrenUser', 'CPUChildrenSystem', 'ReqPerSec',
    ))

    def run(self, config):
        '''
        Apache/httpd status page metrics.

        Fetches the mod_status page configured as [httpd] status_page_url,
        converts 'Total Accesses' into a per-second request rate using the
        agent cache, and expands the scoreboard into per-state worker counts.
        Returns False when the page cannot be fetched.
        '''
        next_cache = dict()
        next_cache['ts'] = time.time()
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check

        try:
            request = Request(config.get('httpd', 'status_page_url'))
            data = urlopen(request).read().decode('utf-8')
        except Exception:
            # The agent treats False as "check could not be performed".
            return False

        exp = re.compile(r'^([A-Za-z ]+):\s+(.+)$')
        results = {}

        def parse_score_board(sb):
            # Each scoreboard character encodes one worker's state.
            return [
                ('IdleWorkers', sb.count('_')),
                ('ReadingWorkers', sb.count('R')),
                ('WritingWorkers', sb.count('W')),
                ('KeepaliveWorkers', sb.count('K')),
                ('DnsWorkers', sb.count('D')),
                ('ClosingWorkers', sb.count('C')),
                ('LoggingWorkers', sb.count('L')),
                ('FinishingWorkers', sb.count('G')),
                ('CleanupWorkers', sb.count('I')),
            ]

        for line in data.split('\n'):
            if not line:
                continue
            m = exp.match(line)
            if not m:
                continue
            k = m.group(1)
            v = m.group(2)

            if k in self.IGNORED_KEYS:
                continue

            if k == 'Total Accesses':
                results['requests_per_second'] = self.absolute_to_per_second(k, int(v), prev_cache)
                next_cache['Total Accesses'] = int(v)

            if k == 'Scoreboard':
                for sb_kv in parse_score_board(v):
                    results[sb_kv[0]] = sb_kv[1]
            else:
                # NOTE: like the original, this also stores the raw
                # 'Total Accesses' string alongside requests_per_second.
                results[k] = v

        self.set_agent_cache(next_cache)
        return results


if __name__ == '__main__':
    Plugin().execute()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#

import os
import signal
import subprocess
import sys
import time

import psutil
import plugins


def diskstats_parse(dev=None):
    """Parse /proc/diskstats into {device: {column: int}}.

    Handles the three known column layouts (pre-4.18, 4.18+, 5.5+) plus the
    short partition format.  Partitions, loop and ram devices are skipped;
    NVMe namespaces (nvme0n1) are kept even though they end in a digit.
    Returns False when /proc/diskstats does not exist (non-Linux).
    """
    file_path = '/proc/diskstats'
    result = {}

    if not os.path.isfile(file_path):
        return False

    # ref: http://lxr.osuosl.org/source/Documentation/iostats.txt
    columns_disk = ['m', 'mm', 'dev', 'reads', 'rd_mrg', 'rd_sectors',
                    'ms_reading', 'writes', 'wr_mrg', 'wr_sectors',
                    'ms_writing', 'cur_ios', 'ms_doing_io', 'ms_weighted']
    # For kernel 4.18+: adds discard counters.
    columns_disk_418 = columns_disk + ['discards', 'discards_merged',
                                       'discarded_sectors', 'discarded_time']
    # For kernel 5.5+: adds flush counters.
    columns_disk_55 = columns_disk_418 + ['flush', 'flush_time']

    columns_partition = ['m', 'mm', 'dev', 'reads', 'rd_sectors', 'writes', 'wr_sectors']

    with open(file_path, 'r') as handle:
        lines = handle.readlines()
    for line in lines:
        if line == '':
            continue
        split = line.split()
        # Match the layout by field count (longest first).
        if len(split) == len(columns_disk_55):
            columns = columns_disk_55
        elif len(split) == len(columns_disk_418):
            columns = columns_disk_418
        elif len(split) == len(columns_disk):
            columns = columns_disk
        elif len(split) == len(columns_partition):
            columns = columns_partition
        else:
            # No match
            continue

        data = dict(zip(columns, split))

        # Keep whole NVMe namespaces (e.g. nvme0n1); otherwise a trailing
        # digit means a partition, which we skip.
        if data['dev'][:3] == 'nvm' and data['dev'][-2:-1] == 'n':
            pass
        elif data['dev'][-1:].isdigit() is True:
            continue

        if "loop" in data['dev'] or "ram" in data['dev']:
            continue

        if dev is not None and dev != data['dev']:
            continue
        for key in data:
            if key != 'dev':
                data[key] = int(data[key])
        result[data['dev']] = data

    return result


class Plugin(plugins.BasePlugin):
    __name__ = 'iostat'

    def run(self, *unused):
        """Report per-disk I/O statistics.

        Counters in ``delta_keys`` are converted to per-second rates using
        the agent cache; derived metrics (avgrq-sz, tps, usage) are computed
        from the rates.  Falls back to psutil counters when /proc/diskstats
        is unavailable.
        """
        # Monotonic counters converted to per-second rates.
        delta_keys = (
            'reads', 'writes', 'wr_sectors', 'rd_sectors', 'ms_reading',
            'rd_mrg', 'wr_mrg', 'ms_weighted', 'ms_doing_io', 'ms_writing',
            'discarded_sectors', 'discarded_time', 'flush', 'flush_time',
            'discards',
        )
        next_cache = {}
        next_cache['ts'] = time.time()
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check
        disks = diskstats_parse()
        if not disks:
            # Non-Linux (or unreadable /proc): report raw psutil counters.
            results = {}
            try:
                diskdata = psutil.disk_io_counters(perdisk=True)
                for device, values in diskdata.items():
                    device_stats = {}
                    for key_value in values._fields:
                        device_stats[key_value] = getattr(values, key_value)
                    results[device] = device_stats
            except Exception as e:
                # BUG FIX: the original returned ``e.message``, which does
                # not exist on Python 3 exceptions.
                results = str(e)
        else:
            results = {}
            for device, values in disks.items():
                device_stats = {}
                next_cache[device] = {}
                next_cache[device]['ts'] = time.time()
                if device not in prev_cache:
                    prev_cache[device] = {}
                for key_value, value in values.items():
                    if key_value in delta_keys:
                        try:
                            device_stats[key_value] = self.absolute_to_per_second(key_value, value, prev_cache[device])
                        except Exception:
                            pass
                        # Always cache the absolute value for the next run,
                        # even when the rate could not be computed yet.
                        next_cache[device][key_value] = value
                    else:
                        device_stats[key_value] = value
                # Derived metrics; fall back to 0 on first run / division
                # by zero when the rate keys are missing.
                try:
                    device_stats['avgrq-sz'] = (device_stats['wr_sectors'] + device_stats['rd_sectors']) / (device_stats['reads'] + device_stats['writes'])
                except Exception:
                    device_stats['avgrq-sz'] = 0
                try:
                    device_stats['tps'] = device_stats['reads'] + device_stats['writes']
                except Exception:
                    device_stats['tps'] = 0
                try:
                    # ms busy per wall-clock ms, scaled to a percentage.
                    device_stats['usage'] = (100 * device_stats['ms_doing_io']) / (1000 * (next_cache['ts'] - prev_cache['ts']))
                except Exception:
                    device_stats['usage'] = 0

                results[device] = device_stats

        self.set_agent_cache(next_cache)
        return results


if __name__ == '__main__':
    Plugin().execute()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess

import plugins

### You need to add `nixstats ALL=(ALL) NOPASSWD: /usr/sbin/kamctl` to /etc/sudoers in order for this to work


class Plugin(plugins.BasePlugin):
    # BUG FIX: this plugin previously declared ``__name__ = 'asterisk'``
    # (copy-paste from asterisk.py), making its metrics collide with the
    # real asterisk plugin and its config section lookup inconsistent.
    __name__ = 'kamailio'

    def run(self, *unused):
        """Count SIP contacts registered in Kamailio's location table.

        Runs ``kamctl ul show`` via sudo and counts the AOR lines.
        Returns {"devices_online": "<count>"} (count is a string, as in
        the original implementation).
        """
        proc = subprocess.Popen(
            "sudo kamctl ul show | grep AOR | wc -l",
            stdout=subprocess.PIPE, shell=True)
        count = proc.communicate()[0].decode('utf-8').replace("\n", "")
        return {"devices_online": count}


if __name__ == '__main__':
    Plugin().execute()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import plugins
import sys


class Plugin(plugins.BasePlugin):
    __name__ = 'loadavg'

    def run(self, *unused):
        """Return the (1, 5, 15)-minute load averages, or None on Windows
        where the metric does not exist."""
        return None if sys.platform == 'win32' else os.getloadavg()


if __name__ == '__main__':
    Plugin().execute()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import plugins
import json


class Plugin(plugins.BasePlugin):
    __name__ = 'mdstat'

    def run(self, config):
        '''
        Monitor software raid status using mdadm
        pip install mdstat
        '''
        raw = os.popen('sudo mdjson').read()
        results = {}

        try:
            parsed = json.loads(raw)
        except Exception:
            return "Could not load mdstat data"

        for array_name, info in parsed['devices'].items():
            # Flatten each md array into 0/1 flags plus a faulty-disk count.
            device = {
                'active': 1 if info['active'] is True else 0,
                'read_only': 0 if info['read_only'] is False else 1,
                'resync': 0 if info['resync'] is None else 1,
            }
            device['faulty'] = sum(
                1 for disk_info in info['disks'].values()
                if disk_info['faulty'] is not False
            )
            results[array_name] = device
        return results


if __name__ == '__main__':
    Plugin().execute()
import struct
import time

import plugins
import memcache


class Plugin(plugins.BasePlugin):
    __name__ = 'memcached'

    def run(self, config):
        '''
        Memcached metrics plugin.

        pip install python-memcached
        add to /etc/nixstats.ini
        [memcached]
        enabled=yes
        host=127.0.0.1
        port=11211

        Gauges listed in ``non_delta`` are reported as-is; counters in
        ``delta_keys`` are converted to per-second rates via the agent cache.
        '''
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check
        try:
            socket = config.get('memcached', 'socket')
        except Exception:
            socket = False
        try:
            # Connect over TCP, or over a unix socket when configured.
            if socket is False:
                mc = memcache.Client(['%s:%s' % (config.get('memcached', 'host'), config.get('memcached', 'port'))], debug=0)
            else:
                mc = memcache.Client(['unix:/%s' % socket], debug=0)
        except Exception:
            return "Could not connect to memcached"

        # Gauges reported verbatim (as floats).
        non_delta = (
            'accepting_conns', 'bytes', 'uptime', 'total_items',
            'total_connections', 'time_in_listen_disabled_us', 'threads',
            'rusage_user', 'rusage_system', 'reserved_fds', 'pointer_size',
            'malloc_fails', 'lrutail_reflocked', 'listen_disabled_num',
            'limit_maxbytes', 'hash_power_level', 'hash_bytes', 'curr_items',
            'curr_connections', 'connection_structures', 'conn_yields',
            'reclaimed',
        )
        # Monotonic counters converted to per-second rates.
        delta_keys = (
            'auth_cmds', 'auth_errors', 'bytes_read', 'bytes_written',
            'touch_misses', 'touch_hits', 'incr_misses', 'incr_hits',
            'cas_misses', 'cas_badval', 'get_misses', 'get_hits',
            'expired_unfetched', 'evictions', 'evicted_unfetched',
            'delete_misses', 'delete_hits', 'decr_misses', 'decr_hits',
            'crawler_reclaimed', 'crawler_items_checked', 'cmd_touch',
            'cmd_get', 'cmd_set', 'cmd_flush', 'cmd_misses', 'cmd_badval',
            'cmd_hits',
        )

        results = {}
        data = {}
        try:
            # get_stats() returns [(server_name, {stat: value, ...})].
            # BUG FIX: the original iterated ``enumerate(result[0][1])``,
            # discarding the index and re-indexing the dict by key; iterate
            # the items directly instead.
            stats = mc.get_stats()[0][1]
            for stat_name, value in stats.items():
                key = stat_name.lower().strip()
                if key in non_delta:
                    results[key] = float(value)
                elif key in delta_keys:
                    value = float(value)
                    results[key] = self.absolute_to_per_second(key, value, prev_cache)
                    data[key] = value
        except Exception:
            # Covers a down server (empty get_stats()) and malformed values.
            return 'Could not fetch memcached stats'

        data['ts'] = time.time()
        self.set_agent_cache(data)  # Cache absolutes for the next check
        return results


if __name__ == '__main__':
    Plugin().execute()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import psutil
import plugins
import os


class Plugin(plugins.BasePlugin):
    __name__ = 'memory'

    def run(self, *unused):
        """Report virtual-memory statistics from psutil.

        When psutil reports zero for available/buffers on a non-Windows
        host (seen in some containers/older kernels), re-derive the figures
        by parsing ``free -b -w`` instead.
        """
        vm = psutil.virtual_memory()
        memory = {field: getattr(vm, field) for field in vm._fields}

        if (memory['available'] == 0 or memory['buffers'] == 0) and os.name != 'nt':
            row = os.popen('free -b -w').readlines()[1].split()[1:]
            tot_m, used_m, free_m, sha_m, buf_m, cac_m, ava_m = map(int, row)
            memory['percent'] = 100 - (((free_m + buf_m + cac_m) * 100) / tot_m)
            memory['available'] = ava_m
            memory['buffers'] = buf_m
            memory['cached'] = cac_m
            memory['total'] = tot_m
            memory['used'] = used_m
            memory['shared'] = sha_m

        return memory


if __name__ == '__main__':
    Plugin().execute()
| hosts=127.0.0.1:8000,127.0.0.2:8000... 16 | ''' 17 | 18 | my_hosts = config.get('minecraft', 'hosts').split(',') 19 | result = {} 20 | for connection_string in my_hosts: 21 | try: 22 | # Connect 23 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 24 | hostname = connection_string.split(':')[0] 25 | port = int(connection_string.split(':')[1]) 26 | s.connect((hostname, port)) 27 | 28 | # Send handshake + status request 29 | s.send(self.pack_data("\x00\x00" + self.pack_data(hostname.encode('utf8')) + self.pack_port(port) + "\x01")) 30 | s.send(self.pack_data("\x00")) 31 | 32 | # Read response 33 | self.unpack_varint(s) # Packet length 34 | self.unpack_varint(s) # Packet ID 35 | l = self.unpack_varint(s) # String length 36 | 37 | d = "" 38 | while len(d) < l: 39 | d += s.recv(1024) 40 | 41 | # Close our socket 42 | s.close() 43 | except: 44 | pass 45 | 46 | results = {} 47 | 48 | try: 49 | players = json.loads(d.decode('utf8'))['players'] 50 | results['online'] = int(players['online']) 51 | results['max'] = int(players['max']) 52 | except: 53 | results['online'] = 0 54 | results['max'] = 0 55 | result[str(connection_string.replace('.', '-'))] = results 56 | 57 | return result 58 | 59 | 60 | def unpack_varint(self, s): 61 | d = 0 62 | for i in range(5): 63 | b = ord(s.recv(1)) 64 | d |= (b & 0x7F) << 7*i 65 | if not b & 0x80: 66 | break 67 | return d 68 | 69 | def pack_varint(self, d): 70 | o = "" 71 | while True: 72 | b = d & 0x7F 73 | d >>= 7 74 | o += struct.pack("B", b | (0x80 if d > 0 else 0)) 75 | if d == 0: 76 | break 77 | return o 78 | 79 | def pack_data(self, d): 80 | return self.pack_varint(len(d)) + d 81 | 82 | def pack_port(self, i): 83 | return struct.pack('>H', i) 84 | 85 | if __name__ == '__main__': 86 | Plugin().execute() 87 | -------------------------------------------------------------------------------- /nixstatsagent/plugins/mongodb.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 
#!/usr/bin/env python
import time

import plugins
from pymongo import MongoClient


class Plugin(plugins.BasePlugin):
    __name__ = 'mongodb'

    # serverStatus counters reported as per-second rates.  Each dotted key
    # doubles as the lookup path into the serverStatus document
    # (e.g. 'opcounters.insert' -> statistics['opcounters']['insert']).
    DELTA_PATHS = (
        'connections.totalCreated',
        'opcounters.command', 'opcounters.delete', 'opcounters.getmore',
        'opcounters.insert', 'opcounters.query', 'opcounters.update',
        'opLatencies.commands.latency', 'opLatencies.commands.ops',
        'opLatencies.reads.latency', 'opLatencies.reads.ops',
        'opLatencies.writes.latency', 'opLatencies.writes.ops',
        'globalLock.currentQueue.total', 'globalLock.currentQueue.readers',
        'globalLock.currentQueue.writers',
        'globalLock.activeClients.total', 'globalLock.activeClients.readers',
        'globalLock.activeClients.writers',
        'asserts.msg', 'asserts.regular', 'asserts.rollovers',
        'asserts.user', 'asserts.warning',
    )
    # Same, but only present on replica-set members.
    REPL_DELTA_PATHS = (
        'opcountersRepl.command', 'opcountersRepl.delete',
        'opcountersRepl.getmore', 'opcountersRepl.insert',
        'opcountersRepl.query', 'opcountersRepl.update',
    )
    # Transaction stats, available in MongoDB 3.6.3 and up.
    TXN_DELTA = (
        'retriedCommandsCount', 'retriedStatementsCount',
        'transactionsCollectionWriteCount', 'totalAborted',
        'totalCommitted', 'totalStarted',
    )
    TXN_GAUGES = ('currentActive', 'currentInactive', 'currentOpen')

    @staticmethod
    def _lookup(document, dotted):
        # Walk a nested dict by dotted path; raises KeyError when absent.
        node = document
        for part in dotted.split('.'):
            node = node[part]
        return node

    def run(self, config):
        """MongoDB monitoring via the serverStatus admin command.

        Counter sections are collected into ``data`` (absolute values,
        cached for the next run) and converted to per-second rates in
        ``results``; gauges go straight into ``results``.
        """
        client = MongoClient(config.get('mongodb', 'connection_string'))
        statistics = client.admin.command("serverStatus")
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check
        data = {}
        results = {}

        # Replication overview (absent on standalone servers).
        try:
            results['isprimary'] = 0 if statistics['repl']['ismaster'] is False else 1
            results['members'] = len(statistics['repl']['hosts'])
        except Exception:
            pass

        # Like the original code, a partially populated transactions section
        # is kept as-is when a field is missing part-way through.
        try:
            for field in self.TXN_DELTA:
                data['transactions-' + field] = statistics['transactions'][field]
            for field in self.TXN_GAUGES:
                results['transactions-' + field] = statistics['transactions'][field]
        except Exception:
            pass

        # Core counters; a missing section raises KeyError here, matching
        # the original's unguarded assignments.
        for dotted in self.DELTA_PATHS:
            data[dotted] = self._lookup(statistics, dotted)
        results['connections.available'] = statistics['connections']['available']
        results['connections.current'] = statistics['connections']['current']

        # Lock stats (layout varies across versions; best effort).
        try:
            for key, val in statistics['locks'].items():
                for key2, val2 in val.items():
                    for key3, val3 in val2.items():
                        data['locks-{}-{}-{}'.format(key.lower(), key2, key3)] = val3
        except Exception:
            pass

        try:
            for dotted in self.REPL_DELTA_PATHS:
                data[dotted] = self._lookup(statistics, dotted)
        except KeyError:
            pass

        # Convert every cached absolute counter into a per-second rate.
        for key, val in data.items():
            results[key] = self.absolute_to_per_second(key, val, prev_cache)

        # Average latency per op; guarded against first-run zero rates.
        try:
            results['opLatencies.commands'] = results['opLatencies.commands.latency'] / results['opLatencies.commands.ops']
            results['opLatencies.writes'] = results['opLatencies.writes.latency'] / results['opLatencies.writes.ops']
            results['opLatencies.reads'] = results['opLatencies.reads.latency'] / results['opLatencies.reads.ops']
        except Exception:
            pass

        next_cache = data
        next_cache['ts'] = time.time()
        self.set_agent_cache(next_cache)
        results['mem.resident'] = statistics['mem']['resident']
        results['mem.bits'] = statistics['mem']['bits']
        results['mem.virtual'] = statistics['mem']['virtual']
        results['mem.supported'] = 0 if statistics['mem']['supported'] is False else 1

        return results


if __name__ == '__main__':
    Plugin().execute()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time

import MySQLdb
import plugins


class Plugin(plugins.BasePlugin):
    __name__ = 'mysql'

    def run(self, config):
        '''
        MySQL metrics plugin.

        Reads SHOW GLOBAL STATUS and SHOW SLAVE STATUS.  Gauges in
        ``non_delta`` are reported as-is; counters in ``delta_keys`` are
        converted to per-second rates using the agent cache.
        '''
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check
        auth = {}
        # Every connection option falls back to a sensible default when it
        # is missing from the config.  ROBUSTNESS FIX: the port lookup used
        # ``except ValueError``, which never catches the NoOptionError a
        # missing option actually raises, so an unconfigured port crashed
        # instead of defaulting to 3306.
        try:
            auth['port'] = int(config.get('mysql', 'port'))
        except Exception:
            auth['port'] = 3306
        try:
            auth['user'] = config.get('mysql', 'username')
        except Exception:
            auth['user'] = 'root'
        try:
            auth['passwd'] = config.get('mysql', 'password')
        except Exception:
            auth['passwd'] = ''
        try:
            auth['host'] = config.get('mysql', 'host')
        except Exception:
            # No host configured: connect over the unix socket instead.
            auth['unix_socket'] = config.get('mysql', 'socket')
        try:
            auth['db'] = config.get('mysql', 'database')
        except Exception:
            auth['db'] = 'mysql'

        db = MySQLdb.connect(**auth)
        cursor = db.cursor()
        cursor.execute("SHOW GLOBAL STATUS;")
        query_result = cursor.fetchall()
        # Gauges reported verbatim.
        non_delta = (
            'max_used_connections', 'open_files', 'open_tables',
            'qcache_free_blocks', 'qcache_free_memory', 'qcache_total_blocks',
            'slave_open_temp_tables', 'threads_cached', 'threads_connected',
            'threads_running', 'uptime',
        )
        # Monotonic counters converted to per-second rates.
        # DEAD-CODE FIX: keys that also appeared in ``non_delta``
        # (max_used_connections, open_files, open_tables, qcache_free_*,
        # qcache_total_blocks, slave_open_temp_tables, threads_*) have been
        # removed — the ``if key in non_delta`` branch always won, so those
        # entries could never take effect.
        delta_keys = (
            'aborted_clients', 'aborted_connects', 'binlog_cache_disk_use',
            'binlog_cache_use', 'bytes_received', 'bytes_sent',
            'com_delete', 'com_delete_multi', 'com_insert',
            'com_insert_select', 'com_load', 'com_replace',
            'com_replace_select', 'com_select', 'com_update',
            'com_update_multi', 'connections', 'created_tmp_disk_tables',
            'created_tmp_files', 'created_tmp_tables', 'key_reads',
            'key_read_requests', 'key_writes', 'key_write_requests',
            'opened_tables', 'qcache_hits', 'qcache_inserts',
            'qcache_lowmem_prunes', 'qcache_not_cached',
            'qcache_queries_in_cache', 'questions', 'select_full_join',
            'select_full_range_join', 'select_range', 'select_range_check',
            'select_scan', 'slave_retried_transactions',
            'slow_launch_threads', 'slow_queries', 'sort_range', 'sort_rows',
            'sort_scan', 'table_locks_immediate', 'table_locks_waited',
            'threads_created',
        )

        results = dict()
        data = dict()
        constructors = [str, float]
        for key, value in query_result:
            key = key.lower().strip()
            # Best-effort numeric coercion: str() always succeeds, float()
            # is kept only for numeric values.
            for c in constructors:
                try:
                    value = c(value)
                except ValueError:
                    pass
            if key in non_delta:
                results[key] = value
            elif key in delta_keys and type(value) is not str:
                results[key] = self.absolute_to_per_second(key, float(value), prev_cache)
                data[key] = float(value)

        cursor = db.cursor(MySQLdb.cursors.DictCursor)
        cursor.execute('SHOW SLAVE STATUS')
        query_result_slave = cursor.fetchone()
        non_delta_slave = (
            'slave_io_state', 'master_host', 'seconds_behind_master',
            'read_master_log_pos', 'relay_log_pos', 'slave_io_running',
            'slave_sql_running', 'last_error', 'exec_master_log_pos',
            'relay_log_space', 'slave_sql_running_state', 'master_retry_count',
        )
        if query_result_slave is None:
            query_result_slave = dict()
        for key, value in query_result_slave.items():
            key = key.lower().strip()
            # Yes/No flags become 1/0 so they survive the string filter below.
            if key == 'slave_sql_running':
                value = 1 if value == 'Yes' else 0
            if key == 'slave_io_running':
                value = 1 if value == 'Yes' else 0

            for c in constructors:
                try:
                    value = c(value)
                except ValueError:
                    pass
            # NOTE: like the original, purely textual fields (slave_io_state,
            # master_host, last_error, ...) are filtered out here because
            # they stay strings after coercion.
            if key in non_delta_slave and type(value) is not str:
                results[key] = value

        db.close()
        data['ts'] = time.time()
        self.set_agent_cache(data)  # Cache absolutes for the next check
        return results


if __name__ == '__main__':
    Plugin().execute()
returndata[interface]['packets_sent'] = self.absolute_to_per_second('packets_sent', stats.packets_sent, prev_cache[interface]) 50 | returndata[interface]['packets_recv'] = self.absolute_to_per_second('packets_recv', stats.packets_recv, prev_cache[interface]) 51 | returndata[interface]['errin'] = self.absolute_to_per_second('errin', stats.errin, prev_cache[interface]) 52 | returndata[interface]['errout'] = self.absolute_to_per_second('errout', stats.errout, prev_cache[interface]) 53 | returndata[interface]['dropin'] = self.absolute_to_per_second('dropin', stats.dropin, prev_cache[interface]) 54 | returndata[interface]['dropout'] = self.absolute_to_per_second('dropout', stats.dropout, prev_cache[interface]) 55 | self.set_agent_cache(absolute) 56 | return returndata 57 | 58 | 59 | if __name__ == '__main__': 60 | Plugin().execute() 61 | -------------------------------------------------------------------------------- /nixstatsagent/plugins/nginx.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # import psutil 4 | try: 5 | from urllib.parse import urlparse, urlencode 6 | from urllib.request import urlopen, Request 7 | from urllib.error import HTTPError 8 | except ImportError: 9 | from urlparse import urlparse 10 | from urllib import urlencode 11 | from urllib2 import urlopen, Request, HTTPError 12 | import time 13 | import plugins 14 | 15 | 16 | class Plugin(plugins.BasePlugin): 17 | __name__ = 'nginx' 18 | 19 | def run(self, config): 20 | ''' 21 | Provides the following metrics (example): 22 | "accepts": 588462, 23 | "accepts_per_second": 0.0, 24 | "active_connections": 192, 25 | "handled": 588462, 26 | "handled_per_second": 0.0, 27 | "reading": 0, 28 | "requests": 9637106, 29 | "requests_per_second": 0.0, 30 | "waiting": 189, 31 | "writing": 3 32 | 33 | requests, accepts, handled are values since the start of nginx. 
class Plugin(plugins.BasePlugin):
    __name__ = 'nginx'

    def run(self, config):
        '''
        Collect metrics from the nginx stub_status page configured under
        [nginx] status_page_url.

        Provides (example):
            "accepts": 588462, "accepts_per_second": 0.0,
            "active_connections": 192, "handled": 588462,
            "handled_per_second": 0.0, "reading": 0,
            "requests": 9637106, "requests_per_second": 0.0,
            "waiting": 189, "writing": 3

        requests/accepts/handled are counters since nginx started; the
        *_per_second rates are derived from values cached on the previous
        run.  Returns False on any failure.
        '''
        try:
            results = {}
            next_cache = {'ts': time.time()}
            prev_cache = self.get_agent_cache()  # absolute values from previous check

            status_url = config.get('nginx', 'status_page_url')
            lines = urlopen(Request(status_url)).read().decode('utf-8').split("\n")

            # First line: "Active connections: N"
            results['active_connections'] = int(lines[0].split()[-1])

            # Second line names the counters, third line holds the values:
            #   server accepts handled requests
            for key, value in zip(lines[1].split()[1:], lines[2].split()):
                next_cache[key] = int(value)
                results[key] = next_cache[key]  # keep the absolute value too
                try:
                    elapsed = next_cache['ts'] - prev_cache['ts']
                    if next_cache[key] >= prev_cache[key]:
                        results['%s_per_second' % key] = \
                            (next_cache[key] - prev_cache[key]) / elapsed
                    else:  # counter went backwards: nginx restarted since caching
                        results['%s_per_second' % key] = next_cache[key] / elapsed
                except KeyError:  # no cache yet, can't calculate a rate
                    results['%s_per_second' % key] = 0.0

            # Last line: "Reading: X Writing: Y Waiting: Z"
            tokens = lines[3].split()
            for label, count in zip(tokens[0::2], tokens[1::2]):
                results[label.strip(':').lower()] = int(count)

            # Cache absolute values for next check calculations
            self.set_agent_cache(next_cache)

            return results
        except Exception:
            return False


if __name__ == '__main__':
    Plugin().execute()
class Plugin(plugins.BasePlugin):
    __name__ = 'openvpn'

    def run(self, config):
        '''
        OpenVPN monitoring, needs access to openvpn-status.log file.
        pip install openvpn-status  (or pip3 install openvpn-status)

        In /etc/nixstats.ini to enable put:
        [openvpn]
        enabled = yes
        status_path = /etc/openvpn/openvpn-status.log

        test the plugin by running: sudo -u nixstats nixstatsagent test OpenVPN

        If you are having permission issues try to run the agent as root user:
        https://help.nixstats.com/en/article/running-the-monitoring-agent-as-root-user-m0ylxw/

        Returns per-client in/out byte rates plus the connected-client
        count under 'containers'; on failure the caught exception object
        is returned instead.
        '''
        openvpn_clients = {}
        last_value = {}
        prev_cache = self.get_agent_cache()  # absolute values from previous check

        try:
            with open(config.get('openvpn', 'status_path')) as logfile:
                status = parse_status(logfile.read())
        except Exception as e:
            return e

        try:
            clients = status.client_list
            openvpn_clients['containers'] = len(clients.items())
            for _, client in clients.items():
                # Dots would break the metric key namespace downstream.
                client.common_name = client.common_name.replace('.', '-')
                name = client.common_name
                sent = int(client.bytes_sent)
                received = int(client.bytes_received)
                openvpn_clients[name] = {}
                openvpn_clients[name]['net_out_bytes'] = self.absolute_to_per_second(
                    '%s_%s' % (name, 'net_out_bytes'), sent, prev_cache)
                openvpn_clients[name]['net_in_bytes'] = self.absolute_to_per_second(
                    '%s_%s' % (name, 'net_in_bytes'), received, prev_cache)

                last_value['%s_%s' % (name, 'net_in_bytes')] = received
                last_value['%s_%s' % (name, 'net_out_bytes')] = sent
        except Exception as e:
            return e

        last_value['ts'] = time.time()
        self.set_agent_cache(last_value)

        return openvpn_clients

if __name__ == '__main__':
    Plugin().execute()
"_")] = v 48 | 49 | next_cache['%s_accepted_conn' % j['pool']] = int(results[j['pool']]['accepted_conn']) 50 | except Exception as e: 51 | return e 52 | 53 | try: 54 | if next_cache['%s_accepted_conn' % j['pool']] >= prev_cache['%s_accepted_conn' % j['pool']]: 55 | results[j['pool']]['accepted_conn_per_second'] = \ 56 | (next_cache['%s_accepted_conn' % j['pool']] - prev_cache['%s_accepted_conn' % j['pool']]) / \ 57 | (next_cache['%s_ts' % j['pool']] - prev_cache['%s_ts' % j['pool']]) 58 | else: # Was restarted after previous caching 59 | results[j['pool']]['accepted_conn_per_second'] = \ 60 | next_cache['%s_accepted_conn' % j['pool']] / \ 61 | (next_cache['%s_ts' % j['pool']] - prev_cache['%s_ts' % j['pool']]) 62 | except KeyError: # No cache yet, can't calculate 63 | results[j['pool']]['accepted_conn_per_second'] = 0.0 64 | 65 | # Cache absolute values for next check calculations 66 | self.set_agent_cache(next_cache) 67 | 68 | return results 69 | 70 | 71 | if __name__ == '__main__': 72 | Plugin().execute() 73 | -------------------------------------------------------------------------------- /nixstatsagent/plugins/ping.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import re 4 | from subprocess import Popen, PIPE, CalledProcessError 5 | import sys 6 | import plugins 7 | 8 | 9 | def _get_match_groups(ping_output, regex): 10 | match = regex.search(ping_output) 11 | if not match: 12 | return False 13 | else: 14 | return match.groups() 15 | 16 | 17 | def system_command(Command, newlines=True): 18 | Output = "" 19 | Error = "" 20 | try: 21 | proc = Popen(Command.split(), stdout=PIPE) 22 | Output = proc.communicate()[0] 23 | except Exception: 24 | pass 25 | 26 | if Output: 27 | if newlines is True: 28 | Stdout = Output.split("\\n") 29 | else: 30 | Stdout = Output 31 | else: 32 | Stdout = [] 33 | if Error: 34 | Stderr = Error.split("\n") 35 | else: 36 | Stderr = [] 37 | 38 | 
return (Stdout, Stderr) 39 | 40 | 41 | def collect_ping(hostname): 42 | if sys.platform.startswith('linux') or sys.platform.startswith('freebsd'): 43 | #if sys.platform == "linux" or sys.platform == "linux2": 44 | response = str(system_command("ping -W 5 -c 1 " + hostname, False)[0]) 45 | try: 46 | matcher = re.compile(r'(\d+.\d+)/(\d+.\d+)/(\d+.\d+)/(\d+.\d+)') 47 | minping, avgping, maxping, jitter = _get_match_groups(response, matcher) 48 | response = avgping 49 | except Exception: 50 | #response = 9999 51 | response = -1 52 | elif sys.platform == "darwin": 53 | response = str(system_command("ping -c 1 " + hostname, False)[0]) 54 | # matcher = re.compile(r'min/avg/max/stddev = (\d+)/(\d+)/(\d+)/(\d+) ms') 55 | # min, avg, max, stddev = _get_match_groups(response, matcher) 56 | matcher = re.compile(r'(\d+.\d+)/(\d+.\d+)/(\d+.\d+)/(\d+.\d+)') 57 | matched = _get_match_groups(response, matcher) 58 | if matched is False: 59 | #response = 0 60 | response = -1 61 | else: 62 | minping, avgping, maxping, jitter = matched 63 | response = avgping 64 | elif sys.platform == "win32": 65 | #response = 0 66 | response = -1 67 | try: 68 | ping = Popen(["ping", "-n", "1 ", hostname], stdout=PIPE, stderr=PIPE) 69 | out, error = ping.communicate() 70 | if out: 71 | try: 72 | response = int(re.findall(r"Average = (\d+)", out)[0]) 73 | except Exception: 74 | pass 75 | else: 76 | #response = 0 77 | response = -1 78 | except CalledProcessError: 79 | pass 80 | else: 81 | #response = float(system_command("ping -W -c 1 " + hostname)) 82 | response = -1 83 | return {'avgping': response, 'host': hostname} 84 | 85 | 86 | class Plugin(plugins.BasePlugin): 87 | __name__ = 'ping' 88 | 89 | def run(self, config): 90 | data = {} 91 | my_hosts = config.get('ping', 'hosts').split(',') 92 | data['ping'] = [] 93 | for host in my_hosts: 94 | data['ping'].append(collect_ping(host)) 95 | return data['ping'] 96 | 97 | 98 | if __name__ == '__main__': 99 | Plugin().execute() 100 | 
class BasePlugin:
    '''
    Abstract base class for all nixstats agent plugins.

    Subclasses override run() and set __name__; execute() is the CLI
    entry point used when a plugin module is run directly.
    '''
    __name__ = ''

    def __init__(self, agent_cache=None):
        '''
        agent_cache must be a list (minimally list()); the agent watches
        element 0 for cached values.  The original signature used a
        mutable default (agent_cache=[]), which is shared between every
        instance created without an explicit cache -- use a fresh list
        per instance instead.
        '''
        if agent_cache is None:
            agent_cache = []
        if isinstance(agent_cache, list):
            self.agent_cache = agent_cache
        else:
            raise TypeError('Type of agent_cache have to be list')

    def run(self, config=None):
        '''
        Virtual method for running the plugin
        '''
        pass

    def execute(self):
        '''
        Execution wrapper for the plugin
        argv[1]: ini_file
        '''
        config = None
        if len(sys.argv) > 1:
            # The original passed an undefined name `defaults` to the
            # parser constructor, raising NameError whenever an ini file
            # was supplied on the command line.
            if sys.version_info >= (3,):
                config = configparser.RawConfigParser()
            else:
                config = ConfigParser.RawConfigParser()
            config.read(sys.argv[1])
        # pickle requires a binary stream on Python 3; sys.stdout.buffer
        # is absent on Python 2, where text stdout is fine.
        stream = getattr(sys.stdout, 'buffer', sys.stdout)
        pickle.dump(self.run(config), stream)

    def get_agent_cache(self):
        '''
        Return agent cached value for this specific plugin,
        or an empty dict when nothing has been cached yet.
        '''
        try:
            return self.agent_cache[0]
        except Exception:
            return {}

    def set_agent_cache(self, cache):
        '''
        Set agent cache value previously passed to this plugin instance.
        To enable caching an existing agent_cache list has to be passed
        to the Plugin on initialization (minimally list()).
        The agent only sees changes to element zero of agent_cache, so do
        not rebind self.agent_cache manually, otherwise the cache will
        not be saved.
        '''
        try:
            self.agent_cache[0] = cache
        except IndexError:
            self.agent_cache.append(cache)

    def absolute_to_per_second(self, key, val, prev_cache):
        '''
        Convert an absolute counter `val` to a per-second rate using the
        previously cached value prev_cache[key] and its 'ts' timestamp.
        A counter lower than the cached one is treated as a service
        restart and rated from zero.  Returns 0 when no usable cache
        entry exists yet.
        '''
        try:
            if val >= prev_cache[key]:
                value = \
                    (val - prev_cache[key]) / \
                    (time.time() - prev_cache['ts'])
            else:  # cached value above current one (service was restarted?)
                value = val / \
                    (time.time() - prev_cache['ts'])
        except Exception:  # No cache yet, can't calculate
            value = 0
        return value
19 | Add to /etc/nixstats.ini: 20 | [powerdns] 21 | enabled=yes 22 | statistics_url=http://localhost:8081/api/v1/servers/localhost/statistics 23 | api_key=changeme 24 | ;ca_file= 25 | ;ca_path= 26 | ;timeout=10 27 | ''' 28 | # Create request to configured URL 29 | request = urllib2.Request(config.get(__name__, 'statistics_url'), headers={'X-API-Key': '%s' % config.get(__name__, 'api_key')}) 30 | # defaults 31 | timeout = 10 32 | results = dict() 33 | raw_response = None 34 | # next / previous cached metrics (for calculating deltas) 35 | next_cache = dict() 36 | prev_cache = self.get_agent_cache() 37 | # use timeout from config if specified 38 | if config.has_option(__name__, 'timeout'): 39 | timeout = int(config.get(__name__, 'timeout')) 40 | # create response based on configuration 41 | if config.has_option(__name__, 'ca_file'): 42 | raw_response = urllib2.urlopen(request, timeout=timeout, cafile=config.get(__name__, 'ca_file')) 43 | elif config.has_option(__name__, 'ca_path'): 44 | raw_response = urllib2.urlopen(request, timeout=timeout, capath=config.get(__name__, 'ca_path')) 45 | else: 46 | raw_response = urllib2.urlopen(request, timeout=timeout) 47 | # set next_cache timestamp 48 | next_cache['ts'] = time.time() 49 | # parse raw response as JSON 50 | try: 51 | stats = json.loads(raw_response.read()) 52 | except Exception: 53 | return False 54 | # keys for which we should calculate the delta 55 | delta_keys = ( 56 | 'corrupt-packets', 57 | 'deferred-cache-inserts', 58 | 'deferred-cache-lookup', 59 | 'deferred-packetcache-inserts', 60 | 'deferred-packetcache-lookup', 61 | 'dnsupdate-answers', 62 | 'dnsupdate-changes', 63 | 'dnsupdate-queries', 64 | 'dnsupdate-refused', 65 | 'incoming-notifications', 66 | 'overload-drops', 67 | 'packetcache-hit', 68 | 'packetcache-miss', 69 | 'query-cache-hit', 70 | 'query-cache-miss', 71 | 'rd-queries', 72 | 'recursing-answers', 73 | 'recursing-questions', 74 | 'recursion-unanswered', 75 | 'servfail-packets', 76 | 'signatures', 
77 | 'sys-msec', 78 | 'tcp-answers', 79 | 'tcp-answers-bytes', 80 | 'tcp-queries', 81 | 'tcp4-answers', 82 | 'tcp4-answers-bytes', 83 | 'tcp4-queries', 84 | 'tcp6-answers', 85 | 'tcp6-answers-bytes', 86 | 'tcp6-queries', 87 | 'timedout-packets', 88 | 'udp-answers', 89 | 'udp-answers-bytes', 90 | 'udp-do-queries', 91 | 'udp-in-errors', 92 | 'udp-noport-errors', 93 | 'udp-queries', 94 | 'udp-recvbuf-errors', 95 | 'udp-sndbuf-errors', 96 | 'udp4-answers', 97 | 'udp4-answers-bytes', 98 | 'udp4-queries', 99 | 'udp6-answers', 100 | 'udp6-answers-bytes', 101 | 'udp6-queries', 102 | 'user-msec' 103 | ) 104 | 105 | # keys for which we should store the absolute value: 106 | absolute_keys = ( 107 | 'key-cache-size', 108 | 'latency', 109 | 'fd-usage', 110 | 'meta-cache-size', 111 | 'open-tcp-connections', 112 | 'packetcache-size', 113 | 'qsize-q', 114 | 'query-cache-size', 115 | 'real-memory-usage', 116 | 'security-status', 117 | 'signature-cache-size', 118 | 'uptime' 119 | ) 120 | data = dict() 121 | for stat in stats: 122 | if 'name' in stat and 'value' in stat and 'type' in stat: 123 | if stat['type'] == 'StatisticItem': 124 | if stat['name'] in delta_keys: 125 | results[stat['name']] = self.absolute_to_per_second(stat['name'], float(stat['value']), prev_cache) 126 | data[stat['name']] = float(stat['value']) 127 | elif stat['name'] in absolute_keys: 128 | results[stat['name']] = float(stat['value']) 129 | data['ts'] = time.time() 130 | self.set_agent_cache(data) 131 | return results 132 | 133 | if __name__ == '__main__': 134 | Plugin().execute() 135 | -------------------------------------------------------------------------------- /nixstatsagent/plugins/process.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import psutil 4 | import plugins 5 | import sys 6 | 7 | class Plugin(plugins.BasePlugin): 8 | __name__ = 'process' 9 | 10 | def run(self, *unused): 11 | process = [] 12 | for 
class Plugin(plugins.BasePlugin):
    __name__ = 'rabbitmq'

    def run(self, config):
        '''
        Collect metrics from the RabbitMQ management API overview page.

        [rabbitmq] config options: status_page_url plus optional
        username/password.  Returns message/queue/object totals and
        per-second message rates derived from the values cached on the
        previous run; on HTTP failure an error string is returned, on
        parse failure the caught exception object.
        '''
        results = dict()
        next_cache = dict()

        try:
            username = config.get('rabbitmq', 'username')
            password = config.get('rabbitmq', 'password')
            user_pass = (username, password)
        except Exception:  # credentials not configured
            user_pass = False

        request = requests.get(config.get('rabbitmq', 'status_page_url'), auth=user_pass)

        # Retry with explicit basic auth only when credentials exist; the
        # original retried unconditionally and raised NameError on 401
        # without configured credentials.
        if request.status_code == 401 and user_pass is not False:
            request = requests.get(config.get('rabbitmq', 'status_page_url'), auth=HTTPBasicAuth(username, password))

        # `status_code is 200` (identity test) in the original only worked
        # by accident of CPython's small-int cache; use equality.
        if request.status_code == 200:
            try:
                j = request.json()
            except Exception as e:
                return e
        else:
            return "Could not load status page: {}".format(request.text)

        next_cache['ts'] = time.time()
        prev_cache = self.get_agent_cache()  # Get absolute values from previous check
        if 'message_stats' not in prev_cache:
            prev_cache['message_stats'] = {}
        next_cache['message_stats'] = j
        next_cache['message_stats']['ts'] = time.time()
        results['published'] = self.absolute_to_per_second('published', j['message_stats']['publish'], prev_cache['message_stats'])
        results['published_total'] = j['message_stats']['publish']
        results['ack'] = self.absolute_to_per_second('ack', j['message_stats']['ack'], prev_cache['message_stats'])
        results['ack_total'] = j['message_stats']['ack']
        results['deliver_get'] = self.absolute_to_per_second('deliver_get', j['message_stats']['deliver_get'], prev_cache['message_stats'])
        results['deliver_get_total'] = j['message_stats']['deliver_get']
        results['redeliver'] = self.absolute_to_per_second('redeliver', j['message_stats']['redeliver'], prev_cache['message_stats'])
        results['redeliver_total'] = j['message_stats']['redeliver']
        results['deliver'] = self.absolute_to_per_second('deliver', j['message_stats']['deliver'], prev_cache['message_stats'])
        results['deliver_total'] = j['message_stats']['deliver']

        results['messages'] = j['queue_totals']['messages']
        results['messages_ready'] = j['queue_totals']['messages_ready']
        results['messages_unacknowledged'] = j['queue_totals']['messages_unacknowledged']

        results['listeners'] = len(j['listeners'])

        results['consumers'] = j['object_totals']['consumers']
        results['queues'] = j['object_totals']['queues']
        results['exchanges'] = j['object_totals']['exchanges']
        results['connections'] = j['object_totals']['connections']
        results['channels'] = j['object_totals']['channels']

        # Cache absolute values for next check calculations
        self.set_agent_cache(next_cache)

        return results


if __name__ == '__main__':
    Plugin().execute()
memory human", 37 | #"used_memory_rss": "used memory rss", 38 | #"used_memory_rss_human": "used memory rss human", 39 | #"used_memory_peak": "used memory peak", 40 | "used_memory_peak_human": "used memory peak human", 41 | #"total_system_memory": "total system memory", 42 | #"total_system_memory_human": "total system memory human", 43 | #"used_memory_lua": "used memory lua", 44 | #"used_memory_lua_human": "used memory lua human", 45 | "maxmemory": "maxmemory", 46 | #"maxmemory_human": "maxmemory human", 47 | "maxmemory_policy": "maxmemory policy", 48 | #"mem_fragmentation_ratio": "mem fragmentation ratio", 49 | #"mem_allocator": "mem allocator", 50 | 51 | # Persistence section 52 | #"loading": "loading", 53 | #"rdb_changes_since_last_save": "rdb changes since last save", 54 | #"rdb_bgsave_in_progress": "rdb bgsave in progress", 55 | #"rdb_last_save_time": "rdb last save time", 56 | #"rdb_last_bgsave_status": "rdb last bgsave status", 57 | #"rdb_last_bgsave_time_sec": "rdb last bgsave time sec", 58 | #"rdb_current_bgsave_time_sec": "rdb current bgsave time sec", 59 | #"aof_enabled": "aof enabled", 60 | #"aof_rewrite_in_progress": "aof rewrite in progress", 61 | #"aof_rewrite_scheduled": "aof rewrite scheduled", 62 | #"aof_last_rewrite_time_sec": "aof last rewrite time", 63 | #"aof_current_rewrite_time_sec": "aof current rewrite time", 64 | #"aof_last_bgrewrite_status": "aof last bgrewrite status", 65 | #"aof_last_write_status": "aof last write status", 66 | #"aof_current_size": "aof current size", 67 | #"aof_base_size": "aof base size", 68 | #"aof_pending_rewrite": "aof pending rewrite", 69 | #"aof_buffer_length": "aof buffer length", 70 | #"aof_rewrite_buffer_length": "aof rewrite buffer length", 71 | #"aof_pending_bio_fsync": "aof pending bio fsync", 72 | #"aof_delayed_fsync": "aof delayed fsync", 73 | 74 | # Stats section 75 | #"total_connections_received": "total connections received", 76 | "total_commands_processed": "total commands processed", 77 | 
#"instantaneous_ops_per_sec": "instantaneous ops per sec", 78 | "total_net_input_bytes": "total net input bytes", 79 | "total_net_output_bytes": "total net output bytes", 80 | #"instantaneous_input_kbps": "instantaneous input kbps", 81 | #"instantaneous_output_kbps": "instantaneous output kbps", 82 | #"rejected_connections": "rejected connections", 83 | #"sync_full": "sync full", 84 | #"sync_partial_ok": "sync partial ok", 85 | #"sync_partial_err": "sync partial err", 86 | "expired_keys": "expired keys", 87 | "evicted_keys": "evicted keys", 88 | "keyspace_hits": "keyspace hits", 89 | "keyspace_misses": "keyspace misses", 90 | #"pubsub_channels": "pubsub channels", 91 | #"pubsub_patterns": "pubsub patterns", 92 | #"latest_fork_usec": "latest fork usec", 93 | #"migrate_cached_sockets": "migrate cached sockets", 94 | 95 | # Replication section 96 | #"role": "role", 97 | #"connected_slaves": "connected slaves", 98 | #"master_repl_offset": "master repl offset", 99 | #"repl_backlog_active": "repl backlog active", 100 | #"repl_backlog_size": "repl backlog size", 101 | #"repl_backlog_first_byte_offset": "repl backlog first byte offset", 102 | #"repl_backlog_histlen": "repl backlog histlen", 103 | 104 | # CPU section 105 | #"used_cpu_sys": "used cpu sys", 106 | #"used_cpu_user": "used cpu user", 107 | #"used_cpu_sys_children": "used cpu sys children", 108 | #"used_cpu_user_children": "used cpu user children", 109 | 110 | # Cluster section 111 | "cluster_enabled": "cluster enabled" 112 | } 113 | 114 | class Plugin(plugins.BasePlugin): 115 | __name__ = 'redis_stat' 116 | 117 | def run(self, config): 118 | data = {} 119 | stats = None 120 | try: 121 | redis_host = (config.get(__name__, 'host')) 122 | except: 123 | redis_host = '127.0.0.1' 124 | try: 125 | redis_port = (config.get(__name__, 'port')) 126 | except: 127 | redis_port = '6379' 128 | try: 129 | redis_db = (config.get(__name__, 'db')) 130 | except: 131 | redis_db = '0' 132 | try: 133 | redis_password = 
(config.get(__name__, 'password')) 134 | except: 135 | redis_password = '' 136 | 137 | try: 138 | redis_connection = redis.StrictRedis(host=redis_host,port=redis_port,db=redis_db,password=redis_password) 139 | stats = redis_connection.info() 140 | except Exception as e: 141 | data['status']=0 142 | data['msg']='Connection Error' 143 | if not stats: 144 | return data 145 | 146 | for name, value in stats.items(): 147 | if name in METRICS.keys() : 148 | data[METRICS[name]] = value 149 | return data 150 | 151 | if __name__ == '__main__': 152 | Plugin().execute() -------------------------------------------------------------------------------- /nixstatsagent/plugins/sleeper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import time 4 | import plugins 5 | 6 | 7 | class Plugin(plugins.BasePlugin): 8 | __name__ = 'sleeper' 9 | 10 | def run(self, *unused): 11 | time.sleep(60 * 60 * 24) 12 | 13 | 14 | if __name__ == '__main__': 15 | Plugin().run() 16 | -------------------------------------------------------------------------------- /nixstatsagent/plugins/swap.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import psutil 4 | import plugins 5 | 6 | 7 | class Plugin(plugins.BasePlugin): 8 | __name__ = 'swap' 9 | 10 | def run(self, *unused): 11 | swap = {} 12 | mem = psutil.swap_memory() 13 | for name in mem._fields: 14 | swap[name] = getattr(mem, name) 15 | return swap 16 | 17 | 18 | if __name__ == '__main__': 19 | Plugin().execute() 20 | -------------------------------------------------------------------------------- /nixstatsagent/plugins/system.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | try: 4 | import netifaces 5 | except ImportError: 6 | netifaces = None 7 | import os 8 | import platform 
class Plugin(plugins.BasePlugin):
    __name__ = 'system'

    def run(self, *unused):
        # Collect static host facts: CPU brand/count, OS name, total memory,
        # uptime, psutil/python versions and per-interface IP addresses.
        systeminfo = {}
        cpu = {}
        if(os.path.isfile("/proc/cpuinfo")):
            f = open('/proc/cpuinfo')
            if f:
                for line in f:
                    # Ignore the blank line separating the information between
                    # details about two processing units
                    if line.strip():
                        if "model name" == line.rstrip('\n').split(':')[0].strip():
                            cpu['brand'] = line.rstrip('\n').split(':')[1].strip()
                        if "Processor" == line.rstrip('\n').split(':')[0].strip():
                            cpu['brand'] = line.rstrip('\n').split(':')[1].strip()
                        if "processor" == line.rstrip('\n').split(':')[0].strip():
                            # Overwritten on every "processor: N" line, so this
                            # ends up as the LAST processor index as a string
                            # (e.g. "7" on an 8-core box).
                            # NOTE(review): looks off-by-one for a core count --
                            # confirm what the backend expects for 'cores'.
                            cpu['count'] = line.rstrip('\n').split(':')[1].strip()
        else:
            # No /proc/cpuinfo (macOS, Windows, BSD): platform branches below
            # may overwrite these defaults.
            cpu['brand'] = "Unknown CPU"
            cpu['count'] = 0
        mem = psutil.virtual_memory()
        if sys.platform == "linux" or sys.platform == "linux2":
            if distro is None:
                # NOTE(review): platform.linux_distribution() was removed in
                # Python 3.8; on 3.8+ this path raises AttributeError unless
                # the `distro` package import above succeeded -- confirm.
                systeminfo['os'] = str(' '.join(platform.linux_distribution()))
            else:
                systeminfo['os'] = str(' '.join(distro.linux_distribution(full_distribution_name=True)))
        elif sys.platform == "darwin":
            systeminfo['os'] = "Mac OS %s" % platform.mac_ver()[0]
            cpu['brand'] = str(systemCommand('sysctl machdep.cpu.brand_string', False)[0]).split(': ')[1]
            #cpu['count'] = systemCommand('sysctl hw.ncpu')
        elif sys.platform == "freebsd10" or sys.platform == "freebsd11":
            systeminfo['os'] = "FreeBSD %s" % platform.release()
            cpu['brand'] = str(systemCommand('sysctl hw.model', False)[0]).split(': ')[1]
            # NOTE(review): systemCommand() returns a (stdout, stderr) tuple,
            # so 'cores' is a tuple here, and on Python 3 systemCommand with
            # newlines=True splits bytes with a str and raises TypeError --
            # verify on a FreeBSD host.
            cpu['count'] = systemCommand('sysctl hw.ncpu')
        elif sys.platform == "win32":
            systeminfo['os'] = "{} {}".format(platform.uname()[0], platform.uname()[2])
        systeminfo['cpu'] = cpu['brand']
        systeminfo['cores'] = cpu['count']
        systeminfo['memory'] = mem.total  # total physical memory in bytes
        systeminfo['psutil'] = '.'.join(map(str, psutil.version_info))
        systeminfo['python_version'] = sys.version
        systeminfo['platform'] = platform.platform()
        # Seconds since boot, derived from psutil's boot timestamp.
        systeminfo['uptime'] = int(time.time()-psutil.boot_time())
        systeminfo['ip_addresses'] = ip_addresses()
        return systeminfo


if __name__ == '__main__':
    Plugin().execute()
running nixstatsagent test temp 14 | ''' 15 | data = {} 16 | 17 | if sys.platform == "win32": 18 | try: 19 | import wmi 20 | except: 21 | return 'wmi module not installed.' 22 | 23 | try: 24 | w = wmi.WMI(namespace="root\OpenHardwareMonitor") 25 | temperature_infos = w.Sensor() 26 | for sensor in temperature_infos: 27 | if sensor.SensorType==u'Temperature': 28 | data[sensor.Parent.replace('/','-').strip('-')] = sensor.Value 29 | return data 30 | except: 31 | return 'Could not fetch temperature data from OpenHardwareMonitor.' 32 | if not hasattr(psutil, "sensors_temperatures"): 33 | return "platform not supported" 34 | 35 | try: 36 | temps = psutil.sensors_temperatures() 37 | except: 38 | return "can't read any temperature" 39 | 40 | for device, temp in temps.items(): 41 | for value in temp: 42 | type = value[0] 43 | if value[0] == '': 44 | type = device 45 | data[type] = value[1] 46 | return data 47 | 48 | 49 | if __name__ == '__main__': 50 | Plugin().execute() 51 | -------------------------------------------------------------------------------- /nixstatsagent/plugins/unbound.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import plugins 3 | import subprocess 4 | 5 | # Needs: nixstats ALL=(ALL) NOPASSWD: /usr/sbin/unbound-control 6 | 7 | 8 | class Plugin(plugins.BasePlugin): 9 | 10 | __name__ = 'unbound' 11 | 12 | floatKeys = [ '.avg', '.median', '.now', '.up', '.elapsed' ] 13 | 14 | rate_metrics = [ 15 | "num.answer.bogus", 16 | "num.answer.rcode", 17 | "num.answer.secure", 18 | "num.cachehits", 19 | "num.cachemiss", 20 | "num.dnscrypt.cert", 21 | "num.dnscrypt.cleartext", 22 | "num.dnscrypt.crypted", 23 | "num.dnscrypt.malformed", 24 | "num.prefetch", 25 | "num.queries", 26 | "num.queries_ip_ratelimited", 27 | "num.query.aggressive", 28 | "num.query.authzone.down", 29 | "num.query.authzone.up", 30 | "num.query.class", 31 | "num.query.dnscrypt.replay", 32 | 
"num.query.dnscrypt.shared_secret.cachemiss", 33 | "num.query.edns", 34 | "num.query.flags", 35 | "num.query.ipv6", 36 | "num.query.opcode", 37 | "num.query.ratelimited", 38 | "num.query.subnet", 39 | "num.query.subnet_cache", 40 | "num.query.tcp", 41 | "num.query.tcpout", 42 | "num.query.tls", 43 | "num.query.tls.resume", 44 | "num.query.type", 45 | "num.recursivereplies", 46 | "num.rrset.bogus", 47 | "num.zero_ttl", 48 | "requestlist.exceeded", 49 | "requestlist.overwritten", 50 | "unwanted.queries", 51 | "unwanted.replies", 52 | ] 53 | 54 | gauge_metrics = [ 55 | "dnscrypt_nonce.cache.count", 56 | "dnscrypt_shared_secret.cache.count", 57 | "infra.cache.count", 58 | "key.cache.count", 59 | "mem.cache.dnscrypt_nonce", 60 | "mem.cache.dnscrypt_shared_secret", 61 | "mem.cache.message", 62 | "mem.cache.rrset", 63 | "mem.mod.iterator", 64 | "mem.mod.validator", 65 | "mem.streamwait", 66 | "msg.cache.count", 67 | "recursion.time.avg", 68 | "recursion.time.median", 69 | "requestlist.avg", 70 | "requestlist.current.all", 71 | "requestlist.current.user", 72 | "requestlist.max", 73 | "rrset.cache.count", 74 | "tcpusage", 75 | "time.elapsed", 76 | "time.now", 77 | "time.up", 78 | ] 79 | 80 | by_tag_labels = [ 81 | "num.answer.rcode", 82 | "num.query.aggressive", 83 | "num.query.class", 84 | "num.query.edns", 85 | "num.query.flags", 86 | "num.query.opcode", 87 | "num.query.type", 88 | ] 89 | 90 | def get_stats(self): 91 | cmd = 'sudo /usr/sbin/unbound-control stats' 92 | try: 93 | output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) 94 | except subprocess.CalledProcessError as e: 95 | error_msg = 'ERROR CALLING {0}: {1} {2}'.format(cmd, e, e.output) 96 | return None 97 | 98 | return output 99 | 100 | 101 | def parse_stat(self, stat): 102 | 103 | stats = {t[0]: t[2] for line in stat.splitlines() for t in [line.partition('=')]} 104 | for key, value in stats.items(): 105 | if key.endswith(tuple(self.floatKeys)): 106 | stats[key] = float(value) 107 | else: 
108 | stats[key] = int(value) 109 | return stats 110 | 111 | def run(self, *unused): 112 | 113 | resdata = self.get_stats() 114 | final = {} 115 | 116 | if resdata is None: 117 | return False 118 | else: 119 | final = self.parse_stat(resdata) 120 | 121 | return final 122 | 123 | if __name__ == '__main__': 124 | 125 | Plugin().execute() 126 | -------------------------------------------------------------------------------- /nixstatsagent/plugins/vms.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import re, sys, os 3 | import libvirt 4 | import libxml2 5 | import time 6 | import plugins 7 | import psutil 8 | 9 | class Plugin(plugins.BasePlugin): 10 | __name__ = 'vms' 11 | 12 | def run(self, config): 13 | ''' 14 | Using the libvirt API to fetch statistics from guests 15 | running KVM, QEMU, Xen, Virtuozzo, VMWare ESX, LXC, 16 | BHyve and more 17 | ''' 18 | results = {} 19 | last_value = {} 20 | prev_cache = self.get_agent_cache() # Get absolute values from previous check 21 | uri = os.getenv("uri", "qemu:///system") 22 | values = self.fetch_values(uri) 23 | 24 | deltas = {} 25 | for key, value in values.items(): 26 | deltas[key] = {} 27 | for subkey, subvalue in value.items(): 28 | if subkey == 'mem_bytes' or subkey == 'soft_limit_bytes' or subkey == 'min_guarantee_bytes' or subkey == 'hard_limit_bytes': 29 | deltas[key][subkey] = value[subkey] 30 | else: 31 | deltas[key][subkey] = self.absolute_to_per_second('%s_%s' % (key, subkey), float(subvalue), prev_cache) 32 | last_value['%s_%s' % (key, subkey)] = float(value[subkey]) 33 | last_value['ts'] = time.time() 34 | self.set_agent_cache(last_value) 35 | return deltas 36 | 37 | def canon(self, name): 38 | return re.sub(r"[^a-zA-Z0-9_]", "_", name) 39 | 40 | def get_ifaces(self, dom): 41 | xml = dom.XMLDesc(0) 42 | doc = None 43 | try: 44 | doc = libxml2.parseDoc(xml) 45 | except: 46 | return [] 47 | ctx = doc.xpathNewContext() 48 | ifaces 
= [] 49 | try: 50 | ret = ctx.xpathEval("/domain/devices/interface") 51 | for node in ret: 52 | devdst = None 53 | for child in node.children: 54 | if child.name == "target": 55 | devdst = child.prop("dev") 56 | if devdst == None: 57 | continue 58 | ifaces.append(devdst) 59 | finally: 60 | if ctx != None: 61 | ctx.xpathFreeContext() 62 | if doc != None: 63 | doc.freeDoc() 64 | return ifaces 65 | 66 | def get_memtune(self, dom): 67 | memtune = { 'min_guarantee': 0, 'soft_limit': 0, 'hard_limit': 0 } 68 | xml = dom.XMLDesc(0) 69 | 70 | try: 71 | doc = libxml2.parseDoc(xml) 72 | except: 73 | return [] 74 | 75 | ctx = doc.xpathNewContext() 76 | try: 77 | for key in memtune: 78 | ret = ctx.xpathEval("/domain/memtune/%s" % key) 79 | try: 80 | for child in ret[0].children: 81 | memtune[key] = int(child.content) 82 | break 83 | except IndexError: 84 | # key not found in xml 85 | pass 86 | finally: 87 | if ctx != None: 88 | ctx.xpathFreeContext() 89 | if doc != None: 90 | doc.freeDoc() 91 | return memtune 92 | 93 | def fetch_values(self, uri): 94 | conn = libvirt.openReadOnly(uri) 95 | ids = conn.listDomainsID() 96 | results = {} 97 | for id in ids: 98 | data = {} 99 | data['net_rx_bytes'] = 0 100 | data['net_tx_bytes'] = 0 101 | try: 102 | dom = conn.lookupByID(id) 103 | name = dom.name() 104 | except libvirt.libvirtError as err: 105 | print("Id: %s: %s" % (id, err), file=sys.stderr) 106 | continue 107 | if name == "Domain-0": 108 | continue 109 | ifaces = self.get_ifaces(dom) 110 | for iface in ifaces: 111 | try: 112 | stats = dom.interfaceStats(iface) 113 | data['net_rx_bytes'] += stats[0] 114 | data['net_tx_bytes'] += stats[4] 115 | except: 116 | print >>sys.stderr, "Cannot get ifstats for '%s' on '%s'" % (iface, name) 117 | 118 | cputime = float(dom.info()[4]) 119 | cputime_percentage = 1.0e-7 * cputime 120 | data['cpu'] = cputime_percentage 121 | try: 122 | data['cpu_percentage'] = cputime_percentage / psutil.cpu_count() 123 | except Exception as e: 124 | pass 125 | 
126 | maxmem, mem = dom.info()[1:3] 127 | mem *= 1024 128 | maxmem *= 1024 129 | data['mem_bytes'] = mem 130 | memtune = self.get_memtune(dom) 131 | data['min_guarantee_bytes'] = memtune['min_guarantee'] * 1024 132 | data['hard_limit_bytes'] = memtune['hard_limit'] * 1024 133 | data['soft_limit_bytes'] = memtune['soft_limit'] * 1024 134 | 135 | data['disk_rd_bytes'] = 0 136 | data['disk_wr_bytes'] = 0 137 | data['disk_wr_req'] = 0 138 | data['disk_rd_req'] = 0 139 | try: 140 | dom = conn.lookupByID(id) 141 | name = dom.name() 142 | except libvirt.libvirtError as err: 143 | print("Id: %s: %s" % (id, err), file=sys.stderr) 144 | continue 145 | if name == "Domain-0": 146 | continue 147 | disks = self.get_disks(dom) 148 | for disk in disks: 149 | try: 150 | rd_req, rd_bytes, wr_req, wr_bytes, errs = dom.blockStats(disk) 151 | data['disk_rd_bytes'] += rd_bytes 152 | data['disk_wr_bytes'] += wr_bytes 153 | data['disk_rd_req'] += rd_req 154 | data['disk_wr_req'] += wr_req 155 | except TypeError: 156 | print >>sys.stderr, "Cannot get blockstats for '%s' on '%s'" % (disk, name) 157 | 158 | results[self.canon(name)] = data 159 | return results 160 | 161 | def get_disks(self, dom): 162 | xml = dom.XMLDesc(0) 163 | doc = None 164 | try: 165 | doc = libxml2.parseDoc(xml) 166 | except: 167 | return [] 168 | ctx = doc.xpathNewContext() 169 | disks = [] 170 | try: 171 | ret = ctx.xpathEval("/domain/devices/disk") 172 | for node in ret: 173 | devdst = None 174 | for child in node.children: 175 | if child.name == "target": 176 | devdst = child.prop("dev") 177 | if devdst == None: 178 | continue 179 | disks.append(devdst) 180 | finally: 181 | if ctx != None: 182 | ctx.xpathFreeContext() 183 | if doc != None: 184 | doc.freeDoc() 185 | return disks 186 | 187 | 188 | if __name__ == '__main__': 189 | Plugin().execute() 190 | -------------------------------------------------------------------------------- /setup.py: 
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# by Al Nikolov


import os
import sys
import setuptools


here = os.path.abspath(os.path.dirname(__file__))

# Use a context manager so the README handle is closed instead of leaked
# (the bare open(...).read() raised ResourceWarning on Python 3).
with open(os.path.join(here, 'README.md')) as readme_file:
    readme = readme_file.read()

# Per-interpreter dependency pins: older interpreters need the last
# compatible releases of psutil/configparser; 'distro' is Python 3 only.
if sys.version.startswith('3.'):
    install_requires = ['psutil', 'netifaces', 'configparser', 'future', 'distro']
elif sys.version.startswith('2.6'):
    install_requires = ['psutil==5.7.0', 'netifaces', 'configparser==3.5.0', 'future']
elif sys.version.startswith('2.7'):
    install_requires = ['psutil', 'netifaces', 'configparser==3.5.0', 'future']
else:
    install_requires = ['psutil', 'netifaces', 'configparser', 'future']


setuptools.setup(
    name='nixstatsagent',
    version='1.2.18',
    description='NixStats agent',
    long_description_content_type='text/markdown',
    long_description=readme,
    url='https://github.com/NIXStats/nixstatsagent',
    author='NIXStats',
    author_email='vincent@nixstats.com',
    maintainer='Vincent',
    maintainer_email='vincent@nixstats.com',
    license='BSD-3-Clause',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: No Input/Output (Daemon)',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: System :: Monitoring',
    ],
    keywords='nixstats system monitoring agent',
    install_requires=install_requires,
    packages=setuptools.find_packages(),
    entry_points={
        'console_scripts': [
            'nixstatsagent=nixstatsagent.nixstatsagent:main',
            'nixstatshello=nixstatsagent.nixstatsagent:hello',
        ],
    },
    data_files=[('share/doc/nixstatsagent', [
        'nixstats-example.ini',
        'LICENSE',
        'README.md',
    ])],
)