├── .eslintrc.js ├── .gitignore ├── .node_version ├── DEVELOPMENT.md ├── LICENSE ├── Makefile ├── README.md ├── Vagrantfile ├── bin ├── nad-log.sh └── statsd.sh ├── etc └── nad.conf ├── examples ├── plugins │ ├── example-config.json │ ├── example-config.sh │ ├── example-continous.sh │ ├── example-json.sh │ ├── example-native.js │ └── example-tsv.sh └── self-config │ ├── README.md │ └── illumos.json ├── freebsd-init └── nad ├── install-sh ├── lib ├── apiclient │ └── index.js ├── broker │ └── index.js ├── circwmi │ └── index.js ├── debug_util │ └── index.js ├── dtrace_aggr2 │ └── index.js ├── index.js ├── inventory │ └── index.js ├── ktap_aggr │ └── index.js ├── nad_self_configure │ └── index.js ├── package.json ├── plugins │ └── index.js ├── push_receiver │ └── index.js ├── reverse │ ├── index.js │ └── noit.js ├── settings │ └── index.js └── statsd │ ├── index.js │ └── lib │ ├── circonus │ ├── index.js │ └── trap │ │ └── index.js │ ├── helpers.js │ └── servers │ ├── tcp.js │ └── udp.js ├── linux-init ├── logrotate ├── rhel-init ├── systemd.service ├── ubuntu-init └── upstart ├── man ├── nad.8 └── nad.md ├── mkinstalldirs ├── package.json ├── packaging ├── example-omnibus.conf ├── make-omnibus ├── omnibus-rpm.spec.in ├── ubuntu-postinstall.sh └── ubuntu-postremove.sh ├── plugins ├── cassandra │ ├── cassandra_cfstats.sh │ ├── cassandra_compaction.sh │ ├── cassandra_gcstats.sh │ ├── cassandra_info.sh │ └── cassandra_po.sh ├── ceph │ ├── README.md │ ├── ceph_df.js │ ├── ceph_osd_perf.js │ ├── ceph_osd_pool.js │ └── ceph_status.js ├── circonus-inside │ └── circpkg.sh ├── common │ ├── Makefile │ ├── apcaccess.sh │ ├── file_cksum.sh │ ├── file_md5sum.sh │ ├── net_listen.pl │ ├── open_files.sh │ ├── openssl_certificate_expiration.js │ ├── process_memory.pl │ ├── ps.pl │ ├── src │ │ ├── Makefile │ │ ├── boot_time.c │ │ ├── file_stat.c │ │ ├── loadavg.c │ │ └── user_logins.c │ └── zpool.sh ├── docker │ ├── Makefile │ ├── README.md │ ├── events.js │ ├── lib │ │ ├── events │ │ 
│ └── index.js │ │ └── stats │ │ │ └── index.js │ ├── package.json │ └── stats.js ├── freebsd │ ├── Makefile │ ├── common.sh │ ├── cpu.sh │ ├── if.sh │ ├── src │ │ ├── Makefile │ │ ├── disk.c │ │ ├── fs.c │ │ └── zfs_hack.h │ ├── vm.sh │ └── zfsinfo.sh ├── haproxy │ └── haproxy_backends.sh ├── illumos │ ├── Makefile │ ├── cpu-1.sh │ ├── fq.dtrace │ ├── fq2.js │ ├── if.sh │ ├── iflink.sh │ ├── io.dtrace │ ├── io2.js │ ├── lib │ │ └── kstat.lib │ ├── sdinfo.sh │ ├── smf.sh │ ├── src │ │ ├── Makefile │ │ ├── aggcpu.c │ │ ├── cpu.c │ │ ├── fs.c │ │ ├── ipmi.c │ │ ├── swap.c │ │ └── zpoolio.c │ ├── syscall2.js │ ├── tcp.sh │ ├── udp.sh │ ├── vminfo.sh │ ├── vnic.sh │ ├── zfsinfo.sh │ └── zone_vfs.sh ├── linux │ ├── Makefile │ ├── bccbpf │ │ ├── Makefile │ │ ├── README.md │ │ ├── bpf.c │ │ ├── bpf.lua │ │ ├── iolatency.c │ │ ├── iolatency.py │ │ └── lua │ │ │ ├── circll.lua │ │ │ ├── dkjson.lua │ │ │ ├── mod_bio.lua │ │ │ ├── mod_runqlat.lua │ │ │ └── mod_syscall.lua │ ├── cpu.sh │ ├── disk.sh │ ├── diskstats.sh │ ├── if.sh │ ├── io.js │ ├── src │ │ ├── Makefile │ │ └── fs.c │ ├── systemd.sh │ └── vm.sh ├── mysql │ ├── README.md │ ├── mysql-conf.sh │ └── mysql.sh ├── ohai │ ├── find-ruby.sh │ ├── ohai.sh │ └── ohai2nad.rb ├── openbsd │ ├── Makefile │ ├── carp.sh │ ├── cpu.sh │ ├── if.sh │ └── src │ │ ├── Makefile │ │ └── fs.c ├── pf │ └── pf.pl ├── postgresql │ ├── pg-conf.sh │ ├── pg_bgwriter.sh │ ├── pg_cache.sh │ ├── pg_connections.sh │ ├── pg_db_size.sh │ ├── pg_functions.sh │ ├── pg_isready.sh │ ├── pg_locks.sh │ ├── pg_partitions.sh │ ├── pg_protocol_observer.sh │ ├── pg_repl_lag.sh │ ├── pg_repl_slots.sh │ ├── pg_replication.sh │ ├── pg_table_stats.sh │ ├── pg_transactions.sh │ └── pg_vacuum.sh ├── smartos │ ├── .index.json │ └── jinf.sh └── windows │ └── wmi │ ├── default.conf │ ├── exec │ └── wmi.js │ ├── wmi.bat │ └── wmi.json ├── sbin ├── .eslintrc.js ├── nad.js └── nad.sh └── smf ├── circonus-nad └── nad.xml /.eslintrc.js: 
-------------------------------------------------------------------------------- 1 | module.exports = { 2 | root: true, 3 | extends: [ 4 | '@maier/base', 5 | '@maier/node' 6 | ], 7 | rules: { 8 | 'max-len': [ 9 | 'error', 10 | { 11 | code : 100, 12 | tabWidth : 4, 13 | ignoreComments : true, 14 | ignoreTrailingComments : true, 15 | ignoreUrls : true, 16 | ignoreStrings : true, 17 | ignoreTemplateLiterals : true, 18 | ignoreRegExpLiterals : true 19 | } 20 | ], 21 | 'no-plusplus': 'off', 22 | 'no-underscore-dangle': 'off', 23 | 'prefer-destructuring': 'off' 24 | } 25 | }; 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # osx 2 | .DS_Store 3 | 4 | # temporary files 5 | *.out 6 | packaging/omnibus-rpm.spec 7 | # platform specific 8 | node_modules/ 9 | *.elf 10 | 11 | # vagrant/editor files 12 | node-*.tar.[gx]z 13 | .eslintcache 14 | .vscode/ 15 | *~ 16 | 17 | # custom local packaging configuration 18 | packaging/omnibus.conf 19 | # notes/todo 20 | TODO.md 21 | -------------------------------------------------------------------------------- /.node_version: -------------------------------------------------------------------------------- 1 | v6.10.2 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2013 Circonus, Inc. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 
10 | * Redistributions in binary form must reproduce the above 11 | copyright notice, this list of conditions and the following 12 | disclaimer in the documentation and/or other materials provided 13 | with the distribution. 14 | * Neither the name Circonus, Inc. nor the names 15 | of its contributors may be used to endorse or promote products 16 | derived from this software without specific prior written 17 | permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /bin/nad-log.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2016 Circonus, Inc. All rights reserved. 4 | # Use of this source code is governed by a BSD-style 5 | # license that can be found in the LICENSE file. 
6 | 7 | export PATH="$PATH:@@PREFIX@@/bin" 8 | LOG="@@LOG@@/nad.log" 9 | PINO="@@MODULES@@/.bin/pino" 10 | 11 | [[ -f $LOG ]] || { 12 | echo "Unable to find NAD log ($LOG)" 13 | exit 1 14 | } 15 | 16 | [[ -x $PINO ]] || { 17 | echo "Unable to find required command ($PINO)" 18 | exit 1 19 | } 20 | 21 | tail -F $LOG | $PINO 22 | -------------------------------------------------------------------------------- /bin/statsd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Very simple bash client to send metrics to a statsd server 4 | # Example with gauge: ./statsd-client.sh 'my_metric:100|g' 5 | # 6 | # Alexander Fortin 7 | # 8 | # from: https://github.com/etsy/statsd/blob/master/examples/statsd-client.sh 9 | # 10 | host="${STATSD_HOST:-127.0.0.1}" 11 | port="${STATSD_PORT:-8125}" 12 | 13 | if [ $# -ne 1 ]; then 14 | echo "Syntax: $0 ''" 15 | exit 1 16 | fi 17 | 18 | # Setup UDP socket with statsd server 19 | exec 3<> /dev/udp/$host/$port 20 | 21 | # Send data 22 | printf "$1" >&3 23 | 24 | # Close UDP socket 25 | exec 3<&- 26 | exec 3>&- 27 | -------------------------------------------------------------------------------- /etc/nad.conf: -------------------------------------------------------------------------------- 1 | # 2 | # NAD Options 3 | # 4 | # See: https://github.com/circonus-labs/nad#options 5 | # 6 | 7 | # top-level plugin directory 8 | NAD_PLUGIN_DIR="@@CONF@@" 9 | 10 | # listen on port 2609 11 | #NAD_LISTEN="2609" 12 | 13 | # enable statsd listener 14 | #NAD_STATSD="yes" 15 | # custom statsd configuration file 16 | #NAD_STATSD_CONFIG="" 17 | 18 | # no reverse by default 19 | #NAD_REVERSE="no" 20 | # check bundle id to use for reverse 21 | # if none supplied, use cosi if available, otherwise search for an existing check 22 | #NAD_REVERSE_CID="" 23 | # broker CA Certificate file 24 | # default is retrieved from API 25 | #NAD_REVERSE_BROKER_CA="" 26 | # specific target to use for reverse 
(searching for an existing check) 27 | # default is the current hostname (from os.hostname()) 28 | #NAD_REVERSE_TARGET="" 29 | 30 | # API Token Key 31 | # for reverse - if no key supplied, use cosi if available 32 | #NAD_API_KEY="" 33 | # API Token App 34 | #NAD_API_APP="nad" 35 | # API URL 36 | #NAD_API_URL="https://api.circonus.com/v2/" 37 | # API CA Certificate file (required if api url is not using public certs) 38 | #NAD_API_CA="" 39 | 40 | # SSL Listen 41 | #SSL_LISTEN="" 42 | # SSL Cert 43 | #SSL_CERT="@@ETC@@/na.crt" 44 | # SSL Key 45 | #SSL_KEY="@@ETC@@/na.key" 46 | # SSL Verify 47 | #SSL_VERIFY="no" 48 | # SSL CA - only used if SSL Verify is enabled 49 | #SSL_CA="@@ETC@@/na.ca" 50 | 51 | # Drop privileges to user id 52 | #NAD_UID="nobody" 53 | # Drop privileges to group id 54 | #NAD_GID="nobody" 55 | 56 | # Logging level 57 | #NAD_LOG_LEVEL="info" 58 | -------------------------------------------------------------------------------- /examples/plugins/example-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "instance_A" : ["123", "456"], 3 | "instance_B" : ["234", "567"] 4 | } 5 | -------------------------------------------------------------------------------- /examples/plugins/example-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | printf "%s\t%s\t%s\n" "an_integer_metric" "l" "$1" 4 | printf "%s\t%s\t%s\n" "a_float_metric" "n" "$2" 5 | -------------------------------------------------------------------------------- /examples/plugins/example-continous.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Example of a long running executable plugin that emits continous output 4 | # 5 | 6 | # Emit a blank metric set at the beginning, so that nad does not wait for us 7 | printf "\n" 8 | 9 | # take some time to initialize 10 | sleep 20 11 | 12 | while true 13 | do 14 | # emit 
metric data 15 | printf "%s\t%s\t%s\n" "time" "l" "$(date +%s)" 16 | # signal end of metric set, by emitting a newline 17 | printf "\n" 18 | # Only the last metric set is submitted. Hence we need to align emission with collection 19 | # interval: 20 | sleep 60 21 | done 22 | -------------------------------------------------------------------------------- /examples/plugins/example-json.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # SCRIPT OUTPUT 4 | # 5 | # Executables in the configdir write metrics to standard output. 6 | # These can either be tab separated, or as JSON documents. 7 | # 8 | # If you elect to produce JSON formatted output in your programs, you 9 | # must provide a JSON object whose keys have values that look so: 10 | # 11 | # { "_type": , "_value": } 12 | # 13 | # Valid s are: 14 | # 15 | # i - indicating a signed 32bit integer value, 16 | # 17 | # I - indicating an unsigned 32bit integer value, 18 | # 19 | # l - indicating a signed 64bit integer value, 20 | # 21 | # L - indicating an unsigned 64bit integer value, 22 | # 23 | # n - indicating a value to be represented as a double, or 24 | # 25 | # s - indicating that the value is a string. 26 | # 27 | # 28 | # Numeric s can be provided as JSON number or string. This 29 | # is in order to ensure that very large, and high precision 30 | # values, that exceed the float/int32 range, can be safely submitted 31 | # to the system. 32 | # 33 | cat <\t\t 9 | # Indicating that the metric specified has value 10 | # 11 | # \t 12 | # Indicating that the metric specified has a null value. 
13 | # 14 | # 15 | # Valid values are: 16 | # 17 | # i - indicating a signed 32bit integer value, 18 | # 19 | # I - indicating an unsigned 32bit integer value, 20 | # 21 | # l - indicating a signed 64bit integer value, 22 | # 23 | # L - indicating an unsigned 64bit integer value, 24 | # 25 | # n - indicating a value to be represented as a double, or 26 | # 27 | # s - indicating the the value is a string. 28 | # 29 | printf "%s\n" '# { "timeout": 1.12 }' # optional timeout information 30 | printf "%s\t%s\t%s\n" "an_integer_metric" "l" "123456789" 31 | printf "%s\t%s\t%s\n" "a_float_metric" "n" "1.2345678" 32 | -------------------------------------------------------------------------------- /examples/self-config/README.md: -------------------------------------------------------------------------------- 1 | ## Automatic Configuration with Circonus 2 | 3 | nad can automatically configure itself with Circonus via a few command 4 | line options. When running in configuration mode, nad will create a check 5 | and graphs with Circonus, and then exit. It will not attempt to bind to any port, 6 | so is safe to use while running normally. 7 | 8 | * `--authtoken ` This is the Circonus API auth token to use when talking with the API. This "activates" the configuration mode. 9 | 10 | * `--target ` This should be either the IP or hostname that the Circonus broker can talk to this host at. Required. 11 | 12 | * `--hostname ` This is the hostname to use in the check and graph names. If not passed, nad will attempt to look it up via commands like /usr/bin/zonename 13 | 14 | * `--brokerid ` The ID from Circonus for the broker on which you wish to configure the check. Required. 15 | 16 | * `--configfile ` The path to the config file to use that defines the metrics and graphs to create in Circonus. Look at config/illumos.json for an example. Required. 17 | 18 | * `--debugdir ` Creates debug files for each script and write them to this directory. Optional. 
19 | 20 | * `--wipedebugdir` Wipes debug files clean before each write. Optional. 21 | 22 | By default, nad talks to the main Circonus installation. You can also 23 | configure nad to talk to a Circonus Inside install with the following 24 | config options: 25 | 26 | * `--apihost ` An alternative host to 'api.circonus.com' 27 | 28 | * `--apiport An alternative port to `443` 29 | 30 | * `--apiprotocol ` An alternative protocol to 'https' (i.e. 'http') 31 | 32 | * `--apipath ` An alternative base path for the API server 33 | 34 | ### Config file 35 | 36 | The `--configfile` parameter defines which config file to use when setting up 37 | checks and graphs in Circonus. There are two keys that nad looks for. 38 | 39 | The check key contains the definition that will be passed to the check bundle 40 | endpoint in the Circonus API. You can set values like the period and timeout 41 | here, as well as config options (in the config key). The metrics key defines 42 | which metrics we will collect and has 2 subkeys, numeric and text, which are 43 | simply lists of metric names. When nad attempts to create the check, if it 44 | gets back a pre-existing check, then nad will update the check, adding the new 45 | metric names. 46 | 47 | The graphs key defines a collection of graphs to create. Each subkey is the 48 | name of the graph that will be created in Circonus, with the hostname 49 | prepended to it. Under the names, the structure is identical to the 50 | documentation for the Circonus graph API. Any values added will be passed to 51 | the API as is. 52 | 53 | -------------------------------------------------------------------------------- /freebsd-init/nad: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # PROVIDE: nad 4 | # REQUIRE: LOGIN NETWORKING 5 | # KEYWORD: shutdown 6 | 7 | . 
/etc/rc.subr 8 | 9 | name=nad 10 | rcvar="${name}_enable" 11 | pidfile="@@PID_FILE@@" 12 | start_cmd="${name}_start" 13 | stop_cmd="${name}_stop" 14 | 15 | nad_start() 16 | { 17 | echo "Starting ${name}." 18 | export PATH="$PATH:/usr/local/bin:/usr/local/sbin" 19 | @@SBIN@@/nad --daemon --pid_file $pidfile @@SYSLOG@@ 20 | } 21 | 22 | nad_stop() 23 | { 24 | echo "Stopping ${name}." 25 | kill $(cat $pidfile) 26 | } 27 | 28 | load_rc_config $name 29 | run_rc_command "$1" 30 | -------------------------------------------------------------------------------- /lib/debug_util/index.js: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Circonus, Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | 'use strict'; 6 | 7 | /* eslint-disable no-sync */ 8 | 9 | const fs = require('fs'); 10 | const path = require('path'); 11 | 12 | const nad = require('nad'); 13 | const settings = require(path.join(nad.lib_dir, 'settings')); 14 | const log = settings.logger.child({ module: 'debug_util' }); 15 | 16 | /** 17 | * debugging 18 | * @arg {String} script_name name 19 | * @arg {String} debug_dir directory 20 | * @returns {Undefined} nothing 21 | */ 22 | function init_debug(script_name, debug_dir) { 23 | const debug_file = path.resolve(path.join(debug_dir, `${script_name}.nad_debug`)); 24 | 25 | try { 26 | if (fs.existsSync(debug_file)) { 27 | fs.unlinkSync(debug_file); 28 | } 29 | } catch (err) { 30 | log.error({ err: err.message, file: debug_file }, 'initializing debug file'); 31 | } 32 | } 33 | 34 | /** 35 | * write debugging output 36 | * @arg {String} script_name name 37 | * @arg {Array} debug_lines lines to output 38 | * @arg {String} debug_dir directory 39 | * @arg {Boolean} wipe_debug_dir clear previous file 40 | * @returns {Undefined} nothing 41 | */ 42 | function write_debug_output(script_name, debug_lines, debug_dir, wipe_debug_dir) { 
43 | const debug_file = path.resolve(path.join(debug_dir, `${script_name}.nad_debug`)); 44 | 45 | try { 46 | if (wipe_debug_dir) { 47 | init_debug(script_name, debug_dir); 48 | } 49 | fs.appendFile(debug_file, `-----START RECORD-----\n${debug_lines.join('\n')}\n-----END RECORD-----\n`); 50 | } catch (err) { 51 | log.error({ err: err.message, file: debug_file }, 'writing to debug file'); 52 | } 53 | } 54 | 55 | exports.init_debug = init_debug; 56 | exports.write_debug_output = write_debug_output; 57 | -------------------------------------------------------------------------------- /lib/index.js: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Circonus, Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | 'use strict'; 6 | 7 | const fs = require('fs'); 8 | const path = require('path'); 9 | 10 | /** @private */ 11 | let instance = null; 12 | 13 | class NAD { 14 | 15 | /** 16 | * create instance 17 | * @constructor 18 | */ 19 | constructor() { 20 | if (instance !== null) { 21 | return instance; 22 | } 23 | 24 | /** 25 | * NAD installation directory - default: /opt/circonus/nad 26 | * @public 27 | */ 28 | this.nad_dir = fs.realpathSync(path.join(__dirname, '..', '..')); // eslint-disable-line no-sync 29 | 30 | /** 31 | * NAD etc directory 32 | * @public 33 | */ 34 | this.etc_dir = fs.realpathSync(path.join(this.nad_dir, 'etc')); // eslint-disable-line no-sync 35 | 36 | /** 37 | * NAD modules library directory - default: /opt/circonus/nad/node_modules/nad 38 | * @public 39 | */ 40 | this.lib_dir = fs.realpathSync(__dirname); // eslint-disable-line no-sync 41 | 42 | /** 43 | * top level installation directory - default: /opt/circonus 44 | * @public 45 | */ 46 | this.base_dir = fs.realpathSync(path.join(__dirname, '..', '..', '..')); // eslint-disable-line no-sync 47 | 48 | instance = this; // eslint-disable-line 
consistent-this 49 | 50 | return instance; 51 | } 52 | 53 | } 54 | 55 | module.exports = new NAD(); 56 | -------------------------------------------------------------------------------- /lib/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nad", 3 | "version": "1.0.0", 4 | "description": "NAD support module packages", 5 | "private": true 6 | } 7 | -------------------------------------------------------------------------------- /lib/push_receiver/index.js: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Circonus, Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | /* 6 | 7 | Push receiver 8 | 9 | NOTE: metrics sent to the push receiver must be well formed JSON, with types and values. 10 | 11 | { 12 | "foo": { 13 | "_type": "n", 14 | "_value":1 15 | }, 16 | "bar": { 17 | "_type": "n", 18 | "_value": 2 19 | } 20 | } 21 | 22 | */ 23 | 24 | 'use strict'; 25 | 26 | const path = require('path'); 27 | 28 | const nad = require('nad'); 29 | const settings = require(path.join(nad.lib_dir, 'settings')); 30 | const log = settings.logger.child({ module: 'push_receiver' }); 31 | 32 | /** 33 | * aggregates samples for a period 34 | * @arg {Object} tgt target 35 | * @arg {Object} src source 36 | * @returns {Undefined} nothing 37 | */ 38 | function __aggr(tgt, src) { 39 | for (const key in src) { 40 | if (!{}.hasOwnProperty.call(tgt, key)) { 41 | tgt[key] = { _type: src[key]._type, _value: [] }; // eslint-disable-line no-param-reassign 42 | } 43 | if (Array.isArray(src[key]._value)) { 44 | tgt[key]._value = tgt[key]._value.concat(src[key]._value); // eslint-disable-line no-param-reassign 45 | } else { 46 | tgt[key]._value.push(src[key]._value); 47 | } 48 | } 49 | } 50 | 51 | module.exports = class PushReceiver { 52 | 53 | /** 54 | * initialize new instance 55 | */ 56 | 
constructor() { 57 | this.metrics = {}; 58 | } 59 | 60 | /** 61 | * run plugin 62 | * @arg {Object} details plugin definition 63 | * @arg {Function} cb callback 64 | * @arg {Object} req http request object 65 | * @arg {Object} args instance arguments 66 | * @arg {String} instance name/id 67 | * @returns {Undefined} nothing 68 | */ 69 | run(details, cb, req, args, instance) { // eslint-disable-line max-params, no-unused-vars 70 | const metrics = {}; 71 | 72 | let period = req ? req.headers['x-reconnoiter-period'] : 0; 73 | 74 | if (!period) { 75 | period = 60000; 76 | } 77 | 78 | for (const metric in this.metrics) { 79 | if (!{}.hasOwnProperty.call(this.metrics, metric)) { 80 | continue; 81 | } 82 | 83 | const start_idx = Math.floor(Date.now() / 1000) - Math.floor(period / 1000); 84 | const agg_metrics = {}; 85 | 86 | for (let i = start_idx; i <= this.metrics[metric].last_idx; i++) { 87 | __aggr(agg_metrics, this.metrics[metric].windows[i % 60]); 88 | } 89 | 90 | metrics[metric] = agg_metrics; 91 | } 92 | 93 | details.running = false; // eslint-disable-line no-param-reassign 94 | cb(details, metrics, instance); 95 | } 96 | 97 | /** 98 | * save pushed data 99 | * @arg {String} name name/id of metric group/category 100 | * @arg {Buffer} incoming_data raw data in JSON format 101 | * @returns {Undefined} nothing 102 | */ 103 | store_incoming_data(name, incoming_data) { 104 | log.debug({ name }, 'received data'); 105 | log.trace({ raw_data: incoming_data.toString() }, 'incoming data'); 106 | 107 | if (!{}.hasOwnProperty.call(this.metrics, name)) { 108 | this.metrics[name] = { 109 | last_idx : 0, 110 | windows : [] 111 | }; 112 | for (let i = 0; i < 60; i++) { 113 | this.metrics[name].windows[i] = {}; 114 | } 115 | } 116 | 117 | const ref = this.metrics[name]; 118 | const this_idx = Math.floor(Date.now() / 1000); 119 | 120 | if (!ref.last_idx) { 121 | ref.last_idx = this_idx; 122 | } 123 | 124 | /* everything from last up to now is moot */ 125 | for (let i = ref.last_idx 
+ 1; i < this_idx; i++) { 126 | ref.windows[i % 60] = {}; 127 | } 128 | 129 | let data = null; 130 | 131 | try { 132 | data = JSON.parse(incoming_data); 133 | } catch (err) { 134 | log.error({ err: err.message }, 'parsing incoming data'); 135 | 136 | return; 137 | } 138 | 139 | log.trace({ 140 | location_idx : this_idx % 60, 141 | name, 142 | parsed_data : data 143 | }, 'storing data'); 144 | 145 | ref.windows[this_idx % 60] = data; 146 | ref.last_idx = this_idx; 147 | } 148 | 149 | }; 150 | -------------------------------------------------------------------------------- /lib/statsd/lib/helpers.js: -------------------------------------------------------------------------------- 1 | // a customized version of https://github.com/etsy/statsd 2 | 3 | /* eslint-disable require-jsdoc */ 4 | 5 | 'use strict'; 6 | 7 | 8 | function isNumber(str) { 9 | return Boolean(str && !isNaN(str)); 10 | } 11 | 12 | function isValidSampleRate(str) { 13 | let validSampleRate = false; 14 | 15 | if (str.length > 1 && str[0] === '@') { 16 | const numberStr = str.substring(1); 17 | 18 | validSampleRate = isNumber(numberStr) && numberStr[0] !== '-'; 19 | } 20 | 21 | return validSampleRate; 22 | } 23 | 24 | /** 25 | * filter out malformed packets 26 | * @arg {Array} fields Array of packet data (e.g. 
[ '100', 'ms', '@0.1' ]) 27 | * @returns {Boolean} valid packet 28 | */ 29 | function is_valid_packet(fields) { 30 | // test for existing metrics type 31 | if (typeof fields[1] === 'undefined') { 32 | return false; 33 | } 34 | 35 | // filter out malformed sample rates 36 | if (typeof fields[2] !== 'undefined') { 37 | if (!isValidSampleRate(fields[2])) { 38 | return false; 39 | } 40 | } 41 | 42 | // filter out invalid metrics values 43 | switch (fields[1]) { 44 | case 't': 45 | return true; 46 | case 's': 47 | return true; 48 | case 'g': 49 | return isNumber(fields[0]); 50 | case 'ms': 51 | return isNumber(fields[0]) && Number(fields[0]) >= 0; 52 | default: 53 | if (!isNumber(fields[0])) { 54 | return false; 55 | } 56 | 57 | return true; 58 | } 59 | } 60 | 61 | /** 62 | * histogram_bucket_id transforms a value into its correct 63 | * bucket and returns the bucket id as a string 64 | * @arg {Numer} origVal original values 65 | * @returns {String} histogram bucket id 66 | */ 67 | function histogram_bucket_id(origVal) { 68 | let val = origVal; 69 | let vString = ''; 70 | let exp = 0; 71 | 72 | if (val === 0) { 73 | return 'H[0]'; 74 | } 75 | 76 | if (val < 0) { 77 | vString = '-'; 78 | val *= -1; 79 | } 80 | 81 | while (val < 10) { 82 | val *= 10; 83 | exp -= 1; 84 | } 85 | 86 | while (val >= 100) { 87 | val /= 10; 88 | exp += 1; 89 | } 90 | 91 | val = Math.floor(val); 92 | val /= 10; 93 | exp += 1; 94 | 95 | return `H[${vString}${val.toString()}e${exp.toString()}]`; 96 | } 97 | 98 | 99 | /** 100 | * make_histogram takes a list of raw values and returns a list of bucket 101 | * strings parseable by the broker 102 | * @arg {Array} values to place in histogram 103 | * @returns {Array} histogram buckets 104 | */ 105 | function make_histogram(values) { 106 | const temp = {}; 107 | const ret = []; 108 | 109 | for (const value of values) { 110 | const bucket = histogram_bucket_id(value); 111 | 112 | if (!temp[bucket]) { 113 | temp[bucket] = 0; 114 | } 115 | temp[bucket] += 
1; 116 | } 117 | 118 | for (const bkt in temp) { // eslint-disable-line guard-for-in 119 | ret.push(`${bkt}=${temp[bkt]}`); 120 | } 121 | 122 | return ret; 123 | } 124 | 125 | /** 126 | * sanitize_key returns clean metric name 127 | * @arg {String} key metric key name 128 | * @returns {String} sanitized key 129 | */ 130 | function sanitize_key(key) { 131 | return key. 132 | replace(/\s+/g, '_'). 133 | replace(/\//g, '-'). 134 | replace(/[^a-zA-Z0-9_`\-.]/g, ''); 135 | } 136 | 137 | module.exports.is_valid_packet = is_valid_packet; 138 | module.exports.make_histogram = make_histogram; 139 | module.exports.sanitize_key = sanitize_key; 140 | 141 | // END 142 | -------------------------------------------------------------------------------- /lib/statsd/lib/servers/tcp.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | /* eslint-disable no-invalid-this */ 4 | /* eslint-disable require-jsdoc */ 5 | /* eslint-disable callback-return */ 6 | /* eslint-disable no-sync */ 7 | 8 | const net = require('net'); 9 | const fs = require('fs'); 10 | 11 | function RInfo(tcpstream, data) { 12 | this.address = tcpstream.remoteAddress; 13 | this.port = tcpstream.remotePort; 14 | this.family = tcpstream.address() ? 
tcpstream.address().family : 'IPv4'; 15 | this.size = data.length; 16 | } 17 | 18 | exports.start = (config, callback) => { 19 | const server = net.createServer((stream) => { 20 | stream.setEncoding('ascii'); 21 | 22 | let buffer = ''; 23 | 24 | stream.on('data', (data) => { 25 | buffer += data; 26 | const offset = buffer.lastIndexOf('\n'); 27 | 28 | if (offset > -1) { 29 | const packet = buffer.slice(0, offset + 1); 30 | 31 | buffer = buffer.slice(offset + 1); 32 | callback(packet, new RInfo(stream, packet)); 33 | } 34 | }); 35 | }); 36 | 37 | server.on('listening', () => { 38 | if (config.socket && config.socket_mod) { 39 | fs.chmod(config.socket, config.socket_mod); 40 | } 41 | }); 42 | 43 | process.on('exit', () => { 44 | if (config.socket) { 45 | fs.unlinkSync(config.socket); 46 | } 47 | }); 48 | 49 | server.listen(config.socket || config.port || 8125, config.address || null); 50 | 51 | this.server = server; 52 | 53 | return true; 54 | }; 55 | -------------------------------------------------------------------------------- /lib/statsd/lib/servers/udp.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | /* eslint-disable require-jsdoc */ 4 | 5 | const dgram = require('dgram'); 6 | 7 | class UDPServer { 8 | 9 | start(config, callback) { 10 | const udp_version = config.address_ipv6 ? 
'udp6' : 'udp4'; 11 | const server = dgram.createSocket(udp_version, callback); 12 | 13 | server.bind(config.port || 8125, config.address); 14 | 15 | this.server = server; 16 | 17 | return true; 18 | } 19 | 20 | } 21 | 22 | module.exports = new UDPServer(); 23 | 24 | // END 25 | -------------------------------------------------------------------------------- /linux-init/logrotate: -------------------------------------------------------------------------------- 1 | @@LOG@@/nad.log { 2 | dateext 3 | daily 4 | rotate 7 5 | delaycompress 6 | compress 7 | notifempty 8 | missingok 9 | copytruncate 10 | } 11 | -------------------------------------------------------------------------------- /linux-init/rhel-init: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # nad Startup script for Circonus Node Agent Daemon 4 | # 5 | # chkconfig: - 98 02 6 | # description: Circonus Node Agent Daemon 7 | # processname: nad 8 | # 9 | ### BEGIN INIT INFO 10 | # Provides: nad 11 | # Required-Start: $local_fs 12 | # Required-Stop: $local_fs 13 | # Default-Start: 2 3 4 5 14 | # Default-Stop: 0 1 6 15 | # Short-Description: Circonus Node Agent Daemon 16 | # Description: A very thin, simply managed host agent written in Node.js 17 | # The node-agent daemon provides a simple mechanism to expose 18 | # systems and application metrics to external onlookers. 19 | # It inventories all executable programs in its config directory 20 | # and executes them upon external request, returning results 21 | # in JSON format. 22 | ### END INIT INFO 23 | 24 | # Source function library. 25 | . /etc/rc.d/init.d/functions 26 | 27 | prog="Circonus Node Agent Daemon" 28 | NAD="@@SBIN@@/nad" 29 | NAD_PIDFILE="@@PID_FILE@@" 30 | 31 | run_app() { 32 | $NAD --daemon --pid_file $NAD_PIDFILE 33 | RETVAL=$? 
34 | if [[ $RETVAL -eq 0 ]]; then 35 | success 36 | else 37 | failure 38 | fi 39 | return $RETVAL 40 | } 41 | 42 | start() { 43 | echo -n $"Starting $prog: " 44 | RETVAL=3 45 | [[ -f $NAD_PIDFILE ]] && { __pids_var_run nad $NAD_PIDFILE; RETVAL=$?; } 46 | if [[ "$RETVAL" == "3" ]]; then 47 | # Not running, so start 48 | run_app 49 | RETVAL=$? 50 | elif [[ "$RETVAL" == "1" ]]; then 51 | # Stale pidfile 52 | rm $NAD_PIDFILE 53 | run_app 54 | RETVAL=$? 55 | else 56 | # Already running 57 | success 58 | RETVAL=0 59 | fi 60 | echo 61 | return $RETVAL 62 | } 63 | 64 | stop() { 65 | echo -n $"Stopping $prog: " 66 | killproc -p $NAD_PIDFILE 67 | RETVAL=$? 68 | echo 69 | return $RETVAL 70 | } 71 | 72 | # See how we were called. 73 | case "$1" in 74 | start) 75 | start 76 | ;; 77 | stop) 78 | stop 79 | ;; 80 | status) 81 | status -p $NAD_PIDFILE $NAD 82 | RETVAL=$? 83 | ;; 84 | reload|force-reload) 85 | echo "Reloading Circonus node agent daemon: not needed, as there is no config file." 86 | ;; 87 | restart) 88 | stop 89 | start 90 | ;; 91 | *) 92 | echo $"Usage: $0 {start|stop|reload|force-reload|status|restart}" 93 | RETVAL=2 94 | esac 95 | 96 | exit $RETVAL 97 | -------------------------------------------------------------------------------- /linux-init/systemd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Circonus Node Agent Daemon 3 | Documentation=http://github.com/circonus-labs/nad 4 | After=network.target 5 | 6 | [Service] 7 | Type=forking 8 | PIDFile=@@PID_FILE@@ 9 | ExecStart=@@SBIN@@/nad --daemon --pid_file @@PID_FILE@@ 10 | Restart=always 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /linux-init/ubuntu-init: -------------------------------------------------------------------------------- 1 | #! 
/bin/sh 2 | 3 | ### BEGIN INIT INFO 4 | # Provides: nad 5 | # Required-Start: $local_fs 6 | # Required-Stop: $local_fs 7 | # Default-Start: 2 3 4 5 8 | # Default-Stop: 0 1 6 9 | # Short-Description: A very thin, simply managed host agent written in Node.js 10 | # Description: The node-agent daemon provides a simple mechanism to expose 11 | # systems and application metrics to external onlookers. 12 | # It inventories all executable programs in its config directory 13 | # and executes them upon external request, returning results 14 | # in JSON format. 15 | ### END INIT INFO 16 | 17 | set -e 18 | 19 | # /etc/init.d/nad: start and stop nad 20 | 21 | DAEMON="@@SBIN@@/nad" 22 | NAD_PID_FILE="@@PID_FILE@@" 23 | 24 | test -x $DAEMON || exit 0 25 | 26 | . /lib/lsb/init-functions 27 | 28 | nad_start() { 29 | if start-stop-daemon --start --quiet --pidfile $NAD_PID_FILE \ 30 | --exec $DAEMON -- --daemon --pid_file @@PID_FILE@@ 31 | then 32 | rc=0 33 | sleep 1 34 | if ! kill -0 $(cat $NAD_PID_FILE) >/dev/null 2>&1; then 35 | log_failure_msg "Circonus node agent daemon failed to start" 36 | rc=1 37 | fi 38 | else 39 | rc=1 40 | fi 41 | if [ $rc -eq 0 ]; then 42 | log_end_msg 0 43 | else 44 | log_end_msg 1 45 | rm -f $NAD_PID_FILE 46 | fi 47 | } # nad_start 48 | 49 | 50 | case "$1" in 51 | start) 52 | log_daemon_msg "Starting Circonus node agent daemon" "nad" 53 | if [ -s $NAD_PID_FILE ] && kill -0 $(cat $NAD_PID_FILE) >/dev/null 2>&1; then 54 | log_progress_msg "apparently already running" 55 | log_end_msg 0 56 | exit 0 57 | fi 58 | nad_start 59 | ;; 60 | stop) 61 | log_daemon_msg "Stopping Circonus node agent daemon" "nad" 62 | start-stop-daemon --stop --quiet --oknodo --pidfile $NAD_PID_FILE 63 | log_end_msg $? 64 | rm -f $NAD_PID_FILE 65 | ;; 66 | 67 | reload|force-reload) 68 | log_warning_msg "Reloading Circonus node agent daemon: not needed, as there is no config file." 
69 | ;; 70 | 71 | restart) 72 | set +e 73 | log_daemon_msg "Restarting Circonus node agent daemon" "nad" 74 | if [ -s $NAD_PID_FILE ] && kill -0 $(cat $NAD_PID_FILE) >/dev/null 2>&1; then 75 | start-stop-daemon --stop --quiet --oknodo --pidfile $NAD_PID_FILE || true 76 | sleep 1 77 | else 78 | log_warning_msg "Circonus node agent daemon not running, attempting to start." 79 | rm -f $NAD_PID_FILE 80 | fi 81 | nad_start 82 | ;; 83 | 84 | status) 85 | status_of_proc -p $NAD_PID_FILE "$DAEMON" nad 86 | exit $? # notreached due to set -e 87 | ;; 88 | *) 89 | echo "Usage: /etc/init.d/nad {start|stop|reload|force-reload|restart|status}" 90 | exit 1 91 | esac 92 | 93 | exit 0 94 | -------------------------------------------------------------------------------- /linux-init/upstart: -------------------------------------------------------------------------------- 1 | description "Circonus Node Agent" 2 | author "circonus " 3 | 4 | start on (local-filesystems and stopped networking) 5 | stop on shutdown 6 | 7 | expect daemon 8 | exec @@SBIN@@/nad --daemon --pid_file @@PID_FILE@@ 9 | -------------------------------------------------------------------------------- /man/nad.8: -------------------------------------------------------------------------------- 1 | .TH "NAD" "8" "April 2017" "" "" 2 | .SH "NAME" 3 | \fBnad\fR - Node Agent Daemon 4 | .SH "SYNOPSIS" 5 | .P 6 | \fBnad \[lB]options\[rB]\fR 7 | .SH "DESCRIPTION" 8 | .P 9 | The node agent daemon (NAD) provides a simple mechanism to expose systems and application metrics to external onlookers. It inventories all executable programs/scripts in the \fIplugin directory\fR and executes them upon external request (via http or https) and returns the results in JSON format. 10 | .P 11 | Full documentation is available online in the NAD github repository - \fI\(lahttps://github.com/circonus-labs/nad\(ra\fR. 
12 | .SH "OPTIONS" 13 | .P 14 | .RS 2 15 | .nf 16 | -h, --help output usage information 17 | -V, --version output the version number 18 | --plugin-dir Plugin directory \[lB]/opt/circonus/nad/etc/node-agent.d\[rB] 19 | -p, --listen Listening IP address and port \[lB]2609\[rB] 20 | -r, --reverse Use reverse connection to broker \[lB]false\[rB] 21 | --cid Check bundle id for reverse connection \[lB]\[rB] 22 | --broker-ca CA file for broker reverse connection and statsd \[lB]\[rB] 23 | --api-key Circonus API Token key \[lB]\[rB] 24 | --api-app Circonus API Token app \[lB]nad\[rB] 25 | --api-url Circonus API URL \[lB]https://api.circonus.com/v2/\[rB] 26 | --api-ca CA file for API URL \[lB]\[rB] 27 | --hostname Hostname self-configure to use in check and graph names \[lB]centos7\[rB] 28 | --brokerid Broker ID for self-configure to use for creating check \[lB]\[rB] 29 | --configfile File in plugin-dir for self-configure \[lB]\[rB] 30 | --target Target host \[lB]centos7\[rB] -- see Target below 31 | --ssl-listen SSL listening IP address and port \[lB]\[rB] 32 | --ssl-cert SSL certificate PEM file, required for SSL \[lB]/opt/circonus/nad/etc/na.crt\[rB] 33 | --ssl-key SSL certificate key PEM file, required for SSL \[lB]/opt/circonus/nad/etc/na.key\[rB] 34 | --ssl-ca SSL CA certificate PEM file, required for SSL w/verify \[lB]/opt/circonus/nad/etc/na.ca\[rB] 35 | --ssl-verify Enable SSL verification 36 | --no-statsd Disable builtin StatsD interface 37 | --statsd-config Config file for builtin StatsD interface \[lB]\[rB] 38 | -u, --uid User id to drop privileges to on start \[lB]\[rB] 39 | --log-level Log level (trace|debug|info|warn|error|fatal) \[lB]info\[rB] 40 | -d, --debug Enable debug logging (verbose) \[lB]false\[rB] 41 | -t, --trace Enable trace logging (very verbose) \[lB]false\[rB] 42 | --no-watch Disable automatic watches of plugin directory, script files, config files. Send SIGHUP to rescan plugins. 
\[lB]true\[rB] 43 | --debugdir Create debug files for each plugin and write to this directory \[lB]\[rB] 44 | --wipedebugdir Wipe debug directory clean before each write \[lB]false\[rB] 45 | -i, --inventory Offline inventory 46 | -c DEPRECATED use --plugin-dir 47 | -p DEPRECATED use --listen 48 | -s DEPRECATED use --ssl-listen 49 | -v DEPRECATED use --ssl-verify 50 | --authtoken DEPRECATED use --api-key 51 | --apihost DEPRECATED use --api-url 52 | --apiport DEPRECATED --api-url 53 | --apipath DEPRECATED --api-url 54 | --apiprotocol DEPRECATED --api-url 55 | --apiverbose DEPRECATED NOP, see --debug 56 | --sslcert DEPRECATED use --ssl-cert 57 | --sslkey DEPRECATED use --ssl-key 58 | --sslca DEPRECATED use --ssl-ca 59 | --cafile DEPRECATED use --broker-ca 60 | .fi 61 | .RE 62 | .SH "BUGS" 63 | .P 64 | \fI\(lahttps://github.com/circonus-labs/nad/issues\(ra\fR 65 | .SH "AUTHOR" 66 | .P 67 | Circonus, Inc. \fI\(lasupport@circonus.com\(ra\fR 68 | .SH "COPYRIGHT" 69 | .P 70 | Copyright \[co] 2017, Circonus, Inc. 71 | -------------------------------------------------------------------------------- /man/nad.md: -------------------------------------------------------------------------------- 1 | # nad(8) -- Node Agent Daemon 2 | 3 | ## SYNOPSIS 4 | 5 | `nad [options]` 6 | 7 | ## DESCRIPTION 8 | 9 | The node agent daemon (NAD) provides a simple mechanism to expose systems and application metrics to external onlookers. It inventories all executable programs/scripts in the *plugin directory* and executes them upon external request (via http or https) and returns the results in JSON format. 10 | 11 | Full documentation is available online in the NAD github repository - https://github.com/circonus-labs/nad. 
12 | 13 | ## OPTIONS 14 | 15 | ``` 16 | -h, --help output usage information 17 | -V, --version output the version number 18 | --plugin-dir Plugin directory [/opt/circonus/nad/etc/node-agent.d] 19 | -p, --listen Listening IP address and port [2609] 20 | -r, --reverse Use reverse connection to broker [false] 21 | --cid Check bundle id for reverse connection [] 22 | --broker-ca CA file for broker reverse connection and statsd [] 23 | --api-key Circonus API Token key [] 24 | --api-app Circonus API Token app [nad] 25 | --api-url Circonus API URL [https://api.circonus.com/v2/] 26 | --api-ca CA file for API URL [] 27 | --hostname Hostname self-configure to use in check and graph names [centos7] 28 | --brokerid Broker ID for self-configure to use for creating check [] 29 | --configfile File in plugin-dir for self-configure [] 30 | --target Target host [centos7] -- see Target below 31 | --ssl-listen SSL listening IP address and port [] 32 | --ssl-cert SSL certificate PEM file, required for SSL [/opt/circonus/nad/etc/na.crt] 33 | --ssl-key SSL certificate key PEM file, required for SSL [/opt/circonus/nad/etc/na.key] 34 | --ssl-ca SSL CA certificate PEM file, required for SSL w/verify [/opt/circonus/nad/etc/na.ca] 35 | --ssl-verify Enable SSL verification 36 | --no-statsd Disable builtin StatsD interface 37 | --statsd-config Config file for builtin StatsD interface [] 38 | -u, --uid User id to drop privileges to on start [] 39 | --log-level Log level (trace|debug|info|warn|error|fatal) [info] 40 | -d, --debug Enable debug logging (verbose) [false] 41 | -t, --trace Enable trace logging (very verbose) [false] 42 | --no-watch Disable automatic watches of plugin directory, script files, config files. Send SIGHUP to rescan plugins. 
[true] 43 | --debugdir Create debug files for each plugin and write to this directory [] 44 | --wipedebugdir Wipe debug directory clean before each write [false] 45 | -i, --inventory Offline inventory 46 | -c DEPRECATED use --plugin-dir 47 | -p DEPRECATED use --listen 48 | -s DEPRECATED use --ssl-listen 49 | -v DEPRECATED use --ssl-verify 50 | --authtoken DEPRECATED use --api-key 51 | --apihost DEPRECATED use --api-url 52 | --apiport DEPRECATED --api-url 53 | --apipath DEPRECATED --api-url 54 | --apiprotocol DEPRECATED --api-url 55 | --apiverbose DEPRECATED NOP, see --debug 56 | --sslcert DEPRECATED use --ssl-cert 57 | --sslkey DEPRECATED use --ssl-key 58 | --sslca DEPRECATED use --ssl-ca 59 | --cafile DEPRECATED use --broker-ca 60 | ``` 61 | 62 | ## BUGS 63 | 64 | https://github.com/circonus-labs/nad/issues 65 | 66 | ## AUTHOR 67 | Circonus, Inc. 68 | 69 | ## COPYRIGHT 70 | Copyright © 2017, Circonus, Inc. 71 | -------------------------------------------------------------------------------- /mkinstalldirs: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # mkinstalldirs --- make directory hierarchy 3 | 4 | scriptversion=2005-06-29.22 5 | 6 | # Original author: Noah Friedman 7 | # Created: 1993-05-16 8 | # Public domain. 9 | # 10 | # This file is maintained in Automake, please report 11 | # bugs to or send patches to 12 | # . 13 | 14 | errstatus=0 15 | dirmode= 16 | 17 | usage="\ 18 | Usage: mkinstalldirs [-h] [--help] [--version] [-m MODE] DIR ... 19 | 20 | Create each directory DIR (with mode MODE, if specified), including all 21 | leading file name components. 22 | 23 | Report bugs to ." 24 | 25 | # process command line arguments 26 | while test $# -gt 0 ; do 27 | case $1 in 28 | -h | --help | --h*) # -h for help 29 | echo "$usage" 30 | exit $? 
31 | ;; 32 | -m) # -m PERM arg 33 | shift 34 | test $# -eq 0 && { echo "$usage" 1>&2; exit 1; } 35 | dirmode=$1 36 | shift 37 | ;; 38 | --version) 39 | echo "$0 $scriptversion" 40 | exit $? 41 | ;; 42 | --) # stop option processing 43 | shift 44 | break 45 | ;; 46 | -*) # unknown option 47 | echo "$usage" 1>&2 48 | exit 1 49 | ;; 50 | *) # first non-opt arg 51 | break 52 | ;; 53 | esac 54 | done 55 | 56 | for file 57 | do 58 | if test -d "$file"; then 59 | shift 60 | else 61 | break 62 | fi 63 | done 64 | 65 | case $# in 66 | 0) exit 0 ;; 67 | esac 68 | 69 | # Solaris 8's mkdir -p isn't thread-safe. If you mkdir -p a/b and 70 | # mkdir -p a/c at the same time, both will detect that a is missing, 71 | # one will create a, then the other will try to create a and die with 72 | # a "File exists" error. This is a problem when calling mkinstalldirs 73 | # from a parallel make. We use --version in the probe to restrict 74 | # ourselves to GNU mkdir, which is thread-safe. 75 | case $dirmode in 76 | '') 77 | if mkdir -p --version . >/dev/null 2>&1 && test ! -d ./--version; then 78 | echo "mkdir -p -- $*" 79 | exec mkdir -p -- "$@" 80 | else 81 | # On NextStep and OpenStep, the `mkdir' command does not 82 | # recognize any option. It will interpret all options as 83 | # directories to create, and then abort because `.' already 84 | # exists. 85 | test -d ./-p && rmdir ./-p 86 | test -d ./--version && rmdir ./--version 87 | fi 88 | ;; 89 | *) 90 | if mkdir -m "$dirmode" -p --version . >/dev/null 2>&1 && 91 | test ! -d ./--version; then 92 | echo "mkdir -m $dirmode -p -- $*" 93 | exec mkdir -m "$dirmode" -p -- "$@" 94 | else 95 | # Clean up after NextStep and OpenStep mkdir. 
96 | for d in ./-m ./-p ./--version "./$dirmode"; 97 | do 98 | test -d $d && rmdir $d 99 | done 100 | fi 101 | ;; 102 | esac 103 | 104 | for file 105 | do 106 | case $file in 107 | /*) pathcomp=/ ;; 108 | *) pathcomp= ;; 109 | esac 110 | oIFS=$IFS 111 | IFS=/ 112 | set fnord $file 113 | shift 114 | IFS=$oIFS 115 | 116 | for d 117 | do 118 | test "x$d" = x && continue 119 | 120 | pathcomp=$pathcomp$d 121 | case $pathcomp in 122 | -*) pathcomp=./$pathcomp ;; 123 | esac 124 | 125 | if test ! -d "$pathcomp"; then 126 | echo "mkdir $pathcomp" 127 | 128 | mkdir "$pathcomp" || lasterr=$? 129 | 130 | if test ! -d "$pathcomp"; then 131 | errstatus=$lasterr 132 | else 133 | if test ! -z "$dirmode"; then 134 | echo "chmod $dirmode $pathcomp" 135 | lasterr= 136 | chmod "$dirmode" "$pathcomp" || lasterr=$? 137 | 138 | if test ! -z "$lasterr"; then 139 | errstatus=$lasterr 140 | fi 141 | fi 142 | fi 143 | fi 144 | 145 | pathcomp=$pathcomp/ 146 | done 147 | done 148 | 149 | exit $errstatus 150 | 151 | # Local Variables: 152 | # mode: shell-script 153 | # sh-indentation: 2 154 | # eval: (add-hook 'write-file-hooks 'time-stamp) 155 | # time-stamp-start: "scriptversion=" 156 | # time-stamp-format: "%:y-%02m-%02d.%02H" 157 | # time-stamp-end: "$" 158 | # End: 159 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nad", 3 | "version": "2.6.2", 4 | "private": true, 5 | "license": "SEE LICENSE IN LICENSE", 6 | "description": "Circonus Node Agent", 7 | "keywords": [ 8 | "circonus", 9 | "agent", 10 | "nad", 11 | "metrics" 12 | ], 13 | "author": "Circonus, Inc.", 14 | "files": [ 15 | "sbin/nad.js", 16 | "lib/" 17 | ], 18 | "dependencies": { 19 | "chalk": "2.4.1", 20 | "commander": "2.17.1", 21 | "https-proxy-agent": "~> 2.2.3", 22 | "pino": "5.3.1", 23 | "thirty-two": "1.0.2" 24 | }, 25 | "devDependencies": { 26 | "@maier/eslint-config-base": 
"0.2.0", 27 | "@maier/eslint-config-node": "0.1.0", 28 | "@maier/eslint-config-node-cmd": "0.1.0", 29 | "eslint": "5.4.0", 30 | "remark-cli": "5.0.0", 31 | "remark-man": "5.1.0" 32 | }, 33 | "scripts": { 34 | "lint": "node_modules/.bin/eslint --cache sbin/ lib/ plugins/", 35 | "md2man": "node_modules/.bin/remark man/nad.md --use man --output man/nad.8" 36 | }, 37 | "repository": { 38 | "type": "git", 39 | "url": "git+https://github.com/circonus-labs/nad.git" 40 | }, 41 | "bugs": { 42 | "url": "https://github.com/circonus-labs/nad/issues" 43 | }, 44 | "homepage": "https://github.com/circonus-labs/nad#readme" 45 | } 46 | -------------------------------------------------------------------------------- /packaging/example-omnibus.conf: -------------------------------------------------------------------------------- 1 | # Local overrides for make-omnibus 2 | 3 | # the repository URL for NAD 4 | # NAD_REPO="https://github.com/circonus-labs/nad.git" 5 | 6 | # the branch to build in the NAD repository 7 | # NAD_BRANCH="master" 8 | 9 | # prefix path for NAD installation 10 | # PREFIX=/opt/circonus/nad 11 | 12 | # temporary directory for building NAD (where repository will get cloned) 13 | # BUILDDIR="/tmp/nad-omnibus-build" 14 | 15 | # temporary directory for installing NAD during package creation 16 | # INSTALLDIR="/tmp/nad-omnibus-install" 17 | 18 | # publish directory, where the final package is placed 19 | # directory must exist, it will not be created by make-omnibus 20 | # PUBLISHDIR="/mnt/node-agent" 21 | -------------------------------------------------------------------------------- /packaging/omnibus-rpm.spec.in: -------------------------------------------------------------------------------- 1 | %define rversion @@RPMVER@@ 2 | %define rrelease 1 3 | %define _prefix /opt/circonus 4 | %define app_dir %{_prefix}/nad 5 | 6 | # perl is *optional* not required 7 | # rpmbuild autoreq will include it by default because of scripts with perl shebang 8 | %if 0%{?el7} 9 | 
%define __requires_exclude perl 10 | %endif 11 | %if 0%{?el6} 12 | # rpmbuild on el6 is pre-4.9, does not understand __requires_exclude 13 | AutoReq: 0 14 | %endif 15 | 16 | Name: nad-omnibus 17 | Version: %{rversion} 18 | Release: %{rrelease}%{?dist} 19 | Summary: Circonus Node Agent 20 | Prefix: %{_prefix} 21 | Group: Applications/System 22 | License: BSD 23 | Vendor: Circonus, Inc. 24 | URL: https://github.com/circonus-labs/nad 25 | BuildRoot: %{_tmppath}/%{name}-%{rversion}-%{rrelease} 26 | 27 | BuildRequires: rsync 28 | Conflicts: circonus-field-nad, circonus-nodejs 29 | 30 | %description 31 | NAD is a portable, extensible, lightweight metric collection agent. It is the recommended way to collect system metrics for the [Circonus](https://circonus.com/) monitoring platform. 32 | 33 | %install 34 | rm -rf $RPM_BUILD_ROOT 35 | rsync -a /tmp/nad-omnibus-install/ $RPM_BUILD_ROOT/ 36 | 37 | %clean 38 | rm -rf $RPM_BUILD_ROOT 39 | 40 | %post 41 | if [ -f /lib/systemd/system/nad.service ]; then 42 | /bin/systemctl enable nad 43 | /bin/systemctl start nad >/dev/null 2>&1 44 | elif [ -f /etc/init/nad.conf ]; then 45 | /sbin/initctl reload-configuration 46 | elif [ -f /etc/init.d/nad ]; then 47 | /sbin/chkconfig --add nad 48 | /sbin/service nad start >/dev/null 2>&1 49 | fi 50 | 51 | %preun 52 | if [ $1 = 0 ]; then 53 | if [ -f /lib/systemd/system/nad.service ]; then 54 | /bin/systemctl disable nad 55 | /bin/systemctl stop nad >/dev/null 2>&1 56 | elif [ -f /etc/init/nad.conf ]; then 57 | /sbin/initctl stop nad 58 | elif [ -f /etc/init.d/nad ]; then 59 | /sbin/chkconfig --del nad 60 | /sbin/service nad stop >/dev/null 2>&1 61 | fi 62 | fi 63 | exit 0 64 | 65 | %files 66 | %defattr(-, root, root, 755) 67 | %if 0%{?el7} 68 | /lib/systemd/system/nad.service 69 | %endif 70 | %if 0%{?el6} 71 | ### -upstart disabled- /etc/init/nad.conf 72 | /etc/init.d/nad 73 | %endif 74 | /etc/logrotate.d/nad 75 | %config(noreplace) %{app_dir}/etc/nad.conf 76 | %{app_dir}/package.json 77 | 
%{app_dir}/man 78 | %{app_dir}/etc/node-agent.d 79 | %{app_dir}/bin/nad-log 80 | %{app_dir}/bin/statsd.sh 81 | %{app_dir}/sbin/nad 82 | %{app_dir}/sbin/nad.js 83 | %{app_dir}/node_modules 84 | %dir %attr(0755, nobody, nobody) %{app_dir}/var/run 85 | %{_prefix}/bin/node 86 | %{_prefix}/lib 87 | %{_prefix}/nodejs 88 | -------------------------------------------------------------------------------- /packaging/ubuntu-postinstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -f /lib/systemd/system/nad.service ]; then 4 | /bin/systemctl enable nad 5 | /bin/systemctl start nad >/dev/null 2>&1 6 | elif [ -f /etc/init/nad.conf ]; then 7 | /sbin/initctl reload-configuration 8 | /sbin/initctl start nad 9 | elif [ -f /etc/init.d/nad ]; then 10 | /usr/sbin/update-rc.d nad defaults 98 02 11 | /etc/init.d/nad start 12 | fi 13 | -------------------------------------------------------------------------------- /packaging/ubuntu-postremove.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -f /lib/systemd/system/nad.service ]; then 4 | /bin/systemctl disable nad 5 | /bin/systemctl stop nad >/dev/null 2>&1 6 | elif [ -f /etc/init/nad.conf ]; then 7 | /sbin/initctl stop nad 8 | elif [ -f /etc/init.d/nad ]; then 9 | /usr/sbin/update-rc.d nad remove 10 | fi 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /plugins/cassandra/cassandra_cfstats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | which nodetool &>/dev/null || { 4 | >&2 echo "Unable to find 'nodetool' in path" 5 | exit 1 6 | } 7 | 8 | nodetool cfstats -F json 2>/dev/null 9 | 10 | # END 11 | -------------------------------------------------------------------------------- /plugins/cassandra/cassandra_compaction.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | which nodetool &>/dev/null || { 4 | >&2 echo "Unable to find 'nodetool' in path" 5 | exit 1 6 | } 7 | 8 | which awk &>/dev/null || { 9 | >&2 echo "Unable to find 'awk' in path" 10 | exit 1 11 | } 12 | 13 | : ${CASS_USER:=admin} 14 | 15 | nodetool compactionstats 2>/dev/null | awk ' 16 | /pending tasks/ { print "pending_tasks\tL\t"$3 } 17 | ' 18 | 19 | # END 20 | -------------------------------------------------------------------------------- /plugins/cassandra/cassandra_gcstats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | which nodetool &>/dev/null || { 4 | >&2 echo "Unable to find 'nodetool' in path" 5 | exit 1 6 | } 7 | 8 | which awk &>/dev/null || { 9 | >&2 echo "Unable to find 'awk' in path" 10 | exit 1 11 | } 12 | 13 | nodetool gcstats 2>/dev/null | awk 'NR>1 { print "interval_ms\tL\t"$1"\nmax_gc_ms\tL\t"$2"\ntotal_gc_ms\tL\t"$3"\nstddev_gc_ms\tn\t"$4"\nreclaimed_mb\tn\t"$5"\ngc_collections_count\tL\t"$6"\ndirect_memory_bytes\tl\t"$7 };' 14 | 15 | # END 16 | -------------------------------------------------------------------------------- /plugins/cassandra/cassandra_info.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | which nodetool &>/dev/null || { 4 | >&2 echo "Unable to find 'nodetool' in path" 5 | exit 1 6 | } 7 | 8 | which gawk &>/dev/null || { 9 | >&2 echo "Unable to find 'gawk' in path" 10 | exit 1 11 | } 12 | 13 | nodetool info 2>/dev/null | gawk ' 14 | function hr_to_bytes(NUMBER, U) { 15 | if (toupper(U) == "KB" || toupper(U) == "KIB") 16 | return (NUMBER * 1024); 17 | else if (toupper(U) == "MB" || toupper(U) == "MIB") 18 | return (NUMBER * 1024 * 1024); 19 | else if (toupper(U) == "GB" || toupper(U) == "GIB") 20 | return (NUMBER * 1024 * 1024 * 1024); 21 | else 22 | return NUMBER 23 | } 24 | 25 | function or_one(NUMBER) { 
26 | if (NUMBER == 0) { 27 | return 1.0; 28 | } 29 | return NUMBER; 30 | } 31 | 32 | /^Load/ { 33 | printf "load_bytes\tL\t%0.0f\n", hr_to_bytes($3, $4) 34 | next 35 | } 36 | /^Generation No/ { 37 | print "generation_number\tL\t"$4 38 | next 39 | } 40 | /^Uptime \(seconds\)/ { 41 | print "uptime_secs\tL\t"$4 42 | next 43 | } 44 | /^Heap Memory \(MB\)/ { 45 | printf "heap_mem_used\tL\t%0.0f\nheap_mem_max\tL\t%0.0f\n", hr_to_bytes($5, "MB"), hr_to_bytes($7, "MB") 46 | next 47 | } 48 | /^Off Heap Memory \(MB\)/ { 49 | printf "off_heap_mem_used\tL\t%0.0f\n", hr_to_bytes($6, "MB") 50 | next 51 | } 52 | /Key Cache/ { 53 | sub(/,/,"", $5) 54 | sub(/,/, "", $8) 55 | sub(/,/, "", $11) 56 | printf "key_cache_entries\tL\t%s\nkey_cache_size\tL\t%0.0f\nkey_cache_capacity\tL\t%0.0f\nkey_cache_hits\tL\t%s\nkey_cache_requests\tL\t%s\nkey_cache_hit_pct\tn\t%s\n", $5, hr_to_bytes($7, $8), hr_to_bytes($10, $11), $12, $14, ($12/or_one($14)) * 100 57 | next 58 | } 59 | /Counter Cache/ { 60 | sub(/,/,"", $5) 61 | sub(/,/, "", $8) 62 | sub(/,/, "", $11) 63 | printf "counter_cache_entries\tL\t%s\ncounter_cache_size\tL\t%0.0f\ncounter_cache_capacity\tL\t%0.0f\ncounter_cache_hits\tL\t%s\ncounter_cache_requests\tL\t%s\ncounter_cache_hit_pct\tn\t%s\n", $5, hr_to_bytes($7, $8), hr_to_bytes($10, $11), $12, $14, ($12/or_one($14)) * 100 64 | next 65 | } 66 | /Chunk Cache/ { 67 | sub(/,/,"", $5) 68 | sub(/,/, "", $8) 69 | sub(/,/, "", $11) 70 | printf "chunk_cache_entries\tL\t%s\nchunk_cache_size\tL\t%0.0f\nchunk_cache_capacity\tL\t%0.0f\nchunk_cache_misses\tL\t%s\nchunk_cache_requests\tL\t%s\nchunk_cache_hit_pct\tn\t%s\nchunk_cache_miss_latency_ms\tn\t%s\n", $5, hr_to_bytes($7, $8), hr_to_bytes($10, $11), $12, $14, $16 * 100, $20 71 | next 72 | } 73 | ' 74 | 75 | # END 76 | -------------------------------------------------------------------------------- /plugins/cassandra/cassandra_po.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # if 
protocol_observer is already running, exit 3 | popid=$(pgrep -n -f 'protocol_observer -wire cassandra_cql') 4 | [[ -n "$popid" ]] && { 5 | printf "pid\tL\t%s\n" $popid 6 | exit 0 7 | } 8 | 9 | # check sudo access for user running NAD if not id 0 10 | SUDO_CMD="" 11 | if [[ $UID -ne 0 ]]; then 12 | sudo -l /dev/null 2>&1 13 | [[ $? -ne 0 ]] && { 14 | echo "Error checking sudo access for $UID" 15 | exit 1 16 | } 17 | SUDO_CMD="sudo -b" 18 | fi 19 | 20 | # default location 21 | po=/opt/circonus/bin/protocol_observer 22 | 23 | if [[ ! -x $po ]]; then 24 | po=`type -P protocol_observer` 25 | [[ $? -eq 0 ]] || { 26 | >&2 echo 'Unable to location protocol_observer binary' 27 | exit 1 28 | } 29 | fi 30 | 31 | po_conf=/opt/circonus/etc/cassandra_po_conf.sh 32 | [[ -s $po_conf ]] && source $po_conf 33 | 34 | : ${IFACE:=auto} 35 | : ${NADURL:=http://localhost:2609} 36 | NADURL=${NADURL%/} 37 | 38 | $SUDO_CMD $po -wire cassandra_cql -iface $IFACE -submissionurl ${NADURL}/write/cassandra_protocol_observer > /dev/null 39 | 40 | printf "pid\tL\t-1\n" 41 | 42 | exit 0 43 | # END 44 | -------------------------------------------------------------------------------- /plugins/ceph/README.md: -------------------------------------------------------------------------------- 1 | # Ceph metrics plugin 2 | 3 | 4 | ## Use 5 | 6 | ```sh 7 | cd /opt/circonus/etc/node-agent.d 8 | ln -s ceph/.js . 9 | ``` 10 | 11 | e.g. 12 | 13 | ```sh 14 | cd /opt/circonus/etc/node-agent.d 15 | ln -s ceph/ceph_status.js . 16 | ``` 17 | 18 | ## Configuration 19 | 20 | The default location for the `ceph` command is `/usr/bin/ceph`. If this command is installed elsewhere, it may be configured in `/opt/circonus/etc/ceph.json`: 21 | 22 | ```json 23 | { 24 | "ceph_bin": "path to ceph binary" 25 | } 26 | ``` 27 | 28 | ## Notes 29 | 30 | `ceph osd pool stats` (`ceph_osd_pool.js`) produces metrics for the current point in time, it is **not** an aggregate. 
The result will be the read/write ops/bytes per second occurring at the point in time when the command runs. If there are no operations running, these metrics will be 0. 31 | -------------------------------------------------------------------------------- /plugins/ceph/ceph_df.js: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Circonus, Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | 'use strict'; 6 | 7 | const path = require('path'); 8 | const child = require('child_process'); 9 | 10 | class CephDF { 11 | 12 | /** 13 | * creates instances 14 | */ 15 | constructor() { 16 | this.config = {}; 17 | 18 | try { 19 | this.config = require(path.resolve(path.join(__dirname, '..', '..', 'ceph.json'))); // eslint-disable-line global-require 20 | } catch (err) { 21 | if (err.code !== 'MODULE_NOT_FOUND') { 22 | console.error(err); 23 | 24 | return; 25 | } 26 | } 27 | 28 | this.ceph_bin = this.config.ceph_bin || '/usr/bin/ceph'; 29 | this.ceph_cmd = 'df'; 30 | this.ceph_cmd_args = ''; 31 | } 32 | 33 | /** 34 | * called by nad to run the plugin 35 | * @arg {Object} plugin definition 36 | * @arg {Function} cb callback 37 | * @arg {Object} req http request object 38 | * @arg {Object} args instance arguments 39 | * @arg {String} instance id 40 | * @returns {Undefined} nothing 41 | */ 42 | run(plugin, cb, req, args, instance) { // eslint-disable-line max-params, no-unused-vars 43 | this.probe((err, metrics) => { 44 | if (err !== null) { 45 | console.error(err); 46 | plugin.running = false; // eslint-disable-line no-param-reassign 47 | cb(plugin, { metric_collection_error: err.message }); 48 | 49 | return; 50 | } 51 | plugin.running = false; // eslint-disable-line no-param-reassign 52 | cb(plugin, metrics, instance); 53 | }); 54 | } 55 | 56 | /** 57 | * called to start the command 58 | * @arg {Function} cb callback 59 | * @returns 
{Undefined} nothing 60 | * 61 | * cb called with err|null, metrics 62 | */ 63 | probe(cb) { 64 | const metrics = {}; 65 | const cmd = `${this.ceph_bin} ${this.ceph_cmd} ${this.ceph_cmd_args} -f json 2>/dev/null`; 66 | 67 | this._runCommand(cmd, (err, result) => { 68 | if (err !== null) { 69 | cb(err); 70 | 71 | return; 72 | } 73 | 74 | if ({}.hasOwnProperty.call(result, 'pools')) { 75 | for (const pool of result.pools) { 76 | metrics[`${pool.name}\`used_bytes`] = pool.stats.bytes_used; 77 | metrics[`${pool.name}\`max_avail_bytes`] = pool.stats.max_avail; 78 | metrics[`${pool.name}\`objects`] = pool.stats.objects; 79 | } 80 | } 81 | 82 | cb(null, metrics); 83 | }); 84 | } 85 | 86 | /** 87 | * runs the command 88 | * @arg {String} cmd to run 89 | * @arg {Function} cb callback 90 | * @returns {Undefined} nothing 91 | * 92 | * cb called with err|null, results 93 | */ 94 | _runCommand(cmd, cb) { // eslint-disable-line class-methods-use-this 95 | child.exec(cmd, (execErr, stdout, stderr) => { 96 | if (execErr !== null) { 97 | cb(new Error(`${execErr} ${stderr}`)); 98 | 99 | return; 100 | } 101 | 102 | let result = null; 103 | 104 | try { 105 | result = JSON.parse(stdout); 106 | } catch (parseErr) { 107 | cb(parseErr); 108 | 109 | return; 110 | } 111 | 112 | cb(null, result); 113 | }); 114 | } 115 | 116 | } 117 | 118 | module.exports = CephDF; 119 | 120 | if (!module.parent) { 121 | const ceph = new CephDF(); 122 | 123 | ceph.probe((err, metrics) => { 124 | if (err !== null) { 125 | throw err; 126 | } 127 | console.dir(metrics); 128 | }); 129 | } 130 | -------------------------------------------------------------------------------- /plugins/ceph/ceph_osd_perf.js: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Circonus, Inc. All rights reserveplugin. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | 'use strict'; 6 | 7 | const path = require('path'); 8 | const child = require('child_process'); 9 | 10 | class CephOSDPerf { 11 | 12 | /** 13 | * creates instances 14 | */ 15 | constructor() { 16 | this.config = {}; 17 | 18 | try { 19 | this.config = require(path.resolve(path.join(__dirname, '..', '..', 'ceph.json'))); // eslint-disable-line global-require 20 | } catch (err) { 21 | if (err.code !== 'MODULE_NOT_FOUND') { 22 | console.error(err); 23 | 24 | return; 25 | } 26 | } 27 | 28 | this.ceph_bin = this.config.ceph_bin || '/usr/bin/ceph'; 29 | this.ceph_cmd = 'osd'; 30 | this.ceph_cmd_args = 'perf'; 31 | } 32 | 33 | /** 34 | * called by nad to run the plugin 35 | * @arg {Object} plugin definition 36 | * @arg {Function} cb callback 37 | * @arg {Object} req http request object 38 | * @arg {Object} args instance arguments 39 | * @arg {String} instance id 40 | * @returns {Undefined} nothing 41 | */ 42 | run(plugin, cb, req, args, instance) { // eslint-disable-line max-params, no-unused-vars 43 | this.probe((err, metrics) => { 44 | if (err !== null) { 45 | console.error(err); 46 | plugin.running = false; // eslint-disable-line no-param-reassign 47 | cb(plugin, { metric_collection_error: err.message }); 48 | 49 | return; 50 | } 51 | plugin.running = false; // eslint-disable-line no-param-reassign 52 | cb(plugin, metrics, instance); 53 | }); 54 | } 55 | 56 | /** 57 | * called to start the command 58 | * @arg {Function} cb callback 59 | * @returns {Undefined} nothing 60 | * 61 | * cb called with err|null, metrics 62 | */ 63 | probe(cb) { 64 | const metrics = {}; 65 | const cmd = `${this.ceph_bin} ${this.ceph_cmd} ${this.ceph_cmd_args} -f json 2>/dev/null`; 66 | 67 | this._runCommand(cmd, (err, result) => { 68 | if (err !== null) { 69 | cb(err); 70 | 71 | return; 72 | } 73 | 74 | if ({}.hasOwnProperty.call(result, 'osd_perf_infos')) { 75 | for (const osd of result.osd_perf_infos) { 76 | metrics[`osd${osd.id}`] = osd.perf_stats; 77 | } 78 | } 79 | 80 | cb(null, 
metrics); 81 | }); 82 | } 83 | 84 | /** 85 | * runs the command 86 | * @arg {String} cmd to run 87 | * @arg {Function} cb callback 88 | * @returns {Undefined} nothing 89 | * 90 | * cb called with err|null, results 91 | */ 92 | _runCommand(cmd, cb) { // eslint-disable-line class-methods-use-this 93 | child.exec(cmd, (execErr, stdout, stderr) => { 94 | if (execErr !== null) { 95 | cb(new Error(`${execErr} ${stderr}`)); 96 | 97 | return; 98 | } 99 | 100 | let result = null; 101 | 102 | try { 103 | result = JSON.parse(stdout); 104 | } catch (parseErr) { 105 | cb(parseErr); 106 | 107 | return; 108 | } 109 | 110 | cb(null, result); 111 | }); 112 | } 113 | 114 | } 115 | 116 | module.exports = CephOSDPerf; 117 | 118 | if (!module.parent) { 119 | const ceph = new CephOSDPerf(); 120 | 121 | ceph.probe((err, metrics) => { 122 | if (err !== null) { 123 | throw err; 124 | } 125 | console.dir(metrics); 126 | }); 127 | } 128 | -------------------------------------------------------------------------------- /plugins/ceph/ceph_osd_pool.js: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Circonus, Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | 'use strict'; 6 | 7 | const path = require('path'); 8 | const child = require('child_process'); 9 | 10 | class CephOSDPool { 11 | 12 | /** 13 | * creates instances 14 | */ 15 | constructor() { 16 | this.config = {}; 17 | 18 | try { 19 | this.config = require(path.resolve(path.join(__dirname, '..', '..', 'ceph.json'))); // eslint-disable-line global-require 20 | } catch (err) { 21 | if (err.code !== 'MODULE_NOT_FOUND') { 22 | console.error(err); 23 | 24 | return; 25 | } 26 | } 27 | 28 | this.ceph_bin = this.config.ceph_bin || '/usr/bin/ceph'; 29 | this.ceph_cmd = 'osd'; 30 | this.ceph_cmd_args = 'pool stats'; 31 | } 32 | 33 | /** 34 | * called by nad to run the plugin 35 | * @arg {Object} plugin definition 36 | * @arg {Function} cb callback 37 | * @arg {Object} req http request object 38 | * @arg {Object} args instance arguments 39 | * @arg {String} instance id 40 | * @returns {Undefined} nothing 41 | */ 42 | run(plugin, cb, req, args, instance) { // eslint-disable-line max-params, no-unused-vars 43 | this.probe((err, metrics) => { 44 | if (err !== null) { 45 | console.error(err); 46 | plugin.running = false; // eslint-disable-line no-param-reassign 47 | cb(plugin, { metric_collection_error: err.message }); 48 | 49 | return; 50 | } 51 | plugin.running = false; // eslint-disable-line no-param-reassign 52 | cb(plugin, metrics, instance); 53 | }); 54 | } 55 | 56 | /** 57 | * called to start the command 58 | * @arg {Function} cb callback 59 | * @returns {Undefined} nothing 60 | * 61 | * cb called with err|null, metrics 62 | */ 63 | probe(cb) { 64 | const metrics = {}; 65 | const cmd = `${this.ceph_bin} ${this.ceph_cmd} ${this.ceph_cmd_args} -f json 2>/dev/null`; 66 | 67 | this._runCommand(cmd, (err, result) => { 68 | if (err !== null) { 69 | cb(err); 70 | 71 | return; 72 | } 73 | 74 | for (const pool of result) { 75 | metrics[pool.pool_name] = {}; 76 | metrics[pool.pool_name].read_ops_sec = pool.client_io_rate.read_op_per_sec || 0; 77 | 
metrics[pool.pool_name].write_ops_sec = pool.client_io_rate.write_op_per_sec || 0; 78 | metrics[pool.pool_name].read_bytes_sec = pool.client_io_rate.read_bytes_sec || 0; 79 | metrics[pool.pool_name].write_bytes_sec = pool.client_io_rate.write_bytes_sec || 0; 80 | } 81 | 82 | cb(null, metrics); 83 | }); 84 | } 85 | 86 | /** 87 | * runs the command 88 | * @arg {String} cmd to run 89 | * @arg {Function} cb callback 90 | * @returns {Undefined} nothing 91 | * 92 | * cb called with err|null, results 93 | */ 94 | _runCommand(cmd, cb) { // eslint-disable-line class-methods-use-this 95 | child.exec(cmd, (execErr, stdout, stderr) => { 96 | if (execErr !== null) { 97 | cb(new Error(`${execErr} ${stderr}`)); 98 | 99 | return; 100 | } 101 | 102 | let result = null; 103 | 104 | try { 105 | result = JSON.parse(stdout); 106 | } catch (parseErr) { 107 | cb(parseErr); 108 | 109 | return; 110 | } 111 | 112 | cb(null, result); 113 | }); 114 | } 115 | 116 | } 117 | 118 | module.exports = CephOSDPool; 119 | 120 | if (!module.parent) { 121 | const ceph = new CephOSDPool(); 122 | 123 | ceph.probe((err, metrics) => { 124 | if (err !== null) { 125 | throw err; 126 | } 127 | console.dir(metrics); 128 | }); 129 | } 130 | -------------------------------------------------------------------------------- /plugins/circonus-inside/circpkg.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | OUTPUT_DIR=/opt/circonus/nad/var/run 4 | OUTPUT_FILE=$OUTPUT_DIR/cpkg.list 5 | SUPPRESS_FILE=$OUTPUT_DIR/cpkg.wip 6 | CACHE_MINUTES=5 7 | 8 | suppressions() { 9 | if [ -r $SUPPRESS_FILE ]; then 10 | while read -r line || [[ -n "$line" ]]; do 11 | pkg=`echo $line | awk -F= '{print $1;}'` 12 | user=`echo $line | awk -F= '{if($2) {print $2;} else { print "unspecified"; }}'` 13 | echo "$pkg s wip:$user" 14 | done < $SUPPRESS_FILE 15 | fi 16 | } 17 | 18 | if [ ! 
-d $OUTPUT_DIR ]; then 19 | echo "error\ts\tbad cache directory" 20 | OUTPUT_FILE=/dev/null 21 | else 22 | find $OUTPUT_FILE -mmin +$CACHE_MINUTES -exec rm {} \; 2>/dev/null 23 | if [ -r $OUTPUT_FILE ]; then 24 | LMOD=`/bin/stat -c "%Y" $OUTPUT_FILE` 25 | CTIME=`/bin/date +%s` 26 | ((AGE=$CTIME-$LMOD)) 27 | printf "cached\tl\t%d\n" $AGE 28 | cat $OUTPUT_FILE 29 | suppressions 30 | exit 31 | fi 32 | if [ ! -w $OUTPUT_FILE ]; then 33 | if ! touch $OUTPUT_FILE 2> /dev/null; then 34 | echo "error\ts\tcannot create cache file" 35 | OUTPUT_FILE=/dev/null 36 | fi 37 | fi 38 | fi 39 | 40 | case `uname -s` in 41 | Linux) 42 | /bin/rpm -qa --queryformat '%{NAME}\ts\t%{VERSION}-%{RELEASE}\n' circonus* | /usr/bin/tee $OUTPUT_FILE 43 | suppressions 44 | ;; 45 | SunOS) 46 | /bin/pkg list -v | /bin/perl -n -e 's#^pkg://circonus/([^@]+)@([^-]+).*$#$1\ts\t$2#g && print;' | /bin/tee $OUTPUT_FILE 47 | suppressions 48 | ;; 49 | *) 50 | echo "error\ts\tunsuported platform" 51 | exit 52 | ;; 53 | esac 54 | 55 | -------------------------------------------------------------------------------- /plugins/common/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | $(MAKE) -C src 3 | -------------------------------------------------------------------------------- /plugins/common/apcaccess.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Circonus 2016 3 | 4 | # Simple script to grab basic APC UPS status 5 | # add '-f configfile' if needed 6 | 7 | apcaccess -u | while read x; do 8 | echo $x | grep STATUS | sed 's/:/s/' 9 | echo $x | grep TIMELEFT | sed 's/:/L/' 10 | echo $x | grep BCHARGE | sed 's/:/n/' 11 | echo $x | grep BATTV | sed 's/:/L/' 12 | done 13 | -------------------------------------------------------------------------------- /plugins/common/file_cksum.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | CKSUM=`cksum $1 |cut 
-f1 -d' '` 3 | printf "cksum\tL\t$CKSUM\n" 4 | -------------------------------------------------------------------------------- /plugins/common/file_md5sum.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | MD5SUM=`md5sum $1 |cut -f1 -d' '` 3 | printf "md5\ts\t$MD5SUM\n" 4 | -------------------------------------------------------------------------------- /plugins/common/net_listen.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | use strict; 3 | use warnings; 4 | 5 | # Given a port or ip:port, determines what (if anything) is listening there 6 | # listening I 1 7 | # command s node /opt/circonus/sbin/nad -c /opt/circonus/etc/node-agent.d -p 2609 8 | 9 | my $portspec = shift; 10 | 11 | my %YAY_PORTABILITY = 12 | ( 13 | 'freebsd' => { 14 | netstat => 'sockstat -l -L -4 -6 | grep -v ^USER', 15 | all_iface => '*', 16 | separator => ':', 17 | match_col => 5, 18 | proc_col => 2, 19 | }, 20 | 'linux' => { 21 | netstat => "netstat -nlp --inet 2>/dev/null | grep LISTEN", 22 | all_iface => '0.0.0.0', 23 | separator => ':', # Can't we agree on anything? 24 | match_col => 3, 25 | proc_col => 6, 26 | }, 27 | 'solaris' => { 28 | netstat => "netstat -n -a -f inet | grep LISTEN", 29 | all_iface => '*', 30 | separator => '.', # Can't we agree on anything? 31 | match_col => 0, 32 | proc_col => undef, # Not without pfiles or lsof anyway :effort: 33 | }, 34 | ); 35 | 36 | unless ($portspec) { 37 | die "Usage: $0 [IP:]PORT\n"; 38 | } 39 | if (@ARGV) { 40 | die "Usage: $0 [IP:]PORT\n"; 41 | } 42 | 43 | 44 | if ($portspec !~ /:/) { 45 | $portspec = $YAY_PORTABILITY{$^O}{all_iface} . ':'. 
$portspec; 46 | } 47 | my ($sought_ip, $sought_port) = split(':', $portspec); 48 | $portspec = join($YAY_PORTABILITY{$^O}{separator}, split(':', $portspec)); 49 | 50 | my $cmd = $YAY_PORTABILITY{$^O}{netstat}; 51 | my $matched = 0; 52 | my $pid = undef; 53 | foreach my $listener (`$cmd`) { 54 | $listener =~ s/^\s+//; 55 | my @cols = split(/\s+/, $listener); 56 | my $local = $cols[$YAY_PORTABILITY{$^O}{match_col}]; 57 | next unless $local && $local eq $portspec; 58 | 59 | # Yay 60 | $matched = 1; 61 | if ($YAY_PORTABILITY{$^O}{proc_col}) { 62 | ($pid) = $cols[$YAY_PORTABILITY{$^O}{proc_col}] =~ /^(\d+)/ 63 | } 64 | last; 65 | } 66 | 67 | # Solaris pfiles hunt? 68 | if ($^O eq 'solaris') { 69 | unless ($>) { 70 | # I'm root, it's worth a try 71 | my $matcher = $sought_ip eq $YAY_PORTABILITY{$^O}{all_iface} ? 72 | "sockname: AF_INET.+port:\\s+$sought_port" : 73 | "sockname: AF_INET.+$sought_ip\\s+port:\\s+$sought_port"; 74 | 75 | PROC: 76 | foreach my $cpid (`ps -e -o pid=`) { 77 | LINE: 78 | foreach my $line (`pfiles $cpid 2>/dev/null`) { 79 | # sockname: AF_INET6 ::ffff:10.0.2.15 port: 22 80 | if ($line =~ $matcher) { 81 | $pid = $cpid; 82 | last PROC; 83 | } 84 | } 85 | } 86 | } 87 | } 88 | 89 | print "listening\tI\t$matched\n"; 90 | if ($pid) { 91 | my $command = `ps -o args= -p $pid`; 92 | chomp $command; 93 | print "command\ts\t$command\n"; 94 | } else { 95 | print "command\ts\n"; 96 | } 97 | 98 | 99 | -------------------------------------------------------------------------------- /plugins/common/open_files.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | OPEN_FILE_COUNT=`ls -1U /proc/[0-9]*/fd/* 2>/dev/null | wc -l` 4 | printf "count\tL\t$OPEN_FILE_COUNT\n" 5 | 6 | -------------------------------------------------------------------------------- /plugins/common/openssl_certificate_expiration.js: -------------------------------------------------------------------------------- 1 | /* eslint-disable */ 2 
| /* author needs to lint */ 3 | 4 | const path = require('path'), 5 | spawn = require('child_process').spawnSync; 6 | 7 | var certexpire = function(){}; 8 | 9 | certexpire.prototype.run = function(d, cb, req, args, instance) { 10 | var config = {}, 11 | data = {}; 12 | 13 | /* 14 | * Without the config there is nothing to do so bail if we can't find it 15 | * 16 | * Config file format: 17 | * { "files": [ { "location": "path/to/certificate", "alias": "optional name alias" } ] } 18 | * 19 | */ 20 | try { 21 | config = require(path.resolve(__dirname,'..','openssl_certificate_expiration.json')); 22 | } 23 | catch (err) { 24 | console.log(err); 25 | cb(d, data, instance); 26 | d.running = false; 27 | return; 28 | } 29 | 30 | // find the openssl command 31 | const openssl = spawn('command', ['-v', 'openssl']); 32 | if ( openssl.status != 0 ) { 33 | console.error("Could not find the 'openssl' command"); 34 | cb(d, data, instance); 35 | d.running = false; 36 | return; 37 | } 38 | 39 | for ( var i = 0, len = config.files.length; i < len; i++ ) { 40 | // Yes annoyingly I could find no good baked in way to parse / extract data from a certificate within node, so we 41 | // are forced to just call openssl to get the enddate, and then parse that 42 | var endDate = spawn(openssl.stdout.toString().trim(), ['x509', '-enddate', '-noout', '-in', config.files[i].location]), 43 | name = config.files[i].alias || config.files[i].location; // just use the full file path is we don't have an alias 44 | 45 | if ( endDate.status != 0 ) { 46 | console.error("Error getting enddate from cert via openssl"); 47 | } 48 | else { 49 | data[name] = { "expires_in": parseInt((Date.parse(endDate.stdout.toString().trim().replace(/notAfter=/,"")) / 1000) - (Date.now()/1000), 10) }; 50 | } 51 | } 52 | 53 | cb(d, data, instance); 54 | d.running = false; 55 | }; 56 | 57 | module.exports = certexpire; 58 | -------------------------------------------------------------------------------- 
/plugins/common/process_memory.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | use strict; 3 | use warnings; 4 | 5 | use List::Util qw(sum max min); 6 | 7 | # Given a regex pattern to match against the full command line (like pgrep -f) 8 | # Find matching processes in process table 9 | # Report on count of processes 10 | # Report on vsize (vsz) 11 | # Report on resident size (rss) 12 | # report on memory % (pmem) 13 | # 14 | # count L 2 15 | # vsz_sum L 233272 16 | # vsz_min L 108396 17 | # vsz_max L 124876 18 | # vsz_median n 116636 19 | # rss_sum L 4644 20 | # rss_min L 2028 21 | # rss_max L 2616 22 | # rss_median n 2322 23 | # pmem_sum n 0.3 24 | # pmem_min n 0.1 25 | # pmem_max n 0.2 26 | # pmem_median n 0.15 27 | 28 | 29 | my $pattern = shift; 30 | 31 | unless ($pattern) { 32 | die "Usage: $0 PATTERN\n"; 33 | } 34 | if (@ARGV) { 35 | die "Usage: $0 PATTERN\n"; 36 | } 37 | 38 | # We need: a pid, virt mem size, resident size, % memory, command and args 39 | # no header please 40 | # -o pid= -o vsz= -o rss= -o pmem= -o args= 41 | # above works on solaris and gnu ps 42 | my $cmd = "ps -e -o pid= -o vsz= -o rss= -o pmem= -o args="; 43 | my @matches = (); 44 | foreach my $proc (`$cmd`) { 45 | my %info = (); 46 | @info{qw(pid vsz rss pmem cmd)} = $proc =~ /^\s*(\d+)\s+(\d+)\s+(\d+)\s+(\d+\.\d+)\s+(.+)$/; 47 | # This ought to be thrilling 48 | if ($info{cmd} =~ $pattern) { 49 | push @matches, \%info; 50 | } 51 | } 52 | 53 | print "count\tL\t" . (scalar @matches) . "\n"; 54 | foreach my $metric (qw(vsz rss pmem)) { 55 | my $type = $metric eq 'pmem' ? 'n' : 'L'; 56 | my @vals = sort map { $_->{$metric} } @matches; 57 | foreach my $val (@vals) { 58 | print "${metric}\t$type\t$val\n"; 59 | } 60 | print "${metric}_sum\t$type\t" . sum(@vals) . "\n"; 61 | print "${metric}_min\t$type\t" . min(@vals) . "\n"; 62 | print "${metric}_max\t$type\t" . max(@vals) . "\n"; 63 | print "${metric}_median\tn\t" . 
((scalar @vals) % 2 ? $vals[$#vals/2] : ($vals[$#vals/2 -0.5] + $vals[$#vals/2 +0.5])/2) . "\n"; 64 | } 65 | 66 | 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /plugins/common/ps.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | use strict; 3 | use warnings; 4 | 5 | # So let's see 6 | # We want... 7 | # total processes 8 | # process count by state 9 | # Parametrically report above restricted by username(s) 10 | 11 | my $username_opt = @ARGV ? "-u " . join(',', @ARGV) : '-e '; 12 | 13 | # We need: user, a pid, and state 14 | # no header please 15 | # -o pid= -o user= -o s= 16 | # above works on solaris and gnu ps 17 | # If nonexistent user specified, we get a usage message to STDERR - ignore it 18 | my $state_fmt = "s="; 19 | if ($^O eq 'darwin') { 20 | $state_fmt = "state="; 21 | } 22 | my $cmd = "ps -o pid= -o user= -o $state_fmt $username_opt 2>/dev/null"; 23 | 24 | my %count_by_state = map { $_ => 0 } qw(D R S Z O total); # Others? 25 | for my $proc (`$cmd`) { 26 | $proc =~ s/^\s+//; 27 | my ($pid, $user, $state) = split(/\s+/, $proc); 28 | $count_by_state{$state}++; 29 | $count_by_state{total}++; 30 | } 31 | 32 | for my $kind (keys %count_by_state) { 33 | print "count`$kind\tL\t" . $count_by_state{$kind} . 
"\n"; 34 | } 35 | -------------------------------------------------------------------------------- /plugins/common/src/Makefile: -------------------------------------------------------------------------------- 1 | CC=gcc 2 | 3 | all: ../boot_time.elf ../loadavg.elf ../user_logins.elf ../file_stat.elf 4 | 5 | ../boot_time.elf: boot_time.c 6 | $(CC) -o $@ boot_time.c 7 | 8 | ../file_stat.elf: file_stat.c 9 | $(CC) -o $@ file_stat.c 10 | 11 | ../loadavg.elf: loadavg.c 12 | $(CC) -o $@ loadavg.c 13 | 14 | ../user_logins.elf: user_logins.c 15 | $(CC) -o $@ user_logins.c 16 | -------------------------------------------------------------------------------- /plugins/common/src/boot_time.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | int main() { 6 | struct utmpx *ut; 7 | while((ut = getutxent()) != NULL && ut->ut_type != BOOT_TIME); 8 | if(ut != NULL) { 9 | char time_str[64]; 10 | struct tm *tm; 11 | time_t as_time_t = ut->ut_tv.tv_sec; 12 | tm = gmtime(&as_time_t); 13 | strftime(time_str, sizeof(time_str), "%Y-%m-%d %H:%M:%S", tm); 14 | printf("epoch\tL\t%llu\n", (long long unsigned)ut->ut_tv.tv_sec); 15 | printf("date\ts\t%s\n", time_str); 16 | } else { 17 | printf("epoch\tL\t[[null]]\n"); 18 | printf("date\ts\t[[null]]\n"); 19 | } 20 | endutxent(); 21 | return 0; 22 | } 23 | -------------------------------------------------------------------------------- /plugins/common/src/file_stat.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #ifndef MAXPATHLEN 12 | #define MAXPATHLEN 4096 13 | #endif 14 | 15 | #ifdef __APPLE__ 16 | #define STAT_TIMESPEC 17 | #else 18 | #define STAT_TIME_T 19 | #endif 20 | 21 | void print_user(uid_t uid) { 22 | struct passwd *pw; 23 | pw = getpwuid(uid); 24 | if(pw) printf("owner\ts\t%s\n", pw->pw_name); 
25 | else printf("owner\ts\t%d\n", uid); 26 | } 27 | void print_group(gid_t gid) { 28 | struct group *gr; 29 | gr = getgrgid(gid); 30 | if(gr) printf("group\ts\t%s\n", gr->gr_name); 31 | else printf("group\ts\t%d\n", gid); 32 | } 33 | int main(int argc, char **argv) { 34 | struct stat sb; 35 | if(argc < 2) { 36 | fprintf(stderr, "Error: %s \n", argv[0]); 37 | exit(-2); 38 | } 39 | if(lstat(argv[1], &sb) == 0) { 40 | if(sb.st_mode & S_IFLNK) { 41 | char tgt[MAXPATHLEN]; 42 | if(readlink(argv[1], tgt, sizeof(tgt)) > 0) { 43 | printf("link\ts\t%s\n", tgt); 44 | } 45 | } 46 | } 47 | time_t now = time(NULL); 48 | if(stat(argv[1], &sb) == 0) { 49 | printf("exists\tL\t1\n"); 50 | #if defined(STAT_TIME_T) 51 | #define PTIME(TYPE) do { \ 52 | printf(#TYPE "time\tL\t%ld\n", sb.st_##TYPE##time); \ 53 | printf(#TYPE "age\tL\t%ld\n", now - sb.st_##TYPE##time); \ 54 | } while(0) 55 | #elif defined(STAT_TIMESPEC) 56 | #define PTIME(TYPE) do { \ 57 | printf(#TYPE "time\tL\t%ld\n", sb.st_##TYPE##timespec.tv_sec); \ 58 | printf(#TYPE "age\tL\t%ld\n", now - sb.st_##TYPE##timespec.tv_sec); \ 59 | } while(0) 60 | #else 61 | #define PTIME(TYPE) do { \ 62 | printf(#TYPE "time\tL\t%ld\n", sb.st_##TYPE##time.tv_sec); \ 63 | printf(#TYPE "age\tL\t%ld\n", now - sb.st_##TYPE##time.tv_sec); \ 64 | } while(0) 65 | #endif 66 | PTIME(m); 67 | PTIME(a); 68 | PTIME(c); 69 | printf("hardlinks\tL\t%lu\n", (unsigned long)sb.st_nlink); 70 | printf("size\tL\t%llu\n", (unsigned long long)sb.st_size); 71 | printf("permissions\ts\t%04o\n", 0xfff & sb.st_mode); 72 | printf("type\ts\t%c\n", 73 | S_ISREG(sb.st_mode) ? 'f' : 74 | S_ISDIR(sb.st_mode) ? 'd' : 75 | S_ISLNK(sb.st_mode) ? 'l' : 76 | S_ISBLK(sb.st_mode) ? 'b' : 77 | S_ISCHR(sb.st_mode) ? 'c' : 78 | S_ISFIFO(sb.st_mode) ? 'p' : 79 | S_ISSOCK(sb.st_mode) ? 
's' : '?'); 80 | print_user(sb.st_uid); 81 | print_group(sb.st_gid); 82 | } else { 83 | printf("exists\tL\t1\n"); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /plugins/common/src/loadavg.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int getloadavg(double loadavg[], int nelem); 5 | 6 | int main() { 7 | double la[3]; 8 | int i, rv; 9 | rv = getloadavg(la, 3); 10 | for(i=0;i 2 | #include 3 | #include 4 | #include 5 | 6 | int main() { 7 | int i; 8 | struct utmpx *ut; 9 | char **users = malloc(1024 * sizeof(*users)); 10 | int n_users = 0, n_uniq = 0; 11 | int allocd_users = 1024; 12 | 13 | while((ut = getutxent()) != NULL) { 14 | if(ut->ut_type == LOGIN_PROCESS || ut->ut_type == USER_PROCESS) { 15 | if(n_users == allocd_users) { 16 | allocd_users *= 2; 17 | users = realloc(users, (allocd_users * sizeof(*users))); 18 | if(!users) exit(-2); 19 | } 20 | users[n_users++] = strdup(ut->ut_user); 21 | } 22 | } 23 | qsort(users, n_users, sizeof(*users), (int (*)(const void *, const void *))strcmp); 24 | 25 | printf("logged_in_users\ts\t"); 26 | for(i=0;i/dev/null) 2 | ifeq (,$(NPM_BIN)) 3 | ifneq (,$(wildcard /opt/circonus/embedded/bin/npm)) 4 | NPM_BIN = /opt/circonus/embedded/bin/npm 5 | else ifneq (,$(wildcard /opt/circonus/bin/npm)) 6 | NPM_BIN = /opt/circonus/bin/npm 7 | else 8 | $(error Unable to locate npm command) 9 | endif 10 | endif 11 | 12 | install: 13 | $(NPM_BIN) install --production 14 | 15 | .PHONY: clean 16 | clean: 17 | -rm -rf node_modules 18 | -------------------------------------------------------------------------------- /plugins/docker/README.md: -------------------------------------------------------------------------------- 1 | # Docker 2 | 3 | ## dev setup 4 | 5 | ```sh 6 | 7 | cd .. 
8 | vagrant up 9 | vagrant ssh -c "sudo yum install -y docker-engine && service docker start" 10 | # WARNING - see note below 11 | vagrant ssh -c "sudo usermod -G docker nobody" 12 | ``` 13 | 14 | > **Security Note**: There are security implications with regards to _who_ can 15 | access the Docker API. User _nobody_ is added to the _docker_ group above for 16 | the purposes of _demonstration_ **only**. It is not a good idea to give 17 | the user _nobody_ access to the Docker API as **anything** running as that user 18 | would have full access to **control** Docker. (start/stop containers, manipulate images, etc.) 19 | 20 | get a sample container up and running to have something output for testing: 21 | 22 | ```sh 23 | vagrant ssh 24 | sudo docker run --name redis -d redis redis-server 25 | ``` 26 | 27 | ## install 28 | 29 | 1. Change to installation directory `cd /opt/circonus/nad/etc/node-agent.d/docker` 30 | 1. Make `make` or just do `npm install --production` if npm is in path 31 | 1. Create a symlink (from `docker/stats.js` or `docker/events.js`) in `/opt/circonus/etc/node-agent.d`. The base name of the symlink will be used as the first part of the metric names. (e.g. if using only stats, `cd /opt/circonus/etc/node-agent.d && ln -s docker/stats.js dockerstats.js`, NAD would add the docker container stats all prefixed with **dockerstats**.) 32 | 33 | ## stats 34 | 35 | metrics from running containers. cpu, memory, block io, and network. 36 | 37 | 38 | ## events 39 | 40 | docker events. note, events are pulled since the last request, or 60 seconds if it's the first request. 41 | 42 | 43 | ## config 44 | 45 | `/opt/circonus/etc/docker.json` 46 | 47 | Default configuration is `null` resulting in default settings being pulled from the environment (e.g. DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH) or using the default Docker socket `/var/run/docker.sock`. 
See [docker-modem](https://github.com/apocas/docker-modem) for Docker API connection settings/implementation details. 48 | 49 | ```json 50 | { 51 | "socketPath": "/var/run/docker.sock", 52 | "protocol": "", 53 | "host": "", 54 | "port": "", 55 | "version": "", 56 | "key": "", 57 | "cert": "", 58 | "ca": "", 59 | "timeout": 15, 60 | "checkSeverIdentity": true 61 | } 62 | ``` 63 | -------------------------------------------------------------------------------- /plugins/docker/events.js: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Circonus, Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | 'use strict'; 6 | 7 | const path = require('path'); 8 | 9 | const Events = require(path.resolve(path.join(__dirname, 'lib', 'events'))); 10 | 11 | class DockerEvents { 12 | 13 | /** 14 | * creates new instance 15 | */ 16 | constructor() { 17 | this.config = null; 18 | 19 | try { 20 | this.config = require(path.resolve(path.join(__dirname, '..', '..', 'docker.json'))); // eslint-disable-line global-require 21 | } catch (err) { 22 | if (err.code !== 'MODULE_NOT_FOUND') { 23 | console.error(err); 24 | } 25 | } 26 | } 27 | 28 | /** 29 | * called by nad to run the plugin 30 | * @arg {Object} plugin definition 31 | * @arg {Function} cb callback 32 | * @arg {Object} req http request object 33 | * @arg {Object} args instance arguments 34 | * @arg {String} instance id 35 | * @returns {Undefined} nothing 36 | */ 37 | run(plugin, cb, req, args, instance) { // eslint-disable-line max-params, no-unused-vars 38 | const containerEvents = new Events(this.config); 39 | 40 | containerEvents.getEvents((err, eventMetrics) => { 41 | if (err) { 42 | console.error(err); 43 | plugin.running = false; // eslint-disable-line no-param-reassign 44 | cb(plugin, { 'docker`api.error': err.message }); 45 | 46 | return; 47 | } 48 | 49 | plugin.running = false; // 
eslint-disable-line no-param-reassign 50 | cb(plugin, eventMetrics, instance); 51 | }); 52 | } 53 | 54 | } 55 | 56 | module.exports = DockerEvents; 57 | -------------------------------------------------------------------------------- /plugins/docker/lib/events/index.js: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Circonus, Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | 'use strict'; 6 | 7 | const Docker = require('docker-modem'); 8 | 9 | module.exports = class DockerDaemonEvents { 10 | 11 | /** 12 | * creates new instance 13 | * @arg {Object} opts docker options 14 | */ 15 | constructor(opts) { 16 | this.docker = new Docker(opts); 17 | this.lastCheck = null; 18 | } 19 | 20 | /** 21 | * getEvents fetches metrics from docker 22 | * @arg {Function} cb callback 23 | * @returns {Undefined} nothing 24 | * 25 | * cb called with err|null, metrics(object) 26 | */ 27 | getEvents(cb) { 28 | const self = this; 29 | const tm = Math.floor(Date.now() / 1000); 30 | 31 | if (this.lastCheck === null) { 32 | this.lastCheck = tm - 60; 33 | } 34 | 35 | const opts = { 36 | method : 'GET', 37 | options : { 38 | since : tm - (tm - this.lastCheck), 39 | until : tm/* , 40 | filters: { 41 | type: [ "container", "image" ] 42 | }*/ 43 | }, 44 | path : '/events?', 45 | statusCodes : { 46 | 200 : true, 47 | 500 : 'server error' 48 | } 49 | }; 50 | 51 | this.docker.dial(opts, (err, data) => { 52 | if (err) { 53 | cb(err); 54 | 55 | return; 56 | } 57 | 58 | self.lastCheck = tm; 59 | 60 | let eventList = []; 61 | 62 | try { 63 | eventList = data.split('\n'). 64 | filter((eventItem) => { 65 | return eventItem.length > 0; 66 | }). 
67 | map((eventItem) => { 68 | return JSON.parse(eventItem); 69 | }); 70 | } catch (err2) { 71 | cb(err2); 72 | 73 | return; 74 | } 75 | 76 | const currEvents = { num_events: eventList.length }; 77 | 78 | for (let i = 0; i < eventList.length; i++) { 79 | const eventItem = eventList[i]; 80 | const metricName = `${eventItem.Type}\`${eventItem.Action}`; 81 | 82 | if (!{}.hasOwnProperty.call(currEvents, metricName)) { 83 | currEvents[metricName] = 0; 84 | } 85 | currEvents[metricName] += 1; 86 | } 87 | cb(null, currEvents); 88 | }); 89 | } 90 | 91 | }; 92 | -------------------------------------------------------------------------------- /plugins/docker/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "circonus-nad-docker", 3 | "version": "0.1.0", 4 | "description": "expose docker events and container stats via NAD", 5 | "private": true, 6 | "bin": [ 7 | "stats.js", 8 | "events.js" 9 | ], 10 | "keywords": [ 11 | "circonus", 12 | "docker", 13 | "nad", 14 | "metrics", 15 | "events", 16 | "stats" 17 | ], 18 | "author": "matt maier (https://github.com/maier/)", 19 | "license": "GPL-3.0", 20 | "dependencies": { 21 | "docker-modem": "0.3.0" 22 | }, 23 | "repository": { 24 | "type": "git", 25 | "url": "git+https://github.com/maier/circonus-nad-plugins.git" 26 | }, 27 | "bugs": { 28 | "url": "https://github.com/maier/circonus-nad-plugins/issues" 29 | }, 30 | "homepage": "https://github.com/maier/circonus-nad-plugins#readme" 31 | } 32 | -------------------------------------------------------------------------------- /plugins/docker/stats.js: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Circonus, Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | 'use strict'; 6 | 7 | const path = require('path'); 8 | 9 | const Stats = require(path.resolve(path.join(__dirname, 'lib', 'stats'))); 10 | 11 | class DockerStats { 12 | 13 | /** 14 | * creates a new instance 15 | */ 16 | constructor() { 17 | this.config = null; 18 | 19 | try { 20 | this.config = require(path.resolve(path.join(__dirname, '..', '..', 'docker.json'))); // eslint-disable-line global-require 21 | } catch (err) { 22 | if (err.code !== 'MODULE_NOT_FOUND') { 23 | console.error(err); 24 | } 25 | } 26 | } 27 | 28 | /** 29 | * called by nad to run the plugin 30 | * @arg {Object} plugin definition 31 | * @arg {Function} cb callback 32 | * @arg {Object} req http request object 33 | * @arg {Object} args instance arguments 34 | * @arg {String} instance id 35 | * @returns {Undefined} nothing 36 | */ 37 | run(plugin, cb, req, args, instance) { // eslint-disable-line max-params, no-unused-vars 38 | const containerStats = new Stats(this.config); 39 | 40 | containerStats.getStats((err, metrics) => { 41 | if (err) { 42 | console.error(err); 43 | plugin.running = false; // eslint-disable-line no-param-reassign 44 | cb(plugin, { 'docker`api.error': err.message }); 45 | 46 | return; 47 | } 48 | 49 | plugin.running = false; // eslint-disable-line no-param-reassign 50 | cb(plugin, metrics, instance); 51 | }); 52 | } 53 | 54 | } 55 | 56 | module.exports = DockerStats; 57 | -------------------------------------------------------------------------------- /plugins/freebsd/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | $(MAKE) -C src 3 | $(MAKE) -C ../common 4 | 5 | clean: 6 | $(MAKE) -C src clean 7 | -------------------------------------------------------------------------------- /plugins/freebsd/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -- 2 | # 3 | # Common paths 4 | 5 | exec 2>&1 6 | set -e 7 | 8 | BIN_AWK=/usr/bin/awk 9 | BIN_BC=/usr/bin/bc 10 | 
BIN_CUT=/usr/bin/cut 11 | BIN_EXPR=/bin/expr 12 | BIN_NETSTAT=/usr/bin/netstat 13 | BIN_PSTAT=/usr/sbin/pstat 14 | BIN_SED=/usr/bin/sed 15 | BIN_SYSCTL=/sbin/sysctl 16 | 17 | PATH_CONF=@@CONF@@ 18 | -------------------------------------------------------------------------------- /plugins/freebsd/cpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -- 2 | # 3 | # Common CPU usage statistics, via sysctl 4 | 5 | . @@CONF@@/freebsd/common.sh 6 | 7 | # Print ordinary metrics 8 | print_cssum() { 9 | printf "%s\tL\t%s\n" $1 $2 10 | } 11 | 12 | # Print metrics normalized to a single CPU and 100Hz tick rate 13 | print_norm_cssum() { 14 | per_cpu_count="$(( $2 / $NCPUS))" 15 | rate_factor=`${BIN_BC} -e "scale=2; 100/$STATHZ" -e quit` 16 | value=`${BIN_BC} -e "$per_cpu_count*$rate_factor" -e quit` 17 | printf "%s\tL\t%.0f\n" $1 $value 18 | } 19 | 20 | NCPUS=`${BIN_SYSCTL} -n hw.ncpu` 21 | STATHZ=`${BIN_SYSCTL} -n kern.clockrate | ${BIN_AWK} '{ print $13 }'` 22 | 23 | ALLCPU=`${BIN_SYSCTL} -n kern.cp_time` 24 | CPU_USER_NORMAL=`echo $ALLCPU | ${BIN_CUT} -d' ' -f1` 25 | CPU_USER_NICE=`echo $ALLCPU | ${BIN_CUT} -d' ' -f2` 26 | CPU_SYS=`echo $ALLCPU | ${BIN_CUT} -d' ' -f3` 27 | CPU_IDLE_NORMAL=`echo $ALLCPU | ${BIN_CUT} -d' ' -f5` 28 | 29 | # Interrupts come from vm stats 30 | CPU_IRQ=`${BIN_SYSCTL} -n vm.stats.sys.v_intr` 31 | CPU_SOFTIRQ=`${BIN_SYSCTL} -n vm.stats.sys.v_soft` 32 | 33 | # Not implemented 34 | CPU_WAIT_IO=0 35 | CPU_STEAL=0 36 | CPU_GUEST=0 37 | CPU_GUEST_NICE=0 38 | 39 | # Summarize interrupts 40 | CPU_INTR=$(( $CPU_IRQ + $CPU_SOFTIRQ )) 41 | 42 | # Summarize kernel time 43 | # 44 | # "guest" and "guest_nice" are time spent running virtual CPUs, and count as 45 | # kernel time 46 | CPU_KERNEL=$(( $CPU_SYS + $CPU_GUEST + $CPU_GUEST_NICE )) 47 | 48 | # Summarize idle time 49 | # 50 | # "steal" is time while we, a guest, are runnable but a real CPU isn't 51 | # servicing our virtual CPU 52 | CPU_IDLE=$(( 
$CPU_IDLE_NORMAL + $CPU_STEAL )) 53 | 54 | # Summarize user time 55 | CPU_USER=$(( $CPU_USER_NORMAL + $CPU_USER_NICE )) 56 | 57 | # Context switches 58 | CTXT=`${BIN_SYSCTL} -n vm.stats.sys.v_swtch` 59 | 60 | # System calls 61 | SYSCALL=`${BIN_SYSCTL} -n vm.stats.sys.v_syscall` 62 | 63 | print_norm_cssum user $CPU_USER 64 | print_norm_cssum user\`normal $CPU_USER_NORMAL 65 | print_norm_cssum user\`nice $CPU_USER_NICE 66 | print_norm_cssum kernel $CPU_KERNEL 67 | print_norm_cssum kernel\`sys $CPU_SYS 68 | print_norm_cssum kernel\`guest $CPU_GUEST 69 | print_norm_cssum kernel\`guest_nice $CPU_GUEST_NICE 70 | print_norm_cssum idle $CPU_IDLE 71 | print_norm_cssum idle\`normal $CPU_IDLE_NORMAL 72 | print_norm_cssum idle\`steal $CPU_STEAL 73 | print_norm_cssum wait_io $CPU_WAIT_IO 74 | print_norm_cssum intr $CPU_INTR 75 | print_norm_cssum intr\`hard $CPU_IRQ 76 | print_norm_cssum intr\`soft $CPU_SOFTIRQ 77 | print_cssum context_switch $CTXT 78 | print_cssum syscall $SYSCALL 79 | -------------------------------------------------------------------------------- /plugins/freebsd/if.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -- 2 | 3 | # Network interface statistics 4 | 5 | . 
@@CONF@@/freebsd/common.sh 6 | 7 | ${BIN_NETSTAT} -i -b -n -W -f link | ${BIN_AWK} '{ 8 | if ($1 == "Name") next; 9 | if ($1 ~ /^lo[0-9]/) next; 10 | sub(/\*$/,"",$1); 11 | printf("%s`in_bytes\tL\t%d\n", $1, $8); 12 | printf("%s`in_packets\tL\t%d\n", $1, $5); 13 | printf("%s`in_errors\tL\t%d\n", $1, $6); 14 | printf("%s`out_bytes\tL\t%d\n", $1, $11); 15 | printf("%s`out_packets\tL\t%d\n", $1, $9); 16 | printf("%s`out_errors\tL\t%d\n", $1, $10); 17 | }' 18 | -------------------------------------------------------------------------------- /plugins/freebsd/src/Makefile: -------------------------------------------------------------------------------- 1 | CC=cc 2 | CFLAGS=-O2 3 | CPPFLAGS=-DHAVE_ZFS 4 | BINS=../fs.elf ../disk.elf 5 | 6 | all:: $(BINS) 7 | 8 | ../fs.elf: fs.c 9 | $(CC) $(CPPFLAGS) $(CFLAGS) -o $@ fs.c -lzfs -lzfs_core -luutil -lgeom 10 | 11 | ../disk.elf: disk.c 12 | $(CC) $(CPPFLAGS) $(CFLAGS) -o $@ disk.c -ldevstat 13 | 14 | clean:: 15 | /bin/rm -f $(BINS) 16 | -------------------------------------------------------------------------------- /plugins/freebsd/src/disk.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #define MAXSHOWDEVS 1024 6 | 7 | void dump(struct devstat *d) { 8 | if(d->device_type != DEVSTAT_TYPE_DIRECT && 9 | d->device_type != DEVSTAT_TYPE_IF_SCSI && 10 | d->device_type != DEVSTAT_TYPE_IF_IDE) 11 | return; 12 | #define FMT(name, type, fmt, cast, el) printf("%s%d`%s\t" #type "\t" #fmt "\n", d->device_name, d->device_number, #name, (cast)d->el) 13 | FMT(reads, L, %llu, unsigned long long, operations[DEVSTAT_READ]); 14 | FMT(nread, L, %llu, unsigned long long, bytes[DEVSTAT_READ]); 15 | FMT(writes, L, %llu, unsigned long long, operations[DEVSTAT_WRITE]); 16 | FMT(nwritten, L, %llu, unsigned long long, bytes[DEVSTAT_WRITE]); 17 | printf("%s%d`qlen\tl\t%d\n", d->device_name, d->device_number, d->start_count - d->end_count); 18 | } 19 | 20 | int main() { 21 | 
int i, nstats, rv; 22 | struct statinfo *stats; 23 | struct device_selection *dev_select; 24 | long generation; 25 | int num_devices, num_selected; 26 | int num_selections; 27 | long select_generation; 28 | 29 | if(devstat_checkversion(NULL) != 0) { 30 | printf("error\ts\t%s\n", devstat_errbuf); 31 | return -1; 32 | } 33 | nstats = devstat_getnumdevs(NULL); 34 | stats = calloc(sizeof(*stats), nstats); 35 | for(i=0;idinfo->numdevs; 50 | generation = stats->dinfo->generation; 51 | 52 | dev_select = NULL; 53 | 54 | /* 55 | * At this point, selectdevs will almost surely indicate that the 56 | * device list has changed, so we don't look for return values of 0 57 | * or 1. If we get back -1, though, there is an error. 58 | */ 59 | if (devstat_selectdevs(&dev_select, &num_selected, &num_selections, 60 | &select_generation, generation, stats->dinfo->devices, num_devices, 61 | NULL, 0, NULL, 0, DS_SELECT_ADD, MAXSHOWDEVS, 0) == -1) { 62 | printf("error\ts\t%s\n", "devlist changed"); 63 | return -1; 64 | } 65 | 66 | rv = devstat_getdevs(NULL, stats); 67 | if(rv != 0) { 68 | printf("error\ts\t%s\n", devstat_errbuf); 69 | return -1; 70 | } 71 | 72 | for(i=0;inumdevs;ndev++) { 76 | struct devstat *c = &stats[i].dinfo->devices[ndev]; 77 | dump(c); 78 | } 79 | } 80 | return 0; 81 | } 82 | -------------------------------------------------------------------------------- /plugins/freebsd/vm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -- 2 | # 3 | # Memory usage statistics, via sysctl 4 | 5 | . 
@@CONF@@/freebsd/common.sh 6 | 7 | print_vm() { 8 | printf "%s\`%s\tL\t%s\n" $1 $2 $3 9 | } 10 | 11 | # Total system memory: active + wired + cache + inactive + free 12 | # "Used" is active + wired 13 | # "Free" is cache + inactive + free 14 | # These are counted in pages, so we multiply by pagesize to get bytes 15 | 16 | PGSIZE=`${BIN_SYSCTL} -n vm.stats.vm.v_page_size` 17 | PG_ACTIVE=`${BIN_SYSCTL} -n vm.stats.vm.v_active_count` 18 | PG_WIRED=`${BIN_SYSCTL} -n vm.stats.vm.v_wire_count` 19 | PG_CACHE=`${BIN_SYSCTL} -n vm.stats.vm.v_cache_count` 20 | PG_INACTIVE=`${BIN_SYSCTL} -n vm.stats.vm.v_inactive_count` 21 | PG_FREE=`${BIN_SYSCTL} -n vm.stats.vm.v_free_count` 22 | 23 | ACTIVE=$(($PG_ACTIVE*$PGSIZE)) 24 | WIRED=$(($PG_WIRED*$PGSIZE)) 25 | CACHE=$(($PG_CACHE*$PGSIZE)) 26 | INACTIVE=$(($PG_INACTIVE*$PGSIZE)) 27 | FREE=$(($PG_FREE*$PGSIZE)) 28 | 29 | MEM_USED=$(($ACTIVE+$WIRED)) 30 | MEM_TOTAL=$(($ACTIVE+$WIRED+$CACHE+$INACTIVE+$FREE)) 31 | MEM_PERC=`${BIN_BC} -e "scale=2;$MEM_USED/$MEM_TOTAL" -e quit` 32 | 33 | # Swap 34 | # There doesn't seem to be a direct sysctl for swap used. Use pstat instead. 
35 | # pstat seems to always use 'M' as the unit for swap 36 | stats=`${BIN_PSTAT} -T | ${BIN_AWK} '$2 == "swap" { print $1; }'` 37 | SWAP_USED=$((`echo $stats | ${BIN_CUT} -d'/' -f1 | ${BIN_SED} -e 's/M//'`*1048576)) 38 | SWAP_TOTAL=$((`echo $stats | ${BIN_CUT} -d'/' -f2 | ${BIN_SED} -e 's/M//'`*1048576)) 39 | SWAP_FREE=$(($SWAP_TOTAL-$SWAP_USED)) 40 | SWAP_PERC=`${BIN_BC} -e "scale=2;$SWAP_USED/$SWAP_TOTAL" -e quit` 41 | 42 | print_vm memory total $(($ACTIVE+$WIRED+$CACHE+$INACTIVE+$FREE)) 43 | print_vm memory used $(($ACTIVE+$WIRED)) 44 | print_vm memory free $(($CACHE+$INACTIVE+$FREE)) 45 | printf "memory\`percent_used\tn\t%0.2f\n" $MEM_PERC 46 | print_vm swap total $SWAP_TOTAL 47 | print_vm swap used $SWAP_USED 48 | print_vm swap free $SWAP_FREE 49 | printf "swap\`percent_used\tn\t%0.2f\n" $SWAP_PERC 50 | -------------------------------------------------------------------------------- /plugins/freebsd/zfsinfo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -- 2 | 3 | . 
@@CONF@@/freebsd/common.sh 4 | 5 | ${BIN_SYSCTL} kstat.zfs vfs.zfs | ${BIN_AWK} -F':' '{ 6 | printf("%s\tL\t%d\n",$1,$2); 7 | }' 8 | -------------------------------------------------------------------------------- /plugins/haproxy/haproxy_backends.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PATH="/opt/local/bin:/opt/local/sbin:/usr/bin:/usr/sbin"; 4 | 5 | # Set field delimiters to line breaks 6 | OLDIFS=$IFS 7 | LINEBREAKS=$( echo -en "\n\b" ) 8 | IFS=$LINEBREAKS 9 | 10 | # get all backends from all proxies 11 | backends=`echo "show stat -1 2 -1" | nc -U /var/run/haproxy.sock` 12 | for backend in $backends; do 13 | 14 | # skip comments and headers 15 | if [[ "$backend" = "#"* ]]; then continue; fi 16 | 17 | IFS="," 18 | DATA=( `echo "${backend}"` ) 19 | PREFIX="haproxy:${DATA[0]}:backend:" 20 | 21 | echo -e "${PREFIX}queued\tL\t${DATA[2]}" 22 | echo -e "${PREFIX}queue_max\tL\t${DATA[3]}" 23 | echo -e "${PREFIX}sessions\tL\t${DATA[4]}" 24 | echo -e "${PREFIX}session_max\tL\t${DATA[5]}" 25 | echo -e "${PREFIX}session_limit\tL\t${DATA[6]}" 26 | echo -e "${PREFIX}session_count\tL\t${DATA[7]}" 27 | echo -e "${PREFIX}bytes_in\tL\t${DATA[8]}" 28 | echo -e "${PREFIX}bytes_out\tL\t${DATA[9]}" 29 | echo -e "${PREFIX}denied_requests\tL\t${DATA[10]}" 30 | echo -e "${PREFIX}denied_responses\tL\t${DATA[11]}" 31 | echo -e "${PREFIX}request_errors\tL\t${DATA[12]}" 32 | echo -e "${PREFIX}conn_err\tL\t${DATA[13]}" 33 | echo -e "${PREFIX}resp_err\tL\t${DATA[14]}" 34 | echo -e "${PREFIX}retries\tL\t${DATA[15]}" 35 | echo -e "${PREFIX}redispatches\tL\t${DATA[16]}" 36 | echo -e "${PREFIX}status\ts\t${DATA[17]}" 37 | echo -e "${PREFIX}total_weight\ts\t${DATA[18]}" 38 | echo -e "${PREFIX}active_servers\ts\t${DATA[19]}" 39 | echo -e "${PREFIX}backup_servers\ts\t${DATA[20]}" 40 | echo -e "${PREFIX}checks_failed\tL\t${DATA[21]}" 41 | echo -e "${PREFIX}checks_down\tL\t${DATA[22]}" 42 | echo -e 
"${PREFIX}last_status_change\tL\t${DATA[23]}" 43 | echo -e "${PREFIX}downtime\tL\t${DATA[24]}" 44 | echo -e "${PREFIX}queue_limit\tL\t${DATA[25]}" 45 | echo -e "${PREFIX}instance_id\tL\t${DATA[26]}" 46 | echo -e "${PREFIX}proxy_id\tL\t${DATA[27]}" 47 | echo -e "${PREFIX}service_id\tL\t${DATA[28]}" 48 | echo -e "${PREFIX}throttle\tL\t${DATA[29]}" 49 | echo -e "${PREFIX}server_selected_count\tL\t${DATA[30]}" 50 | echo -e "${PREFIX}type\tI\t${DATA[32]}" 51 | echo -e "${PREFIX}session_rate_per_s\tL\t${DATA[33]}" 52 | echo -e "${PREFIX}session_limit_per_s\tL\t${DATA[34]}" 53 | echo -e "${PREFIX}session_max_per_s\tL\t${DATA[35]}" 54 | echo -e "${PREFIX}check_status\ts\t${DATA[36]}" 55 | echo -e "${PREFIX}check_code\ts\t${DATA[37]}" 56 | echo -e "${PREFIX}check_duration\tL\t${DATA[38]}" 57 | echo -e "${PREFIX}1xx_responses\tL\t${DATA[39]}" 58 | echo -e "${PREFIX}2xx_responses\tL\t${DATA[40]}" 59 | echo -e "${PREFIX}3xx_responses\tL\t${DATA[41]}" 60 | echo -e "${PREFIX}4xx_responses\tL\t${DATA[42]}" 61 | echo -e "${PREFIX}5xx_responses\tL\t${DATA[43]}" 62 | echo -e "${PREFIX}other_responses\tL\t${DATA[44]}" 63 | echo -e "${PREFIX}failed_health_checks\tL\t${DATA[45]}" 64 | echo -e "${PREFIX}requests_per_second\tL\t${DATA[46]}" 65 | echo -e "${PREFIX}request_max_per_second\tL\t${DATA[47]}" 66 | echo -e "${PREFIX}request_count\tL\t${DATA[48]}" 67 | echo -e "${PREFIX}client_aborts\tL\t${DATA[49]}" 68 | echo -e "${PREFIX}server_aborts\tL\t${DATA[50]}" 69 | 70 | done 71 | 72 | # restore field delimiter 73 | IFS=$OLDIFS 74 | -------------------------------------------------------------------------------- /plugins/illumos/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | (cd src && gmake) 3 | (cd ../common && gmake) 4 | -------------------------------------------------------------------------------- /plugins/illumos/cpu-1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 
# Emit per-CPU kernel statistics via the cpu_stat kstat module.
#
# Sources the shared kstat helper library; its location differs depending
# on whether the plugin is invoked from the plugin root or from within
# the illumos subdirectory.
#
# fix: quote $0/$DIR expansions so the script survives an install path
# containing whitespace.
DIR=`dirname "$0"`
if [ -d "$DIR/illumos" ]; then . "$DIR/illumos/lib/kstat.lib"
else . "$DIR/lib/kstat.lib"
fi

_kstat -m cpu_stat
Different 19 | OSes and/or local security modfications may require different privileges: 20 | 21 | nobody::::type=normal;defaultpriv=basic,dtrace_kernel,file_dac_read 22 | 23 | Note: if there is already a line for nobody, modify it by adding the additional privileges. 24 | 25 | ### enabling the plugin ### 26 | 27 | 1. Disable NAD: 28 | 29 | svcadm disable nad 30 | 31 | 2. Enable the plugin: 32 | 33 | cd /opt/circonus/etc/node-agent.d 34 | ln -s illumous/fq2.js fq.js 35 | 36 | 3. Add additional privileges for nobody if not already done. (see above) 37 | 38 | 4. Enable NAD: 39 | 40 | svcadm enable nad 41 | 42 | 43 | ### disabling the plugin ### 44 | 45 | 1. Disable NAD: 46 | 47 | svcadm disable nad 48 | 49 | 2. Remove the symlink for the plugin: 50 | 51 | cd /opt/circonus/etc/node-agent.d 52 | rm fq.js 53 | 54 | 3. Remove the additional privileges for nobody that were added to /etc/user_attr. 55 | If a line was added, remove it. If an existing line was modified, remove the modifications. 56 | 57 | 4. 
Enable NAD: 58 | 59 | svcadm enable nad 60 | 61 | */ 62 | 63 | const path = require('path'); 64 | const Dtrace = require('dtrace_aggr2'); 65 | 66 | const DEFAULT_SAMPLES = 60; 67 | const MILLISECOND = 1000; 68 | 69 | let singleton = null; 70 | 71 | module.exports = class FQ { 72 | 73 | /** 74 | * creates new instance 75 | */ 76 | constructor() { 77 | if (singleton !== null) { 78 | return singleton; 79 | } 80 | 81 | this.dtrace = new Dtrace(path.resolve(path.join(__dirname, 'fq.dtrace'))); 82 | this.dtrace.start(); 83 | 84 | singleton = this; // eslint-disable-line consistent-this 85 | 86 | return singleton; 87 | } 88 | 89 | /** 90 | * called by nad to run the plugin 91 | * @arg {Object} plugin definition 92 | * @arg {Function} cb callback 93 | * @arg {Object} req http request object 94 | * @arg {Object} args instance arguments 95 | * @arg {String} instance id 96 | * @returns {Undefined} nothing 97 | */ 98 | run(plugin, cb, req, args, instance) { // eslint-disable-line max-params, no-unused-vars 99 | let samples = DEFAULT_SAMPLES; 100 | 101 | if (req && {}.hasOwnProperty.call(req, 'headers') && {}.hasOwnProperty.call(req.headers, 'x-reconnoiter-period')) { 102 | samples = Math.floor(req.headers['x-reconnoiter-period'] / MILLISECOND); 103 | } 104 | 105 | const metrics = this.dtrace.flush(samples); 106 | 107 | plugin.running = false; // eslint-disable-line no-param-reassign 108 | cb(plugin, metrics, instance); 109 | } 110 | 111 | }; 112 | -------------------------------------------------------------------------------- /plugins/illumos/if.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | DIR=`dirname $0` 3 | if [ -d $DIR/illumos ]; then . $DIR/illumos/lib/kstat.lib 4 | else . 
$DIR/lib/kstat.lib 5 | fi 6 | 7 | kstat_opts="-p -m" 8 | distro=`awk 'NR==1 { print $1 }' /etc/release` 9 | case $distro in 10 | SmartOS|Joyent) 11 | kstat_opts="-p -c net -n" 12 | ;; 13 | esac 14 | 15 | IFACES=`ifconfig -a | awk -F: '/^[^\t]/ {if($1 != "lo0") {print $1}}' | uniq` 16 | 17 | for iface in $IFACES 18 | do 19 | /usr/bin/kstat $kstat_opts $iface | \ 20 | /usr/xpg4/bin/awk '{ 21 | if(match($1, /:(class|crtime|snaptime|zonename)$/)) next; \ 22 | if(match($1, /_fanout[0-9]+:/)) next; \ 23 | if(match($1, /_misc_/) && !match($1, /(brd|multi)/)) next; \ 24 | if(index($2,".")) { print $1"\tn\t"$2; } \ 25 | else { print $1"\tL\t"$2; } 26 | }' | \ 27 | sed -e 's/^z[0-9]*_//g;' \ 28 | -e 's/_[hs]wlane[0-9]*//g;' \ 29 | -e 's/:[0-9][0-9]*:mac[^:]*//;' 30 | done 31 | -------------------------------------------------------------------------------- /plugins/illumos/iflink.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | KSTAT="/usr/bin/kstat" 6 | SED="/usr/gnu/bin/sed" 7 | 8 | [[ ! -x $KSTAT ]] && { 9 | echo "kstat '$KSTAT' not found."; 10 | exit 1; 11 | } 12 | 13 | [[ ! -x $SED ]] && { 14 | echo "sed '$SED' not found."; 15 | exit 1; 16 | } 17 | 18 | kstat_opts="-p -m link" 19 | 20 | $KSTAT $kstat_opts | $SED -e '/:\(class\|crtime\|snaptime\)/d; s/^link:[0-9]*://; s/:/`/g; s/\t/\tL\t/' 21 | -------------------------------------------------------------------------------- /plugins/illumos/io.dtrace: -------------------------------------------------------------------------------- 1 | #!/usr/sbin/dtrace -s 2 | #pragma D option quiet 3 | io:::start 4 | { 5 | ts[arg0] = timestamp; 6 | } 7 | io:::done 8 | /this->ts = ts[arg0]/ 9 | { 10 | this->delta = timestamp - this->ts; 11 | this->us = this->delta / 1000; 12 | @l[strjoin(args[1]->dev_statname,strjoin("`",args[0]->b_flags & B_READ ? 
"read_latency_us" : "write_latency_us"))] = llquantize(this->us, 10, 0, 6, 100); 13 | @l[strjoin(args[1]->dev_statname,"`latency_us")] = llquantize(this->us, 10, 0, 6, 100); 14 | @l[strjoin(args[1]->dev_name,"`latency_us")] = llquantize(this->us, 10, 0, 6, 100); 15 | ts[arg0] = 0; 16 | } 17 | tick-1sec 18 | { 19 | printf(">START\n"); 20 | printa("=%s%@d\n", @l); 21 | printf(">END\n"); 22 | 23 | trunc(@l); 24 | } 25 | -------------------------------------------------------------------------------- /plugins/illumos/io2.js: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Circonus, Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | 'use strict'; 6 | 7 | /* 8 | This plugin requires node v4.4+ (if the version of node installed is v0.10, use the io.js plugin). 9 | 10 | To use this module the user NAD runs as needs privileges to run DTrace. 11 | 12 | By default NAD runs as the unprivileged user 'nobody'. The 'nobody' user, by default, 13 | cannot run dtrace. What additional privileges are required can be displayed by running 14 | 'ppriv -eD /opt/circonus/etc/node\-agent.d/illumos/io.dtrace' as user nobody. The final 15 | attempt will simply yield an error 'dtrace: failed to initialize dtrace: DTrace 16 | requires additional privileges' which indicates the 'dtrace_kernel' privilege 17 | is required, 'kernel' because this plugin tracks system-wide IO. 18 | 19 | Adding a line such as the following to /etc/user_attr will add the two privileges 20 | required (dtrace_kernel and file_dac_read) in a default OmniOS install. Different 21 | OSes and/or local security modfications may require different privileges: 22 | 23 | nobody::::type=normal;defaultpriv=basic,dtrace_kernel,file_dac_read 24 | 25 | Note: if there is already a line for nobody, modify it by adding the additional privileges. 
26 | 27 | ### enabling the IO plugin ### 28 | 29 | 1. Disable NAD: 30 | 31 | svcadm disable nad 32 | 33 | 2. Enable the IO plugin: 34 | 35 | cd /opt/circonus/etc/node-agent.d 36 | ln -s illumous/io2.js io.js 37 | 38 | 3. Add additional privileges for nobody if not already done. (see above) 39 | 40 | 4. Enable NAD: 41 | 42 | svcadm enable nad 43 | 44 | 45 | ### disabling the IO plugin ### 46 | 47 | 1. Disable NAD: 48 | 49 | svcadm disable nad 50 | 51 | 2. Remove the symlink for the IO plugin: 52 | 53 | cd /opt/circonus/etc/node-agent.d 54 | rm io.js 55 | 56 | 3. Remove the additional privileges for nobody that were added to /etc/user_attr. 57 | If a line was added, remove it. If an existing line was modified, remove the modifications. 58 | 59 | 4. Enable NAD: 60 | 61 | svcadm enable nad 62 | 63 | */ 64 | 65 | const path = require('path'); 66 | const Dtrace = require('dtrace_aggr2'); 67 | 68 | const DEFAULT_SAMPLES = 60; 69 | const MILLISECOND = 1000; 70 | 71 | let singleton = null; 72 | 73 | module.exports = class IO { 74 | 75 | /** 76 | * creates new instance 77 | */ 78 | constructor() { 79 | if (singleton !== null) { 80 | return singleton; 81 | } 82 | 83 | this.dtrace = new Dtrace(path.resolve(path.join(__dirname, 'io.dtrace'))); 84 | this.dtrace.start(); 85 | 86 | singleton = this; // eslint-disable-line consistent-this 87 | 88 | return singleton; 89 | } 90 | 91 | /** 92 | * called by nad to run the plugin 93 | * @arg {Object} plugin definition 94 | * @arg {Function} cb callback 95 | * @arg {Object} req http request object 96 | * @arg {Object} args instance arguments 97 | * @arg {String} instance id 98 | * @returns {Undefined} nothing 99 | */ 100 | run(plugin, cb, req, args, instance) { // eslint-disable-line max-params, no-unused-vars 101 | let samples = DEFAULT_SAMPLES; 102 | 103 | if (req && {}.hasOwnProperty.call(req, 'headers') && {}.hasOwnProperty.call(req.headers, 'x-reconnoiter-period')) { 104 | samples = Math.floor(req.headers['x-reconnoiter-period'] / 
MILLISECOND); 105 | } 106 | 107 | const metrics = this.dtrace.flush(samples); 108 | 109 | plugin.running = false; // eslint-disable-line no-param-reassign 110 | cb(plugin, metrics, instance); 111 | } 112 | 113 | }; 114 | -------------------------------------------------------------------------------- /plugins/illumos/lib/kstat.lib: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | _kstat() { 4 | for c in $*; do 5 | if [ "$c" = "--" ]; then 6 | shift; 7 | break; 8 | fi 9 | args="$args $c" 10 | shift 11 | done 12 | if [ -n "$*" ]; then 13 | /usr/bin/kstat -p $args | \ 14 | /usr/xpg4/bin/awk '{ 15 | if(match($1, /:(class|crtime|snaptime)$/)) next; \ 16 | if(index($2,".")) { print $1"\tn\t"$2; } \ 17 | else { print $1"\tL\t"$2; } 18 | }' | $* 19 | else 20 | /usr/bin/kstat -p $args | \ 21 | /usr/xpg4/bin/awk '{ 22 | if(match($1, /:(class|crtime|snaptime)$/)) next; \ 23 | if(index($2,".")) { print $1"\tn\t"$2; } \ 24 | else { print $1"\tL\t"$2; } 25 | }' 26 | fi 27 | } 28 | 29 | _kstat_val() { 30 | for c in $*; do 31 | if [ "$c" = "--" ]; then 32 | shift; 33 | break; 34 | fi 35 | args="$args $c" 36 | shift 37 | done 38 | if [ -n "$*" ]; then 39 | /usr/bin/kstat -p $args | \ 40 | /usr/xpg4/bin/awk '{ 41 | if(match($1, /:(class|crtime|snaptime)$/)) next; \ 42 | if(index($2,".")) { print $2; } \ 43 | else { print $2; } 44 | }' | $* 45 | else 46 | /usr/bin/kstat -p $args | \ 47 | /usr/xpg4/bin/awk '{ 48 | if(match($1, /:(class|crtime|snaptime)$/)) next; \ 49 | if(index($2,".")) { print $2; } \ 50 | else { print $2; } 51 | }' 52 | fi 53 | } 54 | -------------------------------------------------------------------------------- /plugins/illumos/sdinfo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | DIR=`dirname $0` 3 | if [ -d $DIR/illumos ]; then . $DIR/illumos/lib/kstat.lib 4 | else . 
$DIR/lib/kstat.lib 5 | fi 6 | 7 | _kstat -m sd 8 | -------------------------------------------------------------------------------- /plugins/illumos/smf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ((cat ; /bin/svcs -H) | sed -e 's/\*//g;' | cut -f1 -d' ' | sort | uniq -c | nawk '{print "state`"$2"\tL\t"($1-1);}') << EOF 4 | online 5 | disabled 6 | maintenance 7 | legacy_run 8 | EOF 9 | /bin/svcs -s stime | nawk 'BEGIN{S["maintenance"] = "[[null]]"; S["offline"] = "[[null]]"; } /^(offline|maintenance)/ {S[$1] = $3;} END {print("service`offline\ts\t" S["offline"]); print("service`maintenance\ts\t" S["maintenance"]);}' 10 | -------------------------------------------------------------------------------- /plugins/illumos/src/Makefile: -------------------------------------------------------------------------------- 1 | CC=gcc 2 | HAVE_LOGICALUSED:=$(shell grep ZFS_PROP_LOGICALUSED /usr/include/sys/fs/zfs.h) 3 | CFLAGS+=-D_LARGEFILE64_SOURCE 4 | ifeq ($(HAVE_LOGICALUSED),) 5 | # you don't have these properties 6 | else 7 | CFLAGS+=-DHAVE_LOGICAL_USED 8 | endif 9 | 10 | all: ../aggcpu.elf ../cpu.elf ../fs.elf ../zpoolio.elf ../swap.elf 11 | 12 | ../aggcpu.elf: aggcpu.c 13 | $(CC) -o $@ aggcpu.c -lkstat 14 | 15 | ../cpu.elf: cpu.c 16 | $(CC) -o $@ cpu.c -lkstat 17 | 18 | ../fs.elf: fs.c 19 | $(CC) -m64 $(CFLAGS) -o $@ fs.c -lzfs 20 | 21 | ../swap.elf: swap.c 22 | $(CC) -m64 $(CFLAGS) -o $@ swap.c 23 | 24 | ../zpoolio.elf: zpoolio.c 25 | $(CC) -m64 -o $@ zpoolio.c -lzfs -lnvpair 26 | -------------------------------------------------------------------------------- /plugins/illumos/src/cpu.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #define CSSUM(name) sum.cpu_sysinfo.name += cpu.cpu_sysinfo.name 9 | 10 | int main(int argc, char **argv) { 11 | kstat_ctl_t *kc; 12 | kstat_t *ksp; 13 | 
kstat_io_t kio; 14 | kstat_named_t *knp; 15 | int ncpus = 0; 16 | long scale; 17 | cpu_stat_t sum; 18 | cpu_stat_t cpu; 19 | 20 | /* scale for clock frequency */ 21 | scale = sysconf(_SC_CLK_TCK)/100; 22 | memset(&sum, 0, sizeof(sum)); 23 | kc = kstat_open(); 24 | ksp = kstat_lookup(kc, "cpu_stat", -1, NULL); 25 | for (; ksp != NULL; ksp = ksp->ks_next) { 26 | if(!strcmp(ksp->ks_module, "cpu_stat")) { 27 | ncpus++; 28 | kstat_read(kc,ksp,&cpu); 29 | CSSUM(cpu[CPU_IDLE]); 30 | CSSUM(cpu[CPU_USER]); 31 | CSSUM(cpu[CPU_KERNEL]); 32 | CSSUM(cpu[CPU_WAIT]); 33 | CSSUM(wait[W_IO]); 34 | CSSUM(intr); 35 | CSSUM(inv_swtch); 36 | CSSUM(pswitch); 37 | CSSUM(syscall); 38 | } 39 | } 40 | 41 | /* 42 | * Some stats are not implemented (yet), so make them zero. 43 | */ 44 | printf("%s\tL\t%llu\n", "user", sum.cpu_sysinfo.cpu[CPU_USER]/ncpus/scale); 45 | printf("%s\tL\t%llu\n", "user`normal", sum.cpu_sysinfo.cpu[CPU_USER]/ncpus/scale); 46 | printf("%s\tL\t%llu\n", "user`nice", 0); 47 | printf("%s\tL\t%llu\n", "kernel", sum.cpu_sysinfo.cpu[CPU_KERNEL]/ncpus/scale); 48 | printf("%s\tL\t%llu\n", "kernel`sys", sum.cpu_sysinfo.cpu[CPU_KERNEL]/ncpus/scale); 49 | printf("%s\tL\t%llu\n", "kernel`guest", 0); 50 | printf("%s\tL\t%llu\n", "kernel`guest_nice", 0); 51 | printf("%s\tL\t%llu\n", "idle", sum.cpu_sysinfo.cpu[CPU_IDLE]/ncpus/scale); 52 | printf("%s\tL\t%llu\n", "idle`normal", sum.cpu_sysinfo.cpu[CPU_IDLE]/ncpus/scale); 53 | printf("%s\tL\t%llu\n", "idle`steal", 0); 54 | printf("%s\tL\t%llu\n", "wait_io", sum.cpu_sysinfo.wait[W_IO]); 55 | printf("%s\tL\t%llu\n", "intr", sum.cpu_sysinfo.intr); 56 | /* We do not distinguish hard from soft interrupts on illumos */ 57 | printf("%s\tL\t%s\n", "intr`hard", "[[null]]"); 58 | printf("%s\tL\t%s\n", "intr`soft", "[[null]]"); 59 | printf("%s\tL\t%llu\n", "context_switch", (sum.cpu_sysinfo.inv_swtch + sum.cpu_sysinfo.pswitch)); 60 | printf("%s\tL\t%llu\n", "syscall", sum.cpu_sysinfo.syscall); 61 | 62 | return 0; 63 | } 64 | 
-------------------------------------------------------------------------------- /plugins/illumos/src/fs.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | static const char *suppress_fstype[] = { 12 | "autofs", "ctfs", "dev", "fd", "lofs", "mntfs", "objfs", "proc", NULL 13 | }; 14 | 15 | int main(int argc, char **argv) { 16 | struct extmnttab mnt; 17 | FILE *fp; 18 | 19 | fp = fopen("/etc/mnttab", "r"); 20 | if(!fp) { 21 | perror("fopen"); 22 | exit(-1); 23 | } 24 | 25 | while(getextmntent(fp, &mnt, sizeof (struct extmnttab)) == 0) { 26 | struct statvfs buf; 27 | int i; 28 | 29 | for(i=0;suppress_fstype[i] != NULL;i++) 30 | if(!strcmp(mnt.mnt_fstype, suppress_fstype[i])) break; 31 | 32 | if (suppress_fstype[i] == NULL && statvfs(mnt.mnt_mountp, &buf) == 0) { 33 | if(!strcmp(mnt.mnt_fstype, "zfs")) { 34 | uint64_t used, avail; 35 | uint64_t *space_used = NULL, *space_avail = NULL; 36 | libzfs_handle_t *zfsh = libzfs_init(); 37 | zfs_handle_t *handle = zfs_path_to_zhandle(zfsh, (char *)mnt.mnt_mountp, ZFS_TYPE_FILESYSTEM); 38 | if(handle) { 39 | char source[ZFS_MAXPROPLEN]; 40 | zprop_source_t srctype; 41 | int rv; 42 | #define ZFS_PULL_N_PRINT(prop, name, T, F, expr) do { \ 43 | uint64_t datum; \ 44 | if(zfs_prop_get_numeric(handle, prop, \ 45 | &datum, &srctype, source, sizeof(source)) == 0) { \ 46 | printf("zfs`%s`" name "\t" T" \t" F "\n", mnt.mnt_mountp, expr); \ 47 | } \ 48 | } while(0) 49 | 50 | uint64_t used = -1, avail = -1; 51 | if(zfs_prop_get_numeric(handle, ZFS_PROP_USEDDS, 52 | &used, &srctype, 53 | source, sizeof(source)) == 0) { 54 | printf("zfs`%s`used\tL\t%llu\n", mnt.mnt_mountp, used); 55 | } 56 | if(zfs_prop_get_numeric(handle, ZFS_PROP_AVAILABLE, 57 | &avail, &srctype, 58 | source, sizeof(source)) == 0) { 59 | printf("zfs`%s`avail\tL\t%llu\n", mnt.mnt_mountp, avail); 60 | } 61 | if(used 
!= -1 && avail != -1) { 62 | printf("zfs`%s`used_percent\tn\t%f\n", mnt.mnt_mountp, 100.0 * (used / (double)(used + avail))); 63 | } 64 | 65 | ZFS_PULL_N_PRINT(ZFS_PROP_USEDCHILD, "used_children", "L", "%llu", datum); 66 | ZFS_PULL_N_PRINT(ZFS_PROP_USEDSNAP, "used_snapshot", "L", "%llu", datum); 67 | ZFS_PULL_N_PRINT(ZFS_PROP_REFERENCED, "referenced", "L", "%llu", datum); 68 | ZFS_PULL_N_PRINT(ZFS_PROP_RECORDSIZE, "record_size", "L", "%llu", datum); 69 | ZFS_PULL_N_PRINT(ZFS_PROP_QUOTA, "quota", "L", "%llu", datum); 70 | ZFS_PULL_N_PRINT(ZFS_PROP_RESERVATION, "reservation", "L", "%llu", datum); 71 | ZFS_PULL_N_PRINT(ZFS_PROP_REFRESERVATION, "ref_reservation", "L", "%llu", datum); 72 | ZFS_PULL_N_PRINT(ZFS_PROP_USEDREFRESERV, "ref_reservation_used", "L", "%llu", datum); 73 | #ifdef HAVE_LOGICAL_USED 74 | ZFS_PULL_N_PRINT(ZFS_PROP_LOGICALUSED, "logical_used", "L", "%llu", datum); 75 | ZFS_PULL_N_PRINT(ZFS_PROP_LOGICALREFERENCED, "logical_referenced", "L", "%llu", datum); 76 | #endif 77 | ZFS_PULL_N_PRINT(ZFS_PROP_COMPRESSRATIO, "compress_ratio", "n", "%f", (double)datum/100.0); 78 | zfs_close(handle); 79 | } 80 | libzfs_fini(zfsh); 81 | } 82 | else { 83 | printf("fs`%s`f_bsize\tL\t%llu\n", mnt.mnt_mountp, buf.f_bsize); 84 | printf("fs`%s`f_frsize\tL\t%llu\n", mnt.mnt_mountp, buf.f_frsize); 85 | printf("fs`%s`f_blocks\tL\t%llu\n", mnt.mnt_mountp, buf.f_blocks); 86 | printf("fs`%s`f_bfree\tL\t%llu\n", mnt.mnt_mountp, buf.f_bfree); 87 | printf("fs`%s`f_bavail\tL\t%llu\n", mnt.mnt_mountp, buf.f_bavail); 88 | printf("fs`%s`f_files\tL\t%llu\n", mnt.mnt_mountp, buf.f_blocks); 89 | printf("fs`%s`f_ffree\tL\t%llu\n", mnt.mnt_mountp, buf.f_ffree); 90 | printf("fs`%s`f_favail\tL\t%llu\n", mnt.mnt_mountp, buf.f_favail); 91 | } 92 | } 93 | } 94 | exit(0); 95 | } 96 | -------------------------------------------------------------------------------- /plugins/illumos/src/ipmi.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 
3 | 4 | static void print_line(char *name, char *name2, char *val) { 5 | 6 | char *typename = NULL, *endptr, newval[128]; 7 | (void)strtod(val, &endptr); 8 | if(*endptr == '\0') typename = "n"; 9 | if(NULL == typename) { 10 | long foo, base = 10; 11 | if(val[0] == '0' && (val[1] == 'x' || val[1] == 'X')) { 12 | val += 2; 13 | base = 16; 14 | } else if (val[0] == '0') { 15 | base = 8; 16 | } 17 | foo = strtol(val, &endptr, base); 18 | if(*endptr == '\0') { 19 | typename = "l"; 20 | snprintf(newval, sizeof(newval), "%ld", foo); 21 | val = newval; 22 | } 23 | } 24 | if(NULL == typename) typename = "s"; 25 | printf("%s:%s\t%s\t%s\n", name, name2, typename, val); 26 | } 27 | static char *trim_clean(char *str) { 28 | char *cp; 29 | while(*str && isspace(*str)) str++; 30 | cp = str; 31 | while(*cp) { 32 | if(*cp == '\t') *cp = ' '; 33 | cp++; 34 | } 35 | while(--cp > str) { 36 | if(isspace(*cp)) *cp = '\0'; 37 | else break; 38 | } 39 | return str; 40 | } 41 | int main() { 42 | FILE *output; 43 | char buf[256]; 44 | output = popen("/usr/sbin/ipmitool sensor", "r"); 45 | while(NULL != fgets(buf, sizeof(buf), output)) { 46 | char *col1, *col2, *col3, *col4, *cp; 47 | cp = buf; 48 | #define EAT_COLUMN(cvar, cp) do { \ 49 | cvar = cp; \ 50 | while(*cp && *cp != '|') cp++; \ 51 | if(!cp) continue; \ 52 | *cp++ = '\0'; \ 53 | } while(0) 54 | EAT_COLUMN(col1, cp); 55 | EAT_COLUMN(col2, cp); 56 | EAT_COLUMN(col3, cp); 57 | EAT_COLUMN(col4, cp); 58 | col1 = trim_clean(col1); 59 | col2 = trim_clean(col2); 60 | col4 = trim_clean(col4); 61 | cp = col1; 62 | while(*cp) { 63 | if(isspace(*cp)) *cp = ':'; 64 | cp++; 65 | } 66 | print_line(col1, "value", col2); 67 | print_line(col1, "status", col4); 68 | } 69 | pclose(output); 70 | return 0; 71 | } 72 | -------------------------------------------------------------------------------- /plugins/illumos/src/swap.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 
#include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | void bail(const char *err) { 15 | fprintf(stderr, "fatal: %s\n", err); 16 | exit(-1); 17 | } 18 | 19 | int main() { 20 | int num, i; 21 | struct swaptable *st; 22 | struct swapent *swapent; 23 | struct stat64 statbuf; 24 | char *path; 25 | char fullpath[MAXPATHLEN]; 26 | 27 | if((num = swapctl(SC_GETNSWP, NULL)) == -1) { 28 | bail("Failed to get swap devices"); 29 | } 30 | if(num == 0) return 0; 31 | 32 | st = malloc(num * sizeof(swapent_t) + sizeof(int)); 33 | if(st == NULL) bail("malloc failure"); 34 | path = malloc(num * MAXPATHLEN); 35 | if(path == NULL) bail("malloc failure"); 36 | swapent = st->swt_ent; 37 | for(i = 0; i < num; i++, swapent++) { 38 | swapent->ste_path = path; 39 | path += MAXPATHLEN; 40 | } 41 | 42 | st->swt_n = num; 43 | if((num = swapctl(SC_LIST, st)) == -1) { 44 | bail("failed to list swap devices"); 45 | } 46 | 47 | int diskblks_per_page = (int)(sysconf(_SC_PAGESIZE) >> DEV_BSHIFT); 48 | unsigned long long total_bytes = 0, free_bytes = 0; 49 | for(swapent = st->swt_ent, i = 0; i < num; i++, swapent++) { 50 | if(*swapent->ste_path != '/') 51 | snprintf(fullpath, sizeof(fullpath), "/dev/%s", swapent->ste_path); 52 | else 53 | snprintf(fullpath, sizeof(fullpath), "%s", swapent->ste_path); 54 | if(stat64(fullpath, &statbuf) == 0) { 55 | total_bytes += swapent->ste_pages * diskblks_per_page * DEV_BSIZE; 56 | printf("swap`%s`total\tL\t%llu\n", swapent->ste_path, 57 | swapent->ste_pages * diskblks_per_page * DEV_BSIZE); 58 | free_bytes += swapent->ste_free * diskblks_per_page * DEV_BSIZE; 59 | printf("swap`%s`free\tL\t%llu\n", swapent->ste_path, 60 | swapent->ste_free * diskblks_per_page * DEV_BSIZE); 61 | } 62 | } 63 | printf("swap`total\tL\t%llu\n", total_bytes); 64 | printf("swap`free\tL\t%llu\n", free_bytes); 65 | return 0; 66 | } 67 | -------------------------------------------------------------------------------- 
/plugins/illumos/src/zpoolio.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | /* 9 | * NOTE: libzfs is an unstable interface. 10 | * This module may or may not work on your illumos distribution/version. 11 | * Compile with: gcc -lzfs -lnvpair zpoolio.c -o zpoolio 12 | */ 13 | 14 | int print_stats(zpool_handle_t *zhp, void *data) { 15 | uint_t c; 16 | boolean_t missing; 17 | 18 | nvlist_t *nv, *config; 19 | vdev_stat_t *vs; 20 | 21 | if (zpool_refresh_stats(zhp, &missing) != 0) 22 | return (1); 23 | 24 | config = zpool_get_config(zhp, NULL); 25 | 26 | if (nvlist_lookup_nvlist(config, 27 | ZPOOL_CONFIG_VDEV_TREE, &nv) != 0) { 28 | return 2; 29 | } 30 | 31 | if (nvlist_lookup_uint64_array(nv, 32 | ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) != 0) { 33 | return 3; 34 | } 35 | 36 | if (!data || strcmp(zpool_get_name(zhp),data) == 0) { 37 | printf("%s`%s\tL\t%llu\n", zpool_get_name(zhp), "read_ops", vs->vs_ops[ZIO_TYPE_READ]); 38 | printf("%s`%s\tL\t%llu\n", zpool_get_name(zhp), "write_ops", vs->vs_ops[ZIO_TYPE_WRITE]); 39 | printf("%s`%s\tL\t%llu\n", zpool_get_name(zhp), "read_bytes", vs->vs_bytes[ZIO_TYPE_READ]); 40 | printf("%s`%s\tL\t%llu\n", zpool_get_name(zhp), "write_bytes", vs->vs_bytes[ZIO_TYPE_WRITE]); 41 | printf("%s`%s\tL\t%llu\n", zpool_get_name(zhp), "used_space", vs->vs_alloc); 42 | printf("%s`%s\tL\t%llu\n", zpool_get_name(zhp), "free_space", vs->vs_space - vs->vs_alloc); 43 | } 44 | return 0; 45 | } 46 | 47 | int main(int argc, char *argv[]) { 48 | libzfs_handle_t *g_zfs; 49 | g_zfs = libzfs_init(); 50 | if (argc > 1) { 51 | return(zpool_iter(g_zfs, print_stats, argv[1])); 52 | } else { 53 | return(zpool_iter(g_zfs, print_stats, NULL)); 54 | } 55 | } 56 | 57 | -------------------------------------------------------------------------------- /plugins/illumos/tcp.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | DIR=`dirname $0` 3 | if [ -d $DIR/illumos ]; then . $DIR/illumos/lib/kstat.lib 4 | else . $DIR/lib/kstat.lib 5 | fi 6 | 7 | if [ "`/usr/bin/zonename`" = global ]; then 8 | _kstat -m tcp -n tcp -i 0 -- cut -f1,4- -d: 9 | else 10 | _kstat -m tcp -n tcp -- cut -f1,4- -d: 11 | fi 12 | -------------------------------------------------------------------------------- /plugins/illumos/udp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | DIR=`dirname $0` 3 | if [ -d $DIR/illumos ]; then . $DIR/illumos/lib/kstat.lib 4 | else . $DIR/lib/kstat.lib 5 | fi 6 | 7 | if [ "`/usr/bin/zonename`" = global ]; then 8 | _kstat -m udp -n udp -i 0 -- cut -f1,4- -d: 9 | else 10 | _kstat -m udp -n udp -- cut -f1,4- -d: 11 | fi 12 | -------------------------------------------------------------------------------- /plugins/illumos/vminfo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | DIR=`dirname $0` 3 | if [ -d $DIR/illumos ]; then . $DIR/illumos/lib/kstat.lib 4 | else . $DIR/lib/kstat.lib 5 | fi 6 | 7 | physmem=`_kstat_val :::physmem` 8 | pagesfree=`_kstat_val :::pagesfree` 9 | pagesused=$(($physmem-$pagesfree)) 10 | pagesize=`pagesize` 11 | a=$(($physmem * $pagesize)) 12 | b=$(($pagesused * $pagesize)) 13 | mem_perc=`printf "%s\n" "scale = 2; $b/$a" | bc ` 14 | 15 | printf "mempercent_used\tn\t%0.2f\n" $mem_perc 16 | 17 | 18 | _kstat -n vminfo 19 | -------------------------------------------------------------------------------- /plugins/illumos/vnic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | DIR=`dirname $0` 3 | if [ -d $DIR/illumos ]; then . $DIR/illumos/lib/kstat.lib 4 | else . 
$DIR/lib/kstat.lib 5 | fi 6 | 7 | _kstat -m vnic 8 | -------------------------------------------------------------------------------- /plugins/illumos/zfsinfo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | DIR=`dirname $0` 3 | if [ -d $DIR/illumos ]; then . $DIR/illumos/lib/kstat.lib 4 | else . $DIR/lib/kstat.lib 5 | fi 6 | 7 | _kstat -m zfs 8 | _kstat -m unix -n vopstats_zfs 9 | -------------------------------------------------------------------------------- /plugins/illumos/zone_vfs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | DIR=`dirname $0` 3 | if [ -d $DIR/illumos ]; then . $DIR/illumos/lib/kstat.lib 4 | else . $DIR/lib/kstat.lib 5 | fi 6 | 7 | if [ "`/usr/bin/zonename`" = global ]; then 8 | _kstat -m zone_vfs 9 | else 10 | _kstat -m zone_vfs -- cut -f1,4- -d: 11 | fi 12 | -------------------------------------------------------------------------------- /plugins/linux/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | (cd src && make) 3 | (cd ../common && make) 4 | (cd bccbpf && make) 5 | -------------------------------------------------------------------------------- /plugins/linux/bccbpf/Makefile: -------------------------------------------------------------------------------- 1 | CC=gcc 2 | 3 | all: bpf.elf iolatency.elf 4 | 5 | %.elf : %.c 6 | $(CC) $< -o $@ 7 | -------------------------------------------------------------------------------- /plugins/linux/bccbpf/bpf.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | // 5 | // Binary setuid wrapper for bpf.lua 6 | // 7 | // We can't use setuid on scripts directly. 8 | // 9 | int main() { 10 | int rc = setuid(0); 11 | if(rc) { 12 | fprintf(stderr, "Privilege escalation failed. 
Is the setuid bit set (chmod u+s bcc.elf)?\n"); 13 | return -1; 14 | } 15 | putenv("LUA_PATH=/opt/circonus/nad/etc/node-agent.d/linux/bccbpf/lua/?.lua"); 16 | system("/opt/circonus/nad/etc/node-agent.d/linux/bccbpf/bpf.lua"); 17 | return 0; 18 | } 19 | -------------------------------------------------------------------------------- /plugins/linux/bccbpf/bpf.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bcc-lua 2 | 3 | local ffi = require("ffi") 4 | local json = require("dkjson") 5 | local bpf_preamble = require("circll").text 6 | 7 | local mods = { 8 | runq = require("mod_runqlat"), 9 | syscall = require("mod_syscall"), 10 | bio = require("mod_bio"), 11 | } 12 | local INTERVAL = tonumber(arg[1]) or 60 13 | 14 | ffi.cdef "unsigned int sleep(unsigned int seconds);" 15 | 16 | local function submit_nad(metrics) 17 | io.stdout:write(json.encode(metrics)) 18 | io.stdout:write("\n\n") 19 | io.stdout:flush() 20 | end 21 | 22 | return function(BPF) 23 | -- submit an empty record, so that we don't block nad on stratup 24 | submit_nad({}) 25 | 26 | local BPF_TEXT = bpf_preamble 27 | for mod_name, mod in pairs(mods) do 28 | BPF_TEXT = BPF_TEXT .. mod.text .. "\n" 29 | end 30 | 31 | local bpf = BPF:new{ text=BPF_TEXT, debug=0 } 32 | 33 | for mod_name, mod in pairs(mods) do 34 | mod:init(bpf) 35 | end 36 | 37 | -- output 38 | while(true) do 39 | ffi.C.sleep(INTERVAL) 40 | local metrics = {} 41 | for mod_name, mod in pairs(mods) do 42 | for metric_name, val in pairs(mod:pull()) do 43 | metrics[mod_name .. '`' .. 
metric_name] = val 44 | end 45 | end 46 | submit_nad(metrics) 47 | end 48 | end 49 | -------------------------------------------------------------------------------- /plugins/linux/bccbpf/iolatency.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | // 5 | // Binary setuid wrapper for iolatency.py 6 | // 7 | // We can't use setuid on scripts directly. 8 | // 9 | int main() { 10 | int rc = setuid(0); 11 | if(rc) { 12 | fprintf(stderr, "Privilege escalation failed. Is the setuid bit set (chmod u+s iolatency.elf)?\n"); 13 | return -1; 14 | } 15 | system("/opt/circonus/nad/etc/node-agent.d/linux/bccbpf/iolatency.py"); 16 | return 0; 17 | } 18 | -------------------------------------------------------------------------------- /plugins/linux/bccbpf/iolatency.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """Emits io latency as circonus histogram in json""" 3 | # pylint: disable=C0301 4 | 5 | # This program was created as a modification to Brendan Gregg's 6 | # biolatency script. 
7 | 8 | from __future__ import print_function 9 | import json 10 | import sys 11 | from time import sleep 12 | 13 | from bcc import BPF # pylint: disable=E0401 14 | 15 | # define BPF program 16 | BPF_TEXT = """ 17 | #include 18 | #include 19 | 20 | typedef struct disk_key { 21 | char disk[DISK_NAME_LEN]; 22 | u64 slot; 23 | } disk_key_t; 24 | 25 | BPF_HASH(start, struct request *); 26 | BPF_HASH(dist, disk_key_t); 27 | 28 | // time block I/O 29 | int trace_req_start(struct pt_regs *ctx, struct request *req) { 30 | u64 ts = bpf_ktime_get_ns(); 31 | start.update(&req, &ts); 32 | return 0; 33 | } 34 | 35 | #define LLN() if(v > 100) { exp++; v /= 10; } else goto good; 36 | #define LLN2() LLN() LLN() 37 | #define LLN4() LLN2() LLN2() 38 | #define LLN8() LLN4() LLN4() 39 | #define LLN16() LLN8() LLN8() 40 | #define LLN32() LLN16() LLN16() 41 | #define LLN64() LLN32() LLN32() 42 | #define LLN128() LLN64() LLN64() 43 | 44 | static unsigned int bpf_circll(unsigned long v) { 45 | int exp = 1; 46 | if(v == 0) return 0; 47 | if(v < 10) return (v*10 << 8) | exp; 48 | LLN128() 49 | if(v > 100) return 0xff00; 50 | good: 51 | return (v << 8) | (exp & 0xff); 52 | } 53 | 54 | // output 55 | int trace_req_completion(struct pt_regs *ctx, struct request *req) { 56 | u64 *old, *tsp, delta, zero = 0; 57 | 58 | // fetch timestamp and calculate delta 59 | tsp = start.lookup(&req); 60 | if (tsp == 0) { 61 | return 0; // missed issue 62 | } 63 | delta = bpf_ktime_get_ns() - *tsp; 64 | delta /= 1000; 65 | 66 | // store as histogram 67 | disk_key_t key = {.slot = bpf_circll(delta)}; 68 | bpf_probe_read(&key.disk, sizeof(key.disk), req->rq_disk->disk_name); 69 | old = dist.lookup_or_init(&key, &zero); 70 | (*old)++; 71 | memcpy(key.disk, "sd", 3); 72 | old = dist.lookup_or_init(&key, &zero); 73 | (*old)++; 74 | start.delete(&req); 75 | return 0; 76 | } 77 | """ 78 | 79 | def main(): 80 | # don't block the nad process at startup 81 | print("\n") 82 | 83 | # load BPF program 84 | bpf = 
BPF(text=BPF_TEXT) 85 | 86 | bpf.attach_kprobe(event="blk_start_request", fn_name="trace_req_start") 87 | bpf.attach_kprobe(event="blk_mq_start_request", fn_name="trace_req_start") 88 | bpf.attach_kprobe(event="blk_account_io_completion", fn_name="trace_req_completion") 89 | 90 | # output 91 | interval = 60 92 | dist = bpf.get_table("dist") 93 | while 1: 94 | try: 95 | sleep(int(interval)) 96 | except KeyboardInterrupt: 97 | exit() 98 | 99 | metrics = {} 100 | for key, val in dist.items(): 101 | # print(" [%-8s (%f)] %d" % (key.disk, ((0xff & (key.slot >> 8)) / 10.0) * 10.0 ** (key.slot & 0xff), val.value)) 102 | dsk = key.disk 103 | bkt = ((0xff & (key.slot >> 8)) / 10.0) * 10.0 ** (key.slot & 0xff) 104 | cnt = val.value 105 | 106 | if dsk not in metrics: 107 | metrics[dsk] = {'_type': 'n', '_value': []} 108 | 109 | metrics[dsk]['_value'].append('H[%.2f]=%d' % (bkt, cnt)) 110 | 111 | if len(metrics) > 0: 112 | print(json.dumps(metrics), '\n') 113 | sys.stdout.flush() 114 | 115 | dist.clear() 116 | 117 | if __name__ == "__main__": 118 | main() 119 | -------------------------------------------------------------------------------- /plugins/linux/bccbpf/lua/circll.lua: -------------------------------------------------------------------------------- 1 | -- 2 | -- BPF circllhist helper 3 | -- 4 | 5 | local circll = {} 6 | 7 | circll.text = [[ 8 | 9 | typedef struct { 10 | s8 val; 11 | s8 exp; 12 | } circll_bin_t; 13 | 14 | #define LLN() if(v > 100) { exp++; v /= 10; } else goto good; 15 | #define LLN2() LLN() LLN() 16 | #define LLN4() LLN2() LLN2() 17 | #define LLN8() LLN4() LLN4() 18 | #define LLN16() LLN8() LLN8() 19 | #define LLN32() LLN16() LLN16() 20 | #define LLN64() LLN32() LLN32() 21 | #define LLN128() LLN64() LLN64() 22 | 23 | static circll_bin_t circll_bin(u64 v, s8 exp_offset) { 24 | s8 exp = 1; 25 | if(v == 0) return (circll_bin_t) {.val = 0, .exp = 0 + exp_offset}; 26 | if(v < 10) return (circll_bin_t) {.val = v*10, .exp = 1 + exp_offset}; 27 | LLN128() 28 
| if(v > 100) return (circll_bin_t) {.val = -1, .exp = 0 + exp_offset}; 29 | good: 30 | return (circll_bin_t) {.val = v, .exp = exp + exp_offset}; 31 | } 32 | ]] 33 | 34 | circll.bin = function(circll_bin) 35 | return circll_bin.val * 10.0 ^ (circll_bin.exp - 1) 36 | end 37 | 38 | -- this should really be in bcc 39 | circll.clear = function(hash) 40 | -- don't interate over hash table we are mutating 41 | local keys = {} 42 | for k,v in hash:items() do 43 | keys[#keys+1] = k 44 | end 45 | for _,k in ipairs(keys) do 46 | hash:delete(k) 47 | end 48 | end 49 | 50 | local mt_hist = { 51 | __index = { 52 | add = function(self, slot, val, exp_offset) 53 | local bin = circll.bin(slot, exp_offset) 54 | local cnt = tonumber(val) 55 | self._value[#(self._value) + 1] = string.format("H[%.1e]=%d", bin, cnt) 56 | return self 57 | end, 58 | } 59 | } 60 | 61 | circll.hist = function() 62 | return setmetatable({ _type = "n", _value = {} }, mt_hist) 63 | end 64 | 65 | return circll 66 | -------------------------------------------------------------------------------- /plugins/linux/bccbpf/lua/mod_bio.lua: -------------------------------------------------------------------------------- 1 | -- 2 | -- iolatency 3 | -- 4 | -- based on biolatency.py 5 | -- 6 | -- Metric 'sd' contains comulative statistics for all block devices 7 | -- 8 | 9 | local ffi = require("ffi") 10 | local circll = require("circll") 11 | 12 | return { 13 | text = [[ 14 | #include 15 | #include 16 | 17 | typedef struct disk_key { 18 | char disk[DISK_NAME_LEN]; 19 | circll_bin_t bin; 20 | } disk_key_t; 21 | 22 | BPF_HASH(iolat_start, struct request *); 23 | BPF_HASH(iolat_dist, disk_key_t); 24 | BPF_HASH(iosize_dist, disk_key_t); 25 | 26 | // time block I/O 27 | int trace_req_iolat_start(struct pt_regs *ctx, struct request *req) { 28 | u64 ts = bpf_ktime_get_ns(); 29 | iolat_start.update(&req, &ts); 30 | return 0; 31 | } 32 | 33 | // output 34 | int trace_req_completion(struct pt_regs *ctx, struct request *req) { 35 
| u64 *old, *tsp, delta, size, zero = 0; 36 | 37 | // fetch timestamp and calculate delta 38 | tsp = iolat_start.lookup(&req); 39 | if (tsp == 0) { 40 | return 0; // missed issue 41 | } 42 | delta = bpf_ktime_get_ns() - *tsp; 43 | size = req->__data_len / 1024; 44 | 45 | // store as histogram 46 | disk_key_t lat_key = {.bin = circll_bin(delta, -9)}; 47 | disk_key_t size_key = {.bin = circll_bin(size, 0)}; 48 | 49 | // 1) current disk 50 | bpf_probe_read(&lat_key.disk, sizeof(lat_key.disk), req->rq_disk->disk_name); 51 | bpf_probe_read(&size_key.disk, sizeof(size_key.disk), req->rq_disk->disk_name); 52 | old = iolat_dist.lookup_or_init(&lat_key, &zero); (*old)++; 53 | old = iosize_dist.lookup_or_init(&size_key, &zero); (*old)++; 54 | 55 | // 2) aggregated disk 56 | memcpy(lat_key.disk, "sd", 3); 57 | memcpy(size_key.disk, "sd", 3); 58 | old = iolat_dist.lookup_or_init(&lat_key, &zero); (*old)++; 59 | old = iosize_dist.lookup_or_init(&size_key, &zero); (*old)++; 60 | 61 | // cleanup 62 | iolat_start.delete(&req); 63 | return 0; 64 | } 65 | ]], 66 | 67 | init = function(self, bpf) 68 | bpf:attach_kprobe{event="blk_start_request", fn_name="trace_req_iolat_start"} 69 | bpf:attach_kprobe{event="blk_mq_start_request", fn_name="trace_req_iolat_start"} 70 | bpf:attach_kprobe{event="blk_account_io_completion", fn_name="trace_req_completion"} 71 | self.pipe_lat = bpf:get_table("iolat_dist") 72 | self.pipe_size = bpf:get_table("iosize_dist") 73 | end, 74 | 75 | pull = function(self) 76 | local metrics = {} 77 | for k, v in self.pipe_lat:items() do 78 | local m = "latency`" .. ffi.string(k.disk) 79 | metrics[m] = metrics[m] or circll.hist() 80 | metrics[m]:add(k.bin, v) 81 | end 82 | circll.clear(self.pipe_lat) 83 | for k, v in self.pipe_size:items() do 84 | local m = "size`" .. 
ffi.string(k.disk) 85 | metrics[m] = metrics[m] or circll.hist() 86 | metrics[m]:add(k.bin, v) 87 | end 88 | circll.clear(self.pipe_size) 89 | return metrics 90 | end, 91 | } 92 | -------------------------------------------------------------------------------- /plugins/linux/bccbpf/lua/mod_runqlat.lua: -------------------------------------------------------------------------------- 1 | local circll = require("circll") 2 | 3 | local BPF_TEXT = [[ 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | typedef struct pid_key { 10 | u64 id; // work around 11 | u64 slot; 12 | } pid_key_t; 13 | 14 | typedef struct pidns_key { 15 | u64 id; // work around 16 | u64 slot; 17 | } pidns_key_t; 18 | 19 | BPF_HASH(runqlat_start, u32); 20 | BPF_HASH(runqlat_dist, circll_bin_t); 21 | 22 | struct rq; 23 | 24 | // record enqueue timestamp 25 | int trace_enqueue(struct pt_regs *ctx, struct rq *rq, struct task_struct *p, 26 | int flags) 27 | { 28 | u32 tgid = p->tgid; 29 | u32 pid = p->pid; 30 | u64 ts = bpf_ktime_get_ns(); 31 | runqlat_start.update(&pid, &ts); 32 | return 0; 33 | } 34 | 35 | // calculate latency 36 | int trace_run(struct pt_regs *ctx, struct task_struct *prev) 37 | { 38 | u32 pid, tgid; 39 | 40 | // ivcsw: treat like an enqueue event and store timestamp 41 | if (prev->state == TASK_RUNNING) { 42 | tgid = prev->tgid; 43 | pid = prev->pid; 44 | u64 ts = bpf_ktime_get_ns(); 45 | runqlat_start.update(&pid, &ts); 46 | } 47 | 48 | tgid = bpf_get_current_pid_tgid() >> 32; 49 | pid = bpf_get_current_pid_tgid(); 50 | u64 *tsp, delta; 51 | 52 | // fetch timestamp and calculate delta 53 | tsp = runqlat_start.lookup(&pid); 54 | if (tsp == 0) { 55 | return 0; // missed enqueue 56 | } 57 | delta = bpf_ktime_get_ns() - *tsp; 58 | 59 | // store as histogram 60 | u64 *val, zero = 0; 61 | circll_bin_t key = circll_bin(delta, -9); 62 | val = runqlat_dist.lookup_or_init(&key, &zero); 63 | (*val)++; 64 | 65 | runqlat_start.delete(&pid); 66 | return 0; 67 | } 68 | ]] 69 | 70 | return 
{ 71 | 72 | text = BPF_TEXT, 73 | 74 | init = function(self, bpf) 75 | for event in io.open("/sys/kernel/debug/tracing/available_filter_functions"):lines() do 76 | if event:match("enqueue_task_.*") then 77 | bpf:attach_kprobe { event=event, fn_name="trace_enqueue" } 78 | end 79 | end 80 | bpf:attach_kprobe { event="finish_task_switch", fn_name="trace_run" } 81 | self.pipe = bpf:get_table("runqlat_dist") 82 | end, 83 | 84 | pull = function(self) 85 | local hist = circll.hist() 86 | for k,v in self.pipe:items() do 87 | hist:add(k, v) 88 | end 89 | circll.clear(self.pipe) 90 | return { latency = hist } 91 | end 92 | } 93 | -------------------------------------------------------------------------------- /plugins/linux/cpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # CPU usage statistics, via /proc/stat. See proc(5). 4 | 5 | # Print ordinary metrics 6 | print_cssum() { 7 | printf "%s\tL\t%s\n" $1 $2 8 | } 9 | 10 | # Print metrics normalized to a single CPU and 100 Hz tick rate 11 | print_norm_cssum() { 12 | let norm_val="($2 / $NCPUS) / $NORMHZ" 13 | printf "%s\tL\t%s\n" $1 $norm_val 14 | } 15 | 16 | if [[ -x /usr/bin/nproc ]]; then 17 | NCPUS=$(/usr/bin/nproc) 18 | else 19 | NCPUS=$(/bin/grep -c ^processor /proc/cpuinfo) 20 | fi 21 | 22 | # System tick rate, and divisor to normalize to 100 Hz 23 | TICKHZ=`getconf CLK_TCK` 24 | NORMHZ=`expr $TICKHZ / 100` 25 | 26 | # Kernel version madness. Number of columns in cpu line changed several times. 27 | OSREV=`/bin/uname -r` 28 | KVER=($(echo ${OSREV%%-*} | tr "." 
"\n")) 29 | 30 | ALLCPU=($(awk '$1 == "cpu" { print }' /proc/stat)) 31 | CPU_USER_NORMAL=${ALLCPU[1]} 32 | CPU_USER_NICE=${ALLCPU[2]} 33 | CPU_SYS=${ALLCPU[3]} 34 | CPU_IDLE_NORMAL=${ALLCPU[4]} 35 | CPU_WAIT_IO=${ALLCPU[5]} 36 | CPU_IRQ=${ALLCPU[6]} 37 | CPU_SOFTIRQ=${ALLCPU[7]} 38 | CPU_STEAL=${ALLCPU[8]} 39 | 40 | if [[ ${KVER[0]} > 2 || ("${KVER[0]}" == "2" && ${KVER[2]} -ge 24) ]]; then 41 | CPU_GUEST=${ALLCPU[9]} 42 | else 43 | CPU_GUEST=0 44 | fi 45 | 46 | if [[ ${KVER[0]} > 2 || ("${KVER[0]}" == "2" && ${KVER[2]} -ge 33) ]]; then 47 | CPU_GUEST_NICE=${ALLCPU[10]} 48 | else 49 | CPU_GUEST_NICE=0 50 | fi 51 | 52 | # Summarize interrupts 53 | let CPU_INTR=$CPU_IRQ+$CPU_SOFTIRQ 54 | 55 | # Summarize kernel time 56 | # 57 | # "guest" and "guest_nice" are time spent running virtual CPUs, and count as 58 | # kernel time 59 | let CPU_KERNEL=$CPU_SYS+$CPU_GUEST+$CPU_GUEST_NICE 60 | 61 | # Summarize idle time 62 | # 63 | # "steal" is time while we, a guest, are runnable but a real CPU isn't 64 | # servicing our virtual CPU 65 | let CPU_IDLE=$CPU_IDLE_NORMAL+$CPU_STEAL 66 | 67 | # Summarize user time 68 | let CPU_USER=$CPU_USER_NORMAL+$CPU_USER_NICE 69 | 70 | # Context switches 71 | CTXT=($(awk '$1 == "ctxt" { print $2 }' /proc/stat)) 72 | 73 | # Linux does not provide a metric for total syscalls 74 | SYSCALL='[[null]]' 75 | 76 | print_norm_cssum user $CPU_USER 77 | print_norm_cssum user\`normal $CPU_USER_NORMAL 78 | print_norm_cssum user\`nice $CPU_USER_NICE 79 | print_norm_cssum kernel $CPU_KERNEL 80 | print_norm_cssum kernel\`sys $CPU_SYS 81 | print_norm_cssum kernel\`guest $CPU_GUEST 82 | print_norm_cssum kernel\`guest_nice $CPU_GUEST_NICE 83 | print_norm_cssum idle $CPU_IDLE 84 | print_norm_cssum idle\`normal $CPU_IDLE_NORMAL 85 | print_norm_cssum idle\`steal $CPU_STEAL 86 | print_norm_cssum wait_io $CPU_WAIT_IO 87 | print_norm_cssum intr $CPU_INTR 88 | print_norm_cssum intr\`hard $CPU_IRQ 89 | print_norm_cssum intr\`soft $CPU_SOFTIRQ 90 | print_cssum 
context_switch $CTXT 91 | print_cssum syscall $SYSCALL 92 | 93 | # Process statistics 94 | while IFS=" " read NAME VAL 95 | do 96 | # METRIC { name="cpu`processes", desc="number of processes created since boot, e.g. by fork()/clone() syscalls" } 97 | [[ $NAME = processes ]] && print_cssum $NAME $VAL 98 | # Rename: procs_running -> procs_runnable, since that is what's reported (http://lxr.linux.no/linux+v2.6.29/kernel/sched.c#L2699) 99 | # METRIC { name="cpu`procs_runnable", desc="The number of processes currently in a runnable state." } 100 | [[ $NAME = procs_running ]] && print_cssum procs_runnable $VAL 101 | # METRIC { name="cpu`procs_blocked", desc="The number of processes currently blocked, waiting for I/O to complete" } 102 | [[ $NAME = procs_blocked ]] && print_cssum $NAME $VAL 103 | done < /proc/stat 104 | 105 | exit 0 106 | -------------------------------------------------------------------------------- /plugins/linux/disk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Block device I/O metrics 4 | # See Documentation/iostats.txt in the Linux kernel source. 5 | 6 | print_bdev() { 7 | printf "%s\`%s\tL\t%s\n" $1 $2 $3 8 | } 9 | 10 | DEVICES=($(/bin/ls /sys/block)) 11 | for bdev in "${DEVICES[@]}" ; do 12 | DEVSTAT=($(cat /sys/block/$bdev/stat)) 13 | 14 | # If physical sector size is discoverable, use it. 15 | # Otherwise, assume 512b. iostat always assumes 512b. 
16 | if [[ -f /sys/block/$bdev/queue/physical_block_size ]]; then 17 | SECSZ=$(cat /sys/block/$bdev/queue/physical_block_size) 18 | else 19 | SECSZ=512 20 | fi 21 | 22 | let R_BYTES=${DEVSTAT[2]}*$SECSZ 23 | let W_BYTES=${DEVSTAT[6]}*$SECSZ 24 | 25 | print_bdev $bdev reads ${DEVSTAT[0]} 26 | print_bdev $bdev writes ${DEVSTAT[4]} 27 | print_bdev $bdev nread $R_BYTES 28 | print_bdev $bdev nwritten $W_BYTES 29 | done 30 | -------------------------------------------------------------------------------- /plugins/linux/diskstats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | awk ' 4 | BEGIN { 5 | # define devices to exclude 6 | excluded_devices[0] = "^ram" 7 | excluded_devices[1] = "^loop" 8 | } 9 | function foo(device, mdmap, result) { 10 | for (ij in mdmap) { 11 | split(ij, xx, SUBSEP) 12 | if (xx[1] == device) { 13 | result[xx[2]] = 1 14 | } 15 | } 16 | } 17 | { 18 | if (NR == FNR) { 19 | FS = ":" 20 | $0=$0 21 | if (match($1, "md[0-9]+")) { 22 | mddevice = $1 23 | gsub(/ /, "", mddevice) 24 | gsub(/\[[0-9]+\]/, "", $2) 25 | split($2, devices, " ") 26 | dev_status[mddevice] = devices[1] 27 | for (i in devices) { 28 | # first two fields are status, type 29 | if (i > 2) { 30 | mdmap[mddevice,devices[i]] = 1 31 | } 32 | } 33 | } 34 | } else { 35 | FS = " " 36 | $0=$0 37 | 38 | # exclude devices 39 | for (i in excluded_devices) { 40 | if ($3 ~ excluded_devices[i]) { 41 | next 42 | } 43 | } 44 | 45 | rd_ms[$3] = $7 46 | wr_ms[$3] = $11 47 | io_in_progress[$3] = $12 48 | io_ms[$3] = $13 49 | 50 | if ($3 ~ /md/) { 51 | # initialize array 52 | split("", devices) 53 | foo($3, mdmap, devices) 54 | for (d in devices) { 55 | $7+= rd_ms[d] 56 | $11 += wr_ms[d] 57 | $12 += io_in_progress[d] 58 | $13 += io_ms[d] 59 | } 60 | } 61 | 62 | print $3"`rd_completed\tL\t"$4 63 | print $3"`rd_merged\tL\t"$5 64 | print $3"`rd_sectors\tL\t"$6 65 | print $3"`rd_ms\tL\t"$7 66 | print $3"`wr_completed\tL\t"$8 67 | print 
$3"`wr_merged\tL\t"$9 68 | print $3"`wr_sectors\tL\t"$10 69 | print $3"`wr_ms\tL\t"$11 70 | print $3"`io_in_progress\tL\t"$12 71 | print $3"`io_ms\tL\t"$13 72 | print $3"`io_ms_weighted\tL\t"$14 73 | } 74 | } 75 | ' /proc/mdstat /proc/diskstats 76 | -------------------------------------------------------------------------------- /plugins/linux/if.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Network interface statistics 4 | 5 | print_iface() { 6 | printf "%s\`%s\tL\t%s\n" $1 $2 $3 7 | } 8 | 9 | # If numbers get large, they can use up whitespace separating interface name 10 | # from first value. We don't want the ':' anyway, so we convert it to space. 11 | # We also don't care about the lo interface. 12 | DEVICES=( 13 | $( 14 | tr ':' ' ' < /proc/net/dev | \ 15 | awk '{ 16 | if(match($1, /^Inter/)) next; \ 17 | if($1 == "face") next; \ 18 | if($1 == "lo") next; \ 19 | print $1 }' 20 | ) 21 | ) 22 | 23 | for iface in "${DEVICES[@]}" ; do 24 | IFSTAT=($(grep $iface /proc/net/dev | tr ':' ' ')) 25 | # METRIC { name="if``in_bytes", desc="Number of received bytes", unit="byte" } 26 | print_iface $iface in_bytes ${IFSTAT[1]} 27 | # METRIC { name="if``in_packets", desc="Number of packets received" } 28 | print_iface $iface in_packets ${IFSTAT[2]} 29 | # METRIC { name="if``in_errors", desc="Number of bad packets received" } 30 | print_iface $iface in_errors ${IFSTAT[3]} 31 | # METRIC { name="if``in_drop", desc="Number of dropped packets due to lack of space in kernel buffers" } 32 | print_iface $iface in_drop ${IFSTAT[4]} 33 | # METRIC { name="if``in_fifo_overrun", desc="Number of fifo overrun errors" } 34 | print_iface $iface in_fifo_overrun ${IFSTAT[5]} 35 | 36 | # METRIC { name="if``out_bytes", desc="Number of tranmitted bytes", unit="byte" } 37 | print_iface $iface out_bytes ${IFSTAT[9]} 38 | # METRIC { name="if``out_packets", desc="Number of outgoing packets" } 39 | print_iface $iface out_packets 
${IFSTAT[10]} 40 | # METRIC { name="if``out_errors", desc="Number of errors that happend while transmitting packets" } 41 | print_iface $iface out_errors ${IFSTAT[11]} 42 | # METRIC { name="if``out_drop", desc="Number of dropped packets due to lack of space in kernel buffers" } 43 | print_iface $iface out_drop ${IFSTAT[12]} 44 | # METRIC { name="if``out_fifo_overrun", desc="Number of fifo overrun errors" } 45 | print_iface $iface out_fifo_overrun ${IFSTAT[13]} 46 | done 47 | 48 | # Read segment retransmitted from /proc/net/snmp 49 | # METRIC { name="if`tcp`segments_retransmitted", descr="Retransmitted tcp segments, systemwide" } 50 | let ROW=0 51 | while IFS=":" read HEAD TAIL 52 | do 53 | [[ $HEAD = "Tcp" ]] && let ROW+=1 54 | [[ $HEAD = "Tcp" ]] && [[ $ROW -gt 1 ]] && break 55 | done < /proc/net/snmp 56 | FIELDS=( $TAIL ) 57 | printf "%s\tL\t%s\n" tcp\'segments_retransmitted ${FIELDS[11]} 58 | 59 | # Connection Statistics from /proc/net/socstat{,6} 60 | # METRIC { name="if`tcp`connections", descr="Number of currently open TCP connections." } 61 | # It would be much better if this was a counter: "number of connections since boot", so we could calculate #con/sec, etc. 
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mntent.h>
#include <sys/statvfs.h>

// 2016-09-29T13:57:29Z add pstore - https://www.kernel.org/doc/Documentation/ABI/testing/pstore
/* Pseudo/virtual filesystem types that carry no useful capacity data. */
static const char *suppress_fstype[] = {
    "rootfs", "proc", "sysfs", "selinuxfs", "usbfs", "devpts", "devtmpfs",
    "binfmt_misc", "rpc_pipefs", "autofs", "debugfs", "securityfs", "fusectl",
    "cgroup", "configfs", "mqueue", "hugetlbfs", "fuse.gvfs-fuse-daemon", "xenfs",
    "pstore",
    NULL
};

/*
 * Walk /proc/mounts and emit statvfs(3) capacity metrics for every
 * real (non-suppressed) filesystem, plus df-style and exact used
 * percentages for blocks and inodes.
 */
int main(int argc, char **argv) {
    struct mntent mnt;
    char why_buf[1024];
    FILE *fp;

    (void)argc;
    (void)argv;

    fp = fopen("/proc/mounts", "r");
    if(!fp) {
        perror("fopen");
        exit(-1);
    }

    while(getmntent_r(fp, &mnt, why_buf, sizeof(why_buf)) != NULL) {
        struct statvfs buf;
        int i;

        for(i = 0; suppress_fstype[i] != NULL; i++)
            if(!strcmp(mnt.mnt_type, suppress_fstype[i])) break;

        if (suppress_fstype[i] == NULL && statvfs(mnt.mnt_dir, &buf) == 0) {
            long long unsigned int used = 0, adj = 0;
            double pct = 0, df_pct = 0;

            printf("%s`f_bsize\tL\t%llu\n", mnt.mnt_dir, (long long unsigned int) buf.f_bsize);
            printf("%s`f_frsize\tL\t%llu\n", mnt.mnt_dir, (long long unsigned int) buf.f_frsize);
            printf("%s`f_blocks\tL\t%llu\n", mnt.mnt_dir, (long long unsigned int) buf.f_blocks);
            printf("%s`f_bfree\tL\t%llu\n", mnt.mnt_dir, (long long unsigned int) buf.f_bfree);
            printf("%s`f_bavail\tL\t%llu\n", mnt.mnt_dir, (long long unsigned int) buf.f_bavail);
            printf("%s`f_files\tL\t%llu\n", mnt.mnt_dir, (long long unsigned int) buf.f_files);
            printf("%s`f_ffree\tL\t%llu\n", mnt.mnt_dir, (long long unsigned int) buf.f_ffree);
            printf("%s`f_favail\tL\t%llu\n", mnt.mnt_dir, (long long unsigned int) buf.f_favail);

            /* Block usage: df_pct rounds up like df(1); pct is exact.
             * `adj` excludes the root-reserved blocks (bfree - bavail). */
            pct = 0;
            df_pct = 0;
            if (buf.f_blocks > 0) {
                used = buf.f_blocks - buf.f_bfree;
                adj = buf.f_blocks - buf.f_bfree + buf.f_bavail;
                /* BUG FIX: adj can be 0 (e.g. f_bavail == 0 with
                 * f_bfree == f_blocks on some pseudo filesystems);
                 * the original divided by zero here. */
                if (adj > 0) {
                    df_pct = (used * 100) / adj + ((used * 100) % adj != 0);
                    pct = 100.0*(double)used/(double)adj;
                }
            }
            printf("%s`df_used_percent\tn\t%0.2f\n", mnt.mnt_dir, df_pct);
            printf("%s`used_percent\tn\t%0.2f\n", mnt.mnt_dir, pct);

            /* Inode usage, same pair of rounded/exact percentages. */
            pct = 0;
            df_pct = 0;
            if(buf.f_files > 0) {
                used = buf.f_files - buf.f_ffree;
                df_pct = (used * 100) / buf.f_files + ((used * 100) % buf.f_files != 0);
                pct = 100.0*(double)(buf.f_files - buf.f_ffree)/(double)buf.f_files;
            }
            printf("%s`df_used_inode_percent\tn\t%0.2f\n", mnt.mnt_dir, df_pct);
            printf("%s`used_inode_percent\tn\t%0.2f\n", mnt.mnt_dir, pct);
        }
    }
    fclose(fp);
    exit(0);
}
#!/usr/bin/env bash

# Emit memory/VM metrics from /proc/meminfo and /proc/vmstat in nad
# tab-delimited format: <name>\t<type>\t<value>

set -eu

# NOTE: under `set -e` a failing command substitution aborts the script
# before a separate `[[ $? -eq 0 ]]` test can run, so the error handler
# must be attached to the assignment itself.
AWK=$(type -P awk) || {
    echo "Unable to find 'awk'"
    exit 1
}

# print_vm <group> <name> <value> - one uint64 ('L') metric line
print_vm() {
    printf "%s\`%s\tL\t%s\n" $1 $2 $3
}

PROCFILE="/proc/meminfo"
[[ -f "$PROCFILE" ]] || {
    echo "Unable to find '${PROCFILE}'"
    exit 1
}

$AWK 'BEGIN {
    list[""] = 0;
}
{
    item=$1
    value=$2

    if ((idx = index(item, ":")) > 0) {
        item = substr(item, 1, idx - 1)
    }

    # awk strings are 1-indexed; substr(item, 0, 9) returns only the
    # first 8 characters ("HugePage"), so the comparison with the
    # 9-character "HugePages" never matched and HugePages counts were
    # wrongly scaled by 1024. Start at position 1.
    if (substr(item, 1, 9) == "HugePages") {
        list[item] = value          # plain page counts, no kB unit
    } else {
        list[item] = value * 1024   # /proc/meminfo reports kB; store bytes
    }
}
END {
    memTotal=list["MemTotal"]

    memFree=list["MemFree"]
    memBuffers=list["Buffers"]
    memCached=list["Cached"]
    memFreeTotal=(memFree + memBuffers + memCached)

    memUsed=memTotal - memFreeTotal
    memFreePct=memFreeTotal / memTotal * 100
    memUsedPct=memUsed / memTotal * 100

    swapTotal=list["SwapTotal"]
    swapFree=list["SwapFree"]
    swapUsed=swapTotal - swapFree
    if (swapTotal > 0) {
        swapFreePct=swapFree / swapTotal * 100
        swapUsedPct=swapUsed / swapTotal * 100
    } else {
        swapFreePct=0
        swapUsedPct=0
    }

    for (key in list) {
        if (key != "") {
            printf("meminfo`%s\tL\t%.0f\n", key, list[key])
        }
    }

    printf("memory`total\tL\t%.0f\n", memTotal)
    printf("memory`used\tL\t%.0f\n", memUsed)
    printf("memory`free\tL\t%.0f\n", memFreeTotal)
    printf("memory`percent_used\tn\t%.02f\n", memUsedPct / 100) # deprecated
    printf("memory`percent_free\tn\t%.02f\n", memFreePct / 100) # deprecated
    printf("memory`used_percent\tn\t%.02f\n", memUsedPct)
    printf("memory`free_percent\tn\t%.02f\n", memFreePct)
    printf("swap`total\tL\t%.0f\n", swapTotal)
    printf("swap`used\tL\t%.0f\n", swapUsed)
    printf("swap`free\tL\t%.0f\n", swapFree)
    printf("swap`percent_used\tn\t%.02f\n", swapUsedPct / 100) # deprecated
    printf("swap`percent_free\tn\t%.02f\n", swapFreePct / 100) # deprecated
    printf("swap`used_percent\tn\t%.02f\n", swapUsedPct)
    printf("swap`free_percent\tn\t%.02f\n", swapFreePct)
}' < $PROCFILE


PROCFILE="/proc/vmstat"
[[ -f "$PROCFILE" ]] || {
    echo "Unable to find '${PROCFILE}'"
    exit 1
}

PG_SCAN=0
PG_FAULTS=0
PG_MAJFAULTS=0
while IFS=" " read NAME VAL
do
    [[ "$NAME" = pgfault ]] && PG_FAULTS="$VAL"
    [[ "$NAME" = pgmajfault ]] && PG_MAJFAULTS="$VAL"
    [[ "$NAME" = pswp* ]] && print_vm vmstat $NAME $VAL
    [[ "$NAME" = pgscan* ]] && PG_SCAN=$(($PG_SCAN + $VAL))
done < $PROCFILE

# `let x=EXPR` exits nonzero when the computed value is 0, which would
# kill the script under `set -e`; use plain arithmetic expansion.
PG_MINFAULTS=$((PG_FAULTS - PG_MAJFAULTS))

print_vm info page_fault $PG_FAULTS
print_vm info page_fault\`minor $PG_MINFAULTS
print_vm info page_fault\`major $PG_MAJFAULTS
print_vm info page_scan $PG_SCAN
4 | 5 | 6 | ## default metrics 7 | 8 | * Aborted_clients 9 | * Aborted_connects 10 | * Bytes_received 11 | * Bytes_sent 12 | * Connections 13 | * Open_files 14 | * Qcache_hits 15 | * Qcache_inserts 16 | * Queries 17 | * Slow_queries 18 | * Table_locks_waited 19 | * Threads_connected 20 | * Threads_running 21 | * Uptime_since_flush_status 22 | 23 | 24 | ## config 25 | 26 | see [mysql-conf.sh](./mysql-conf.sh) 27 | -------------------------------------------------------------------------------- /plugins/mysql/mysql-conf.sh: -------------------------------------------------------------------------------- 1 | # 2 | # to use: 3 | # 4 | # cp mysql-conf.sh /opt/circonus/etc 5 | 6 | # --- edit and update variables --- 7 | # vi /opt/circonus/etc/mysql-conf.sh 8 | # 9 | # --- if nad daemon runs as 'nobody', do not leave world-readable --- 10 | # chgrp nobody /opt/circonus/etc/mysql-conf.sh 11 | # chmod 640 /opt/circonus/etc/mysql-conf.sh 12 | # 13 | # set credentials accordingly, if not set, auth is not used for 14 | # the account running the query (whatever id owns the nad process). 15 | # IOW, create and test a dedicated user for predictive results. 16 | # 17 | # a simple mysql CREATE USER is all that is required. no additional 18 | # GRANTs are needed for SHOW STATUS. 19 | # e.g. 20 | # $ mysql -e "CREATE USER 'nad'@'localhost' IDENTIFIED BY 'some_password'" -p 21 | # 22 | # note: additional grants are needed for replication status and 23 | # a different query for the replication stats beyond what 24 | # is in show status. 
25 | # 26 | 27 | # Options, uncomment and set accordingly: 28 | 29 | # MYSQL_USER="" 30 | 31 | # MYSQL_PASS="" 32 | 33 | # MYSQL_HOST='127.0.0.1' 34 | 35 | # MYSQL_PORT='3306' 36 | 37 | # MYSQL_METRICS=( \ 38 | # Aborted_clients \ 39 | # Aborted_connects \ 40 | # Bytes_received \ 41 | # Bytes_sent \ 42 | # Connections \ 43 | # Open_files \ 44 | # Qcache_hits \ 45 | # Qcache_inserts \ 46 | # Queries \ 47 | # Slow_queries \ 48 | # Table_locks_waited \ 49 | # Threads_connected \ 50 | # Threads_running \ 51 | # Uptime_since_flush_status 52 | #) 53 | 54 | ## END 55 | -------------------------------------------------------------------------------- /plugins/mysql/mysql.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | config_file='/opt/circonus/etc/mysql-conf.sh' 8 | [ -f $config_file ] && source $config_file 9 | 10 | user=${MYSQL_USER:-none} 11 | pass=${MYSQL_PASS:-none} 12 | host=${MYSQL_HOST:-127.0.0.1} 13 | port=${MYSQL_PORT:-3306} 14 | metrics=("${MYSQL_METRICS[@]:-default}") 15 | [ "${metrics}" == "default" ] && metrics=(Aborted_clients Aborted_connects \ 16 | Bytes_received Bytes_sent Connections Open_files Qcache_hits Qcache_inserts \ 17 | Queries Slow_queries Table_locks_waited Threads_connected Threads_running \ 18 | Uptime_since_flush_status) 19 | 20 | mysql=$(which mysql) 21 | 22 | mysql_opts="--batch --silent --host=${host} --port=${port}" 23 | [ "${user}" != "none" ] && mysql_opts="${mysql_opts} --user=${user}" 24 | [ "${pass}" != "none" ] && mysql_opts="${mysql_opts} --password=${pass}" 25 | 26 | # format the metric list (quoted strings separated by commas) 27 | function join { local IFS="${1}"; shift; echo "$*"; } 28 | var_list=$(join , $(printf "\"%s\" " "${metrics[@]}")) 29 | query="SHOW STATUS WHERE variable_name IN (${var_list})" 30 | 31 | echo $query | $mysql $mysql_opts | while read line; do 32 | printf "%s\tL\t%s\n" $line 
# No shebang; We assume something has found ruby for us
#
# Translate `ohai` JSON output into nad tab-delimited metric lines:
# <key>\t<type>[\t<value>]

require 'json'

# A more clever thing to do would be to run ohai as a gem, and interact intelligently
# require 'ohai'
# For today, cleverness--

# Emit one nad metric line; a nil value emits key and type only.
def puts_nad (key, type, value)
  if value.nil? then
    puts "#{key}\t#{type}"
  else
    puts "#{key}\t#{type}\t#{value}"
  end
end

# Dispatch one top-level ohai plugin result to its handler.
def handle_top_level(p, r)
  # this could be less dumb
  case p
  when *[
    # These are all simple scalars
    "uptime_seconds",
    "hostname",
    "fqdn",
    "domain",
    "os",
    "os_version",
    "platform",
    "platform_version",
    "platform_family",
    "ipaddress",
    "macaddress",
    "ohai_time",
  ]
    h_scalar(p,r)
  when *[
    # We ignore these explicitly
    "uptime",
    "command",
    "languages",
    "chef_packages",
    "keys",
    "current_user",
    "idletime",
    "idletime_seconds",
    "memory",       # Nad provides vm.sh for this
    "block_device",
    "dmi",
    "lsb",
    "virtualization",
    "network",      # Nad provides if.sh for this
    "counters",     # Nad provides if.sh for this
  ]

  when "filesystem"
    h_filesystem(p,r) # custom
  when "etc"
    h_etc(p,r) # custom
  when "cpu"
    h_cpu(p,r) # custom
  when "kernel"
    h_kernel(p,r) # custom
  else
    # Ignore if unknown
    # puts "Unknown ohai plugin #{p}"
  end
end

# Debug helper: pretty-print a plugin result.
def h_inspect(p,r)
  require 'pp'
  puts "----"
  puts p
  puts "----"
  pp r
end

# Map a Ruby scalar to a nad metric type code.
# Fixnum/Bignum were deprecated in Ruby 2.4 and removed in 3.2, raising
# NameError; Integer covers both.
def nad_type_scalar(v)
  if v.kind_of?(Integer) then
    return 'L'
  elsif v.kind_of?(Float) then
    return 'n'
  else
    return 's'
  end
end

def h_filesystem(p,r)
  r.each do |device, fs_details|
    next unless fs_details['mount']
    next unless fs_details['kb_size']

    puts_nad("filesystem`#{fs_details['mount']}`device",'s',device)
    fs_details.each do |key, value|
      next if (key == 'mount_options')
      next if (key == 'mount') # we invert this

      # Ohai reports these all as strings, boo
      if ['kb_size', 'kb_available', 'kb_used', 'percent_used'].include?(key) then
        value = value.sub('%', '').to_i
      end

      puts_nad("filesystem`#{fs_details['mount']}`#{key}",nad_type_scalar(value),value)
    end
  end
end

def h_etc(p,r)
  # Here we just list the count of users and groups
  puts_nad('etc`user_count', 'L', r['passwd'].keys.size)
  puts_nad('etc`group_count', 'L', r['group'].keys.size)
end

def h_cpu(p,r)
  # List cpu count, and model info for each
  puts_nad('cpu`total', 'L', r['total'])
  puts_nad('cpu`real', 'L', r['real'])
  r.select{|k,v| k.match(/^\d+$/)}.each do |cpu_id, details|
    puts_nad("cpu`#{cpu_id}`model_name", 's', r[cpu_id]['model_name'])
  end
end

def h_kernel(p,r)
  # List everything but the module list
  r.reject{|k,v| k == 'modules' }.each do |k,v|
    puts_nad("kernel`#{k}", 's', v)
  end
end

def h_scalar(p,r)
  puts_nad(p,nad_type_scalar(r),r)
end

def main
  ohai_json = `ohai`
  ohai_data = JSON.parse(ohai_json)

  ohai_data.each do |ohai_plugin, results|
    handle_top_level(ohai_plugin, results)
  end
end

main()
#!/bin/sh
#
# CPU usage statistics, via sysctl
#
# Modernized: POSIX $(...) command substitution instead of deprecated
# backticks, and shell arithmetic $(( )) instead of forking expr(1).
# bc(1) is kept for the fractional normalization math.

# Print ordinary metrics
print_cssum() {
	printf "%s\tL\t%s\n" $1 $2
}

# Print metrics normalized to a single CPU and 100Hz tick rate
print_norm_cssum() {
	per_cpu_count=$(($2 / NCPUS))
	rate_factor=$(echo "scale=2; 100/$STATHZ" | bc)
	value=$(echo "$per_cpu_count*$rate_factor" | bc)
	printf "%s\tL\t%.0f\n" $1 $value
}

NCPUS=$(sysctl -n hw.ncpu)
# stathz is the 15th whitespace-separated token of kern.clockrate output
STATHZ=$(sysctl -n kern.clockrate | awk '{ print $15 }')

ALLCPU=$(sysctl -n kern.cp_time)
CPU_USER_NORMAL=$(echo $ALLCPU | cut -d',' -f1)
CPU_USER_NICE=$(echo $ALLCPU | cut -d',' -f2)
CPU_SYS=$(echo $ALLCPU | cut -d',' -f3)
CPU_IRQ=$(echo $ALLCPU | cut -d',' -f4)
CPU_IDLE_NORMAL=$(echo $ALLCPU | cut -d',' -f5)

# Not implemented
CPU_WAIT_IO=0
CPU_STEAL=0
CPU_GUEST=0
CPU_GUEST_NICE=0
CPU_SOFTIRQ=0

# Summarize interrupts
CPU_INTR=$((CPU_IRQ + CPU_SOFTIRQ))

# Summarize kernel time
#
# "guest" and "guest_nice" are time spent running virtual CPUs, and count as
# kernel time
CPU_KERNEL=$((CPU_SYS + CPU_GUEST + CPU_GUEST_NICE))

# Summarize idle time
#
# "steal" is time while we, a guest, are runnable but a real CPU isn't
# servicing our virtual CPU
CPU_IDLE=$((CPU_IDLE_NORMAL + CPU_STEAL))

# Summarize user time
CPU_USER=$((CPU_USER_NORMAL + CPU_USER_NICE))

# Not implemented; these are not exposed via sysctl and would need to be
# accessed via a C library call. See uvm(9).
# Context switches
CTXT=0
# System calls
SYSCALL=0

print_norm_cssum user $CPU_USER
print_norm_cssum user\`normal $CPU_USER_NORMAL
print_norm_cssum user\`nice $CPU_USER_NICE
print_norm_cssum kernel $CPU_KERNEL
print_norm_cssum kernel\`sys $CPU_SYS
print_norm_cssum kernel\`guest $CPU_GUEST
print_norm_cssum kernel\`guest_nice $CPU_GUEST_NICE
print_norm_cssum idle $CPU_IDLE
print_norm_cssum idle\`normal $CPU_IDLE_NORMAL
print_norm_cssum idle\`steal $CPU_STEAL
print_norm_cssum wait_io $CPU_WAIT_IO
print_norm_cssum intr $CPU_INTR
print_norm_cssum intr\`hard $CPU_IRQ
print_norm_cssum intr\`soft $CPU_SOFTIRQ
print_cssum context_switch $CTXT
print_cssum syscall $SYSCALL
7 | 8 | # List of datalink interfaces, excluding local and inactive interfaces (*) 9 | ints=`netstat -n -i | awk '$1 !~ /(^lo[0-9]|\*)/ && $3 == "" { print $1 }'` 10 | 11 | for iface in $ints; do \ 12 | netstat -n -I $iface | grep Link | awk '{ 13 | if (NF == 8) { 14 | inp = $4; ine = $5; outp = $6; oute = $7; } 15 | if (NF == 9) { 16 | inp = $5; ine = $6; outp = $7; oute = $8; } 17 | printf("%s`in_packets\tL\t%d\n", $1, inp); 18 | printf("%s`in_errors\tL\t%d\n", $1, ine); 19 | printf("%s`out_packets\tL\t%d\n", $1, outp); 20 | printf("%s`out_errors\tL\t%d\n", $1, oute); 21 | inp = ""; ine = ""; outp = ""; oute = ""; 22 | }' 23 | netstat -n -I $iface -b | grep Link | awk '{ 24 | if (NF == 5) { 25 | inb = $4; outb = $5; } 26 | if (NF == 6) { 27 | inb = $5; outb = $6; } 28 | printf("%s`in_bytes\tL\t%d\n", $1, inb); 29 | printf("%s`out_bytes\tL\t%d\n", $1, outb); 30 | inb = ""; outb = ""; 31 | }' 32 | done 33 | -------------------------------------------------------------------------------- /plugins/openbsd/src/Makefile: -------------------------------------------------------------------------------- 1 | CC=cc 2 | CFLAGS= 3 | CPPFLAGS= 4 | 5 | all: ../fs.elf 6 | 7 | ../fs.elf: fs.c 8 | $(CC) $(CPPFLAGS) $(CFLAGS) -o $@ fs.c 9 | -------------------------------------------------------------------------------- /plugins/openbsd/src/fs.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | static const char *suppress_fstype[] = { 12 | "devfs", "autofs", "ctfs", "dev", "fd", "lofs", "mntfs", "objfs", "proc", NULL 13 | }; 14 | 15 | int main(int argc, char **argv) { 16 | int cnt, i, idx; 17 | struct statfs *stats; 18 | FILE *fp; 19 | 20 | cnt = getmntinfo(&stats, MNT_WAIT); 21 | if(cnt <= 0) { 22 | perror("getmntinfo"); 23 | exit(-1); 24 | } 25 | 26 | for(idx = 0; idxf_fstypename, suppress_fstype[i])) break; 31 | 32 | if 
(suppress_fstype[i] == NULL) { 33 | long long unsigned int used, adj; 34 | printf("%s`f_bsize\tL\t%ld\n", mnt->f_mntonname, mnt->f_bsize); 35 | printf("%s`f_blocks\tL\t%lu\n", mnt->f_mntonname, mnt->f_blocks); 36 | printf("%s`f_bfree\tL\t%lu\n", mnt->f_mntonname, mnt->f_bfree); 37 | printf("%s`f_bavail\tL\t%ld\n", mnt->f_mntonname, mnt->f_bavail); 38 | printf("%s`f_files\tL\t%lu\n", mnt->f_mntonname, mnt->f_blocks); 39 | printf("%s`f_ffree\tL\t%ld\n", mnt->f_mntonname, mnt->f_ffree); 40 | used = mnt->f_blocks - mnt->f_bfree; 41 | adj = mnt->f_blocks - mnt->f_bfree + mnt->f_bavail; 42 | if (adj != 0) { 43 | double pct = (used * 100) / adj + ((used * 100) % adj != 0); 44 | printf("%s`df_used_percent\tL\t%0.2f\n", mnt->f_mntonname, pct); 45 | printf("%s`used_percent\tL\t%0.2f\n", mnt->f_mntonname, 100.0*(double)used/(double)adj); 46 | if (mnt->f_files > 0) { 47 | used = mnt->f_files - mnt->f_ffree; 48 | pct = (used * 100) / mnt->f_files + ((used * 100) % mnt->f_files != 0); 49 | printf("%s`df_used_inode_percent\tL\t%0.2f\n", mnt->f_mntonname, pct); 50 | printf("%s`used_inode_percent\tL\t%0.2f\n", mnt->f_mntonname, 100.0*(double)(mnt->f_files - mnt->f_ffree)/(double)mnt->f_files); 51 | } 52 | } 53 | } 54 | } 55 | exit(0); 56 | } 57 | -------------------------------------------------------------------------------- /plugins/pf/pf.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | # 3 | # Provides pf statistics. See pfctl(8), pf.conf(5) 4 | # - info (pfctl -s info) 5 | # - labels (pfctl -s labels), summarized by label 6 | # 7 | # Copyright 2015 Circonus, Inc. 8 | # All rights reserved. 9 | # 10 | # Redistribution and use in source and binary forms, with or without 11 | # modification, are permitted provided that the following conditions are 12 | # met: 13 | # 14 | # * Redistributions of source code must retain the above copyright 15 | # notice, this list of conditions and the following disclaimer. 
16 | # * Redistributions in binary form must reproduce the above 17 | # copyright notice, this list of conditions and the following 18 | # disclaimer in the documentation and/or other materials provided 19 | # with the distribution. 20 | # * Neither the name of the copyright holder nor the names 21 | # of its contributors may be used to endorse or promote products 22 | # derived from this software without specific prior written 23 | # permission. 24 | # 25 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 26 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 27 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 28 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 29 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 30 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 31 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 32 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 33 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 34 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 35 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 | 37 | use strict; 38 | use Math::BigInt try => 'GMP'; 39 | 40 | my $pfctl = '/sbin/pfctl'; 41 | 42 | ## 43 | # Status 44 | ## 45 | 46 | my $status = {}; 47 | my $getstats = $pfctl . 
' -si'; 48 | my ($in_state_tbl, $in_counters); 49 | 50 | open(my $gs, '-|', $getstats); 51 | while(<$gs>) { 52 | my $statline = $_; 53 | chomp $statline; 54 | 55 | if ($statline =~ /^Status: (\S+)/) { 56 | $status->{'status'} = $1; 57 | } 58 | if ($statline =~ /^State Table/) { 59 | $in_state_tbl = 1; 60 | } 61 | if ($in_state_tbl) { 62 | if ($statline =~ /^\s+current entries\s+(\d+)/) { 63 | $status->{'state'}{'current_entries'} = Math::BigInt->new($1); 64 | } 65 | if ($statline =~ /^\s+(\S+)\s+(\d+)/) { 66 | $status->{'state'}{$1} = Math::BigInt->new($2); 67 | } 68 | } 69 | if ($statline =~ /^Counters/) { 70 | $in_state_tbl = undef; 71 | $in_counters = 1; 72 | } 73 | if ($in_counters) { 74 | if ($statline =~ /^\s+(\S+)\s+(\d+)/) { 75 | $status->{'counters'}{$1} = Math::BigInt->new($2); 76 | } 77 | } 78 | } 79 | close($gs); 80 | 81 | printf("status s %s\n", $status->{'status'}); 82 | foreach my $tblstat (keys $status->{'state'}) { 83 | printf("state`%s L %d\n", $tblstat, $status->{'state'}{$tblstat}->bstr()); 84 | } 85 | foreach my $counter (keys $status->{'counters'}) { 86 | printf("counter`%s L %d\n", $counter, $status->{'counters'}{$counter}->bstr()); 87 | } 88 | 89 | 90 | ## 91 | # Labels 92 | ## 93 | 94 | my $labels = {}; 95 | my $getlabels = $pfctl . ' -sl'; 96 | open(my $gl, '-|', $getlabels); 97 | while(<$gl>) { 98 | my $lline = $_; 99 | chomp $lline; 100 | my @line = split(/\s+/, $lline); 101 | my $label = shift @line; 102 | if (exists $labels->{$label}) { 103 | foreach (0 .. 
$#line) { 104 | my $idx = $_; 105 | $labels->{$label}->[$idx]->badd(Math::BigInt->new($line[$idx])); 106 | } 107 | } else { 108 | $labels->{$label} = [ map { Math::BigInt->new($_) } @line ]; 109 | } 110 | } 111 | close($gl); 112 | 113 | foreach my $label (keys %$labels) { 114 | printf("label`%s`evals L %d\n", $label, $labels->{$label}->[0]->bstr()); 115 | printf("label`%s`pkts L %d\n", $label, $labels->{$label}->[1]->bstr()); 116 | printf("label`%s`octets L %d\n", $label, $labels->{$label}->[2]->bstr()); 117 | printf("label`%s`inpkts L %d\n", $label, $labels->{$label}->[3]->bstr()); 118 | printf("label`%s`inoctets L %d\n", $label, $labels->{$label}->[4]->bstr()); 119 | printf("label`%s`outpkts L %d\n", $label, $labels->{$label}->[5]->bstr()); 120 | printf("label`%s`outoctets L %d\n", $label, $labels->{$label}->[6]->bstr()); 121 | printf("label`%s`states L %d\n", $label, $labels->{$label}->[7]->bstr()); 122 | } 123 | 124 | exit; 125 | -------------------------------------------------------------------------------- /plugins/postgresql/pg-conf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This file will be sourced by each of the postgres plugin scripts. nad looks 4 | # for this file in /opt/circonus/etc/pg-conf.sh. 5 | # 6 | # to use: 7 | # 8 | # cp pg-conf.sh /opt/circonus/etc 9 | 10 | # --- edit and update variables -- 11 | # vi /opt/circonus/etc/pg-conf.sh 12 | 13 | # --- if nad daemon runs as 'nobody', do not leave world-readable --- 14 | # chgrp nobody /opt/circonus/etc/pg-conf.sh 15 | # chmod 640 /opt/circonus/etc/pg-conf.sh 16 | # 17 | # set credentials accordingly, if not set, auth is not used for 18 | # the account running the query (whatever id owns the nad process). 19 | # IOW, create and test a dedicated user for predictive results. 20 | # 21 | # Options, uncomment and set accordingly: 22 | 23 | ## The user to run pg_stat queries as. 
#!/usr/bin/env bash

# Report buffer-cache effectiveness for $PGDATABASE from
# pg_statio_user_tables: bytes read from disk, bytes served from cache,
# and the cache hit ratio. Output is nad tab-delimited metric lines.

plugin_dir=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
pgfuncs="${plugin_dir}/pg_functions.sh"
[[ -f $pgfuncs ]] || { echo "Unable to find pg functions ${pgfuncs}"; exit 1; }
source $pgfuncs
[[ ${pg_functions:-0} -eq 0 ]] && { echo "Invalid plugin configuration."; exit 1; }

LINEBREAKS=$'\n\b'

# single-row result: disk bytes, cache bytes, hit ratio (epsilon added to
# the divisor by the query itself to avoid division by zero)
cache_sql="select sum(heap_blks_read) * current_setting('block_size')::NUMERIC, sum(heap_blks_hit) * current_setting('block_size')::NUMERIC, sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read) + 0.000001) from pg_statio_user_tables"

DB_LIST=$($PSQL -U $PGUSER -d $PGDATABASE -p $PGPORT -w -F, -Atc "$cache_sql")

for row in $DB_LIST; do
    IFS=','
    fields=( $row )
    print_uint "${PGDATABASE}\`disk_bytes_read" ${fields[0]}
    print_uint "${PGDATABASE}\`cache_bytes_read" ${fields[1]}
    print_dbl "${PGDATABASE}\`cache_hit_ratio" ${fields[2]}
done

# END
$CONNECTIONS )

MAX_CONNECTIONS=${DATA[1]}
TOTAL_USED=${DATA[2]}
PCT_USED=${DATA[3]}
IDLE=${DATA[4]}
IDLE_IN_TXN=${DATA[5]}
ACTIVE=${DATA[6]}
MAX_IDLE_IN_TXN=${DATA[7]}

print_uint max_connections $MAX_CONNECTIONS
print_uint total_used $TOTAL_USED
print_uint pct_used $PCT_USED
print_uint idle $IDLE
print_uint idle_in_txn $IDLE_IN_TXN
print_uint active $ACTIVE
print_uint max_idle_in_txn $MAX_IDLE_IN_TXN

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_db_size.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Emit the on-disk size (bytes) of every database in the cluster.

plugin_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
pgfuncs="${plugin_dir}/pg_functions.sh"
[[ -f $pgfuncs ]] || { echo "Unable to find pg functions ${pgfuncs}"; exit 1; }
source "$pgfuncs"
[[ ${pg_functions:-0} -eq 0 ]] && { echo "Invalid plugin configuration."; exit 1; }

LINEBREAKS=$'\n\b'

DB_LIST=$($PSQL -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -w -F, -Atc "select datname,pg_database_size(datname) from pg_database;")

for db in $DB_LIST; do
    IFS=','
    DATA=( $db )  # intentional unquoted expansion: datname,size
    print_uint ${DATA[0]} ${DATA[1]}
done

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_functions.sh:
--------------------------------------------------------------------------------
# Shared helpers for the postgresql plugins: metric emitters plus connection
# settings sourced from the optional pg-conf.sh. Sets pg_functions=1 on
# success so callers can verify this file was sourced correctly.

# print_<type> NAME VALUE -- emit one metric line: name<TAB>typecode<TAB>value
# FIX: quote "$1"/"$2"; unquoted, an empty or multi-word value (e.g. an
# application_name containing spaces) misaligns printf's arguments.
print_int() { printf "%s\tl\t%s\n" "$1" "$2"; }
print_uint() { printf "%s\tL\t%s\n" "$1" "$2"; }
print_dbl() { printf "%s\tn\t%s\n" "$1" "$2"; }
print_str() { printf "%s\ts\t%s\n" "$1" "$2"; }

pgconf="/opt/circonus/nad/etc/pg-conf.sh"
[[ ! -f $pgconf ]] && pgconf="/opt/circonus/etc/pg-conf.sh" # check old nad etc location
[[ -f $pgconf ]] && source "$pgconf"

# Resolve psql: honor PSQL_CMD from pg-conf.sh, else search PATH.
PSQL=${PSQL_CMD:-}
if [[ -z ${PSQL:-} ]]; then
    PSQL=$(command -v psql)
    [[ $? -eq 0 ]] || { echo "Unable to find 'psql' command"; exit 1; }
fi

[[ -n $PSQL && -x $PSQL ]] || { echo "'${PSQL}' not executable"; exit 1; }

: ${PGUSER:=postgres}
: ${PGDATABASE:=postgres}
: ${PGPASS:=}
: ${PGPORT:=5432}
# psql reads the password from the PGPASSWORD environment variable.
[[ -n ${PGPASS:-} && -z ${PGPASSWORD:-} ]] && export PGPASSWORD="$PGPASS"

pg_functions=1

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_isready.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Emit pg_isready's exit status (0 = server is accepting connections).

plugin_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
pgfuncs="${plugin_dir}/pg_functions.sh"
[[ -f $pgfuncs ]] || { echo "Unable to find pg functions ${pgfuncs}"; exit 1; }
source "$pgfuncs"
[[ ${pg_functions:-0} -eq 0 ]] && { echo "Invalid plugin configuration."; exit 1; }

PGCMD=$(command -v pg_isready)
[[ $? -eq 0 ]] || { echo "Unable to find 'pg_isready' command"; exit 1; }

$PGCMD -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -q
DATA=$?
print_int "isready_status" $DATA

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_locks.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Emit lock counts (total, waiting, exclusive) from pg_locks.

plugin_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
pgfuncs="${plugin_dir}/pg_functions.sh"
[[ -f $pgfuncs ]] || { echo "Unable to find pg functions ${pgfuncs}"; exit 1; }
source "$pgfuncs"
[[ ${pg_functions:-0} -eq 0 ]] && { echo "Invalid plugin configuration."; exit 1; }

IFS=','
LINEBREAKS=$'\n\b'
LOCKS=$($PSQL -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -w -F, -Atc "select 'locks', count(*) as total, count(nullif(granted,true)) as waiting, count(nullif(mode ilike '%exclusive%',false)) as exclusive from pg_locks")

DATA=( $LOCKS )  # intentional unquoted expansion: split row on IFS=','

TOTAL=${DATA[1]}
WAITING=${DATA[2]}
EXCLUSIVE=${DATA[3]}

print_uint total $TOTAL
print_uint waiting $WAITING
print_uint exclusive $EXCLUSIVE

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_partitions.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Emit the number of child partitions recorded in pg_inherits.

plugin_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
pgfuncs="${plugin_dir}/pg_functions.sh"
[[ -f $pgfuncs ]] || { echo "Unable to find pg functions ${pgfuncs}"; exit 1; }
source "$pgfuncs"
[[ ${pg_functions:-0} -eq 0 ]] && { echo "Invalid plugin configuration."; exit 1; }

IFS=','
LINEBREAKS=$'\n\b'

PARTITIONS=$($PSQL -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -w -F, -Atc "select 'childnum', coalesce(count(distinct inhrelid),0) as count from pg_inherits")

DATA=( $PARTITIONS )  # intentional unquoted expansion: split row on IFS=','

print_int ${DATA[0]} ${DATA[1]}

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_protocol_observer.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Start protocol_observer for postgres wire traffic (if not already running)
# and point it at the local NAD push receiver.

popid=$(pgrep -n -f 'protocol_observer -wire postgres')
if [[ -n $popid ]]; then
    echo "protocol_observer is already running with PID ${popid}"
    exit 0
fi

# check sudo access for user running NAD if not id 0
SUDO=""
if [[ $UID -ne 0 ]]; then
    SUDO=sudo
    # FIX: redirect output to /dev/null; the original '$SUDO -l /dev/null'
    # passed /dev/null as a command argument to 'sudo -l' (asking whether
    # the user may run /dev/null) instead of quietly listing privileges.
    $SUDO -l >/dev/null 2>&1
    [[ $? -ne 0 ]] && { echo "Error checking sudo access for $UID"; exit 1; }
fi

po_conf=/opt/circonus/etc/pg_po_conf.sh
[[ -s $po_conf ]] && source "$po_conf"

# default location
po=/opt/circonus/bin/protocol_observer

if [[ ! -x $po ]]; then
    po=$(type -P protocol_observer)
    [[ $? -eq 0 ]] || { echo 'Unable to locate protocol_observer binary'; exit 1; }
fi
IFACE="${IFACE:="auto"}"
NADURL="${NADURL:="http://localhost:2609"}"

# strip any trailing slash before composing the submission URL
NADURL=${NADURL%/}

$SUDO $po -wire postgres -submissionurl ${NADURL}/write/postgres_protocol_observer &

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_repl_lag.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Emit per-standby replication lag (bytes) from pg_stat_replication.

plugin_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
pgfuncs="${plugin_dir}/pg_functions.sh"
[[ -f $pgfuncs ]] || { echo "Unable to find pg functions ${pgfuncs}"; exit 1; }
source "$pgfuncs"
[[ ${pg_functions:-0} -eq 0 ]] && { echo "Invalid plugin configuration."; exit 1; }

LINEBREAKS=$'\n\b'

LAG=$($PSQL -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -w -F, -Atc "SELECT application_name, pg_xlog_location_diff(pg_current_xlog_insert_location(), flush_location) AS lag_bytes FROM pg_stat_replication")

for a in $LAG; do
    IFS=','
    DATA=( $a )
    print_str "application_name" ${DATA[0]}
    print_uint "lag"
${DATA[1]}
done

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_repl_slots.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Emit per-slot state and retained WAL bytes from pg_replication_slots.

plugin_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
pgfuncs="${plugin_dir}/pg_functions.sh"
[[ -f $pgfuncs ]] || { echo "Unable to find pg functions ${pgfuncs}"; exit 1; }
source "$pgfuncs"
[[ ${pg_functions:-0} -eq 0 ]] && { echo "Invalid plugin configuration."; exit 1; }

LINEBREAKS=$'\n\b'

SLOT=$($PSQL -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -w -F, -Atc "SELECT slot_name, active, pg_xlog_location_diff(pg_current_xlog_insert_location(), restart_lsn) AS retained_bytes FROM pg_replication_slots")

for a in $SLOT; do
    IFS=','
    DATA=( $a )
    print_str "slot_name" ${DATA[0]}
    print_str "active" ${DATA[1]}
    print_uint "retained_bytes" ${DATA[2]}
done

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_replication.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Emit replication health: xlog send lag per standby when run on a master,
# and xlog apply lag / time-since-last-replay when run on a replica.

plugin_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
pgfuncs="${plugin_dir}/pg_functions.sh"
[[ -f $pgfuncs ]] || { echo "Unable to find pg functions ${pgfuncs}"; exit 1; }
source "$pgfuncs"
[[ ${pg_functions:-0} -eq 0 ]] && { echo "Invalid plugin configuration."; exit 1; }

LINEBREAKS=$'\n\b'

MASTER_LIST=$($PSQL -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -w -F, -Atc "select client_addr, pg_xlog_location_diff(sent_location, write_location) from pg_stat_replication")
# FIX: use comma as psql's field separator (was -F " "); the result is split
# on IFS=',' below, so a space separator left DATA[1] empty on replicas.
REPLICA=$($PSQL -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -w -F, -Atc "select pg_xlog_location_diff(pg_last_xlog_receive_location(), pg_last_xlog_replay_location()), extract(epoch from now()) - extract(epoch from pg_last_xact_replay_timestamp())")

# This check runs on a master. Large numbers can indicate problems sending
# xlogs to replicas, ie network problems
#
# each client comes in the form
# client_addr,xlog_diff(sent,written)
for slave in $MASTER_LIST; do
    IFS=','
    DATA=( $slave )
    print_int "pg_replication:${DATA[0]}:master:xlog_sent_diff" ${DATA[1]}
done

# This check runs on replicas. Large numbers indicate problems applying xlogs
# ie file system or disk saturation.
#
# replica data comes in the form
# xlog_diff(received,written),time_since(commit|rollback)
IFS=','
DATA=( $REPLICA )

print_int "pg_replication:0:replica:xlog_applied_diff" ${DATA[0]}
print_int "pg_replication:0:replica:time_since_commit" ${DATA[1]}

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_table_stats.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Emit aggregate table activity (tuple churn, scan counts, vacuum/analyze
# recency) from pg_stat_all_tables.

plugin_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
pgfuncs="${plugin_dir}/pg_functions.sh"
[[ -f $pgfuncs ]] || { echo "Unable to find pg functions ${pgfuncs}"; exit 1; }
source "$pgfuncs"
[[ ${pg_functions:-0} -eq 0 ]] && { echo "Invalid plugin configuration."; exit 1; }

IFS=','
LINEBREAKS=$'\n\b'
STATS=$($PSQL -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -w -F, -Atc "select 'tables', sum(n_tup_ins) as inserts, sum(n_tup_upd) as updates, sum(n_tup_del) as deletes, sum(idx_scan) as index_scans, sum(seq_scan) as seq_scans, sum(idx_tup_fetch) as index_tup_fetch, sum(seq_tup_read) as seq_tup_read, coalesce(extract(epoch from now() - max(last_autovacuum))) as max_last_autovacuum , coalesce(extract(epoch from now() - max(last_vacuum))) as max_last_vacuum , coalesce(extract(epoch from now() - max(last_autoanalyze))) as max_last_autoanalyze , coalesce(extract(epoch from now() - max(last_analyze))) as max_last_analyze
from pg_stat_all_tables")

DATA=( $STATS )  # intentional unquoted expansion: split row on IFS=','

INSERTS=${DATA[1]}
UPDATES=${DATA[2]}
DELETES=${DATA[3]}
INDEX_SCANS=${DATA[4]}
SEQ_SCANS=${DATA[5]}
INDEX_TUP_FETCH=${DATA[6]}
SEQ_TUP_READ=${DATA[7]}
MAX_LAST_AUTOVACUUM=${DATA[8]}
MAX_LAST_VACUUM=${DATA[9]}
MAX_LAST_AUTOANALYZE=${DATA[10]}
MAX_LAST_ANALYZE=${DATA[11]}

print_uint inserts $INSERTS
print_uint updates $UPDATES
print_uint deletes $DELETES
print_uint index_scans $INDEX_SCANS
print_uint seq_scans $SEQ_SCANS
print_uint index_tup_fetch $INDEX_TUP_FETCH
print_uint seq_tup_read $SEQ_TUP_READ
print_dbl max_last_autovacuum $MAX_LAST_AUTOVACUUM
print_dbl max_last_vacuum $MAX_LAST_VACUUM
print_dbl max_last_autoanalyze $MAX_LAST_AUTOANALYZE
print_dbl max_last_analyze $MAX_LAST_ANALYZE

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_transactions.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Emit cluster transaction counters: snapshot xmax, commits, rollbacks.

plugin_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
pgfuncs="${plugin_dir}/pg_functions.sh"
[[ -f $pgfuncs ]] || { echo "Unable to find pg functions ${pgfuncs}"; exit 1; }
source "$pgfuncs"
[[ ${pg_functions:-0} -eq 0 ]] && { echo "Invalid plugin configuration."; exit 1; }

IFS=','
LINEBREAKS=$'\n\b'
# FIX: use $PSQL (resolved and validated by pg_functions.sh) instead of a
# bare 'psql'; every sibling plugin honors the PSQL_CMD override, this one
# silently ignored it and depended on psql being in PATH.
TRANSACTIONS=$($PSQL -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -w -F, -Atc "select 'transactions', txid_snapshot_xmax(txid_current_snapshot()) as xmax, commits, rollback from (select sum(xact_commit) as commits, sum(xact_rollback) as rollback from pg_stat_database) as x")

DATA=( $TRANSACTIONS )  # intentional unquoted expansion: split row on IFS=','

XMAX=${DATA[1]}
COMMITS=${DATA[2]}
ROLLBACK=${DATA[3]}

print_uint xmax $XMAX
print_uint commits $COMMITS
print_uint rollback $ROLLBACK

# END
--------------------------------------------------------------------------------
/plugins/postgresql/pg_vacuum.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Emit transaction-id age metrics: how close the oldest database is to xid
# wraparound and to triggering an emergency (anti-wraparound) autovacuum.

plugin_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
pgfuncs="${plugin_dir}/pg_functions.sh"
[[ -f $pgfuncs ]] || { echo "Unable to find pg functions ${pgfuncs}"; exit 1; }
source "$pgfuncs"
[[ ${pg_functions:-0} -eq 0 ]] && { echo "Invalid plugin configuration."; exit 1; }

vacuum_data=$($PSQL -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -w -F, -Atc "WITH max_age AS (SELECT 2000000000 as max_old_xid, setting AS autovacuum_freeze_max_age FROM pg_settings WHERE name = 'autovacuum_freeze_max_age'), per_database_stats AS (SELECT datname, m.max_old_xid::int, m.autovacuum_freeze_max_age::int, age(d.datfrozenxid) AS oldest_current_xid FROM pg_database d JOIN max_age m ON (true) WHERE d.datallowconn) SELECT 'autovac', max(oldest_current_xid) AS oldest_current_xid, max(ROUND(100*(oldest_current_xid/max_old_xid::float))) AS percent_towards_wraparound, max(ROUND(100*(oldest_current_xid/autovacuum_freeze_max_age::float))) AS percent_towards_emergency_autovac FROM per_database_stats;")

IFS=','
DATA=( $vacuum_data )  # intentional unquoted expansion: split row on IFS=','

print_uint oldest_current_xid ${DATA[1]}
print_uint percent_towards_wraparound ${DATA[2]}
print_uint percent_towards_emergency_autovac ${DATA[3]}

# END
--------------------------------------------------------------------------------
/plugins/smartos/.index.json:
--------------------------------------------------------------------------------
{
    "aggcpu.elf": "aggregated statistics across all CPUs",
    "cpu.sh": "statistics for each CPU",
    "sdinfo.sh": "statistics for all disk devices on 'sd'",
    "vminfo.sh": "statistics related to the virtual memory subsystem",
    "zfsinfo.sh": "statistics for ZFS"
}
-------------------------------------------------------------------------------- /plugins/smartos/jinf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | /opt/local/bin/jinf -p -c | sed -e 's/:/ n /; s/^/jinf:/' 4 | /opt/local/bin/jinf -p -m | sed -e 's/:/ L /; s/^/jinf:/' 5 | /opt/local/bin/jinf -p -s | sed -e 's/:/ L /; s/^/jinf:/' 6 | -------------------------------------------------------------------------------- /plugins/windows/wmi/default.conf: -------------------------------------------------------------------------------- 1 | Memory\Committed Bytes 2 | Processor(_Total)\% Processor Time 3 | Processor(_Total)\% User Time 4 | Processor(_Total)\% Privileged Time 5 | Processor(_Total)\% DPC Time 6 | Processor(_Total)\% Interrupt Time 7 | LogicalDisk(C:)\% Free Space 8 | LogicalDisk(C:)\Free Megabytes 9 | LogicalDisk(C:)\% Idle Time 10 | 11 | -------------------------------------------------------------------------------- /plugins/windows/wmi/wmi.bat: -------------------------------------------------------------------------------- 1 | @ECHO off 2 | node %1 -c %2 3 | 4 | -------------------------------------------------------------------------------- /plugins/windows/wmi/wmi.json: -------------------------------------------------------------------------------- 1 | { 2 | "wmi": [".\\plugins\\windows\\wmi\\exec\\wmi.js", ".\\plugins\\windows\\wmi\\default.conf"] 3 | } 4 | -------------------------------------------------------------------------------- /sbin/.eslintrc.js: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Circonus, Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | module.exports = { 6 | root: true, 7 | extends: [ 8 | '@maier/base', 9 | '@maier/node', 10 | '@maier/node-cmd' 11 | ], 12 | rules: { 13 | // ...additional project specific rules 14 | } 15 | }; 16 | -------------------------------------------------------------------------------- /sbin/nad.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright 2016 Circonus, Inc. All rights reserved. 3 | # Use of this source code is governed by a BSD-style 4 | # license that can be found in the LICENSE file. 5 | 6 | set -o nounset 7 | set -o errexit 8 | set -o pipefail 9 | 10 | circonus_dir="@@PREFIX@@" 11 | nad_dir="@@APP_DIR@@" 12 | 13 | node_bin="${circonus_dir}/bin/node" 14 | nad_script="${nad_dir}/sbin/nad.js" 15 | lib_dir="${nad_dir}/node_modules" 16 | nad_conf="${nad_dir}/etc/nad.conf" 17 | log_dir="" 18 | 19 | [[ -d $lib_dir ]] || { 20 | echo "Unable to find NAD modules directory ${lib_dir}" 21 | exit 1 22 | } 23 | 24 | [[ -x $node_bin ]] || { 25 | node_bin=$(command -v node) 26 | [[ -x $node_bin ]] || { 27 | echo "Unable to find node binary in path ${PATH}:${circonus_dir}/bin" 28 | exit 1 29 | } 30 | } 31 | 32 | [[ -s $nad_script ]] || { 33 | echo "Unable to find NAD script ${nad_script}" 34 | exit 1 35 | } 36 | 37 | # set log_dir if nad logrotate config detected 38 | # linux freebsd 39 | [[ -f /etc/logrotate.d/nad || -f /usr/local/etc/logrotate.d/nad ]] && { 40 | log_dir="${nad_dir}/log" 41 | [[ -d $log_dir ]] || mkdir -p $log_dir 42 | } 43 | 44 | extra_opts="" 45 | pid_file="@@PID_FILE@@" 46 | daemon=0 47 | syslog=0 48 | 49 | while [[ $# -gt 0 ]]; do 50 | case $1 in 51 | --daemon) 52 | daemon=1 53 | ;; 54 | --syslog) 55 | syslog=1 56 | ;; 57 | --pid_file) 58 | pid_file="$2" 59 | shift 60 | ;; 61 | *) 62 | extra_opts="${extra_opts} $1" 63 | ;; 64 | esac 65 | shift 66 | done 67 | 68 | NAD_OPTS="" 69 | 70 | if [[ -s $nad_conf ]]; then 71 | set -o allexport 72 | source $nad_conf 73 | set +o allexport 
fi

export NODE_PATH=$lib_dir # ensure node can find nad specific packages

cmd="${node_bin} ${nad_script} ${NAD_OPTS} ${extra_opts}"

if [[ $daemon -eq 1 ]]; then # start nad in background
    if [[ -n "$log_dir" ]]; then
        # Linux - sends to /opt/circonus/log/nad.log, rotates with logrotate
        $cmd >> "${log_dir}/nad.log" 2>&1 &
        pid=$!
        ret=$?
    elif [[ $syslog -eq 1 ]]; then
        # FreeBSD - sends to /var/log/messages (newsyslog can't copytruncate) by default
        # Makefile and this script will detect if logrotate is installed and preference it
        $cmd | logger -t 'nad' 2>&1 &
        ret=${PIPESTATUS[0]}
        pid=$(pgrep -f -n sbin/nad)
    else
        # OmniOS (illumos) - send to svcs -L nad
        $cmd &
        pid=$!
        ret=$?
    fi
    echo "$pid" > "$pid_file"
    exit $ret
fi

# run nad in foreground
$cmd

#END
--------------------------------------------------------------------------------
/smf/circonus-nad:
--------------------------------------------------------------------------------
#!/sbin/sh

# SMF start/stop method for NAD; @@...@@ tokens are replaced at install time.
. /lib/svc/share/smf_include.sh

function startup
{
    if smf_is_nonglobalzone; then
        @@SBIN@@/nad --daemon --pid_file @@PID_FILE@@
    else
        # global zone: grant dtrace_kernel so dtrace-based plugins work
        /bin/ppriv -e -s EI+dtrace_kernel @@SBIN@@/nad --daemon --pid_file @@PID_FILE@@
    fi
}

function shutdown
{
    if [[ -f @@PID_FILE@@ ]]; then
        kill $(cat @@PID_FILE@@)
    else
        echo "Missing PID file @@PID_FILE@@" >&2
        exit $SMF_EXIT_ERR_FATAL
    fi
}

case $1 in
    start) startup ;;
    stop) shutdown ;;
    *)
        echo "Usage: $0 {start | stop}" >&2
        exit $SMF_EXIT_ERR_FATAL
        ;;
esac

exit $SMF_EXIT_OK
--------------------------------------------------------------------------------
/smf/nad.xml:
--------------------------------------------------------------------------------
1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 26 | 31 | 32 | 33 | 34 | 35 | 40 | 41 | 42 | 
--------------------------------------------------------------------------------