├── .gitmodules
├── snmpexport_test.py
├── snmpexporterd_test.py
├── mibresolver
│   ├── Makefile
│   ├── setup.py
│   └── mibresolver.c
├── snmpexporterd.k8s.sh
├── .coveragerc
├── etc
│   ├── auth.yaml
│   ├── snmp.conf
│   └── snmpexporter.yaml
├── snmpexporter
│   ├── Makefile
│   ├── prometheus_test.py
│   ├── snmp.py
│   ├── config.py
│   ├── __init__.py
│   ├── target.py
│   ├── prometheus.py
│   ├── poller.py
│   ├── snmpimpl.py
│   ├── annotator.py
│   └── annotator_test.py
├── .gitignore
├── snmpexporterd.service
├── .travis.yml
├── Dockerfile
├── Makefile
├── tools
│   ├── start_mocks.sh
│   ├── install_mibs.sh
│   └── snmp-agent
│       ├── tableswitch.rb
│       └── agent.rb
├── snmpexporterd.k8s.yaml
├── README.md
├── snmpexport.py
├── LICENSE
└── snmpexporterd.py

/.gitmodules:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/snmpexport_test.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/snmpexporterd_test.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/mibresolver/Makefile:
--------------------------------------------------------------------------------
1 | DESTDIR ?= /
2 |
3 | clean:
4 | 	rm -fr build/
5 |
6 | install:
7 | 	python3 setup.py install --root $(DESTDIR) $(COMPILE)
--------------------------------------------------------------------------------
/snmpexporterd.k8s.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | kubectl create configmap snmpexporter --from-file etc/snmpexporter.yaml
4 |
5 | kubectl apply -f snmpexporterd.k8s.yaml
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | cover_pylib = False
3 | branch = False
4 | data_file = .coverage
5 | parallel = True
6 | omit = *_test.*
7 | source =
8 |     snmpexporter
9 |     .
10 |
--------------------------------------------------------------------------------
/mibresolver/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup, Extension
2 |
3 | setup(name='mibresolver', version='0.1', ext_modules=[
4 |     Extension('mibresolver', sources=['mibresolver.c'], libraries=['netsnmp'])])
--------------------------------------------------------------------------------
/etc/auth.yaml:
--------------------------------------------------------------------------------
1 | services:
2 |   auth: my_sha
3 |   auth_proto: SHA
4 |   port: 161
5 |   priv: my_aes
6 |   priv_proto: AES
7 |   sec_level: authPriv
8 |   user: my_user
9 |   version: 3
10 | wifi:
11 |   community: public
12 |   version: 2
--------------------------------------------------------------------------------
/snmpexporter/Makefile:
--------------------------------------------------------------------------------
1 | install:
2 | 	python setup.py install --root $(DESTDIR) $(COMPILE)
3 | 	mkdir -p $(DESTDIR)/usr/share/snmpcollector/
4 | 	cp src/*.py $(DESTDIR)/usr/share/snmpcollector/
5 | 	rm -f $(DESTDIR)/usr/share/snmpcollector/*_test.py
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .*.swp
2 | *.db
3 | *.stats
4 | build/
5 | *.pyc
6 | node_modules
7 | debian/files
8 | debian/*.log
9 | debian/*.substvars
10 | debian/analytics/
11 | debian/pinger/
12 | debian/snmpcollector/
13 | debian/dhmon-common/
14 | debian/*.debhelper
15 | debian/tmp
16 | .coverage
--------------------------------------------------------------------------------
/snmpexporterd.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Prometheus SNMP exporter
3 | After=network.target
4 |
5 | [Service]
6 | Type=simple
7 | User=prober
8 | ExecStart=/opt/snmpexporter/snmpexporterd.py \
9 |     --config /etc/snmpexporter/snmpexporter.yaml \
10 |     --poller-pool=500 \
11 |     --annotator-pool=15
12 |
13 | [Install]
14 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/etc/snmp.conf:
--------------------------------------------------------------------------------
1 | mibs ALL
2 | mibdirs /var/lib/mibs/std:/var/lib/mibs/std/arista:/var/lib/mibs/std/barracuda:/var/lib/mibs/std/cisco:/var/lib/mibs/std/dell:/var/lib/mibs/std/drac:/var/lib/mibs/std/emc:/var/lib/mibs/std/f5:/var/lib/mibs/std/juniper:/var/lib/mibs/std/junos:/var/lib/mibs/std/junose:/var/lib/mibs/std/netapp:/var/lib/mibs/std/openbsd:/var/lib/mibs/std/supermicro:/var/lib/mibs/std/hp:/var/lib/mibs/custom
--------------------------------------------------------------------------------
/snmpexporter/prometheus_test.py:
--------------------------------------------------------------------------------
1 | import prometheus
2 | import unittest
3 |
4 |
5 | class TestBytesToDatetime(unittest.TestCase):
6 |   def testDatetime(self):
7 |     time_data = b'\x07\xE2\x0B\x1D\x0E\x11\x0B\x00+\x00\x00'
8 |     self.assertEqual(prometheus.bytes_to_datetime(time_data), 1543501031.0)
9 |
10 |
11 | def main():
12 |   unittest.main()
13 |
14 |
15 | if __name__ == '__main__':
16 |   main()
--------------------------------------------------------------------------------
/snmpexporter/snmp.py:
--------------------------------------------------------------------------------
1 | import collections
2 |
3 |
4 |
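# Illustrative only (values below are made up, not from a real device):
# a ResultTuple pairs the raw value returned by Net-SNMP with its ASN.1
# type tag, which later drives export decisions (counter vs. gauge vs.
# label material), e.g.:
#
#   ResultTuple(value='1500', type='INTEGER')      # numeric -> gauge
#   ResultTuple(value='Gi1/0/1', type='OCTETSTR')  # string -> label/blob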
ResultTuple = collections.namedtuple('ResultTuple', ['value', 'type']) 5 | 6 | 7 | class Error(Exception): 8 | """Base error class for this module.""" 9 | 10 | 11 | class TimeoutError(Error): 12 | """Timeout talking to the device.""" 13 | 14 | 15 | class NoModelOid(Error): 16 | """Could not locate a model for the switch.""" 17 | 18 | 19 | class SnmpError(Error): 20 | """A SNMP error occurred.""" 21 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: trusty 3 | language: python 4 | 5 | python: 6 | - "3.4" 7 | 8 | before_install: 9 | - sudo apt-get update -qq 10 | - > 11 | sudo apt-get install -qq --install-recommends 12 | git-buildpackage devscripts build-essential python3-dev 13 | libsnmp-dev python3-yaml python3-mock python3 python3-pip 14 | 15 | install: 16 | - sudo pip3 install coveralls 17 | - sudo ln -sf /usr/bin/coverage3 /usr/bin/python3-coverage 18 | 19 | script: 20 | - make 21 | - sudo make install 22 | 23 | after_success: 24 | - coveralls 25 | 26 | before_deploy: 27 | - gem install mime-types -v 2.6.2 28 | -------------------------------------------------------------------------------- /snmpexporter/config.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import yaml 3 | 4 | 5 | class YamlLoader(yaml.SafeLoader): 6 | 7 | def __init__(self, stream): 8 | self._root = os.path.dirname(stream.name) 9 | super(YamlLoader, self).__init__(stream) 10 | 11 | def include(self, node): 12 | filename = os.path.join(self._root, self.construct_scalar(node)) 13 | with open(filename, 'r') as f: 14 | return yaml.load(f, YamlLoader) 15 | 16 | 17 | YamlLoader.add_constructor('!include', YamlLoader.include) 18 | 19 | 20 | def load(filename): 21 | with open(filename, 'r') as f: 22 | return yaml.load(f, YamlLoader) 23 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3-alpine 2 | 3 | RUN apk add --update gcc net-snmp-tools net-snmp-dev musl-dev make findutils \ 4 | wget && \ 5 | pip3 install python3-netsnmp --pre && \ 6 | pip3 install coverage pyyaml twisted objgraph && \ 7 | ln -sf /usr/local/bin/coverage3 /usr/local/bin/python3-coverage 8 | 9 | RUN (mkdir -p /var/lib/mibs/std /tmp/librenms; cd /tmp/librenms; \ 10 | wget https://github.com/librenms/librenms/archive/master.zip 2>&1 && \ 11 | unzip master.zip && mv librenms-master/mibs/* /var/lib/mibs/std/) && \ 12 | rm -r /tmp/librenms 13 | 14 | ADD etc/snmp.conf /etc/snmp/ 15 | 16 | ADD . /tmp/snmpexporter 17 | RUN make all install -C /tmp/snmpexporter && ls -laR /opt 18 | 19 | EXPOSE 9190 20 | CMD ["/opt/snmpexporter/snmpexporterd.py", \ 21 | "--config", "/etc/snmpexporter/snmpexporter.yaml"] 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | COVERAGE ?= 'python3-coverage' 2 | export PYTHONPATH=$(CURDIR) 3 | 4 | all: test 5 | 6 | install: 7 | make -C $(CURDIR)/mibresolver $@ 8 | mkdir -p $(DESTDIR)/opt/snmpexporter/ 9 | find . 
-name \*.py -not -name \*_test\* -not -name setup.py \ 10 | -printf '%P\n' | \ 11 | xargs -I{} install -m0644 -D {} $(DESTDIR)/opt/snmpexporter/{} 12 | chmod +x $(DESTDIR)/opt/snmpexporter/snmpexport.py \ 13 | $(DESTDIR)/opt/snmpexporter/snmpexporterd.py 14 | install -m600 etc/snmpexporter.yaml $(DESTDIR)/etc/ 15 | 16 | clean: 17 | rm -f .coverage 18 | make -C $(CURDIR)/mibresolver $@ 19 | 20 | distclean: clean 21 | 22 | test: 23 | $(COVERAGE) erase 24 | echo $(wildcard */*_test.py) | xargs -n 1 $(COVERAGE) run -p 25 | echo $(wildcard *_test.py) | xargs -n 1 $(COVERAGE) run -p 26 | $(COVERAGE) combine 27 | $(COVERAGE) report -m 28 | 29 | .PHONY: test clean install all distclean 30 | -------------------------------------------------------------------------------- /tools/start_mocks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script will set up a new interface for 10.0.0.0/10 and start 3 | # one snmp-agent per switch in the access and dist layer. 4 | 5 | set -e 6 | 7 | cd $(dirname $0) 8 | 9 | pkill -f 'ruby snmp-agent/tableswitch.rb' || true 10 | 11 | ip link del dhmon0 2>/dev/null || true 12 | ip link add dhmon0 type dummy 13 | ip addr add 10.0.0.0/10 dev dhmon0 14 | 15 | for row in $(sqlite3 /etc/ipplan.db "SELECT h.name, h.ipv4_addr_txt 16 | FROM host h, option o WHERE o.node_id = h.node_id AND o.name = 'layer' 17 | AND (o.value = 'dist' OR o.value = 'access')") 18 | do 19 | sw=$(echo $row | cut -f 1 -d '|') 20 | ip=$(echo $row | cut -f 2 -d '|') 21 | sed -i "/^$ip/d" /etc/hosts 22 | echo "$ip $sw" >> /etc/hosts 23 | ip addr add $ip/10 dev dhmon0 24 | screen -dmS mocksw-$sw ruby snmp-agent/tableswitch.rb $ip 25 | echo "Started $sw" 26 | done 27 | 28 | ip link set up dev dhmon0 29 | -------------------------------------------------------------------------------- /tools/install_mibs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | rm -f snmp-mibs-downloader_1.1_all.deb 6 | wget http://ftp.debian.org/debian/pool/non-free/s/snmp-mibs-downloader/snmp-mibs-downloader_1.1_all.deb 7 | dpkg -i snmp-mibs-downloader_1.1_all.deb || apt-get -f install -y 8 | rm -f snmp-mibs-downloader_1.1_all.deb 9 | 10 | cat << _EOF_ > /etc/snmp-mibs-downloader/snmp-mibs-downloader.conf 11 | # Master configuarion for mib-downloader 12 | # 13 | BASEDIR=/var/lib/mibs 14 | AUTOLOAD="rfc ianarfc iana cisco" 15 | _EOF_ 16 | 17 | cat << _EOF_ > /etc/snmp-mibs-downloader/cisco.conf 18 | HOST=ftp://ftp.cisco.com 19 | ARCHIVE=v2.tar.gz 20 | ARCHTYPE=tgz 21 | DIR=pub/mibs/v2/ 22 | ARCHDIR=auto/mibs/v2 23 | CONF=ciscolist 24 | DEST=cisco 25 | _EOF_ 26 | 27 | zcat /usr/share/doc/snmp-mibs-downloader/examples/ciscolist.gz \ 28 | | grep -Ev '(CISCO-802-TAP-MIB|CISCO-IP-TAP-CAPABILITY|CISCO-IP-TAP-MIB|CISCO-SYS-INFO-LOG-MIB|CISCO-TAP2-CAPABILITY|CISCO-TAP2-MIB|CISCO-TAP-MIB|CISCO-USER-CONNECTION-TAP-MIB)' \ 29 | | sudo tee /etc/snmp-mibs-downloader/ciscolist 30 | download-mibs 31 | -------------------------------------------------------------------------------- /snmpexporter/__init__.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import logging 3 | import multiprocessing 4 | 5 | import snmpexporter.target 6 | import snmpexporter.poller 7 | import snmpexporter.snmpimpl 8 | import snmpexporter.annotator 9 | 10 | 11 | class FakeResolver(object): 12 | 13 | def resolve(self, oid): 14 | _, iid = oid.rsplit('.', 1) 15 | return 'DUMMY-MIB::' 
+ base64.b64encode( 16 | oid.encode('utf-8')).decode('utf-8') + '.' + iid, {} 17 | 18 | 19 | # TODO(bluecmd): mibresolver and netsnmp are both using the same library. 20 | # If they are in the same process they will compete about the output format 21 | # which is a giant pain. We solve this by running them in seperate processes 22 | # for now. 23 | class ForkedResolver(object): 24 | 25 | def __init__(self): 26 | self.lock = multiprocessing.Lock() 27 | self.request = multiprocessing.Queue() 28 | self.response = multiprocessing.Queue() 29 | self.thread = multiprocessing.Process(target=self.run, daemon=True) 30 | self.thread.start() 31 | 32 | def resolve(self, oid): 33 | with self.lock: 34 | self.request.put(oid) 35 | return self.response.get() 36 | 37 | def run(self): 38 | logging.debug('Initializing MIB resolver') 39 | import mibresolver 40 | while True: 41 | request = self.request.get() 42 | self.response.put(mibresolver.resolve(request)) 43 | -------------------------------------------------------------------------------- /snmpexporter/target.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | class Error(Exception): 5 | """Generic error class for this module""" 6 | 7 | 8 | class LayerNotFound(Error): 9 | """The target has a layer that has no configuration""" 10 | 11 | 12 | class SnmpTarget(object): 13 | def __init__(self, host, layer, config): 14 | if config.get('snmp', layer) is None: 15 | raise LayerNotFound(layer) 16 | self._read_config(**config[layer]) 17 | self.host = host 18 | self.layer = layer 19 | self.full_host = "%s:%s" % (self.host, self.port) 20 | self.max_size = 256 21 | self.timeouts = 0 22 | self.errors = 0 23 | self.markers = [] 24 | 25 | def _read_config(self, version, community=None, 26 | user=None, auth_proto=None, auth=None, priv_proto=None, priv=None, 27 | sec_level=None, port=161): 28 | self.version = version 29 | self.community = community 30 | self.user = user 31 | self.auth_proto = auth_proto 32 | self.auth = auth 33 | self.priv_proto = priv_proto 34 | self.priv = priv 35 | self.sec_level = sec_level 36 | self.port = port 37 | 38 | def add_timeouts(self, timeouts): 39 | self.timeouts = self.timeouts + timeouts 40 | 41 | def add_errors(self, errors): 42 | self.errors = self.errors + errors 43 | 44 | def start(self, step): 45 | self.markers.append((step, time.time())) 46 | 47 | def done(self): 48 | self.markers.append(('done', time.time())) 49 | 50 | def timeline(self): 51 | return [ 52 | (fro[0], to[1] - fro[1]) 53 | for fro, to in zip(self.markers, self.markers[1:])] 54 | -------------------------------------------------------------------------------- /snmpexporterd.k8s.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: snmpexporter 6 | name: prober 7 | namespace: prod 8 | spec: 9 | ports: 10 | - name: probe 11 | port: 9190 12 | protocol: TCP 13 | targetPort: 9190 14 | selector: 15 | app: snmpexporter 16 | type: ClusterIP 17 | --- 18 | apiVersion: extensions/v1beta1 19 | kind: Deployment 20 | metadata: 21 | labels: 22 | app: snmpexporter 23 | name: snmpexporter 24 | namespace: prod 25 | spec: 26 | replicas: 20 27 | selector: 28 | matchLabels: 29 | app: snmpexporter 30 | strategy: 31 | rollingUpdate: 32 | maxSurge: 10 33 | maxUnavailable: 5 34 | type: RollingUpdate 35 | template: 36 | metadata: 37 | labels: 38 | app: snmpexporter 39 | name: snmpexporter 40 | spec: 41 | containers: 42 | - image: 
quay.io/dhtech/snmpexporter 43 | imagePullPolicy: Always 44 | name: snmpexporter 45 | volumeMounts: 46 | - name: config-volume 47 | mountPath: /etc/snmpexporter 48 | resources: 49 | requests: 50 | memory: "300Mi" 51 | limits: 52 | memory: "400Mi" 53 | livenessProbe: 54 | httpGet: 55 | path: /healthy 56 | port: 9190 57 | initialDelaySeconds: 15 58 | timeoutSeconds: 30 59 | restartPolicy: Always 60 | terminationGracePeriodSeconds: 30 61 | volumes: 62 | - name: config-volume 63 | configMap: 64 | name: snmpexporter 65 | --- 66 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/dhtech/snmpexporter.svg?branch=master)](https://travis-ci.org/dhtech/snmpexporter) 2 | [![Coverage Status](https://coveralls.io/repos/github/dhtech/snmpexporter/badge.svg?branch=master)](https://coveralls.io/github/dhtech/snmpexporter?branch=master) 3 | 4 | snmpexporter 5 | ===== 6 | 7 | SNMP Poller written for DreamHack. 8 | 9 | This product was previously called snmpcollector when it was part of the bigger 10 | monitoring system called "dhmon". It is nowadays fully standalone 11 | and should have very few if any ties back to DreamHack. 12 | 13 | ## What it is 14 | 15 | snmpexporter is a software that given a host that speaks SNMP will try to poll 16 | it and mangle the data into something more suitable as metrics. 17 | 18 | The core feature of the snmpexporter is its annotation feature where it can 19 | join different SNMP OIDs together to create something better than what SNMP 20 | already has today. It also supports MIBs. 21 | 22 | ## Why snmpexporter 23 | 24 | Compared to the official Prometheus SNMP exporter, this exporter is more 25 | flexible as it knows how to read MIBs. That's basically it. It has some 26 | minor annotation features that might be useful, but nothing extraordinary. 27 | 28 | ## Installation 29 | 30 | You can either install it directly or use Docker/Kubernetes to run the exporter. 31 | 32 | See Dockerfile for instructions on what dependencies are needed. Something like this (may not be up to date, again - see Dockerfile): 33 | 34 | ``` 35 | sudo apt-get install libsnmp-dev python3-distutils python3-dev python3-coverage python3-yaml python3-objgraph python3-twisted python3-pip python3-setuptools python3-wheel 36 | pip3 install python3-netsnmp 37 | 38 | mkdir -p /var/lib/mibs/std /tmp/librenms 39 | cd /tmp/librenms 40 | wget https://github.com/librenms/librenms/archive/master.zip 41 | unzip master.zip 42 | mv librenms-master/mibs/* /var/lib/mibs/std/ 43 | 44 | cp etc/snmp.conf /etc/snmp/ 45 | 46 | make 47 | make install 48 | ``` 49 | 50 | Run `make install` to install. 51 | 52 | To deploy to Kubernetes, upload the Docker image to a repository and modify 53 | `snmpexporterd.k8s.yaml`. 54 | 55 | ## Running 56 | 57 | There are two applications in snmpexporter: 58 | 59 | * snmpexporterd.py 60 | 61 | This is an API server capable of doing scrapes via HTTP requests. 62 | This is an excellent way to integrate SNMP into Prometheus. 63 | 64 | * snmpexport.py 65 | 66 | This is a utility script to test your configuration or debug SNMP polling 67 | behaviour. Run it to execute a one-off scraping. 
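For example, a one-off scrape of a single device looks like this (the
hostname is a placeholder; the layer must match a key in your SNMP
credentials configuration, e.g. `wifi` in the sample `etc/auth.yaml`):

```
./snmpexport.py --config etc/snmpexporter.yaml --annotate sw1.example.net wifi
```

The daemon takes the same configuration plus worker pool sizes, as in
`snmpexporterd.service`:

```
./snmpexporterd.py --config etc/snmpexporter.yaml \
  --poller-pool=500 --annotator-pool=15
```

It listens on port 9190 and serves `/healthy` for liveness checks (see
`snmpexporterd.k8s.yaml`).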
68 | -------------------------------------------------------------------------------- /snmpexport.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import logging 4 | import sys 5 | 6 | import snmpexporter 7 | import snmpexporter.config 8 | import snmpexporter.prometheus 9 | 10 | 11 | def main(config_file, host, layer, annotate=True): 12 | config = snmpexporter.config.load(config_file) 13 | collections = config['collection'] 14 | overrides = config['override'] 15 | snmp_creds = config['snmp'] 16 | annotator_config = config['annotator'] 17 | exporter_config = config['exporter'] 18 | 19 | if not annotate: 20 | logging.debug('Will not annotate') 21 | 22 | resolver = snmpexporter.ForkedResolver() 23 | 24 | logging.debug('Initializing Net-SNMP implemention') 25 | snmpimpl = snmpexporter.snmpimpl.NetsnmpImpl() 26 | 27 | logging.debug('Constructing SNMP target') 28 | target = snmpexporter.target.SnmpTarget(host, layer, snmp_creds) 29 | 30 | target.start('poll') 31 | 32 | logging.debug('Creating SNMP poller') 33 | poller = snmpexporter.poller.Poller(collections, overrides, snmpimpl) 34 | 35 | logging.debug('Starting poll') 36 | data, timeouts, errors = poller.poll(target) 37 | target.add_timeouts(timeouts) 38 | target.add_errors(errors) 39 | 40 | if not annotate: 41 | for (oid, vlan), value in sorted(data.items()): 42 | print(str(vlan if vlan else '').ljust(5), oid.ljust(50), value) 43 | return 44 | 45 | target.start('annotate') 46 | 47 | logging.debug('Creating result annotator') 48 | annotator = snmpexporter.annotator.Annotator(annotator_config, resolver) 49 | 50 | logging.debug('Starting annotation') 51 | data = annotator.annotate(data) 52 | 53 | target.done() 54 | 55 | exporter = snmpexporter.prometheus.Exporter(exporter_config) 56 | for x in exporter.export(target, data): 57 | print(x) 58 | 59 | 60 | if __name__ == '__main__': 61 | import argparse 62 | 63 | parser = argparse.ArgumentParser(description='One-shot SNMP exporter.') 64 | parser.add_argument('--config', dest='config_file', type=str, 65 | help='config file to load', default='/etc/snmpexporter.yaml') 66 | parser.add_argument('--log-level', dest='log_level', type=str, 67 | help='log level', default='INFO') 68 | parser.add_argument('--annotate', dest='annotate', default=False, const=True, 69 | help='annotate the results', action='store_const') 70 | parser.add_argument('host', type=str, help='host to scrape') 71 | parser.add_argument('layer', type=str, help='layer to use for authentication') 72 | args = parser.parse_args() 73 | 74 | # Logging setup 75 | root = logging.getLogger() 76 | ch = logging.StreamHandler(sys.stderr) 77 | formatter = logging.Formatter( '%(asctime)s - %(name)s - ' 78 | '%(levelname)s - %(message)s' ) 79 | ch.setFormatter(formatter) 80 | root.addHandler(ch) 81 | root.setLevel(logging.getLevelName(args.log_level)) 82 | 83 | main(args.config_file, args.host, args.layer, annotate=args.annotate) 84 | -------------------------------------------------------------------------------- /mibresolver/mibresolver.c: -------------------------------------------------------------------------------- 1 | /* 2 | * MIB resolver for snmpcollector 3 | */ 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | 14 | #define MAX_OUTPUT 1024 15 | 16 | 17 | struct module_state { 18 | PyObject *error; 19 | }; 20 | 21 | static PyObject *resolve(PyObject *self, PyObject *args) { 22 | oid name[MAX_OID_LEN]; 23 | size_t 
name_length = MAX_OID_LEN; 24 | const char *input; 25 | char output[MAX_OUTPUT]; 26 | struct tree *tp; 27 | PyObject *enum_map; 28 | 29 | if (!PyArg_ParseTuple(args, "s", &input)) { 30 | return NULL; 31 | } 32 | 33 | if (read_objid(input, name, &name_length) != 1) { 34 | return Py_None; 35 | } 36 | 37 | /* Resolve the OID */ 38 | snprint_objid(output, sizeof(output), name, name_length); 39 | 40 | /* Resolve enum values if we have any */ 41 | enum_map = PyDict_New(); 42 | tp = get_tree(name, name_length, get_tree_head()); 43 | if (tp->enums) { 44 | struct enum_list *ep = tp->enums; 45 | while (ep) { 46 | PyObject *key = PyUnicode_FromFormat("%d", ep->value); 47 | PyObject *val = PyUnicode_FromString(ep->label); 48 | PyDict_SetItem(enum_map, key, val); 49 | Py_DECREF(key); 50 | Py_DECREF(val); 51 | ep = ep->next; 52 | } 53 | } 54 | 55 | PyObject* ret = Py_BuildValue("sO", output, enum_map); 56 | Py_DECREF(output); 57 | Py_DECREF(enum_map); 58 | return ret; 59 | } 60 | 61 | static int module_traverse(PyObject *m, visitproc visit, void *arg) { 62 | Py_VISIT(((struct module_state*)PyModule_GetState(m))->error); 63 | return 0; 64 | } 65 | 66 | static int module_clear(PyObject *m) { 67 | Py_CLEAR(((struct module_state*)PyModule_GetState(m))->error); 68 | return 0; 69 | } 70 | 71 | static PyMethodDef module_funcs[] = { 72 | { "resolve", resolve, METH_VARARGS, "Try to resolve a given OID." }, 73 | { NULL, NULL, 0, NULL } 74 | }; 75 | 76 | static struct PyModuleDef moduledef = { 77 | PyModuleDef_HEAD_INIT, 78 | "mibresolver", 79 | "MIB resolver utilities", 80 | sizeof(struct module_state), 81 | module_funcs, 82 | NULL, 83 | module_traverse, 84 | module_clear, 85 | NULL 86 | }; 87 | 88 | PyMODINIT_FUNC PyInit_mibresolver(void) { 89 | PyObject *module = PyModule_Create(&moduledef); 90 | 91 | if (module == NULL) 92 | return NULL; 93 | 94 | struct module_state *st = (struct module_state*)PyModule_GetState(module); 95 | 96 | st->error = PyErr_NewException("mibresolver.Error", NULL, NULL); 97 | if (st->error == NULL) { 98 | Py_DECREF(module); 99 | return NULL; 100 | } 101 | 102 | /* Turn off noisy MIB debug logging */ 103 | netsnmp_register_loghandler(NETSNMP_LOGHANDLER_NONE, 0); 104 | 105 | /* Print indexes in integer format and not ASCII converted */ 106 | netsnmp_ds_set_boolean( 107 | NETSNMP_DS_LIBRARY_ID, NETSNMP_DS_LIB_DONT_BREAKDOWN_OIDS, 1); 108 | 109 | init_snmp("snmpapp"); 110 | return module; 111 | } 112 | -------------------------------------------------------------------------------- /tools/snmp-agent/tableswitch.rb: -------------------------------------------------------------------------------- 1 | $LOAD_PATH << File.dirname(__FILE__) 2 | require 'agent' 3 | 4 | logger = Logger.new(STDOUT) 5 | logger.level = Logger::INFO 6 | 7 | rng = Random.new() 8 | 9 | interval = 30 10 | 11 | agent = SNMP::Agent.new(:address => ARGV[0], 12 | :port => 1061, 13 | :logger => logger) 14 | 15 | $start = Time.now.to_i * 100 16 | def uptime 17 | Time.now.to_i * 100 - $start 18 | end 19 | 20 | # entPhysicalModelName 21 | agent.add_plugin('1.3.6.1.2.1.47.1.1.1.1.13.1') do 22 | "Tableswitch Mock" 23 | end 24 | 25 | overdrive = false 26 | 27 | # avgBusy1 28 | agent.add_plugin('1.3.6.1.4.1.9.2.1.57.0') do 29 | if rng.rand(1..100) == 1 30 | overdrive = true 31 | elsif rng.rand(1..30) == 1 32 | overdrive = false 33 | end 34 | 35 | SNMP::Integer.new(overdrive ? rng.rand(90..100) : rng.rand(50..70)) 36 | end 37 | 38 | # avgBusy5 39 | agent.add_plugin('1.3.6.1.4.1.9.2.1.58.0') do 40 | SNMP::Integer.new(overdrive ? 
rng.rand(90..100) : rng.rand(50..70)) 41 | end 42 | 43 | # busyPer 44 | agent.add_plugin('1.3.6.1.4.1.9.2.1.56.0') do 45 | SNMP::Integer.new(overdrive ? rng.rand(90..100) : rng.rand(50..70)) 46 | end 47 | 48 | port_up = true 49 | last_change = uptime 50 | 51 | # ifOperStatus 52 | agent.add_plugin('1.3.6.1.2.1.2.2.1.8.1') do 53 | if rng.rand(1..100) == 1 54 | port_up = !port_up 55 | last_change = uptime 56 | end 57 | SNMP::Integer.new(port_up ? 1 : 2) 58 | end 59 | 60 | full_speed = true 61 | 62 | # ifSpeed 63 | agent.add_plugin('1.3.6.1.2.1.2.2.1.5.1') do 64 | if rng.rand(1..60) == 1 65 | full_speed = !full_speed 66 | last_change = uptime 67 | end 68 | SNMP::Gauge32.new((full_speed ? 1000 : 100)*1000000) 69 | end 70 | 71 | # ifHighSpeed 72 | agent.add_plugin('1.3.6.1.2.1.31.1.1.1.15.1') do 73 | SNMP::Gauge32.new(full_speed ? 1000 : 100) 74 | end 75 | 76 | # ifLastChange 77 | agent.add_plugin('1.3.6.1.2.1.2.2.1.9.1') do 78 | SNMP::TimeTicks.new(last_change) 79 | end 80 | 81 | traffic_period = 20 * 60 * 100 82 | traffic_packets = 100000 83 | traffic_packet_size = 1300 84 | 85 | in_octets = 0 86 | out_octets = 0 87 | in_pkts = 0 88 | out_pkts = 0 89 | 90 | # ifInOctets 91 | agent.add_plugin('1.3.6.1.2.1.2.2.1.10.1') do 92 | in_octets += ((traffic_packet_size*traffic_packets) / 2 * (Math.sin( 93 | uptime.to_f / traffic_period * 2 * 3.1415) + 1) * interval) 94 | SNMP::Counter32.new(in_octets.modulo(2**32)) 95 | end 96 | 97 | # ifHCInOctets 98 | agent.add_plugin('1.3.6.1.2.1.31.1.1.1.6.1') do 99 | SNMP::Counter64.new(in_octets.modulo(2**64)) 100 | end 101 | 102 | # ifInUcastPkts 103 | agent.add_plugin('1.3.6.1.2.1.2.2.1.11.1') do 104 | in_pkts += (traffic_packets / 2 * (Math.sin( 105 | uptime.to_f / traffic_period * 2 * 3.1415) + 1) * interval) 106 | SNMP::Counter32.new(in_pkts.modulo(2**32)) 107 | end 108 | 109 | # ifHCInUcastPkts 110 | agent.add_plugin('1.3.6.1.2.1.31.1.1.1.7.1') do 111 | SNMP::Counter64.new(in_pkts.modulo(2**64)) 112 | end 113 | 114 | # ifOutOctets 115 | agent.add_plugin('1.3.6.1.2.1.2.2.1.16.1') do 116 | out_octets += ((traffic_packets*traffic_packet_size) / 2 * (Math.sin( 117 | uptime.to_f / traffic_period * 2 * 3.1415 + 3.1415) + 1) * interval) 118 | SNMP::Counter32.new(out_octets.modulo(2**32)) 119 | end 120 | 121 | # ifHCOutOctets 122 | agent.add_plugin('1.3.6.1.2.1.31.1.1.1.10.1') do 123 | SNMP::Counter64.new(out_octets.modulo(2**64)) 124 | end 125 | 126 | # ifOutUcastPkts 127 | agent.add_plugin('1.3.6.1.2.1.2.2.1.17.1') do 128 | out_pkts += (traffic_packets / 2 * (Math.sin( 129 | uptime.to_f / traffic_period * 2 * 3.1415 + 3.1415) + 1) * interval) 130 | SNMP::Counter32.new(out_pkts.modulo(2**32)) 131 | end 132 | 133 | # ifHCOutUcastPkts 134 | agent.add_plugin('1.3.6.1.2.1.31.1.1.1.11.1') do 135 | SNMP::Counter64.new(out_pkts.modulo(2**64)) 136 | end 137 | 138 | in_discards = 0 139 | in_errors = 0 140 | out_discards = 0 141 | out_errors = 0 142 | 143 | # ifInDiscards 144 | agent.add_plugin('1.3.6.1.2.1.2.2.1.13.1') do 145 | in_discards += rng.rand(0..10) 146 | SNMP::Counter32.new(in_discards) 147 | end 148 | 149 | # ifInErrors 150 | agent.add_plugin('1.3.6.1.2.1.2.2.1.14.1') do 151 | in_errors += rng.rand(0..10) 152 | SNMP::Counter32.new(in_errors) 153 | end 154 | 155 | # ifOutDiscards 156 | agent.add_plugin('1.3.6.1.2.1.2.2.1.19.1') do 157 | out_discards += rng.rand(0..10) 158 | SNMP::Counter32.new(out_discards) 159 | end 160 | 161 | # ifOutErrors 162 | agent.add_plugin('1.3.6.1.2.1.2.2.1.20.1') do 163 | out_errors += rng.rand(0..10) 164 | 
  SNMP::Counter32.new(out_errors)
165 | end
166 |
167 | agent.start()
168 |
--------------------------------------------------------------------------------
/snmpexporter/prometheus.py:
--------------------------------------------------------------------------------
1 | import collections
2 | import datetime
3 | import logging
4 |
5 |
6 | Metric = collections.namedtuple(
7 |     'Metric', ('name', 'type', 'labels', 'value'))
8 |
9 | class Exporter(object):
10 |
11 |   NUMERIC_TYPES = set([
12 |       'COUNTER', 'COUNTER64', 'INTEGER', 'INTEGER32', 'TICKS',
13 |       'GAUGE', 'ANNOTATED', 'UNSIGNED32'])
14 |
15 |   def __init__(self, config):
16 |     self.config = config
17 |     # Sanity check converters
18 |     self.convert = config.get('convert', {})
19 |     if set(self.convert.values()) - set(CONVERTERS.keys()):
20 |       raise Exception('At least one export converter was not found')
21 |
22 |   def export(self, target, results):
23 |     grouped_metrics = collections.defaultdict(dict)
24 |     cmetrics = 0
25 |     for result in results.values():
26 |       grouped_metrics[(result.mib, result.obj)][result.index] = (
27 |           self._export(target, result))
28 |     for (mib, obj), metrics in grouped_metrics.items():
29 |       for x in self.format_metrics(mib, obj, metrics):
30 |         yield x
31 |         cmetrics += 1
32 |
33 |     # Export statistics
34 |     yield '# HELP snmp_export_latency Latency breakdown for SNMP poll'
35 |     yield '# TYPE snmp_export_latency gauge'
36 |     for (step, latency) in target.timeline():
37 |       yield 'snmp_export_latency{step="%s"} %s' % (step, latency)
38 |     yield '# HELP snmp_export_errors Errors for SNMP poll'
39 |     yield '# TYPE snmp_export_errors gauge'
40 |     yield 'snmp_export_errors %s' % target.errors
41 |     yield '# HELP snmp_export_timeouts Timeouts for SNMP poll'
42 |     yield '# TYPE snmp_export_timeouts gauge'
43 |     yield 'snmp_export_timeouts %s' % target.timeouts
44 |     yield '# HELP snmp_exported_metrics_count Number of exported SNMP metrics'
45 |     yield '# TYPE snmp_exported_metrics_count gauge'
46 |     yield 'snmp_exported_metrics_count %s' % cmetrics
47 |
48 |   def _export(self, target, result):
49 |     if result.data.type == 'COUNTER64' or result.data.type == 'COUNTER':
50 |       metric_type = 'counter'
51 |     elif result.data.type in self.NUMERIC_TYPES:
52 |       metric_type = 'gauge'
53 |     else:
54 |       metric_type = 'blob'
55 |
56 |     labels = dict(result.labels)
57 |     labels['index'] = result.index
58 |
59 |     return Metric(result.obj, metric_type, labels, result.data.value)
60 |
61 |   def is_only_numeric(self, labels_map):
62 |     for metric in labels_map.values():
63 |       try:
64 |         float(metric.value)
65 |       except ValueError:
66 |         return False
67 |     return True
68 |
69 |   def format_metrics(self, mib, obj, metrics):
70 |     if not metrics:
71 |       return
72 |     out = []
73 |     converter = None
74 |     if obj in self.convert:
75 |       converter = CONVERTERS[self.convert[obj]]
76 |
77 |     metrics_type = metrics[list(metrics.keys())[0]].type
78 |     if metrics_type == 'blob':
79 |       if converter is not None:
80 |         metrics_type = 'gauge'
81 |       # Some vendors (e.g. Fortigate) choose to have decimal values as
82 |       # OCTETSTR instead of a scaled value. Try to convert all values; if
83 |       # we succeed, export this metric as a gauge.
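      # Hypothetical example: a sensor table whose every index reads like
      # "42.5" passes is_only_numeric(), so the branch below re-types the
      # whole object as a gauge and converts each value with float().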
84 | elif self.is_only_numeric(metrics): 85 | metrics_type = 'gauge' 86 | converter = lambda x: float(x) 87 | if metrics_type != 'counter' and metrics_type != 'gauge': 88 | return [] 89 | out.append('# HELP {0} {1}::{0}'.format(obj, mib)) 90 | out.append('# TYPE {0} {1}'.format(obj, metrics_type)) 91 | for i in sorted(metrics.keys()): 92 | metric = metrics[i] 93 | if metric.type != metrics_type and converter is None: 94 | # This happens if we have a collision somewhere ('local' is common) 95 | # Just ignore this for now. 96 | continue 97 | 98 | label_list = ['{0}="{1}"'.format(k, str(v).replace('"', '\\"')) 99 | for k, v in metric.labels.items()] 100 | label_string = ','.join(label_list) 101 | instance = ''.join([obj, '{', label_string, '}']) 102 | value = converter(metric.value) if converter is not None else metric.value 103 | out.append('{0} {1}'.format(instance, value)) 104 | return out 105 | 106 | 107 | def bytes_to_datetime(b): 108 | if len(b) != 11: 109 | return float('nan') 110 | year = int(b[0])*256+int(b[1]) 111 | month = int(b[2]) 112 | day = int(b[3]) 113 | hour = int(b[4]) 114 | minutes = int(b[5]) 115 | seconds = int(b[6]) 116 | 117 | if chr(b[8]) == '+': 118 | utc_hour=hour+int(b[9]) 119 | utc_minutes=minutes+int(b[10]) 120 | else: 121 | utc_hour=hour-int(b[9]) 122 | utc_minutes=minutes-int(b[10]) 123 | 124 | try: 125 | ct = datetime.datetime(year,month,day,utc_hour,utc_minutes,seconds, 126 | tzinfo=datetime.timezone.utc).timestamp() 127 | return ct 128 | except: 129 | return float('nan') 130 | 131 | 132 | CONVERTERS = { 133 | 'DateTime': bytes_to_datetime, 134 | } 135 | -------------------------------------------------------------------------------- /snmpexporter/poller.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | import collections 3 | import logging 4 | import re 5 | 6 | from snmpexporter import snmp 7 | 8 | 9 | class Poller(object): 10 | 11 | def __init__(self, collections, overrides, snmpimpl): 12 | super(Poller, self).__init__() 13 | self.model_oid_cache = {} 14 | self.model_oid_cache_incarnation = 0 15 | self.snmpimpl = snmpimpl 16 | self.collections = collections 17 | self.overrides = overrides 18 | 19 | def assemble_walk_parameters(self, target, model): 20 | oids = set() 21 | vlan_aware_oids = set() 22 | options = dict() 23 | for collection_name, collection in self.collections.items(): 24 | for regexp in collection['models']: 25 | layers = collection.get('layers', None) 26 | if layers and target.layer not in layers: 27 | continue 28 | if 'oids' in collection and re.match(regexp, model): 29 | logging.debug( 30 | 'Model %s matches collection %s', model, collection_name) 31 | options.update(collection.get('options', {})) 32 | # VLAN aware collections are run against every VLAN. 33 | # We don't want to run all the other OIDs (there can be a *lot* of 34 | # VLANs). 
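          # Shape of a collection entry consumed here (keys taken from this
          # method; names and values are hypothetical):
          #
          #   interfaces:
          #     models: ['.*']              # regexps matched against model
          #     layers: ['access', 'dist']  # optional layer filter
          #     oids: ['.1.3.6.1.2.1.2']    # subtrees to walk
          #     options: {max-size: 64}     # e.g. bulk size cap, see _walk()
          #     vlan_aware: false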
35 |           vlan_aware = collection.get('vlan_aware', False)
36 |           if vlan_aware:
37 |             vlan_aware_oids.update(set(collection['oids']))
38 |           else:
39 |             oids.update(set(collection['oids']))
40 |     return (list(oids), list(vlan_aware_oids), options)
41 |
42 |   def process_overrides(self, results):
43 |     if not self.overrides:
44 |       return results
45 |     overridden_oids = set(self.overrides.keys())
46 |
47 |     overridden_results = results
48 |     for (oid, vlan), result in results.items():
49 |       root = '.'.join(oid.split('.')[:-1])
50 |       if root in overridden_oids:
51 |         overridden_results[(oid, vlan)] = snmp.ResultTuple(
52 |             result.value, self.overrides[root])
53 |     return overridden_results
54 |
55 |   def poll(self, target):
56 |     results, errors, timeouts = self._walk(target)
57 |     results = results if results else {}
58 |     logging.debug('Done SNMP poll (%d objects) for "%s"',
59 |                   len(list(results.keys())), target.host)
60 |     return results, timeouts, errors
61 |
62 |   def _walk(self, target):
63 |     try:
64 |       model = self.snmpimpl.model(target)
65 |     except snmp.TimeoutError as e:
66 |       logging.exception('Could not determine model of %s:', target.host)
67 |       raise
68 |     except snmp.Error as e:
69 |       logging.exception('Could not determine model of %s:', target.host)
70 |       raise
71 |     if not model:
72 |       logging.error('Could not determine model of %s', target.host)
73 |       raise snmp.NoModelOid('Could not determine model of %s' % target.host)
74 |
75 |     logging.debug('Object %s is model %s', target.host, model)
76 |     global_oids, vlan_oids, options = self.assemble_walk_parameters(
77 |         target, model)
78 |
79 |     # Apply walk options
80 |     target.max_size = min(
81 |         options.get('max-size', target.max_size), target.max_size)
82 |     logging.debug('Using max_size %d for %s', target.max_size, target.host)
83 |
84 |     timeouts = 0
85 |     errors = 0
86 |
87 |     # 'None' is the global context (not VLAN aware)
88 |     vlans = set([None])
89 |     try:
90 |       if vlan_oids:
91 |         vlans.update(self.snmpimpl.vlans(target))
92 |     except snmp.Error as e:
93 |       errors += 1
94 |       logging.warning('Could not list VLANs: %s', str(e))
95 |
96 |     to_poll = []
97 |     for vlan in list(vlans):
98 |       oids = vlan_oids if vlan else global_oids
99 |       to_poll.append((target, vlan, oids))
100 |
101 |     results = {}
102 |     for part_results, part_errors, part_timeouts in map(self._poll, to_poll):
103 |       results.update(self.process_overrides(part_results))
104 |       errors += part_errors
105 |       timeouts += part_timeouts
106 |     return results, errors, timeouts
107 |
108 |   def _poll(self, data):
109 |     # TODO(bluecmd): Might want to have some sort of concurrency here
110 |     # as experience tells me this can be slow to do for all VLANs.
111 |     target, vlan, oids = data
112 |     errors = 0
113 |     timeouts = 0
114 |     results = {}
115 |     for oid in oids:
116 |       logging.debug('Collecting %s on %s @ %s', oid, target.host, vlan)
117 |       if not oid.startswith('.1'):
118 |         logging.warning(
119 |             'OID %s does not start with .1, please verify configuration', oid)
120 |         continue
121 |       try:
122 |         results.update(
123 |             {(k, vlan): v for k, v in self.snmpimpl.walk(
124 |                 target, oid, vlan).items()})
125 |       except snmp.TimeoutError as e:
126 |         timeouts += 1
127 |         if vlan:
128 |           logging.debug(
129 |               'Timeout, is switch configured for VLAN SNMP context? %s', e)
130 |         else:
131 |           logging.debug('Timeout, slow switch?
%s', e) 132 | except snmp.Error as e: 133 | errors += 1 134 | logging.warning('SNMP error for OID %s@%s: %s', oid, vlan, str(e)) 135 | return results, errors, timeouts 136 | -------------------------------------------------------------------------------- /snmpexporter/snmpimpl.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | from snmpexporter import snmp 6 | 7 | 8 | class Error(Exception): 9 | """Base error class for this module.""" 10 | pass 11 | 12 | 13 | class SnmpImpl(object): 14 | 15 | def model(self): 16 | pass 17 | 18 | def vlans(self): 19 | pass 20 | 21 | def get(self, oid): 22 | pass 23 | 24 | def walk(self, oid, vlan=None): 25 | pass 26 | 27 | 28 | class NetsnmpImpl(SnmpImpl): 29 | 30 | def __init__(self): 31 | import netsnmp 32 | self.netsnmp = netsnmp 33 | self.first_load = True 34 | 35 | def _snmp_session(self, target, vlan=None, timeout=1000000, retries=3): 36 | try: 37 | if self.first_load: 38 | # Loading MIBs can be very noisy, so we close stderr 39 | # Ideally we would just call netsnmp_register_loghandler but that isn't 40 | # exported :-( 41 | stderr = os.dup(sys.stderr.fileno()) 42 | null = os.open(os.devnull, os.O_RDWR) 43 | os.close(sys.stderr.fileno()) 44 | os.dup2(null, sys.stderr.fileno()) 45 | os.close(null) 46 | 47 | if target.version == 3: 48 | context = ('vlan-%s' % vlan) if vlan else '' 49 | session = self.netsnmp.Session(Version=3, DestHost=target.full_host, 50 | SecName=target.user, SecLevel=target.sec_level, Context=context, 51 | AuthProto=target.auth_proto, AuthPass=target.auth, 52 | PrivProto=target.priv_proto, PrivPass=target.priv, 53 | UseNumeric=1, Timeout=timeout, Retries=retries) 54 | else: 55 | community = ( 56 | '%s@%s' % (target.community, vlan)) if vlan else target.community 57 | session = self.netsnmp.Session( 58 | Version=target.version, DestHost=target.full_host, 59 | Community=community, UseNumeric=1, Timeout=timeout, 60 | Retries=retries) 61 | except self.netsnmp.Error as e: 62 | raise snmp.SnmpError('SNMP error while connecting to host %s: %s' % ( 63 | target.host, e.args[0])) 64 | finally: 65 | if self.first_load: 66 | # Restore stderr 67 | os.dup2(stderr, sys.stderr.fileno()) 68 | os.close(stderr) 69 | self.first_load = False 70 | return session 71 | 72 | def walk(self, target, oid, vlan=None): 73 | sess = self._snmp_session(target, vlan) 74 | ret = {} 75 | nextoid = oid 76 | offset = 0 77 | 78 | # Abort the walk when it exits the OID tree we are interested in 79 | while nextoid.startswith(oid): 80 | var_list = self.netsnmp.VarList(self.netsnmp.Varbind(nextoid, offset)) 81 | sess.getbulk(nonrepeaters=0, maxrepetitions=target.max_size, 82 | varlist=var_list) 83 | 84 | # WORKAROUND FOR NEXUS BUG (2014-11-24) 85 | # Indy told blueCmd that Nexus silently drops the SNMP response 86 | # if the packet is fragmented. Try with large size first, but drop down 87 | # to smaller one. 
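      # Concretely: from the default max_size of 256 (set in target.py),
      # the int(/16) below steps the bulk size 256 -> 16 -> 1, and only
      # when it is already 1 does the walk give up with a timeout.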
88 |       if sess.ErrorStr == 'Timeout':
89 |         if target.max_size == 1:
90 |           raise snmp.TimeoutError(
91 |               'Timeout getting %s from %s' % (nextoid, target.host))
92 |         target.max_size = int(target.max_size / 16)
93 |         logging.debug('Timeout getting %s from %s, lowering max size to %d' % (
94 |             nextoid, target.host, target.max_size))
95 |         continue
96 |       if sess.ErrorStr != '':
97 |         raise snmp.SnmpError('SNMP error while walking host %s: %s' % (
98 |             target.host, sess.ErrorStr))
99 |
100 |       for result in var_list:
101 |         currentoid = '%s.%s' % (result.tag, int(result.iid))
102 |         # We don't want to save extra oids that the bulk walk might have
103 |         # contained.
104 |         if not currentoid.startswith(oid):
105 |           break
106 |         try:
107 |           ret[currentoid] = snmp.ResultTuple(
108 |               result.val.decode(), result.type)
109 |         except UnicodeDecodeError:
110 |           ret[currentoid] = snmp.ResultTuple(result.val, result.type)
111 |       # Continue bulk walk
112 |       offset = int(var_list[-1].iid)
113 |       nextoid = var_list[-1].tag
114 |     return ret
115 |
116 |   def get(self, target, oid):
117 |     # Nexus is quite slow sometimes to answer SNMP so use a high
118 |     # timeout on these initial requests before failing out
119 |     sess = self._snmp_session(target, timeout=5000000, retries=2)
120 |     var = self.netsnmp.Varbind(oid)
121 |     var_list = self.netsnmp.VarList(var)
122 |     sess.get(var_list)
123 |     if sess.ErrorStr != '':
124 |       if sess.ErrorStr == 'Timeout':
125 |         raise snmp.TimeoutError('Timeout getting %s from %s' % (oid, target.host))
126 |       raise snmp.SnmpError('SNMP error while talking to host %s: %s' % (
127 |           target.host, sess.ErrorStr))
128 |
129 |     return {var.tag: snmp.ResultTuple(var.val.decode(), var.type)}
130 |
131 |   def model(self, target):
132 |     model_oids = [
133 |         '.1.3.6.1.2.1.47.1.1.1.1.13.1',  # Normal switches
134 |         '.1.3.6.1.2.1.47.1.1.1.1.13.1001',  # Stacked switches
135 |         '.1.3.6.1.2.1.47.1.1.1.1.13.10',  # Nexus
136 |         '.1.3.6.1.2.1.1.1.0',  # Other appliances (sysDescr)
137 |     ]
138 |     for oid in model_oids:
139 |       model = self.get(target, oid)
140 |       if not model:
141 |         continue
142 |       value = list(model.values()).pop().value
143 |       if value:
144 |         return value
145 |     raise snmp.NoModelOid('No model OID contained a model')
146 |
147 |   def vlans(self, target):
148 |     try:
149 |       oids = list(self.walk(target, '.1.3.6.1.4.1.9.9.46.1.3.1.1.2').keys())
150 |       vlans = {int(x.split('.')[-1]) for x in oids}
151 |       return vlans
152 |     except ValueError as e:
153 |       logging.info('ValueError while parsing VLAN for %s: %s', target.host, e)
154 |       return []
--------------------------------------------------------------------------------
/snmpexporter/annotator.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import binascii
3 | import collections
4 | import logging
5 |
6 | from snmpexporter import snmp
7 |
8 |
9 | AnnotatedResultEntry = collections.namedtuple('AnnotatedResultEntry',
10 |     ('data', 'mib', 'obj', 'index', 'labels'))
11 |
12 |
13 | class Annotator(object):
14 |   """Annotation step where results are given meaningful labels."""
15 |
16 |   LABEL_TYPES = set(['OCTETSTR', 'IPADDR'])
17 |   ALLOWED_CHARACTERS = (
18 |       '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
19 |       '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ ')
20 |
21 |   def __init__(self, config, mibresolver):
22 |     super(Annotator, self).__init__()
23 |     self.config = config
24 |     self.mibresolver = mibresolver
25 |     self.mibcache = dict()
26 |
27 |   def annotate(self, results):
28 |     annotations = 
self.config.get('annotations', []) 29 | 30 | # Calculate map to skip annotation if we're sure we're not going to annotate 31 | # TODO(bluecmd): This could be cached 32 | annotation_map = {} 33 | for annotation in annotations: 34 | for annotate in annotation['annotate']: 35 | # Support for processing the index (for OIDs that have X.Y where we're 36 | # interested in joining on X) 37 | if '[' in annotate: 38 | annotate, offset = annotate.split('[', 1) 39 | offset = int(offset.strip(']')) 40 | else: 41 | offset = None 42 | # Add '.' to not match .1.2.3 if we want to annotate 1.2.30 43 | annotation_map[(annotate + '.', offset)] = annotation['with'] 44 | 45 | labelification = set( 46 | [x + '.' for x in self.config.get('labelify', [])]) 47 | 48 | # Pre-fill the OID/Enum cache to allow annotations to get enum values 49 | cached_items = [] 50 | for (oid, ctxt), result in results.items(): 51 | resolve = self.mibresolver.resolve(oid) 52 | if resolve is None: 53 | logging.warning('Failed to look up OID %s, ignoring', oid) 54 | continue 55 | self.mibcache[oid] = resolve 56 | cached_items.append(((oid, ctxt), result)) 57 | 58 | # Calculate annotator map 59 | split_oid_map = collections.defaultdict(dict) 60 | for (oid, ctxt), result in cached_items: 61 | name, _ = self.mibcache[oid] 62 | if '.' not in name: 63 | continue 64 | _, index = name.split('.', 1) 65 | key = oid[:-(len(index))] 66 | split_oid_map[(key, ctxt)][index] = result.value 67 | 68 | annotated_results = {} 69 | for (oid, ctxt), result in cached_items: 70 | labels = {} 71 | vlan = None 72 | 73 | # TODO(bluecmd): If we support more contexts we need to be smarter here 74 | if not ctxt is None: 75 | vlan = ctxt 76 | 77 | name, enum = self.mibcache[oid] 78 | if not '::' in name: 79 | logging.warning('OID %s resolved to %s (no MIB), ignoring', oid, name) 80 | continue 81 | 82 | mib, part = name.split('::', 1) 83 | obj, index = part.split('.', 1) if '.' 
in part else (part, None) 84 | 85 | labels = {} 86 | if not vlan is None: 87 | labels['vlan'] = vlan 88 | labels.update( 89 | self.annotated_join( 90 | oid, index, ctxt, annotation_map, split_oid_map, results)) 91 | 92 | # Handle labelification 93 | if oid[:-(len(index) if index else 0)] in labelification: 94 | # Skip empty strings or non-strings that are up for labelification 95 | if result.value == '' or result.type not in self.LABEL_TYPES: 96 | continue 97 | 98 | bytes_value = result.value 99 | if isinstance(result.value, str): 100 | bytes_value = result.value.encode() 101 | labels['value'] = self.string_to_label_value(bytes_value) 102 | labels['hex'] = binascii.hexlify(bytes_value).decode() 103 | result = snmp.ResultTuple('NaN', 'ANNOTATED') 104 | 105 | # Do something almost like labelification for enums 106 | if enum: 107 | enum_value = enum.get(result.value, None) 108 | if enum_value is None: 109 | logging.warning('Got invalid enum value for %s (%s), not labling', 110 | oid, result.value) 111 | else: 112 | labels['enum'] = enum_value 113 | 114 | annotated_results[(oid, vlan)] = AnnotatedResultEntry( 115 | result, mib, obj, index, labels) 116 | 117 | logging.debug('Annotation completed for %d metrics', len(annotated_results)) 118 | return annotated_results 119 | 120 | def annotated_join(self, oid, index, ctxt, annotation_map, split_oid_map, 121 | results): 122 | for key, offset in annotation_map: 123 | if oid.startswith(key): 124 | break 125 | else: 126 | return {} 127 | 128 | if offset is not None: 129 | index_parts = index.split('.') 130 | index = '.'.join(index_parts[:-offset]) 131 | labels = {} 132 | for label, annotation_path in annotation_map[(key, offset)].items(): 133 | # Parse the annotation path 134 | annotation_keys = [x.strip() + '.' for x in annotation_path.split('>')] 135 | 136 | value = self.jump_to_value( 137 | annotation_keys, oid, ctxt, index, split_oid_map, results) 138 | if value is None: 139 | continue 140 | 141 | labels[label] = value 142 | return labels 143 | 144 | def jump_to_value(self, keys, oid, ctxt, index, split_oid_map, results): 145 | # Jump across the path seperated like: 146 | # OID.idx:value1 147 | # OID2.value1:value2 148 | # OID3.value3:final 149 | # label=final 150 | for key in keys: 151 | use_value = key[0] == '$' 152 | if use_value: 153 | key = key[1:] 154 | 155 | # Try to associate with context first 156 | part = split_oid_map.get((key, ctxt), None) 157 | if not part: 158 | # Fall back to the global context 159 | part = split_oid_map.get((key, None), None) 160 | # Do not allow going back into context when you have jumped into 161 | # the global one. 162 | # TODO(bluecmd): I have no reason *not* to support this more than 163 | # it feels like an odd behaviour and not something I would be 164 | # expecting the software to do, so let's not do that unless we find 165 | # a usecase in the future. 166 | ctxt = None 167 | if not part: 168 | return None 169 | 170 | # We either use the last index or the OID value, deterimed by 171 | # use_value above. 
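      # Hypothetical one-hop example (OID and values made up): with
      # keys=['.1.3.6.1.2.1.31.1.1.1.1.'] (ifName) and index='3', oid
      # becomes '.1.3.6.1.2.1.31.1.1.1.1.3', the split map supplies the
      # joined value ('Gi1/0/3'), and the loop falls through to return it
      # via results[(oid, ctxt)].value.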
172 | if use_value: 173 | index = results[(oid, ctxt)].value 174 | 175 | oid = ''.join((key, index)) 176 | index = part.get(index, None) 177 | if not index: 178 | return None 179 | 180 | value = results[(oid, ctxt)].value 181 | 182 | # Try enum resolution 183 | _, enum = self.mibcache[oid] 184 | if enum: 185 | enum_value = enum.get(value, None) 186 | if enum_value is None: 187 | logging.warning('Got invalid enum value for %s (%s), ignoring', 188 | oid, value) 189 | return None 190 | value = enum_value 191 | return value 192 | 193 | def string_to_label_value(self, value): 194 | value = [x for x in value if x in self.ALLOWED_CHARACTERS.encode()] 195 | return bytes(value).decode().strip() 196 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 
48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 
115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 
166 | 
-------------------------------------------------------------------------------- /snmpexporterd.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | from concurrent import futures
4 | import functools
5 | import logging
6 | import objgraph
7 | import sys
8 | import threading
9 | 
10 | import snmpexporter
11 | import snmpexporter.config
12 | import snmpexporter.prometheus
13 | 
14 | from twisted.internet import reactor, task, endpoints
15 | from twisted.python import log
16 | from twisted.web import server, resource
17 | 
18 | 
19 | # As we're using multiprocessing it's probably not needed for this to be
20 | # thread-local, but why not.
21 | tls = threading.local()
22 | tls.snmpimpl = None
23 | 
24 | 
25 | # Used to test health of the executors
26 | def do_nothing():
27 |   pass
28 | 
29 | 
30 | def poll(config, host, layer):
31 |   try:
32 |     if not tls.snmpimpl:
33 |       logging.debug('Initializing Net-SNMP implementation')
34 |       tls.snmpimpl = snmpexporter.snmpimpl.NetsnmpImpl()
35 | 
36 |     collections = config['collection']
37 |     overrides = config['override']
38 |     snmp_creds = config['snmp']
39 | 
40 |     logging.debug('Constructing SNMP target')
41 |     target = snmpexporter.target.SnmpTarget(host, layer, snmp_creds)
42 | 
43 |     target.start('poll')
44 | 
45 |     logging.debug('Creating SNMP poller')
46 |     poller = snmpexporter.poller.Poller(collections, overrides, tls.snmpimpl)
47 | 
48 |     logging.debug('Starting poll')
49 |     data, timeouts, errors = poller.poll(target)
50 |     target.add_timeouts(timeouts)
51 |     target.add_errors(errors)
52 | 
53 |     return target, data
54 |   except:
55 |     logging.exception('Poll exception')
56 |     raise
57 | 
58 | 
59 | def annotate(config, resolver, f):
60 |   try:
61 |     target, data = f
62 | 
63 |     annotator_config = config['annotator']
64 |     exporter_config = config['exporter']
65 | 
66 |     target.start('annotate')
67 | 
68 |     logging.debug('Creating result annotator')
69 |     annotator = snmpexporter.annotator.Annotator(annotator_config, resolver)
70 | 
71 |     logging.debug('Starting annotation')
72 |     result = annotator.annotate(data)
73 | 
74 |     target.done()
75 | 
76 |     exporter = snmpexporter.prometheus.Exporter(exporter_config)
77 |     return exporter.export(target, result)
78 |   except:
79 |     logging.exception('Annotate exception')
80 |     raise
81 | 
82 | 
83 | class PollerResource(resource.Resource):
84 |   isLeaf = True
85 | 
86 |   def __init__(self, config_file, poller_pool, annotator_pool):
87 |     super().__init__()
88 |     # Use process pollers as netsnmp is not behaving well using just threads
89 |     logging.debug('Starting poller pool ...')
90 |     self.poller_executor = futures.ProcessPoolExecutor(
91 |         max_workers=poller_pool)
92 |     # Start MIB resolver after processes above (or it will fork it as well)
93 |     logging.debug('Initializing MIB resolver ...')
94 |     import mibresolver
95 |     self.resolver = mibresolver
96 | 
97 |     logging.debug('Starting annotation pool ...')
98 |     # .. but annotators are just CPU, so use lightweight threads.
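    # (Editor's note, not in the original: keeping annotation in threads also
    # means every annotation job shares the mibresolver module imported above,
    # rather than each worker process re-loading the MIB index on its own.)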
99 |     self.annotator_executor = futures.ThreadPoolExecutor(
100 |         max_workers=annotator_pool)
101 |     self.config_file = config_file
102 | 
103 |   def _response_failed(self, err, f):
104 |     logging.debug('Request cancelled, cancelling future %s', f)
105 |     f.cancel()
106 | 
107 |   def _reactor_annotate_done(self, request, f):
108 |     reactor.callFromThread(self._annotate_done, request, f)
109 | 
110 |   def _annotate_done(self, request, f):
111 |     if f.exception():
112 |       logging.error('Annotator failed: %s', repr(f.exception()))
113 |       request.setResponseCode(500, message=(
114 |           'Annotator failed: %s' % repr(f.exception())).encode())
115 |       request.finish()
116 |       return
117 | 
118 |     for row in f.result():
119 |       request.write(row.encode())
120 |       request.write('\n'.encode())
121 |     request.finish()
122 | 
123 |   def _reactor_poll_done(self, config, request, f):
124 |     reactor.callFromThread(self._poll_done, config, request, f)
125 | 
126 |   def _poll_done(self, config, request, f):
127 |     if f.exception():
128 |       logging.error('Poller failed: %s', repr(f.exception()))
129 |       request.setResponseCode(500, message=(
130 |           'Poller failed: %s' % repr(f.exception())).encode())
131 |       request.finish()
132 |       return
133 | 
134 |     logging.debug('Poller done, starting annotation')
135 |     f = self.annotator_executor.submit(
136 |         annotate, config, self.resolver, f.result())
137 |     f.add_done_callback(functools.partial(self._reactor_annotate_done, request))
138 |     request.notifyFinish().addErrback(self._response_failed, f)
139 | 
140 |   def render_GET(self, request):
141 |     path = request.path.decode()
142 |     request.setHeader("Content-Type", "text/plain; charset=UTF-8")
143 |     if path == '/probe':
144 |       return self.probe(request)
145 |     elif path == '/healthy':
146 |       return self.healthy(request)
147 |     elif path == '/objects':
148 |       return self.objects(request)
149 |     else:
150 |       logging.info('Not found: %s', path)
151 |       request.setResponseCode(404)
152 |       return '404 Not Found'.encode()
153 | 
154 |   def objects(self, request):
155 |     types = objgraph.most_common_types(limit=1000)
156 |     request.write('# HELP objgraph_objects active objects in memory\n'.encode())
157 |     request.write('# TYPE objgraph_objects gauge\n'.encode())
158 |     for name, count in types:
159 |       request.write(
160 |           ('objgraph_objects{name="%s"} %s\n' % (name, count)).encode())
161 |     return bytes()
162 | 
163 |   def _annotator_executor_healthy(self, request, completed_f):
164 |     if completed_f.exception() or completed_f.cancelled():
165 |       request.setResponseCode(500, message=(
166 |           'Annotator health failed: %s' % repr(
167 |               completed_f.exception())).encode())
168 |       request.finish()
169 |       return
170 |     request.write('I am healthy'.encode())
171 |     request.finish()
172 | 
173 |   def _poller_executor_healthy(self, request, completed_f):
174 |     if completed_f.exception() or completed_f.cancelled():
175 |       request.setResponseCode(500, message=(
176 |           'Poller health failed: %s' % repr(completed_f.exception())).encode())
177 |       request.finish()
178 |       return
179 |     f = self.annotator_executor.submit(do_nothing)
180 |     f.add_done_callback(
181 |         lambda f: reactor.callFromThread(
182 |             self._annotator_executor_healthy, request, f))
183 | 
184 |   def healthy(self, request):
185 |     # Send the healthy request through the pipeline executors to see
186 |     # that everything works.
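    # (Editor's note, not in the original: the no-op goes to the poller
    # process pool first; its completion callback then submits a second no-op
    # to the annotator thread pool, so a successful response means both
    # executors are accepting and finishing work.)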
187 | f = self.poller_executor.submit(do_nothing) 188 | logging.debug('Starting healthy poll') 189 | f.add_done_callback( 190 | lambda f: reactor.callFromThread( 191 | self._poller_executor_healthy, request, f)) 192 | request.notifyFinish().addErrback(self._response_failed, f) 193 | return server.NOT_DONE_YET 194 | 195 | def probe(self, request): 196 | layer = request.args.get('layer'.encode(), [None])[0] 197 | target = request.args.get('target'.encode(), [None])[0] 198 | 199 | if not layer or not target: 200 | request.setResponseCode(400) 201 | return '400 Missing layer or target parameter'.encode() 202 | 203 | layer = layer.decode() 204 | target = target.decode() 205 | 206 | config = snmpexporter.config.load(self.config_file) 207 | 208 | f = self.poller_executor.submit(poll, config, target, layer) 209 | f.add_done_callback( 210 | functools.partial(self._reactor_poll_done, config, request)) 211 | 212 | logging.debug('Starting poll') 213 | request.notifyFinish().addErrback(self._response_failed, f) 214 | return server.NOT_DONE_YET 215 | 216 | 217 | if __name__ == '__main__': 218 | import argparse 219 | 220 | parser = argparse.ArgumentParser(description='One-shot SNMP exporter.') 221 | parser.add_argument('--config', dest='config_file', type=str, 222 | help='config file to load', default='/etc/snmpexporter.yaml') 223 | parser.add_argument('--log-level', dest='log_level', type=str, 224 | help='log level', default='INFO') 225 | parser.add_argument('--poller-pool', dest='poller_pool', type=int, 226 | help='number of simultaneous polls to do', default=10) 227 | parser.add_argument('--annotator-pool', dest='annotator_pool', type=int, 228 | help='number of threads to use to annotate', default=5) 229 | parser.add_argument('--port', dest='port', type=int, 230 | help='port to listen to', default=9190) 231 | args = parser.parse_args() 232 | 233 | # Logging setup 234 | observer = log.PythonLoggingObserver() 235 | observer.start() 236 | 237 | root = logging.getLogger() 238 | ch = logging.StreamHandler(sys.stderr) 239 | formatter = logging.Formatter( '%(asctime)s - %(name)s - ' 240 | '%(levelname)s - %(message)s' ) 241 | ch.setFormatter(formatter) 242 | root.addHandler(ch) 243 | root.setLevel(logging.getLevelName(args.log_level)) 244 | 245 | pr = PollerResource( 246 | args.config_file, args.poller_pool, args.annotator_pool) 247 | 248 | factory = server.Site(pr) 249 | 250 | logging.debug('Starting web server on port %d', args.port) 251 | endpoint = endpoints.TCP4ServerEndpoint(reactor, args.port) 252 | endpoint.listen(factory) 253 | reactor.run() 254 | -------------------------------------------------------------------------------- /snmpexporter/annotator_test.py: -------------------------------------------------------------------------------- 1 | import binascii 2 | import collections 3 | import unittest 4 | import yaml 5 | 6 | from snmpexporter import annotator 7 | from snmpexporter import snmp 8 | 9 | 10 | MIB_RESOLVER = { 11 | '.1.2.3': 'testInteger1', 12 | '.1.2.4': 'testInteger2', 13 | '.1.2.5': 'testInteger3', 14 | '.10.1': 'interfaceString', 15 | '.10.2': 'aliasString', 16 | '.10.3': 'enumString' 17 | } 18 | 19 | ENUMS = collections.defaultdict(dict) 20 | ENUMS['.10.3'] = {'10': 'enumValue'} 21 | 22 | 23 | def snmpResult(x, type=None): 24 | # We don't care about the type in the annotator 25 | if type is None: 26 | type = 'INTEGER' if isinstance(x, int) else 'OCTETSTR' 27 | if isinstance(x, bytes): 28 | return snmp.ResultTuple(x, type) 29 | else: 30 | return snmp.ResultTuple(str(x), type) 31 | 32 
|
33 | class MockMibResolver(object):
34 | 
35 |   def resolve_for_testing(self, oid):
36 |     for key in MIB_RESOLVER:
37 |       if oid.startswith(key + '.'):
38 |         break
39 |     else:
40 |       return None, None, None, None
41 | 
42 |     index = oid[len(key)+1:]
43 |     return ('DUMMY-MIB', MIB_RESOLVER[key], index, ENUMS[key])
44 | 
45 |   def resolve(self, oid):
46 |     mib, obj, index, enum = self.resolve_for_testing(oid)
47 |     if mib is None:
48 |       return None
49 |     return '%s::%s.%s' % (mib, obj, index), enum
50 | 
51 | 
52 | class TestAnnotator(unittest.TestCase):
53 | 
54 |   def setUp(self):
55 |     self.mibresolver = MockMibResolver()
56 | 
57 |   def runTest(self, expected_entries, result, cfg):
58 |     c = yaml.safe_load(cfg) or {}
59 |     logic = annotator.Annotator(
60 |         config=c.get('annotator', {}),
61 |         mibresolver=self.mibresolver)
62 |     expected_output = expected_entries
63 |     output = logic.annotate(result)
64 |     if output != expected_output:
65 |       print('Output is not as expected!')
66 |       print('Output:')
67 |       for oid, v in output.items():
68 |         print((oid, v))
69 |       print('Expected:')
70 |       for oid, v in expected_output.items():
71 |         print((oid, v))
72 |     self.assertEqual(output, expected_output)
73 | 
74 |   def createResultEntry(self, key, result, labels):
75 |     # mib/objs etc. are tested in testResult so we can assume they are correct
76 |     oid, ctxt = key
77 |     mib, obj, index, _ = self.mibresolver.resolve_for_testing(oid)
78 |     if ctxt is not None:
79 |       labels = dict(labels)
80 |       labels['vlan'] = ctxt
81 |     return {key: annotator.AnnotatedResultEntry(
82 |         result[key], mib, obj, index, labels)}
83 | 
84 |   def newExpectedFromResult(self, result):
85 |     # We will most likely just pass through a lot of the results, so create
86 |     # the basic annotated entries and just operate on the edge cases we are
87 |     # testing.
88 |     expected = {}
89 |     for (key, ctxt), value in result.items():
90 |       expected.update(self.createResultEntry((key, ctxt), result, {}))
91 |     return expected
92 | 
93 |   def testSmokeTest(self):
94 |     """Test empty config and empty SNMP result."""
95 |     result = {}
96 |     expected = {}
97 |     self.runTest(expected, result, '')
98 | 
99 |   def testResult(self):
100 |     """Test that results are propagated as we want."""
101 |     result = {
102 |         ('.1.2.4.1', '100'): snmpResult(1337)
103 |     }
104 |     # NOTE(bluecmd): Do *not* use createResultEntry here to make sure the
105 |     # assumptions we're making in that function hold.
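    # (Editor's note, not in the original: the entry below is spelled out by
    # hand -- the resolved MIB and object name, the remaining index '1', and
    # the SNMP context '100' promoted to a 'vlan' label.)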
106 | expected = { 107 | ('.1.2.4.1', '100'): annotator.AnnotatedResultEntry( 108 | data=snmpResult(1337), mib='DUMMY-MIB', obj='testInteger2', 109 | index='1', labels={'vlan': '100'}) 110 | } 111 | self.runTest(expected, result, '') 112 | 113 | def testSimpleAnnotation(self): 114 | """Test simple annotation and VLAN support.""" 115 | config = """ 116 | annotator: 117 | annotations: 118 | - annotate: 119 | - .1.2 120 | with: 121 | interface: .10.1 122 | alias: .10.2 123 | nonexistant: .10.3 124 | """ 125 | result = { 126 | ('.1.2.3.1', None): snmpResult(1337), 127 | ('.1.2.3.3', None): snmpResult(1338), 128 | ('.1.2.4.1', None): snmpResult(1339), 129 | ('.2.2.4.1', None): snmpResult(1339), 130 | ('.1.2.4.3.2', None): snmpResult(1340), 131 | ('.1.2.4.3.3', None): snmpResult(1341), 132 | ('.1.2.4.1', '100'): snmpResult(1339), 133 | ('.10.1.1', None): snmpResult('interface1'), 134 | ('.10.1.3.2', None): snmpResult('interface2'), 135 | ('.10.1.3.3', None): snmpResult('interface3'), 136 | ('.10.2.1', None): snmpResult('alias1'), 137 | ('.10.2.3.2', None): snmpResult('alias2'), 138 | } 139 | expected = self.newExpectedFromResult(result) 140 | # Remove the .2.2.4.1 entry as it is expected to be ignored due to 141 | # resolve failure 142 | del expected[('.2.2.4.1', None)] 143 | expected.update(self.createResultEntry(('.1.2.3.1', None), result, 144 | {'interface': 'interface1', 'alias': 'alias1'})) 145 | expected.update(self.createResultEntry(('.1.2.4.1', None), result, 146 | {'interface': 'interface1', 'alias': 'alias1'})) 147 | expected.update(self.createResultEntry(('.1.2.4.3.2', None), result, 148 | {'interface': 'interface2', 'alias': 'alias2'})) 149 | expected.update(self.createResultEntry(('.1.2.4.1', '100'), result, 150 | {'interface': 'interface1', 'alias': 'alias1'})) 151 | expected.update(self.createResultEntry(('.1.2.4.3.3', None), result, 152 | {'interface': 'interface3'})) 153 | self.runTest(expected, result, config) 154 | 155 | def testSimpleAnnotationDeepLevel(self): 156 | """Test simple annotation and VLAN support.""" 157 | config = """ 158 | annotator: 159 | annotations: 160 | - annotate: 161 | - .1.2[1] 162 | with: 163 | interface: .10.1 164 | alias: .10.2 165 | """ 166 | result = { 167 | ('.1.2.3.1.666', None): snmpResult(1337), 168 | ('.1.2.3.3.666', None): snmpResult(1338), 169 | ('.1.2.4.1.666', None): snmpResult(1339), 170 | ('.1.2.4.1.666', '100'): snmpResult(1339), 171 | ('.10.1.1', None): snmpResult('interface1'), 172 | ('.10.1.3.2', None): snmpResult('interface2'), 173 | ('.10.2.1', None): snmpResult('alias1'), 174 | ('.10.2.3.2', None): snmpResult('alias2'), 175 | } 176 | expected = self.newExpectedFromResult(result) 177 | expected.update(self.createResultEntry(('.1.2.3.1.666', None), result, 178 | {'interface': 'interface1', 'alias': 'alias1'})) 179 | expected.update(self.createResultEntry(('.1.2.4.1.666', None), result, 180 | {'interface': 'interface1', 'alias': 'alias1'})) 181 | expected.update(self.createResultEntry(('.1.2.4.1.666', '100'), result, 182 | {'interface': 'interface1', 'alias': 'alias1'})) 183 | self.runTest(expected, result, config) 184 | 185 | def testSimpleAnnotationMultiDeepLevel(self): 186 | """Test simple annotation and VLAN support.""" 187 | config = """ 188 | annotator: 189 | annotations: 190 | - annotate: 191 | - .1.2[1] 192 | with: 193 | interface: .10.1 194 | alias: .10.2 195 | """ 196 | result = { 197 | ('.1.2.3.1.1.666', None): snmpResult(1337), 198 | ('.1.2.3.1.3.666', None): snmpResult(1338), 199 | ('.1.2.4.1.1.666', None): 
snmpResult(1339), 200 | ('.1.2.4.1.1.666', '100'): snmpResult(1339), 201 | ('.10.1.1.1', None): snmpResult('interface1'), 202 | ('.10.1.3.1.2', None): snmpResult('interface2'), 203 | ('.10.2.1.1', None): snmpResult('alias1'), 204 | ('.10.2.3.1.2', None): snmpResult('alias2'), 205 | } 206 | expected = self.newExpectedFromResult(result) 207 | expected.update(self.createResultEntry(('.1.2.3.1.1.666', None), result, 208 | {'interface': 'interface1', 'alias': 'alias1'})) 209 | expected.update(self.createResultEntry(('.1.2.4.1.1.666', None), result, 210 | {'interface': 'interface1', 'alias': 'alias1'})) 211 | expected.update(self.createResultEntry(('.1.2.4.1.1.666', '100'), result, 212 | {'interface': 'interface1', 'alias': 'alias1'})) 213 | self.runTest(expected, result, config) 214 | 215 | def testMultiLevelAnnotation(self): 216 | """Test multi level annotation.""" 217 | config = """ 218 | annotator: 219 | annotations: 220 | - annotate: 221 | - .1.2.3 222 | with: 223 | interface: .1.2.4 > .1.2.5 > .10.1 224 | """ 225 | result = { 226 | ('.1.2.3.1', None): snmpResult(1337), 227 | ('.1.2.4.1', None): snmpResult(5), 228 | ('.1.2.5.5', None): snmpResult(3), 229 | ('.10.1.3', None): snmpResult('correct'), 230 | } 231 | expected = self.newExpectedFromResult(result) 232 | expected.update(self.createResultEntry(('.1.2.3.1', None), result, 233 | {'interface': 'correct'})) 234 | self.runTest(expected, result, config) 235 | 236 | def testMultiLevelAnnotationValue(self): 237 | """Test multi level annotation via value.""" 238 | config = """ 239 | annotator: 240 | annotations: 241 | - annotate: 242 | - .1.2.3 243 | with: 244 | interface: $.1.2.4 > .1.2.5 > .10.1 245 | """ 246 | result = { 247 | ('.1.2.3.1337', None): snmpResult(1), 248 | ('.1.2.4.1', None): snmpResult(5), 249 | ('.1.2.5.5', None): snmpResult(3), 250 | ('.10.1.3', None): snmpResult('correct'), 251 | } 252 | expected = self.newExpectedFromResult(result) 253 | expected.update(self.createResultEntry(('.1.2.3.1337', None), result, 254 | {'interface': 'correct'})) 255 | self.runTest(expected, result, config) 256 | 257 | def testMultiLevelAnnotationContext(self): 258 | """Test multi level annotation across contexts.""" 259 | config = """ 260 | annotator: 261 | annotations: 262 | - annotate: 263 | - .1.2.3 264 | with: 265 | interface: .1.2.4 > .1.2.5 > .10.1 266 | """ 267 | result = { 268 | ('.1.2.3.1', '100'): snmpResult(1337), 269 | ('.1.2.4.1', '100'): snmpResult(5), 270 | ('.1.2.5.5', None): snmpResult(3), 271 | ('.10.1.3', None): snmpResult('correct'), 272 | } 273 | expected = self.newExpectedFromResult(result) 274 | expected.update(self.createResultEntry(('.1.2.3.1', '100'), result, 275 | {'interface': 'correct'})) 276 | self.runTest(expected, result, config) 277 | 278 | def testMultiLevelAnnotationBroken(self): 279 | """Test multi level annotation where we do not have a match.""" 280 | config = """ 281 | annotator: 282 | annotations: 283 | - annotate: 284 | - .1.2.3 285 | with: 286 | interface: .1.2.4 > .1.2.5 > .10.1 287 | """ 288 | result = { 289 | ('.1.2.3.1', '100'): snmpResult(1337), 290 | ('.1.2.4.1', '100'): snmpResult(6), 291 | ('.1.2.5.5', None): snmpResult(3), 292 | ('.10.1.3', None): snmpResult('dummy'), 293 | } 294 | expected = self.newExpectedFromResult(result) 295 | self.runTest(expected, result, config) 296 | 297 | def testMultiLevelAnnotationNonExistant(self): 298 | """Test multi level annotation where we didn't scrape the OID.""" 299 | config = """ 300 | annotator: 301 | annotations: 302 | - annotate: 303 | - .1.2.3 304 | 
with: 305 | interface: .1.2.4 > .1.2.6 > .10.1 306 | """ 307 | result = { 308 | ('.1.2.3.1', '100'): snmpResult(1337), 309 | ('.1.2.4.1', '100'): snmpResult(5), 310 | ('.1.2.5.5', None): snmpResult(3), 311 | ('.10.1.3', None): snmpResult('dummy'), 312 | } 313 | expected = self.newExpectedFromResult(result) 314 | self.runTest(expected, result, config) 315 | 316 | def testLabelify(self): 317 | """Test conversion of strings to values.""" 318 | config = """ 319 | annotator: 320 | labelify: 321 | - .10.2 322 | """ 323 | result = { 324 | ('.10.2.1', None): snmpResult('correct'), 325 | ('.10.2.2', None): snmpResult('\xffabc\xff '), 326 | ('.10.2.3', None): snmpResult(''), 327 | ('.10.2.4', None): snmpResult(2), 328 | } 329 | identities = { 330 | ('.10.2.1', None): snmpResult('NaN', 'ANNOTATED'), 331 | ('.10.2.2', None): snmpResult('NaN', 'ANNOTATED'), 332 | } 333 | expected = self.newExpectedFromResult(result) 334 | expected.update(self.createResultEntry(('.10.2.1', None), identities, 335 | {'value': 'correct', 'hex': binascii.hexlify( 336 | 'correct'.encode()).decode()})) 337 | expected.update(self.createResultEntry(('.10.2.2', None), identities, 338 | {'value': 'abc', 'hex': binascii.hexlify( 339 | '\xffabc\xff '.encode()).decode()})) 340 | # Empty strings should not be included 341 | del expected[('.10.2.3', None)] 342 | # Only strings are labelified 343 | del expected[('.10.2.4', None)] 344 | self.runTest(expected, result, config) 345 | 346 | def testEnums(self): 347 | """Test conversion of enums to values.""" 348 | result = { 349 | ('.10.3.1', None): snmpResult(10), 350 | } 351 | expected = self.newExpectedFromResult(result) 352 | expected.update(self.createResultEntry(('.10.3.1', None), result, 353 | {'enum': 'enumValue'})) 354 | self.runTest(expected, result, '') 355 | 356 | def testEnumsInvalid(self): 357 | """Test conversion of enums to values (invalid value).""" 358 | result = { 359 | ('.10.3.1', None): snmpResult(9), 360 | } 361 | expected = self.newExpectedFromResult(result) 362 | self.runTest(expected, result, '') 363 | 364 | def testEnumsAnnotation(self): 365 | """Test conversion of enums to values in annotations.""" 366 | config = """ 367 | annotator: 368 | annotations: 369 | - annotate: 370 | - .1.2.3 371 | with: 372 | thing: .10.3 373 | """ 374 | 375 | result = { 376 | ('.1.2.3.1', None): snmpResult(10), 377 | ('.10.3.1', None): snmpResult(10), 378 | } 379 | expected = self.newExpectedFromResult(result) 380 | expected.update(self.createResultEntry(('.1.2.3.1', None), result, 381 | {'thing': 'enumValue'})) 382 | expected.update(self.createResultEntry(('.10.3.1', None), result, 383 | {'enum': 'enumValue'})) 384 | self.runTest(expected, result, config) 385 | 386 | def testBytes(self): 387 | """Testing handling of byte array.""" 388 | config = """ 389 | annotator: 390 | labelify: 391 | - .10.2 392 | """ 393 | time_data = b'\x07\xE2\x0B\x1D\x0E\x11\x0B\x00+\x00\x00' 394 | 395 | result = { 396 | ('.10.2.2', None): snmpResult(time_data), 397 | } 398 | identities = { 399 | ('.10.2.2', None): snmpResult('NaN', 'ANNOTATED'), 400 | } 401 | expected = self.newExpectedFromResult(result) 402 | expected.update(self.createResultEntry(('.10.2.2', None), identities, 403 | {'value': '+', 'hex': binascii.hexlify(time_data).decode()})) 404 | self.runTest(expected, result, config) 405 | 406 | 407 | def main(): 408 | unittest.main() 409 | 410 | 411 | if __name__ == '__main__': 412 | main() 413 | -------------------------------------------------------------------------------- /etc/snmpexporter.yaml: 
--------------------------------------------------------------------------------
1 | snmp:
2 |   !include auth.yaml
3 | 
4 | override:
5 |   # bsnDot11EssNumberOfMobileStations is reported as a Counter
6 |   .1.3.6.1.4.1.14179.2.1.1.1.38: INTEGER
7 | 
8 | annotator:
9 | 
10 |   # Labelification is used to turn strings into labels on metrics that
11 |   # otherwise do not have any numeric data. The value will be fixed to 1
12 |   # and the string value will be moved to labels called 'value' and 'hex'.
13 |   # Use this if you don't have any sensible OID to annotate with the value or
14 |   # there isn't a 1:1 match between the index and the value you wish to use.
15 |   #
16 |   # 'value' contains the human readable characters only and is stripped.
17 |   # 'hex' is the raw data but hex encoded.
18 |   # If the raw string value is empty the result is dropped.
19 |   labelify:
20 |     - .1.3.6.1.2.1.47.1.1.1.1.9 # entPhysicalFirmwareRev
21 |     - .1.3.6.1.2.1.47.1.1.1.1.11 # entPhysicalSerialNum
22 |     - .1.3.6.1.2.1.47.1.1.1.1.13 # entPhysicalModelName
23 |     - .1.3.6.1.4.1.9.9.380.1.1.8 # cdsRelayAgentInfoOptRemoteIdSub
24 |     - .1.3.6.1.4.1.232.22.2.4.1.1.1.4 # cpqRackServerBladeName
25 |     - .1.3.6.1.4.1.232.22.2.4.1.1.1.16 # cpqRackServerBladeSerialNum
26 |     - .1.3.6.1.4.1.232.22.2.4.1.1.1.17 # cpqRackServerBladeProductId
27 |     - .1.3.6.1.4.1.232.22.2.4.1.1.1.27 # cpqRackServerBladeSystemBIOSRevision
28 |     - .1.3.6.1.4.1.232.22.2.4.1.1.1.30 # cpqRackServerBladeManagementDeviceFirmwareRevision
29 |     - .1.3.6.1.2.1.75.1.1.5.1.2 # fcFxPortName
30 | 
31 |   annotations:
32 |     - annotate:
33 |         - .1.3.6.1.2.1.2.2.1 # ifTable
34 |         - .1.3.6.1.2.1.31.1.1 # ifXEntry
35 |         - .1.3.6.1.4.1.9.9.46.1.6.1.1 # vlanTrunkPortEntry
36 |         # TODO(bluecmd): Requires special index slicing:
37 |         # https://github.com/dhtech/dhmon/issues/23
38 |         - .1.3.6.1.4.1.9.9.87.1.4.1.1.32 # c2900PortDuplexStatus
39 |         # Note that cErrDisableIfStatusCause uses "ifIndex.0" as index, so we
40 |         # need to use [1] to annotate against ifTable
41 |         - .1.3.6.1.4.1.9.9.548.1.3.1.1.2[1] # cErrDisableIfStatusCause
42 |       with:
43 |         interface: .1.3.6.1.2.1.2.2.1.2 # ifDescr
44 |         alias: .1.3.6.1.2.1.31.1.1.1.18 # ifAlias
45 | 
46 |     - annotate:
47 |         - .1.3.6.1.2.1.17.4.3.1.2 # dot1dTpFdbStatus (mac -> port)
48 |         - .1.3.6.1.2.1.17.2.15.1.3 # dot1dStpPortState
49 |       with:
50 |         # These OIDs use the port so we need to map the port to ifIndex first
51 |         # through .1.3.6.1.2.1.17.1.4.1.2
52 |         interface: .1.3.6.1.2.1.17.1.4.1.2 > .1.3.6.1.2.1.2.2.1.2 # ifDescr
53 |         alias: .1.3.6.1.2.1.17.1.4.1.2 > .1.3.6.1.2.1.31.1.1.1.18 # ifAlias
54 | 
55 |     - annotate:
56 |         - .1.3.6.1.4.1.9.9.91.1.1.1.1.4 # entSensorValue
57 |       with:
58 |         sensor: .1.3.6.1.2.1.47.1.1.1.1.2 # entPhysicalDesc
59 |         type: .1.3.6.1.4.1.9.9.91.1.1.1.1.1 # entSensorType
60 |         # TODO(bluecmd): This might be better to map to a proper
61 |         # scale so that we can do entSensorValue / entSensorScale
62 |         # TODO(bluecmd): We want to do OID value resolution for
63 |         # these kinds of OIDs. Right now we save e.g. "7", while we
64 |         # should save "millis".
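        # (Editor's note, not in the original config: until that TODO is done
        # the raw enum number is what ends up on the metric, so consumers have
        # to map it back to a unit prefix themselves.)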
65 | scale: .1.3.6.1.4.1.9.9.91.1.1.1.1.2 # entSensorScale 66 | 67 | - annotate: 68 | - .1.3.6.1.2.1.47.1.1.1.1.11 # entPhysicalSerialNum 69 | with: 70 | inside: .1.3.6.1.2.1.47.1.1.1.1.4 # entPhysicalContainedIn 71 | name: .1.3.6.1.2.1.47.1.1.1.1.2 # entPhysicalDesc 72 | 73 | - annotate: 74 | - .1.3.6.1.4.1.14179.2.1.1.1.38 # bsnDot11EssNumberOfMobileStation 75 | with: 76 | essid: .1.3.6.1.4.1.14179.2.1.1.1.2 # bsnDot11EssSsid 77 | 78 | - annotate: 79 | - .1.3.6.1.4.1.9.9.548.1.3.1.1.2 # cErrDisableIfStatusCause 80 | with: 81 | vlan: .1.3.6.1.4.1.9.9.548.1.3.1.1.1 # cErrDisableIfStatusVlanIndex 82 | 83 | - annotate: 84 | - .1.3.6.1.4.1.9.9.513.1.1.1 # cLApTable 85 | - .1.3.6.1.4.1.9.9.513.1.2.2 # cLApEthernetIfTable 86 | with: 87 | ap: .1.3.6.1.4.1.9.9.513.1.1.1.1.5 # cLApName 88 | 89 | - annotate: 90 | - .1.3.6.1.4.1.14179.2.2.1 # bsnAPTable 91 | - .1.3.6.1.4.1.14179.2.2.2[1] # bsnAPIfTable 92 | with: 93 | ap: .1.3.6.1.4.1.14179.2.2.1.1.3 # bsnAPName 94 | 95 | - annotate: 96 | - .1.3.6.1.4.1.232.22.2.4.1.1 # cpqRackServerBladeTable 97 | with: 98 | name: .1.3.6.1.4.1.232.22.2.4.1.1.1.4 # cpqRackServerBladeName 99 | serial: .1.3.6.1.4.1.232.22.2.4.1.1.1.16 # cpqRackServerBladeSerialNum 100 | 101 | - annotate: 102 | - .1.3.6.1.4.1.12356.101.13.2 # fgHaTables 103 | with: 104 | serial: .1.3.6.1.4.1.12356.101.13.2.1.1.2 # fgHaStatsSerial 105 | name: .1.3.6.1.4.1.12356.101.13.2.1.1.11 # fgHaStatsHostname 106 | master: .1.3.6.1.4.1.12356.101.13.2.1.1.16 # fgHaStatsMasterSerial 107 | 108 | - annotate: 109 | - .1.3.6.1.4.1.12356.101.4.3.2.1.3 # fgHwSensorEntValue 110 | - .1.3.6.1.4.1.12356.101.4.3.2.1.4 # fgHwSensorEntAlarmStatus 111 | with: 112 | name: .1.3.6.1.4.1.12356.101.4.3.2.1.2 # fgHwSensorEntName 113 | 114 | # Juniper MX10003 (probably most other MXes too) modules 115 | - annotate: 116 | - 1.3.6.1.4.1.2636.3.1.13.1 # jnxOperatingEntry 117 | with: 118 | module: 1.3.6.1.4.1.2636.3.1.13.1.5 # jnxOperatingDescr 119 | 120 | exporter: 121 | convert: 122 | csyClockDateAndTime: DateTime 123 | 124 | collection: 125 | Default OIDs: 126 | models: 127 | - .* 128 | oids: 129 | - .1.3.6.1.2.1.1.3 # sysUptime 130 | - .1.3.6.1.2.1.47.1.1.1.1.13 # entPhysicalModelName 131 | 132 | Aruba: 133 | models: 134 | - ^Aruba 135 | oids: 136 | - .1.3.6.1.2.1.2.2.1.7 # ifAdminStatus 137 | - .1.3.6.1.2.1.2.2.1.8 # ifOperStatus 138 | - .1.3.6.1.2.1.2.2.1.14 # ifInErrors 139 | - .1.3.6.1.2.1.2.2.1.20 # ifOutErrors 140 | - .1.3.6.1.2.1.31.1.1.1.6 # ifHCInOctets 141 | - .1.3.6.1.2.1.31.1.1.1.10 # ifHCOutOctets 142 | - .1.3.6.1.2.1.31.1.1.1.15 # ifHighSpeed 143 | - .1.3.6.1.4.1.14823.2.2.1.5.2.1.7.1.9 # wlanAPBssidUpTime 144 | - .1.3.6.1.4.1.14823.2.2.1.5.2.1.4.1.19 # wlanAPStatus 145 | - .1.3.6.1.4.1.14823.2.2.1.5.2.1.4.1.13 # wlanAPModelName 146 | - .1.3.6.1.4.1.14823.2.2.1.5.3.1.1.1.2 # wlanAPNumClients 147 | - .1.3.6.1.4.1.14823.2.2.1.5.3.1.1.1.3 # wlanAPTxPkts 148 | - .1.3.6.1.4.1.14823.2.2.1.5.3.1.1.1.5 # wlanAPRxPkts 149 | - .1.3.6.1.4.1.14823.2.2.1.5.3.1.1.1.10 # wlanAPFrameRetryRate 150 | - .1.3.6.1.4.1.14823.2.2.1.5.3.1.1.1.15 # wlanAPFrameRetryErrorRate 151 | - .1.3.6.1.4.1.14823.2.2.1.5.3.1.1.1.16 # wlanAPFrameRetryErrorRate 152 | - .1.3.6.1.4.1.14823.2.2.1.5.3.1.1.1.17 # wlanAPFrameRetryErrorRate 153 | - .1.3.6.1.4.1.14823.2.2.1.5.3.1.1.1.27 # wlanAPFrameRetryErrorRate 154 | - .1.3.6.1.4.1.14823.2.2.1.5.3.2.3.1 # wlsxWlanStaDATypeStatsEntry 155 | 156 | Cisco Switch: 157 | models: 158 | - ^WS-C 159 | - ^AIR-CT.* 160 | - .*ASR9K.* 161 | oids: 162 | - .1.3.6.1.2.1.2.2.1.2 # ifDescr 163 | - .1.3.6.1.2.1.2.2.1.7 # 
ifAdminStatus
164 |       - .1.3.6.1.2.1.2.2.1.8 # ifOperStatus
165 |       - .1.3.6.1.2.1.2.2.1.14 # ifInErrors
166 |       - .1.3.6.1.2.1.2.2.1.20 # ifOutErrors
167 |       - .1.3.6.1.2.1.31.1.1.1.6 # ifHCInOctets
168 |       - .1.3.6.1.2.1.31.1.1.1.10 # ifHCOutOctets
169 |       - .1.3.6.1.2.1.31.1.1.1.15 # ifHighSpeed
170 |       - .1.3.6.1.2.1.31.1.1.1.18 # ifAlias
171 |       - .1.3.6.1.2.1.47.1.1.1.1.9 # entPhysicalFirmwareRev
172 |       - .1.3.6.1.4.1.9.2.1 # lcpu
173 |       - .1.3.6.1.4.1.9.9.46.1.3.1.1.2 # vtpVlanState
174 |       - .1.3.6.1.4.1.9.9.46.1.6.1.1.14 # vlanTrunkPortDynamicStatus
175 |       - .1.3.6.1.4.1.9.9.87.1.4.1.1.32 # c2900PortDuplexStatus
176 |       - .1.3.6.1.4.1.9.9.109.1.1.1.1 # cisco CPU
177 |       - .1.3.6.1.4.1.9.9.131.1.1 # csyClock
178 | 
179 |   Cisco Dist Switch:
180 |     models:
181 |       - ^WS-C
182 |       - .*ASR9K.*
183 |     layers:
184 |       - dist
185 |       - core
186 |     oids:
187 |       - .1.3.6.1.2.1.2.2 # ifTable
188 |       - .1.3.6.1.2.1.31.1.1 # ifXEntry
189 |       # Warning: these are expensive on ASR, do not add new ones without
190 |       # thinking it through and watching the collection latency
191 |       - .1.3.6.1.2.1.47.1.1.1.1.11 # entPhysicalSerialNum
192 |       - .1.3.6.1.2.1.47.1.1.1.1.13 # entPhysicalModelName
193 |       - .1.3.6.1.2.1.47.1.1.1.1.2 # entPhysicalDesc
194 |       - .1.3.6.1.2.1.47.1.1.1.1.4 # entPhysicalContainedIn
195 |       - .1.3.6.1.2.1.47.1.1.1.1.9 # entPhysicalFirmwareRev
196 |       - .1.3.6.1.2.1.105.1 # pethObjects
197 |       - .1.3.6.1.4.1.9.2.1 # lcpu
198 |       - .1.3.6.1.4.1.9.9.23 # ciscoCdpMIB
199 |       # Warning: these are expensive on ASR, do not add new ones without
200 |       # thinking it through and watching the collection latency
201 |       - .1.3.6.1.4.1.9.9.91.1.1.1.1.1 # entSensorType
202 |       - .1.3.6.1.4.1.9.9.91.1.1.1.1.2 # entSensorScale
203 |       - .1.3.6.1.4.1.9.9.91.1.1.1.1.4 # entSensorValue
204 |       - .1.3.6.1.4.1.9.9.380.1.1 # cdsGlobal
205 |       - .1.3.6.1.4.1.9.9.548.1.3.1 # cErrDisableIfStatusTable
206 | 
207 |   Cisco Nexus Switch:
208 |     # Nexus has a lot of weird things going with it w.r.t. SNMP
209 |     # and handles unknown OIDs quite poorly. Keep it separate
210 |     # to only poll known good OIDs.
211 | models: 212 | - ^N.K- 213 | layers: 214 | - dist 215 | - core 216 | oids: 217 | # Known slow oids: 218 | # - .1.3.6.1.2.1.105.1.0 (pethObjects) 219 | # - .1.3.6.1.4.1.9.2.1.0 (lcpu) 220 | - .1.3.6.1.2.1.2.2 # ifTable 221 | - .1.3.6.1.2.1.2.2.1.14 # ifInErrors 222 | - .1.3.6.1.2.1.2.2.1.2 # ifDescr 223 | - .1.3.6.1.2.1.2.2.1.20 # ifOutErrors 224 | - .1.3.6.1.2.1.2.2.1.7 # ifAdminStatus 225 | - .1.3.6.1.2.1.2.2.1.8 # ifOperStatus 226 | - .1.3.6.1.2.1.31.1.1 # ifXEntry 227 | - .1.3.6.1.2.1.31.1.1.1.10 # ifHCOutOctets 228 | - .1.3.6.1.2.1.31.1.1.1.15 # ifHighSpeed 229 | - .1.3.6.1.2.1.31.1.1.1.18 # ifAlias 230 | - .1.3.6.1.2.1.31.1.1.1.6 # ifHCInOctets 231 | - .1.3.6.1.2.1.47.1.1.1.1.11 # entPhysicalSerialNum 232 | - .1.3.6.1.2.1.47.1.1.1.1.13 # entPhysicalModelName 233 | - .1.3.6.1.2.1.47.1.1.1.1.2 # entPhysicalDesc 234 | - .1.3.6.1.2.1.47.1.1.1.1.9 # entPhysicalFirmwareRev 235 | - .1.3.6.1.4.1.9.9.109.1.1.1.1 # cisco CPU 236 | - .1.3.6.1.4.1.9.9.131.1.1 # csyClock 237 | - .1.3.6.1.4.1.9.9.380.1.1 # cdsGlobal 238 | - .1.3.6.1.4.1.9.9.91.1.1.1.1.1 # entSensorType 239 | - .1.3.6.1.4.1.9.9.91.1.1.1.1.2 # entSensorScale 240 | - .1.3.6.1.4.1.9.9.91.1.1.1.1.4 # entSensorValue 241 | - .1.3.6.1.4.1.9.9.548.1.3.1 # cErrDisableIfStatusTable 242 | 243 | Cisco WLC: 244 | models: 245 | - ^AIR-CT.* 246 | layers: 247 | - wifi 248 | oids: 249 | - .1.3.6.1.2.1.31.1.1 # ifXEntry 250 | - .1.3.6.1.2.1.47.1.1.1.1 # Inventory (Versions, Assets, Transceivers) 251 | - .1.3.6.1.4.1.14179.2.1.1.1.2 # bsnDot11EssSsid 252 | - .1.3.6.1.4.1.14179.2.1.1.1.38 # bsnDot11EssNumberOfMobileStations 253 | - .1.3.6.1.4.1.14179.2.2.1 # bsnAPTable 254 | - .1.3.6.1.4.1.14179.2.2.2 # bsnAPIfTable 255 | - .1.3.6.1.4.1.9.9.23 # ciscoCdpMIB 256 | - .1.3.6.1.4.1.9.9.513.1.1.1 # cLApTable 257 | - .1.3.6.1.4.1.9.9.513.1.2.2 # cLApEthernetIfTable 258 | - .1.3.6.1.4.1.9.9.523.1.5.1.0 # ciscoLwappClRoamMIBObjects 259 | - .1.3.6.1.4.1.9.9.618 # ciscoLwappSysMIB 260 | - .1.3.6.1.4.1.9.9.198888 # ciscoLwappHaMIB 261 | 262 | 263 | Cisco Switch - VLAN aware: 264 | # DANGER! This collection type (vlan_aware) takes a looong time on switches 265 | # with a lot of VLANs. Please be careful and monitor SNMP collection latency 266 | vlan_aware: yes 267 | layers: 268 | - access 269 | models: 270 | - ^WS-C 271 | oids: 272 | - .1.3.6.1.2.1.17.1.4.1.2 # dot1dBasePortIfIndex (port -> ifindex) 273 | - .1.3.6.1.2.1.17.2.15.1.3 # dot1dStpPortState 274 | 275 | FortiGate: 276 | layers: 277 | - firewall 278 | models: 279 | - ^FGT 280 | options: 281 | # Some kind of UDP fragmentation issue with the Fortigates maybe. 282 | # Using the default 256 causes SNMP to lock up on the device for a 283 | # while. 
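      # (Editor's guess, not in the original config: max-size appears to cap
      # how many varbinds are requested per SNMP packet, so 1 keeps every
      # reply small enough to avoid the fragmentation issue described above.)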
284 |       max-size: 1
285 |     oids:
286 |       - .1.3.6.1.4.1.12356.101.4 # fgSystem
287 |       - .1.3.6.1.4.1.12356.101.13 # fgHighAvailability
288 | 
289 |   HP Onboard Administrator:
290 |     layers:
291 |       - services
292 |     models:
293 |       - ^HP Onboard Administrator
294 |     oids:
295 |       - .1.3.6.1.4.1.232.22 # cpqRackInfo
296 | 
297 |   ASR9K:
298 |     models:
299 |       - .*ASR9K.*
300 |     layers:
301 |       - core
302 |     oids:
303 |       - .1.3.6.1.4.1.9.9.176 # ciscoRFMIB
304 | 
305 |   Juniper 710:
306 |     models:
307 |       - ^710-.*
308 |     oids:
309 |       - .1.3.6.1.2.1.2.2 # ifTable
310 |       - .1.3.6.1.2.1.31.1.1 # ifXEntry
311 |       # Warning: these are expensive, do not add new ones without
312 |       # thinking it through and watching the collection latency
313 |       - .1.3.6.1.2.1.47.1.1.1.1.11 # entPhysicalSerialNum
314 |       - .1.3.6.1.2.1.47.1.1.1.1.13 # entPhysicalModelName
315 |       - .1.3.6.1.2.1.47.1.1.1.1.2 # entPhysicalDesc
316 |       - .1.3.6.1.2.1.47.1.1.1.1.4 # entPhysicalContainedIn
317 |       - .1.3.6.1.2.1.47.1.1.1.1.9 # entPhysicalFirmwareRev
318 |       - .1.3.6.1.4.1.2636.3.1.13.1 # jnxOperatingEntry
319 | 
320 |   Juniper routers:
321 |     models:
322 |       - .*Juniper.*MX.*
323 |       # MX10003 reports its midplane model number in a field checked first, instead of the actual chassis model number
324 |       - ^750-.*
325 |     oids:
326 |       - .1.3.6.1.2.1.2.2 # ifTable
327 |       - .1.3.6.1.2.1.10.7.2.1.19 # dot3StatsDuplexStatus
328 |       - .1.3.6.1.2.1.31.1.1 # ifXEntry
329 |       - .1.3.6.1.2.1.47.1.1.1.1.11 # entPhysicalSerialNum
330 |       - .1.3.6.1.2.1.47.1.1.1.1.13 # entPhysicalModelName
331 |       - .1.3.6.1.2.1.47.1.1.1.1.2 # entPhysicalDesc
332 |       - .1.3.6.1.2.1.47.1.1.1.1.4 # entPhysicalContainedIn
333 |       - .1.3.6.1.2.1.47.1.1.1.1.9 # entPhysicalFirmwareRev
334 |       - .1.3.6.1.4.1.2636.3.1.13.1.8 # jnxOperatingCPU
335 |       - .1.3.6.1.4.1.2636.3.1.13.1.7 # jnxOperatingTemp
336 |       - .1.3.6.1.4.1.2636.3.60.1.1.1.1.5 # jnxDomCurrentRxLaserPower
337 |       - .1.3.6.1.4.1.2636.5.1.1.2.1.1.1.2 # jnxBgpM2PeerState
338 | 
339 |   Juniper switches:
340 |     models:
341 |       - .*Juniper.*ex.*
342 |       - .*Juniper.*qfx.*
343 |     oids:
344 |       - .1.3.6.1.2.1.2.2 # ifTable
345 |       - .1.3.6.1.2.1.10.7.2.1.19 # dot3StatsDuplexStatus
346 |       - .1.3.6.1.2.1.17.7.1.4.3.1.5 # dot1qVlanStaticRowStatus
347 |       - .1.3.6.1.2.1.31.1.1 # ifXEntry
348 |       - .1.3.6.1.2.1.47.1.1.1.1.11 # entPhysicalSerialNum
349 |       - .1.3.6.1.2.1.47.1.1.1.1.13 # entPhysicalModelName
350 |       - .1.3.6.1.2.1.47.1.1.1.1.2 # entPhysicalDesc
351 |       - .1.3.6.1.2.1.47.1.1.1.1.4 # entPhysicalContainedIn
352 |       - .1.3.6.1.2.1.47.1.1.1.1.9 # entPhysicalFirmwareRev
353 |       - .1.3.6.1.4.1.2636.3.60.1.1.1.1.5 # jnxDomCurrentRxLaserPower
354 |       - .1.3.6.1.4.1.2636.5.1.1.2.1.1.1.2 # jnxBgpM2PeerState
355 | 
356 |   Juniper firewall:
357 |     models:
358 |       - .*Juniper.*srx.*
359 |     oids:
360 |       - .1.3.6.1.2.1.2.2 # ifTable
361 |       - .1.3.6.1.2.1.10.7.2.1.19 # dot3StatsDuplexStatus
362 |       - .1.3.6.1.2.1.17.7.1.4.3.1.5 # dot1qVlanStaticRowStatus
363 |       - .1.3.6.1.2.1.31.1.1 # ifXEntry
364 |       - .1.3.6.1.2.1.47.1.1.1.1.11 # entPhysicalSerialNum
365 |       - .1.3.6.1.2.1.47.1.1.1.1.13 # entPhysicalModelName
366 |       - .1.3.6.1.2.1.47.1.1.1.1.2 # entPhysicalDesc
367 |       - .1.3.6.1.2.1.47.1.1.1.1.4 # entPhysicalContainedIn
368 |       - .1.3.6.1.2.1.47.1.1.1.1.9 # entPhysicalFirmwareRev
369 |       - .1.3.6.1.4.1.2636.3.1.13 # jnxOperatingTable
370 |       - .1.3.6.1.4.1.2636.3.39.1.12.1 # jnxJsSPUMonitoringMIB
371 | 
372 |   SAN Switch:
373 |     # Note: The "Description" field is used as model in FabricOS
374 |     models:
375 |       - .*BR300.* # Brocade 300 SAN Switch
376 |       - .*DS-C9148.* # Cisco MDS 9148
377 |     oids:
378 |       - .1.3.6.1.2.1.2.2 # ifTable
379 |       - 
.1.3.6.1.2.1.31.1.1 # ifXEntry
380 |       - .1.3.6.1.2.1.47.1.1.1 # entPhysicalTable
381 |       - .1.3.6.1.2.1.75.1 # fcFeMIBObjects
382 | 
-------------------------------------------------------------------------------- /tools/snmp-agent/agent.rb: --------------------------------------------------------------------------------
1 | #!/usr/bin/ruby
2 | #
3 | # Portions Copyright (c) 2004 David R. Halliday
4 | # All rights reserved.
5 | #
6 | # This SNMP library is free software. Redistribution is permitted under the
7 | # same terms and conditions as the standard Ruby distribution. See the
8 | # COPYING file in the Ruby distribution for details.
9 | #
10 | # Portions Copyright (c) 2006 Matthew Palmer
11 | # All rights reserved.
12 | #
13 | # This program is free software; you can redistribute it and/or
14 | # modify it under the terms of the GNU General Public License
15 | # as published by the Free Software Foundation (version 2 of the License)
16 | # This program is distributed in the hope that it will be useful,
17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 | # GNU General Public License for more details.
20 | # You should have received a copy of the GNU General Public License
21 | # along with this program; if not, write to the Free Software
22 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston MA 02110-1301 USA
23 | #
24 | 
25 | require 'snmp'
26 | require 'socket'
27 | require 'logger'
28 | 
29 | module SNMP # :nodoc:
30 | 
31 |   ##
32 |   # = SNMP Agent skeleton
33 |   #
34 |   # Objects of this class are capable of acting as SNMP agents -- that is,
35 |   # receiving SNMP PDUs and (possibly) returning data as a result of those
36 |   # requests.
37 |   #
38 |   # We call this class a skeleton, though, since as it stands this agent won't
39 |   # do much of anything -- it only has support for the most basic of system
40 |   # information (sysDescr, sysUptime, sysContact, sysName, and sysLocation).
41 |   # In order to get more interesting data out of it, you'll need to define
42 |   # code to examine the host machine and its environment and return data.
43 |   #
44 |   # What values get returned is determined by "plugins", small chunks of code
45 |   # that return values that the agent can then send back to the requestor.
46 |   #
47 |   # == A simple example agent
48 |   #
49 |   #   require 'snmp/agent'
50 |   #
51 |   #   agent = SNMP::Agent.new(:port => 16161, :logger => Logger.new(STDOUT))
52 |   #   agent.add_plugin('1.3.6.1.2.1.25.1.1.0') do
53 |   #     SNMP::TimeTicks.new((File.read('/proc/uptime').split(' ')[0].to_f * 100).to_i)
54 |   #   end
55 |   #   agent.start()
56 |   #
57 |   # This agent will respond to requests for the given OID (hrSystemUptime in
58 |   # this case, as it happens) and return the number of time ticks as read from
59 |   # the /proc/uptime file. In this plugin, we've defined the exact and
60 |   # complete OID that we want to return the value of, but that's by no means
61 |   # necessary -- one plugin can handle a larger number of OIDs in itself by
62 |   # simply defining the 'base' OID it wants to handle, and returning
63 |   # structured data when it's called. The pre-defined plugin for basic system
64 |   # parameters is a good (if basic) example of how you structure your data.
65 |   #
66 |   # == Writing plugins
67 |   #
68 |   # I've tried to make writing plugins as painless as possible, but
69 |   # unfortunately there's still a fair amount of hassle that's required in
70 |   # some circumstances. 
A basic understanding of how SNMP MIBs and OIDs work 71 | # will help immensely. 72 | # 73 | # The basic layout of all plugins is the same -- you map a base OID to a 74 | # chunk of code, and then any requests for OIDs in that subtree cause the 75 | # code to be executed to determine the value (or lack thereof). You use 76 | # SNMP::Agent#add_plugin to add a new plugin. This method takes a base OID 77 | # (as a string or an array of integers) and a block of code to be run when 78 | # the requested OID matches the given base OID. 79 | # 80 | # The result from the block of code should either be a single value (if you 81 | # want the base OID to return a value itself), a simple array or hash (if 82 | # the base OID maps to a list of entries), or a tree of arrays and/or hashes 83 | # that describes the data underneath the base OID. 84 | # 85 | # For example, if you want OID .1.2.3 to return the single value 42, you 86 | # would do something like this: 87 | # 88 | # agent = SNMP::Agent.new 89 | # agent.add_plugin('1.2.3') { 42 } 90 | # 91 | # Internally, when a Get request for the OID .1.2.3 is received, the agent 92 | # will find the plugin, run it, and return a PDU containing 'INTEGER: 42'. 93 | # Any request for an OID below .1.2.3 will be answered with NoSuchObject. 94 | # 95 | # If you want to return a list of dwarves, you could do this: 96 | # 97 | # agent.add_plugin('1.2.4') { %w{sleepy grumpy doc crazy hungry} } 98 | # 99 | # In this case, requesting the OID '1.2.4' will get you NoSuchObject, but 100 | # requesting '1.2.4.0' will get you the OCTET STRING 'sleepy', and 101 | # requesting '1.2.4.3' will return 'crazy'. You could also walk the whole 102 | # of the '1.2.4' subtree and you'll get each of the dwarves in turn. 103 | # 104 | # "Sparse" data can be handled in much the same way, but with a hash instead 105 | # of an array. So a list of square roots, indexed by the squared value, 106 | # might look like this: 107 | # 108 | # agent.add_plugin('1.2.5') { {1 => 1, 4 => 2, 9 => 3, 16 => 4, 25 => 5} } 109 | # 110 | # Now, if you get '1.2.5.9', you'll get the INTEGER 3, but if you get either 111 | # of '1.2.5.8' or '1.2.5.10' you'll get noSuchObject. 112 | # 113 | # More complicated tree structures are possible, too -- such as a 114 | # two-dimensional "multiplication table", like so: 115 | # 116 | # agent.add_plugin('1.2.6') { [[0, 0, 0, 0, 0, 0], 117 | # [0, 1, 2, 3, 4, 5], 118 | # [0, 2, 4, 6, 8, 10], 119 | # [0, 3, 6, 9, 12, 15], 120 | # [0, 4, 8, 12, 16, 20], 121 | # [0, 5, 10, 15, 20, 25] 122 | # ] 123 | # } 124 | # 125 | # Now you can get the product of any two numbers between 0 and 5 by simply 126 | # doing a get on your agent for '1.2.6.n.m' -- or you could use a 127 | # calculator. 128 | # 129 | # The real value of plugins isn't static data like the above examples, it's 130 | # dynamic creation of data -- reading things from files, parsing kernel 131 | # data, that sort of thing. The only limitation is that it has to fit into 132 | # the SNMP way of doing things (tables and lists and values, oh my!) and you 133 | # need to be able to write the code for it. 134 | # 135 | # === Restrictions for plugin OIDs 136 | # 137 | # You cannot have a plugin respond to a subtree of another plugin. That is, 138 | # if you have one plugin which has registered itself as handling '1.2.3', 139 | # you cannot have another plugin that says it handles '1.2' or '1.2.3.4' -- 140 | # in either case, the two plugins will conflict. 
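  # For example (an editor's illustration, not part of the original
  # documentation), the second registration below conflicts with the first,
  # because it falls inside the subtree the first plugin already claimed:
  #
  #   agent.add_plugin('1.2.3') { 42 }
  #   agent.add_plugin('1.2.3.4') { 'nope' }   # conflicts with '1.2.3'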
141 |   #
142 |   # This restriction isn't really an SNMP one, it's more of a sanity-saving
143 |   # measure. Imagine the confusion from having to troubleshoot wrong values
144 |   # in a heavily nested plugin tree...
145 |   #
146 |   # If you have a deep and abiding need to nest plugins, however, get in
147 |   # contact and we'll see about removing the limitation.
148 |   #
149 |   # === Plugins and data types
150 |   #
151 |   # There is a limited amount of type interpolation in the plugin handler.
152 |   # At present, integer values will be kept as integers, and most everything
153 |   # else will be converted to an OCTET STRING. If you have a particular need
154 |   # to return values of particular SNMP types, the agent will pass through any
155 |   # SNMP value objects that are created, so if you just *had* to return a
156 |   # Gauge32 for a particular OID, you could do:
157 |   #
158 |   #   agent.add_plugin('1.2.3') { SNMP::Gauge32.new(42) }
159 |   #
160 |   # === Caching plugin data
161 |   #
162 |   # Often, running a plugin to collect data is quite expensive -- if you're
163 |   # calling out to a web service or doing a lot of complex calculations, and
164 |   # generating a large resulting tree, you really don't want to be re-doing
165 |   # all that work for every SNMP request (and remember, during a walk, that
166 |   # tree is going to be completely recreated for every element walked in that
167 |   # tree).
168 |   #
169 |   # To prevent this problem, the SNMP agent provides a fairly simple caching
170 |   # mechanism within itself. If you return the data from your plugin as a
171 |   # hash, you can add an extra element to that hash, with a key of
172 |   # :cache, which should have a value of how many seconds you want
173 |   # the agent to retain your data for before re-running the plugin. So, a
174 |   # simple cached data tree might look like:
175 |   #
176 |   #   {:cache => 30, 0 => [0, 1, 2], 1 => ['a', 'b', 'c']}
177 |   #
178 |   # So the agent will cache the given data ({0 => [...], 1 => [...]}) for
179 |   # 30 seconds before running the plugin again to get a new set of data.
180 |   #
181 |   # How long should you cache data for? That's up to you. The tradeoffs are
182 |   # between the amount of time it takes to create the data tree, how quickly
183 |   # the data "goes stale", and how large the data tree is. The longer it
184 |   # takes to re-create the tree and the larger the tree is, the longer you
185 |   # should cache for. Large trees should be cached for longer because big
186 |   # trees take longer to walk, and it'd be annoying if, half-way through the
187 |   # walk, the plugin got called again. How long the data is relevant for is
188 |   # essentially the upper bound on cache time -- there's no point in keeping
189 |   # data around for longer than it's valid.
190 |   #
191 |   # What if, for some reason, you can't come up with a reasonable cache
192 |   # timeout value? You've got a huge tree, that takes ages to produce, but it
193 |   # needs to be refreshed really often. Try splitting your single monolithic
194 |   # plugin into smaller "chunks", each of which can be cached separately. The
195 |   # smaller trees will take less time to walk, and hopefully you won't need to
196 |   # do the full set of processing to obtain the subset of values, so it'll be
197 |   # quicker to process.
198 |   #
199 |   # The one limitation on caching is that you can't cache a single value,
200 |   # because you need to return a hash to provide the :cache key.
This is yet 201 | # to cause anyone any problems, however I am aware of the potential problem, 202 | # and if it causes anyone major grief, please get in touch and we'll work out 203 | # an alternate solution. 204 | # 205 | # === Communities in Plugins 206 | # 207 | # If you have a need to examine the community that was passed to the SNMP 208 | # request that caused your plugin to be run, you can provide an argument to 209 | # your block and have the community put in there. For instance: 210 | # 211 | # agent.add_plugin('1.2.3.4') { |c| c } 212 | # 213 | # Will return the community name passed to any request for the OID .1.2.3.4. 214 | # 215 | # Note that plugin value caching and community inspection do not play well 216 | # together at present -- if you return a value and ask for it to be cached, 217 | # it will be cached regardless of the community that is used in subsequent 218 | # requests. Thus, if you have a need to examine the community in your plugin, 219 | # don't ask the agent to cache the response. 220 | # 221 | # === "Declining" a request 222 | # 223 | # If you're writing a plugin that, in some instances, should completely fail 224 | # to respond, you can raise a DontReplyException. This will cause the agent 225 | # to not send a response PDU. Note the difference between raising 226 | # DontReplyException and returning nil -- the latter will cause a 227 | # NoSuchObject response, while the former will make the server look like a 228 | # black hole. 229 | # 230 | # There is a potential issue with raising DontReplyException if there are 231 | # multiple OIDs in the request PDU, in that no response will be sent if 232 | # *any* of the OIDs return a DontReplyException. Hence they should be used 233 | # with caution. 234 | # 235 | # === Bulk plugin loading 236 | # 237 | # If you've got a large collection of plugins that you want to include in 238 | # your system, you don't have to define them all by hand within your code -- 239 | # you can use the add_plugin_dir method to load all of the plugins 240 | # present in a directory. 241 | # 242 | # There are two sorts of plugin files recognised by the loader: 243 | # 244 | # - Any files whose names look like OIDs. In this case, the filename is 245 | # used as the base OID for the plugin, and the contents of the file are 246 | # taken as the complete code to run for the plugin. This method is 247 | # really only suitable for fairly simple plugins, and is mildly 248 | # deprecated -- practical experience has shown that this method of 249 | # defining a plugin is actually fairly confusing. 250 | # 251 | # - Any file in the plugin directory which ends in .rb is evaluated 252 | # as ruby code, in the context of the SNMP::Agent object which is running 253 | # add_plugin_dir. This means that any methods or classes defined in 254 | # the file are in the scope of the SNMP::Agent object itself. To 255 | # actually add a plugin in this instance, you need to run 256 | # self.add_plugin explicitly. This method of defining plugins 257 | # externally is preferred, since although it is more verbose, it is much 258 | # more flexible and lends itself to better modularity of plugins. 259 | # 260 | # == Proxying to other SNMP agents 261 | # 262 | # Although the Ruby SNMP agent is quite versatile, it currently lacks a lot 263 | # of the standard MIB trees that we know and love. 
This means, of course,
264 |   # that if you want to walk standard trees, like load averages, disk
265 |   # partitions, and network statistics, you'll need to be running another SNMP
266 |   # agent on your machines in addition to this agent. Rather than doing the
267 |   # dirty and making you remember whatever non-standard port you may have put
268 |   # one (or both) of the agents on, you can instead proxy the other agent
269 |   # through the Ruby SNMP agent.
270 |   #
271 |   # The syntax for this is very simple:
272 |   #
273 |   #   agent.add_proxy(oid, host, port)
274 |   #
275 |   # This simple call will cause any request to any part of the MIB subtree
276 |   # rooted at +oid+ to be fulfilled by making an SNMP request to the agent
277 |   # running on +host+ and listening on +port+ and returning whatever that
278 |   # agent sends back to us.
279 |   #
280 |   # A (minor) limitation at the moment is that you can't proxy a subtree
281 |   # provided by the backend agent to a different subtree in the Ruby SNMP
282 |   # agent. I don't consider this to be a major limitation, as -- due to the
283 |   # globally-unique and globally-meaningful semantics of the MIB -- you
284 |   # shouldn't have too much call for changing OIDs in proxies.
285 |   #
286 |   # There are some oddities in the proxy in the area of communities, and as far
287 |   # as I am aware nobody is doing anything particularly taxing with the proxy,
288 |   # so it may harbour unpleasant corner cases. Sorry about that.
289 |   #
290 | 
291 |   class Agent # :doc:
292 |     DefaultSettings = { :address => "127.0.0.1",
293 |                         :port => 161,
294 |                         :max_packet => 8000,
295 |                         :logger => Logger.new('/dev/null'),
296 |                         :sysContact => "Someone",
297 |                         :sysName => "Ruby SNMP agent",
298 |                         :sysLocation => "Unknown",
299 |                         :community => nil
300 |                       }
301 | 
302 |     # Create a new agent.
303 |     #
304 |     # You can provide a list of settings to the new agent, as a hash of
305 |     # symbols and values. Currently valid settings (and their defaults)
306 |     # are as follows:
307 |     #
308 |     # [:port] The UDP port to listen on. Default: 161
309 |     # [:max_packet] The largest UDP packet that will be read. Default: 8000
310 |     # [:logger] A Logger object to write all messages to. Default: sends all
311 |     #           messages to /dev/null.
312 |     # [:sysContact] A string to provide when an SNMP request is made for
313 |     #               sysContact. Default: "Someone"
314 |     # [:sysName] A string to provide when an SNMP request is made for
315 |     #            sysName. Default: "Ruby SNMP agent"
316 |     # [:sysLocation] A string to provide when an SNMP request is made for
317 |     #                sysLocation. Default: "Unknown"
318 |     # [:community] Either a string or array of strings which specify the
319 |     #              community/communities which this SNMP agent will respond
320 |     #              to. The default is nil, which means that the agent will
321 |     #              respond to any SNMP PDU, regardless of the community name
322 |     #              encoded in the PDU.
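    # For example (an editor's sketch using only the settings documented
    # above):
    #
    #   agent = SNMP::Agent.new(:port => 16161,
    #                           :community => ['public', 'monitoring'],
    #                           :logger => Logger.new(STDOUT))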
291 |   class Agent  # :doc:
292 |     DefaultSettings = { :address => "127.0.0.1",
293 |                         :port => 161,
294 |                         :max_packet => 8000,
295 |                         :logger => Logger.new('/dev/null'),
296 |                         :sysContact => "Someone",
297 |                         :sysName => "Ruby SNMP agent",
298 |                         :sysLocation => "Unknown",
299 |                         :community => nil
300 |                       }
301 | 
302 |     # Create a new agent.
303 |     #
304 |     # You can provide a list of settings to the new agent, as a hash of
305 |     # symbols and values.  Currently valid settings (and their defaults)
306 |     # are as follows:
307 |     #
308 |     # [:address] The local address to bind to.  Default: "127.0.0.1"
309 |     # [:port] The UDP port to listen on.  Default: 161
310 |     # [:max_packet] The largest UDP packet that will be read.  Default: 8000
311 |     # [:logger] A Logger object to write all messages to.  Default: sends all
312 |     #           messages to /dev/null.
313 |     # [:sysContact] A string to provide when an SNMP request is made for
314 |     #               sysContact.  Default: "Someone"
315 |     # [:sysName] A string to provide when an SNMP request is made for
316 |     #            sysName.  Default: "Ruby SNMP agent"
317 |     # [:sysLocation] A string to provide when an SNMP request is made for
318 |     #                sysLocation.  Default: "Unknown"
319 |     # [:community] Either a string or an array of strings specifying the
320 |     #              community/communities which this SNMP agent will respond
321 |     #              to.  The default is nil, which means that the agent will
322 |     #              respond to any SNMP PDU, regardless of the community name
323 |     #              encoded in the PDU.
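    #
    # As a sketch, an agent that binds the default loopback address and only
    # answers requests carrying one of two (invented) community names could
    # be created with:
    #
    #   agent = SNMP::Agent.new(:community => ['public', 'secret'],
    #                           :logger => Logger.new(STDOUT))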
324 |     #
325 |     def initialize(settings = {})
326 |       settings = DefaultSettings.merge(settings)
327 | 
328 |       @address = settings[:address]
329 |       @port = settings[:port]
330 |       @log = settings[:logger]
331 |       @max_packet = settings[:max_packet]
332 |       @community = settings[:community]
333 |       @socket = nil
334 | 
335 |       @mib_tree = MibNodeTree.new(:logger => @log)
336 | 
337 |       agent_start_time = Time.now
338 |       self.add_plugin('1.3.6.1.2.1.1') { {1 => [`uname -a`],  # sysDescr
339 |         3 => [SNMP::TimeTicks.new(((Time.now - agent_start_time) * 100).to_i)],  # sysUpTime
340 |         4 => [settings[:sysContact]],   # sysContact
341 |         5 => [settings[:sysName]],      # sysName
342 |         6 => [settings[:sysLocation]]   # sysLocation
343 |         }
344 |       }
345 |     end
346 | 
347 |     # Handle a new OID.
348 |     #
349 |     # See the class documentation for full information on how to use this method.
350 |     #
351 |     def add_plugin(base_oid, &block)
352 |       raise ArgumentError.new("Must pass a block to add_plugin") unless block_given?
353 |       @mib_tree.add_node(base_oid, MibNodePlugin.new(:logger => @log, :oid => base_oid, &block))
354 |     end
355 | 
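    # As a further sketch (the base OID, names, and values are invented), a
    # plugin can serve a whole subtree, and ask for its result to be cached:
    #
    #   agent.add_plugin('1.3.6.1.4.1.99999.3') do
    #     { 1 => { 1 => ['eth0', 'eth1'],   # .1.1.{0,1} -- port names
    #              2 => [1500, 1500] },     # .1.2.{0,1} -- MTUs
    #       :cache => 30 }                  # re-run at most every 30 seconds
    #   end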
356 |     # Add a directory full of plugins to the agent.
357 |     #
358 |     # To make it as simple as possible to provide plugins to the SNMP agent,
359 |     # you can create a directory and fill it with files containing plugin
360 |     # code, then tell the agent where to find all that juicy code.
361 |     #
362 |     # The files in the plugin directory are simply named after the base OID,
363 |     # and the contents are the code you want to execute, exactly as you would
364 |     # put it inside a block.  Files ending in .rb are instead evaluated in
365 |     # the agent's own scope; see the class documentation above for details.
366 |     def add_plugin_dir(dir)
367 |       orig_verbose = $VERBOSE
368 |       $VERBOSE = nil
369 |       Dir.entries(dir).each do |f|
370 |         @log.info("Looking at potential plugin #{File.join(dir, f)}")
371 |         if f =~ /^([0-9]\.?)+$/
372 |           begin
373 |             self.add_plugin(f, &eval("lambda do\n#{File.read(File.join(dir, f))}\nend\n"))
374 |           rescue SyntaxError => e
375 |             @log.warn "Syntax error in #{File.join(dir, f)}: #{e.message}"
376 |           end
377 |         elsif f =~ /\.rb$/
378 |           begin
379 |             self.instance_eval(File.read(File.join(dir, f)))
380 |           rescue SyntaxError => e
381 |             @log.warn "Syntax error in #{File.join(dir, f)}: #{e.message}"
382 |           rescue Exception => e
383 |             @log.warn "Some error occurred while loading #{File.join(dir, f)}: #{e.message}"
384 |           end
385 |         end
386 |       end
387 | 
388 |       $VERBOSE = orig_verbose
389 |     end
390 | 
391 |     def add_proxy(base_oid, host, port)
392 |       @mib_tree.add_node(base_oid, SNMP::MibNodeProxy.new(:base_oid => base_oid,
393 |                                                           :host => host,
394 |                                                           :port => port,
395 |                                                           :logger => @log)
396 |                         )
397 |     end
398 | 
399 |     # Main connection handling loop.
400 |     #
401 |     # Call this method when you're ready to respond to some SNMP messages.
402 |     #
403 |     # Caution: this method blocks (does not return until it's finished
404 |     # serving SNMP requests).  As a result, you should run it in a separate
405 |     # thread or catch one or more signals so that you can actually call
406 |     # +shutdown+ to stop the agent.
407 |     def start
408 |       open_socket if @socket.nil?
409 | 
410 |       @log.info "SNMP agent running"
411 |       @socket.listen do |data|
412 |         begin
413 |           @log.debug "Received #{data.length} bytes"
414 |           @log.debug data.inspect
415 | 
416 |           message = Message.decode(data)
417 | 
418 |           # Community access checks
419 |           community_ok = false
420 |           if @community.nil?
421 |             community_ok = true
422 |           else
423 |             @log.debug "Checking community"
424 |             community_ok = if @community.class == String
425 |               @log.debug "Checking if #{message.community} is #{@community}"
426 |               @community == message.community
427 |             elsif @community.class == Array
428 |               @log.debug "Checking if #{message.community} is in #{@community.inspect}"
429 |               @community.include? message.community
430 |             else
431 |               @log.error "Invalid setting for :community"
432 |               false
433 |             end
434 |             if community_ok
435 |               @log.debug "Community OK"
436 |             else
437 |               @log.debug "Community invalid"
438 |             end
439 |           end
440 | 
441 |           if community_ok
442 |             case message.pdu
443 |             when GetRequest
444 |               @log.debug "GetRequest received"
445 |               response = process_get_request(message)
446 |             when GetNextRequest
447 |               @log.debug "GetNextRequest received"
448 |               response = process_get_next_request(message)
449 |             when GetBulkRequest
450 |               @log.debug "GetBulkRequest received"
451 |               response = process_get_next_request(message)  # bulk is served as a plain get-next
452 |             else
453 |               raise SNMP::UnknownMessageError.new("invalid message #{message.inspect}")
454 |             end
455 |             encoded_message = response.encode
456 |             @log.debug encoded_message.inspect
457 |             encoded_message
458 |           else
459 |             nil
460 |           end
461 |         rescue SNMP::UnknownMessageError => e
462 |           @log.error "Unknown SNMP message: #{e.message}"
463 |           nil
464 |         rescue IOError => e
465 |           raise if e.message == 'stream closed' or e.message == 'closed stream'
466 |           @log.warn "IO Error: #{e.message}"
467 |           nil
468 |         rescue DontReplyException => e
469 |           nil
470 |         rescue Errno::EBADF
471 |           raise
472 |         rescue => e
473 |           @log.error "Error in handling message: #{e.message}: #{e.backtrace.join("\n")}"
474 |           nil
475 |         end
476 |       end
477 |     end
478 | 
479 |     # Stop the running agent.
480 |     #
481 |     # Close the socket and stop the agent from running.  It can be started again
482 |     # just by calling +start+ again.  You will, of course, need to be catching
483 |     # signals or be multi-threaded in order to be able to actually call this
484 |     # method, because +start+ itself is a blocking method.
485 |     #
486 |     def shutdown
487 |       @log.info "SNMP agent stopping"
488 |       @socket.close
489 |     end
490 | 
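    # Since +start+ blocks, a common arrangement (a sketch, along the lines of
    # the demo at the bottom of this file) is to run the agent in a thread of
    # its own and shut it down from elsewhere:
    #
    #   Thread.new { agent.start }
    #   sleep 300
    #   agent.shutdown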
491 |     # Open the socket.  Call this early if you want to drop elevated
492 |     # privileges before starting the agent itself.
493 |     def open_socket
494 |       @socket = UDPSocketPool.new(@log, @address, @port)
495 |     end
496 | 
497 |     private
498 |     def process_get_request(message)
499 |       response = message.response
500 |       response.pdu.varbind_list.each do |v|
501 |         @log.debug "GetRequest OID: #{v.name}, #{message.community}"
502 |         v.value = get_snmp_value(v.name, message.community)
503 |       end
504 | 
505 |       response
506 |     end
507 | 
508 |     def process_get_next_request(message)
509 |       response = message.response
510 |       response.pdu.varbind_list.length.times do |idx|
511 |         v = response.pdu.varbind_list[idx]
512 |         @log.debug "OID: #{v.name}"
513 |         v.name = next_oid_in_tree(v.name)
514 |         @log.debug "pgnr: Next OID is #{v.name.to_s}"
515 |         if SNMP::EndOfMibView == v.name
516 |           @log.debug "Setting error status"
517 |           v.name = ObjectId.new('0')
518 |           response.pdu.error_status = :noSuchName
519 |           response.pdu.error_index = idx
520 |         else
521 |           @log.debug "Regular value"
522 |           v.value = get_snmp_value(v.name)
523 |         end
524 |       end
525 | 
526 |       response
527 |     end
528 | 
529 |     def get_snmp_value(oid, community = nil)
530 |       @log.debug("get_snmp_value(#{oid.to_s})")
531 |       data_value = get_mib_entry(oid, community).value
532 | 
533 |       if data_value.is_a? ::Integer
534 |         SNMP::Integer.new(data_value)
535 |       elsif data_value.is_a? String
536 |         SNMP::OctetString.new(data_value)
537 |       elsif data_value.nil?
538 |         SNMP::NoSuchObject
539 |       elsif data_value.respond_to? :asn1_type
540 |         # Assuming that we got given back a literal SNMP type
541 |         data_value
542 |       else
543 |         SNMP::OctetString.new(data_value.to_s)
544 |       end
545 |     end
546 | 
547 |     def get_mib_entry(oid, community = nil)
548 |       @log.debug "Looking for MIB entry #{oid.to_s}"
549 |       oid = ObjectId.new(oid) unless oid.is_a? ObjectId
550 |       @mib_tree.get_node(oid, community)
551 |     end
552 | 
553 |     def next_oid_in_tree(oid)
554 |       @log.debug "Looking for the next OID from #{oid.to_s}"
555 |       oid = ObjectId.new(oid) unless oid.is_a? ObjectId
556 | 
557 |       next_oid = @mib_tree.next_oid_in_tree(oid)
558 | 
559 |       if next_oid.nil?
560 |         next_oid = SNMP::EndOfMibView
561 |       end
562 | 
563 |       next_oid
564 |     end
565 | 
566 |   end
567 | 
568 |   class MibNode  # :nodoc:
569 |     # Create a new MibNode (of some type)
570 |     #
571 |     # This is quite a tricky piece of work -- we have to work out whether
572 |     # we're being asked to create a MibNodeTree (initial_data is a hash or
573 |     # array), a MibNodeValue (initial_data is some sort of scalar), a
574 |     # MibNodeProxy (initial_data consists of :host and :port), or a
575 |     # MibNodePlugin (a block was given).
576 |     #
577 |     # What comes out the other end is something that will respond to the
578 |     # standard MibNode interface, whatever it may be underneath.
579 |     #
580 |     def self.create(initial_data = {}, opts = {}, &block)
581 |       if initial_data.respond_to? :next_oid_in_tree
582 |         return initial_data
583 |       end
584 | 
585 |       if initial_data.instance_of? Array
586 |         initial_data = initial_data.to_hash
587 |       end
588 | 
589 |       if initial_data.is_a? Hash
590 |         initial_data.merge! opts
591 |         if block_given?
592 |           return MibNodePlugin.new(initial_data, &block)
593 |         elsif initial_data.keys.member? :host and initial_data.keys.member? :port
594 |           return MibNodeProxy.new(initial_data)
595 |         else
596 |           return MibNodeTree.new(initial_data.merge(opts))
597 |         end
598 |       else
599 |         return MibNodeValue.new({:value => initial_data}.merge(opts))
600 |       end
601 |     end
602 |   end
603 | 
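  # A sketch of how that dispatch works out in practice (the values and the
  # proxy endpoint are invented):
  #
  #   MibNode.create(42)                          # => MibNodeValue
  #   MibNode.create(['a', 'b'])                  # => MibNodeTree ({0=>'a', 1=>'b'})
  #   MibNode.create(:base_oid => '1.3.6.1',
  #                  :host => 'localhost',
  #                  :port => 1161)               # => MibNodeProxy
  #   MibNode.create { `uptime` }                 # => MibNodePlugin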
604 |   class MibNodeTree < MibNode  # :nodoc:
605 |     def initialize(initial_data = {})
606 |       @log = initial_data.keys.include?(:logger) ? initial_data.delete(:logger) : Logger.new('/dev/null')
607 |       @subnodes = Hash.new { |h,k| h[k] = SNMP::MibNodeTree.new(:logger => @log) }
608 | 
609 |       initial_data.keys.each do |k|
610 |         raise ArgumentError.new("MIB key #{k} is not an integer") unless k.is_a? ::Integer
611 |         @subnodes[k] = MibNode.create(initial_data[k], :logger => @log)
612 |       end
613 |     end
614 | 
615 |     def to_hash
616 |       output = {}
617 |       keys.each do |k|
618 |         output[k] = @subnodes[k].respond_to?(:to_hash) ? @subnodes[k].to_hash : @subnodes[k]
619 |       end
620 | 
621 |       output
622 |     end
623 | 
624 |     def empty?
625 |       length == 0
626 |     end
627 | 
628 |     def value
629 |       nil
630 |     end
631 | 
632 |     def get_node(oid, community = nil)
633 |       oid = ObjectId.new(oid)
634 |       @log.debug("get_node(#{oid.to_s})")
635 | 
636 |       next_idx = oid.shift
637 |       if next_idx.nil?
638 |         # End of the road, bud
639 |         return self
640 |       else
641 |         return sub_node(next_idx).get_node(oid, community)
642 |       end
643 |     end
644 | 
645 |     def add_node(oid, node)
646 |       oid = ObjectId.new(oid) unless oid.is_a? ObjectId
647 |       @log.debug("Adding a #{node.class} at #{oid.to_s}")
648 | 
649 |       sub = oid.shift
650 | 
651 |       if oid.length == 0
652 |         if @subnodes.has_key? sub
653 |           raise ArgumentError.new("Sub-ID #{sub} is already occupied by something; cannot put a node here")
654 |         else
655 |           @log.debug("Inserted")
656 |           @subnodes[sub] = node
657 |           @log.debug("#{self.object_id}.subnodes[#{sub}] is now a #{@subnodes[sub].class}")
658 |         end
659 |       else
660 |         @subnodes[sub].add_node(oid, node)
661 |       end
662 |     end
663 | 
664 |     # Return the path down the 'left' side of the MIB tree from this point.
665 |     # The 'left' is, of course, the smallest node in each subtree until we
666 |     # get to a leaf.  It is possible that the subtree doesn't contain any
667 |     # actual data; in this instance, left_path will return nil to indicate
668 |     # "no tree here, look somewhere else".
669 |     def left_path()
670 |       @log.debug("left_path")
671 |       path = nil
672 | 
673 |       keys.sort.each do |next_idx|
674 |         @log.debug("Boink (#{next_idx})")
675 |         # Dereference into the subtree.  Let's see what we've got here, shall we?
676 |         next_node = sub_node(next_idx)
677 | 
678 |         path = next_node.left_path()
679 |         unless path.nil?
680 |           # Add ourselves to the front of the path, and we're done
681 |           path.unshift(next_idx)
682 |           return path
683 |         end
684 |       end
685 | 
686 |       # We chewed through all the keys and all the subtrees were completely
687 |       # empty.  Bugger.
688 |       return nil
689 |     end
690 | 
691 |     # Return the next OID strictly larger than the given OID from this node.
692 |     # Returns nil if there is no larger OID in the subtree.
693 |     def next_oid_in_tree(oid)
694 |       @log.debug("MibNodeTree#next_oid_in_tree(#{oid})")
695 |       oid = ObjectId.new(oid)
696 | 
697 |       # End of the line, bub
698 |       return self.left_path if oid.length == 0
699 | 
700 |       sub = oid.shift
701 | 
702 |       next_oid = sub_node(sub).next_oid_in_tree(oid)
703 | 
704 |       @log.debug("Got #{next_oid.inspect} from call to subnodes[#{sub}].next_oid_in_tree(#{oid.to_s})")
705 | 
706 |       if next_oid.nil?
707 |         @log.debug("No luck asking subtree #{sub}; how about the next subtree(s)?")
708 |         sub = @subnodes.keys.sort.find { |k|
709 |           if k > sub
710 |             @log.debug("Examining subtree #{k}")
711 |             !sub_node(k).left_path.nil?
712 |           else
713 |             false
714 |           end
715 |         }
716 | 
717 |         if sub.nil?
718 |           @log.debug("This node has no valid next nodes")
719 |           return nil
720 |         end
721 | 
722 |         next_oid = sub_node(sub).left_path
723 |       end
724 | 
725 |       if next_oid.nil?
726 |         # We've got no next node below us
727 |         return nil
728 |       else
729 |         # We've got a next OID to go to; append ourselves to the front and
730 |         # send it back up the line
731 |         next_oid.unshift(sub)
732 |         @log.debug("The next OID for #{oid.inspect} is #{next_oid.inspect}")
733 |         return ObjectId.new(next_oid)
734 |       end
735 |     end
736 | 
737 |     private
738 |     def sub_node(idx)
739 |       @log.debug("sub_node(#{idx.inspect})")
740 |       raise ArgumentError.new("Index [#{idx}] must be an integer in a MIB tree") unless idx.is_a? ::Integer
741 | 
742 |       # Dereference into the subtree.  Let's see what we've got here, shall we?
743 |       @log.debug("#{self.object_id}.subnodes[#{idx}] is a #{@subnodes[idx].class}")
744 |       @subnodes[idx]
745 |     end
746 | 
747 |     def keys
748 |       @subnodes.keys
749 |     end
750 | 
751 |     def length
752 |       @subnodes.length
753 |     end
754 |   end
755 | 
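  # To make the walk semantics concrete, a small sketch (the tree contents
  # are invented):
  #
  #   tree = MibNodeTree.new(1 => { 2 => 'a', 4 => 'b' })
  #   tree.left_path                                # => [1, 2]
  #   tree.next_oid_in_tree(ObjectId.new('1.2'))    # => ObjectId "1.4"
  #   tree.next_oid_in_tree(ObjectId.new('1.4'))    # => nil (end of this subtree)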
756 |   class MibNodePlugin < MibNode  # :nodoc:
757 |     def initialize(opts = {}, &block)
758 |       @log = opts[:logger].nil? ? Logger.new('/dev/null') : opts[:logger]
759 |       @plugin_timeout = opts[:plugin_timeout] || 2
760 |       @proc = block
761 |       @oid = opts[:oid]
762 |       @cached_value = nil
763 |       @cache_until = 0
764 |     end
765 | 
766 |     def value
767 |       nil
768 |     end
769 | 
770 |     def to_hash
771 |       plugin_value.to_hash
772 |     end
773 | 
774 |     def get_node(oid, community = nil)
775 |       @log.debug("plugin get_node(#{oid.to_s}, #{community.to_s})")
776 |       val = plugin_value(community)
777 |       val.get_node(oid, community) if val.respond_to? :get_node
778 |     end
779 | 
780 |     def add_node(oid, node)
781 |       raise ArgumentError.new("Adding this plugin would encroach on the subtree of an existing plugin")
782 |     end
783 | 
784 |     def left_path
785 |       plugin_value.left_path
786 |     end
787 | 
788 |     def next_oid_in_tree(oid)
789 |       val = plugin_value
790 |       val.next_oid_in_tree(oid) if val.respond_to? :next_oid_in_tree
791 |     end
792 | 
793 |     private
794 |     def plugin_value community = nil
795 |       @log.debug("Getting plugin value")
796 |       if Time.now.to_i > @cache_until
797 |         begin
798 |           plugin_data = nil
799 |           Timeout::timeout(@plugin_timeout) do
800 |             plugin_data = @proc.call community
801 |           end
802 |         rescue Timeout::Error
803 |           @log.warn("Plugin for OID #{@oid} exceeded the timeout")
804 |           return MibNodeValue.new(:logger => @log, :value => nil)
805 |         rescue DontReplyException => e
806 |           # Just pass it on up the chain
807 |           raise e
808 |         rescue => e
809 |           @log.warn("Plugin for OID #{@oid} raised an exception: #{e.message}\n#{e.backtrace.join("\n")}")
810 |           return MibNodeValue.new(:logger => @log, :value => nil)
811 |         end
812 | 
813 |         if plugin_data.instance_of? Array
814 |           plugin_data = plugin_data.to_hash
815 |         end
816 | 
817 |         if plugin_data.is_a? Hash
818 |           unless plugin_data[:cache].nil?
819 |             @cache_until = Time.now.to_i + plugin_data[:cache]
820 |             plugin_data.delete :cache
821 |           end
822 |         end
823 | 
824 |         @cached_value = MibNode.create(plugin_data, :logger => @log)
825 |       end
826 | 
827 |       @cached_value
828 |     end
829 |   end
830 | 
831 |   class MibNodeProxy < MibNode  # :nodoc:
832 |     def initialize(opts)
833 |       @base_oid = SNMP::ObjectId.new(opts[:base_oid])
834 |       @manager = SNMP::Manager.new(:Host => opts[:host], :Port => opts[:port])
835 |       @log = opts[:logger] ? opts[:logger] : Logger.new('/dev/null')
836 |     end
837 | 
838 |     def get_node(oid, community = nil)
839 |       oid = SNMP::ObjectId.new(oid) unless oid.is_a? SNMP::ObjectId
840 | 
841 |       complete_oid = ObjectId.new(@base_oid + oid)
842 | 
843 |       rv = @manager.get([complete_oid])
844 | 
845 |       MibNodeValue.new(:value => rv.varbind_list[0].value)
846 |     end
847 | 
848 |     def add_node(oid, node)
849 |       raise ArgumentError.new("Cannot add a node inside a MibNodeProxy")
850 |     end
851 | 
852 |     def left_path()
853 |       next_oid_in_tree([])  # the empty relative OID: the first OID under our base
854 |     end
855 | 
856 |     def next_oid_in_tree(oid)
857 |       oid = SNMP::ObjectId.new(oid) unless oid.is_a? SNMP::ObjectId
858 | 
859 |       complete_oid = ObjectId.new(@base_oid + oid)
860 | 
861 |       rv = @manager.get_next([complete_oid])
862 | 
863 |       next_oid = rv.varbind_list[0].name
864 | 
865 |       if next_oid.subtree_of? @base_oid
866 |         # Remember to only return the interesting subtree portion!
867 |         next_oid[@base_oid.length..-1]
868 |       else
869 |         nil
870 |       end
871 |     end
872 |   end
873 | 
874 |   class MibNodeValue < MibNode  # :nodoc:
875 |     include Comparable
876 | 
877 |     attr_reader :value
878 | 
879 |     def initialize(opts)
880 |       @value = opts[:value]
881 |       @log = Logger.new('/dev/null')
882 |     end
883 | 
884 |     def <=>(other)
885 |       (@value.nil? || other.nil?) ? 0 : @value <=> other.value
886 |     end
887 | 
888 |     def get_node(oid, community = nil)
889 |       oid.length == 0 ? self : MibNodeTree.new
890 |     end
891 | 
892 |     def add_node(oid, node)
893 |       raise RuntimeError.new("You really shouldn't do that")
894 |     end
895 | 
896 |     def left_path()
897 |       value.nil? ? nil : []
898 |     end
899 | 
900 |     def next_oid_in_tree(oid)
901 |       nil
902 |     end
903 |   end
904 | 
905 |   # To signal that the agent received a message that it didn't know how to
906 |   # handle.
907 |   class UnknownMessageError < StandardError
908 |   end
909 | 
910 | end  # module SNMP
911 | 
912 | class Array  # :nodoc:
913 |   def keys
914 |     k = []
915 |     length.times { |v| k << v }
916 |     k
917 |   end
918 | 
919 |   def to_hash
920 |     h = {}
921 |     keys.each {|k| h[k] = self[k]}
922 |     h
923 |   end
924 | end
925 | 
926 | class NilClass  # :nodoc:
927 |   def value
928 |     nil
929 |   end
930 | end
931 | 
932 | class UDPSocketPool
933 |   def initialize(log, address, port)
934 |     @socket_list = {}
935 |     @port = port
936 |     @address = address
937 |     @log = log
938 | 
939 |     init_socket_list
940 |   end
941 | 
942 |   def self.listen(log, address, port, &block)
943 |     pool = UDPSocketPool.new(log, address, port)
944 | 
945 |     pool.listen(&block)
946 |   end
947 | 
948 |   def listen
949 |     raise RuntimeError.new("No block given to UDPSocketPool#listen") unless block_given?
950 | 
951 |     loop do
952 |       ready = IO::select(@socket_list.values)[0]
953 | 
954 |       ready.each do |s|
955 |         data, origin = s.recvfrom(65535)
956 |         if s == @socket_list['0.0.0.0']
957 |           # We don't explicitly handle data received by the 'any'
958 |           # socket, we just use it to trigger a rescan
959 |           init_socket_list
960 |         else
961 |           result = yield(data)
962 |           s.send(result, 0, origin[3], origin[1]) unless result.nil?
963 |         end
964 |       end
965 |     end
966 |   end
967 | 
968 |   def close
969 |     @socket_list.values.each {|s| s.close}
970 |   end
971 | 
972 |   private
973 |   def init_socket_list
974 |     addrs = [@address]
975 |     @log.info("Binding to #{@address}")
976 |     addrs.each do |a|
977 |       next if @socket_list.keys.include? a
978 |       @socket_list[a] = ::UDPSocket.new
979 |       @socket_list[a].setsockopt(Socket::SOL_SOCKET,
980 |                                  Socket::SO_REUSEADDR,
981 |                                  1)
982 |       @socket_list[a].bind(a, @port)
983 |     end
984 |   end
985 | end
986 | 
987 | # Exception to be raised by a plugin if it really, really, really doesn't
988 | # want to have anything at all to do with this request.
989 | class DontReplyException < Exception  # not StandardError, so a bare rescue won't swallow it
990 | end
991 | 
992 | if __FILE__ == $0
993 |   agent = SNMP::Agent.new(:address => '10.0.0.1', :port => 1061, :logger => Logger.new(STDOUT))
994 |   trap("INT") { agent.shutdown }
995 |   agent.start
996 | end
--------------------------------------------------------------------------------