├── Notifications ├── Slack │ ├── readme.txt │ ├── slack.sh │ └── slack1.sh └── Telegram │ └── telegram.sh ├── README.md └── Templates ├── Bareos ├── LICENSE ├── README.md ├── bareos-zabbix.xml ├── bareos.conf ├── bareos.monitoring.py └── installation.script.sh ├── Docker └── zabbix-docker-master │ ├── .gitignore │ ├── README.md │ ├── ReleaseNotes.md │ ├── ZabbixDockerTemplate.xml │ ├── install.sh │ ├── package.sh │ ├── testtools │ ├── Dockerfile │ ├── Makefile │ ├── memkiller.c │ ├── package.sh │ └── testrun.sh │ ├── userparameter_zabbixdocker.conf │ ├── zabbix-docker-convert.py │ ├── zabbix-docker-discover.py │ ├── zabbix-docker-info.py │ ├── zabbix-docker-inspect.py │ └── zabbix-docker-stats.py ├── FFmpeg Chunks ├── chunk.conf └── ffmpeg_chunks_template.xml ├── Graylog2 ├── README.md ├── graylog.conf └── graylog.xml ├── IOstat ├── iostat-collect.sh ├── iostat-parse.sh ├── iostat.conf └── iostat.xml ├── IPMI └── supermicro.ipmi.xml ├── Jenkins ├── README.md ├── config ├── jenkins.conf ├── jenkins.job.status.py ├── jenkins.job.xml └── requirements.txt ├── Keepalived └── keepalived.xml ├── Lm-sensors ├── lm.sensors.xml ├── lmsensors.conf └── lmsensors.sh ├── Logfiles monitoring ├── README.md ├── log.monitoring.conf ├── logfile.discovery.py └── logfile.monitoring.xml ├── MD-RAID ├── README.txt ├── mdraid.xml ├── raid.conf └── zabbix_mdraid.sh ├── Memcache ├── README.txt ├── memcache-stats.sh ├── memcache.conf └── memcache.xml ├── Microservices ├── microservice.get.data.py ├── microservice.parse.py └── microservice.status.conf ├── Monitoring email sending ├── aws.ses.test.xml ├── email.test.php └── sending.email.test.conf ├── MySQL ├── MySQL App.xml ├── MySQL Slave │ ├── mysql-slave.conf │ └── mysql-slave.xml ├── Template MySQL Backup.xml └── mysql.conf ├── Nginx ├── README.txt ├── nginx-cache │ ├── Template Nginx-cache.xml │ ├── nginx_cache.conf │ └── nginx_cache.sh ├── nginx-performance-time │ ├── Template Nginx performance time.xml │ ├── 
nginx.performance.time.conf │ ├── nginx.request.sh │ └── nginx.upstream.time.sh ├── nginx-rtmp │ ├── Template Nginx-RTMP.xml │ ├── nginx-rtmp.conf │ ├── nginx-rtmp.sh │ └── stat.xsl ├── nginx-status.conf ├── nginx.conf ├── nginx.template.xml └── nginx_status.sh ├── OS Linux ├── READ.ME └── os.linux.xml ├── OVH ├── README.txt ├── cloud.storage.container.size.py.py ├── ovh.xml └── ovh.zabbix.conf ├── PHP-FPM ├── README.txt ├── php-fpm-status.conf ├── php-fpm.conf ├── php-fpm.sh ├── php-fpm.xml └── status.php ├── PostgreSQL ├── Backup │ ├── PostgreSQL backup.xml │ └── postgres.backup.conf ├── README.md ├── docker.postgres.master.conf ├── postgresql.DATABASENAME.xml ├── postgresql.common.xml ├── postgresql.conf ├── postgresql.slave.conf ├── postgresql.slave.xml └── template.preparing.sh ├── RabbitMQ ├── .gitignore ├── .travis.yml ├── LICENSE ├── README.ja.md ├── README.md ├── rabbitmq.template.xml ├── scripts │ └── rabbitmq │ │ ├── .rab.auth │ │ ├── api.py │ │ ├── list_rabbit_nodes.sh │ │ ├── list_rabbit_queues.sh │ │ ├── list_rabbit_shovels.sh │ │ └── rabbitmq-status.sh ├── templates │ └── rabbitmq.template.xml ├── tests │ ├── setup_auth.sh │ └── test_basic_return.py └── zabbix_agentd.d │ └── rabbitmq.conf ├── Redis ├── README.md ├── redis-stats.py ├── redis.conf └── redis.xml ├── SMART ├── README.md ├── smart.template.xml ├── smart.thresh.check.sh ├── smartctl-disks-discovery.pl └── zabbix_smartctl.conf ├── SSL Certificates ├── check-ssl-expire.py ├── check-ssl-expire.template.xml ├── ssl-cert-check.sh └── ssl.cert.conf ├── Tarantool ├── README.txt ├── tarantool-params.conf ├── tarantool-slab.sh ├── tarantool-stats.sh └── tarantool.xml └── WebSockets ├── README.txt ├── web.socket.check.conf ├── web.socket.check.py ├── websocket.html └── websocket.template.xml /Notifications/Slack/readme.txt: -------------------------------------------------------------------------------- 1 | put the script into /usr/lib/zabbix/alertscripts/ - it's described in zabbix-server 
configeuration file, parameter "AlertScriptsPath" 2 | chmod 755 3 | 4 | you can check how it does work: 5 | bash /usr/lib/zabbix/alertscripts/slack.sh '@SLACKUSERNAME' PROBLEM 'Oh no! Something is wrong!' - set your own slack-username 6 | 7 | In zabbix GUI set slack.sh as script in Administration > Media Types 8 | Then create Action, choose notification via Slack 9 | -------------------------------------------------------------------------------- /Notifications/Slack/slack.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Slack incoming web-hook URL and user name 4 | url='https://hooks.slack.com/services/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' # private chat with @username 5 | username='Zabbix' 6 | 7 | ## Values received by this script: 8 | # To = $1 (Slack channel or user to send the message to, specified in the Zabbix web interface; "@username" or "#channel") 9 | # Subject = $2 (usually either PROBLEM or RECOVERY) 10 | # Message = $3 (whatever message the Zabbix action sends, preferably something like "Zabbix server is unreachable for 5 minutes - Zabbix server$ 11 | 12 | # Get the Slack channel or user ($1) and Zabbix subject ($2 - hopefully either PROBLEM or RECOVERY) 13 | to="$1" 14 | subject="$2" 15 | 16 | # Change message emoji depending on the subject - smile (RECOVERY), frowning (PROBLEM), or ghost (for everything else) 17 | recoversub='^RECOVER(Y|ED)?$' 18 | if [[ "$subject" =~ ${recoversub} ]]; then 19 | emoji=':smile:' 20 | elif [ "$subject" == 'PROBLEM' ]; then 21 | emoji=':frowning:' 22 | else 23 | emoji=':ghost:' 24 | fi 25 | 26 | # The message that we want to send to Slack is the "subject" value ($2 / $subject - that we got earlier) 27 | # followed by the message that Zabbix actually sent us ($3) 28 | message="${subject}: $3" 29 | 30 | # Build our JSON payload and send it as a POST request to the Slack incoming web-hook URL 31 | payload="payload={\"channel\": \"${to//\"/\\\"}\", 
\"username\": \"${username//\"/\\\"}\", \"text\": \"${message//\"/\\\"}\", \"icon_emoji\": \$ 32 | curl -m 5 --data-urlencode "${payload}" $url -A 'zabbix-slack-alertscript / https://github.com/ericoc/zabbix-slack-alertscript' -------------------------------------------------------------------------------- /Notifications/Slack/slack1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Slack incoming web-hook URL and user name 4 | url='https://hooks.slack.com/services/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' # private chat with @username 5 | username='Zabbix' 6 | 7 | ## Values received by this script: 8 | # To = $1 (Slack channel or user to send the message to, specified in the Zabbix web interface; "@username" or "#channel") 9 | # Subject = $2 (usually either PROBLEM or RECOVERY) 10 | # Message = $3 (whatever message the Zabbix action sends, preferably something like "Zabbix server is unreachable for 5 minutes - Zabbix server (127.0.0.1)") 11 | 12 | # Get the Slack channel or user ($1) and Zabbix subject ($2 - hopefully either PROBLEM or RECOVERY) 13 | to="$1" 14 | subject="$2" 15 | 16 | # Change message emoji depending on the subject - smile (RECOVERY), frowning (PROBLEM), or ghost (for everything else) 17 | recoversub='^RECOVER(Y|ED)?$' 18 | if [[ "$subject" =~ ${recoversub} ]]; then 19 | emoji=':smile:' 20 | elif [ "$subject" == 'PROBLEM' ]; then 21 | emoji=':frowning:' 22 | else 23 | emoji=':ghost:' 24 | fi 25 | 26 | # The message that we want to send to Slack is the "subject" value ($2 / $subject - that we got earlier) 27 | # followed by the message that Zabbix actually sent us ($3) 28 | message="${subject}: $3" 29 | 30 | # Build our JSON payload and send it as a POST request to the Slack incoming web-hook URL 31 | payload="payload={\"channel\": \"${to//\"/\\\"}\", \"username\": \"${username//\"/\\\"}\", \"text\": \"${message//\"/\\\"}\", \"icon_emoji\": \"${emoji}\"}" 32 | curl -m 5 
--data-urlencode "${payload}" $url -A 'zabbix-slack-alertscript / https://github.com/ericoc/zabbix-slack-alertscript' -------------------------------------------------------------------------------- /Notifications/Telegram/telegram.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 1. create you own bot 4 | # 2. put bot key to this script 5 | # 3. add this bot to your contact list 6 | # 4. write any message to it 7 | # 5. find out you chat_id using https://api.telegram.org/bot$BOTKEY/getUpdates 8 | #https://core.telegram.org/bots/api 9 | 10 | CHATID="$1" 11 | THEME="$2" 12 | BODY="$3" 13 | 14 | BOTKEY="12345:sdsadsa-dfdsf" #like this 15 | TIMEOUT="10" 16 | 17 | TEXT="$THEME $BODY " 18 | 19 | #PARSEMODE="markdown" 20 | PARSEMODE="html" 21 | 22 | curl -s --max-time $TIMEOUT "https://api.telegram.org/bot$BOTKEY/sendMessage?chat_id=$CHATID&disable_web_page_preview=1&parse_mode=$PARSEMODE&text=$TEXT" > /dev/null -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Zabbix-Extensions 2 | 3 | Here are scripts and templates are for Zabbix monitoring system hosted on Linux OS Servers 4 | -------------------------------------------------------------------------------- /Templates/Bareos/LICENSE: -------------------------------------------------------------------------------- 1 | Created by yap 2 | 3 | Copyright (C) 2017 4 | -------------------------------------------------------------------------------- /Templates/Bareos/README.md: -------------------------------------------------------------------------------- 1 | Requirements: 2 | * bareos veriosion >= 15.2 3 | * python 2.7 4 | * active zabbix-agent 5 | * Find the password for bconsole connection needs - usually it's in this file /etc/bareos/bconsole.conf 6 | * python-bareos module 7 | 8 | Intallation zabbix module to monitor Bareos 9 | 10 | 
Run installation.script.sh as root 11 | 12 | Then upload xml-template to your own zabbix and link it to bareos-server 13 | 14 | #http://wiki.bacula.org/doku.php?id=faq#what_do_all_those_job_status_codes_mean 15 | Bareos Job Status Code Meaning 16 | A Canceled by user 17 | B Blocked 18 | C Created, but not running 19 | c Waiting for client resource 20 | D Verify differences 21 | d Waiting for maximum jobs 22 | E Terminated in error 23 | e Non-fatal error 24 | f fatal error 25 | F Waiting on File Daemon 26 | j Waiting for job resource 27 | M Waiting for mount 28 | m Waiting for new media 29 | p Waiting for higher priority jobs to finish 30 | R Running 31 | S Scan 32 | s Waiting for storage resource 33 | T Terminated normally 34 | t Waiting for start time 35 | -------------------------------------------------------------------------------- /Templates/Bareos/bareos-zabbix.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2018-04-02T11:01:41Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 278 | 279 | 280 | 281 | {Template Bareos:proc.num[{$BAREOS.DIR}].last()}=0 282 | Bareos Director is DOWN in {HOST.NAME} 283 | 284 | 0 285 | 3 286 | 287 | 0 288 | 289 | 290 | 291 | {Template Bareos:proc.num[{$BAREOS.FD}].last()}=0 292 | Bareos File is DOWN in {HOST.NAME} 293 | 294 | 0 295 | 3 296 | 297 | 0 298 | 299 | 300 | 301 | {Template Bareos:proc.num[{$BAREOS.SD}].last()}=0 302 | Bareos Storage is DOWN in {HOST.NAME} 303 | 304 | 0 305 | 3 306 | 307 | 0 308 | 309 | 310 | 311 | 312 | -------------------------------------------------------------------------------- /Templates/Bareos/bareos.conf: -------------------------------------------------------------------------------- 1 | #Backup Storage monitoing 2 | UserParameter=bareos.jobs.discovery, sudo /usr/bin/python /etc/zabbix/scripts/bareos/bareos.monitoring.py discovery 3 | UserParameter=bareos.job[*], sudo /usr/bin/python /etc/zabbix/scripts/bareos/bareos.monitoring.py $1 | 
grep $2 -A2 | tail -n3 | grep status | cut -f4 -d\" 4 | -------------------------------------------------------------------------------- /Templates/Bareos/bareos.monitoring.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import bareos.bsock 3 | import sys 4 | import re 5 | from configobj import ConfigObj 6 | import json 7 | 8 | command = sys.argv[1] 9 | 10 | try: 11 | config = ConfigObj('/etc/zabbix/scripts/bareos/config') 12 | secretspassword = config.get('password') 13 | except Exception as E: 14 | print E 15 | sys.exit(1) 16 | 17 | password=bareos.bsock.Password(secretspassword) 18 | 19 | #conneting to bconsole 20 | directorconsole=bareos.bsock.DirectorConsoleJson(address="localhost", port=9101, password=password) 21 | 22 | #Discovery Bareos' jobs 23 | def discovery(): 24 | jobs=directorconsole.call(".jobs") 25 | data = [] 26 | for i in jobs['jobs']: 27 | data.append({'{#BAREOSJOBNAME}': i['name']}) 28 | 29 | return json.dumps({"data": data}, indent=4, sort_keys=True) 30 | 31 | #Bareos' jobs status explanation 32 | #http://wiki.bacula.org/doku.php?id=faq#what_do_all_those_job_status_codes_mean 33 | #"type": "B", - only if backup 34 | #bad job status 35 | #e, f, E, B, 36 | 37 | #Get status of job 38 | def status(): 39 | jobs=directorconsole.call("llist jobs") 40 | data = [] 41 | for i in jobs['jobs']: 42 | data.append({'jobname': i['name'], 'status': i['jobstatus'], 'timestamp': i['jobtdate'], 'starttime': i['starttime']}) 43 | 44 | return json.dumps(data, indent=4, sort_keys=True) 45 | 46 | function_dict = {'status':status, 'discovery':discovery } 47 | func = function_dict[command] 48 | 49 | print func() 50 | -------------------------------------------------------------------------------- /Templates/Bareos/installation.script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #run as root 3 | 4 | #Installation bareos-python module 5 | 6 | git 
clone https://github.com/bareos/python-bareos 7 | cd python-bareos 8 | python setup.py install 9 | cd ../ 10 | 11 | #preparation zabbix config 12 | mkdir /etc/zabbix/scripts/ 13 | mkdir /etc/zabbix/scripts/bareos/ 14 | touch /etc/zabbix/scripts/bareos/config 15 | cp bareos.monitoring.py /etc/zabbix/scripts/bareos/bareos.monitoring.py 16 | echo password=$(grep Password /etc/bareos/bconsole.conf | cut -f2 -d\") >> /etc/zabbix/scripts/bareos/config 17 | chown -R zabbix:zabbix /etc/zabbix/scripts/bareos/ 18 | chmod -R 700 /etc/zabbix/scripts/bareos/ 19 | cp bareos.conf /etc/zabbix/zabbix_agentd.d/ 20 | 21 | systemctl restart zabbix-agent 22 | 23 | sudo -u zabbix zabbix_agentd -t bareos.jobs.discovery -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/.gitignore: -------------------------------------------------------------------------------- 1 | memkiller 2 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/README.md: -------------------------------------------------------------------------------- 1 | # zabbix-docker 1.0.3 2 | This repository contains monitoring code for Zabbix to discover and monitor Docker instances on Linux platforms. 3 | 4 | This module once installed provides monitoring capabilities through Zabbix 2.x for Docker version 1.7 and later. 5 | 6 | If you experience any software defects related to this module, please notify the author by submitting an issue on [Github](https://github.com/rpsedlak/zabbix-docker/issues). 7 | 8 | ## Installation Instructions: 9 | * Run package.sh to create the ZabbixDocker.tar.gz file. 10 | * Copy the ZabbixDocker.tar.gz file to necessary servers. 11 | * On the server: tar zxvf ZabbixDocker.tar.gz. It is recommended that this is done in it's own directory. 12 | * Run install.sh. Please note that this assumes that the Zabbix agent files are located at /etc/zabbix/zabbix_agentd.d/. 
If this is not the case as in an Ubuntu installation then please add the directory as a parameter to install.sh. 13 | * Restart the zabbix-agent process. 14 | * Import the ZabbixDockerTemplate.xml file into Zabbix using the GUI. You can do this from your local computer. 15 | 16 | ## Files: 17 | * userparameter_zabbixdocker.conf - Client-side agent parameter definition 18 | * ZabbixDockerTemplate.xml - File to be imported into Zabbix UI for "Template App Docker" template 19 | * zabbix-docker-discover.py - Python script to provide docker instance discovery. 20 | * zabbix-docker-stats.py - Python helper script to provide information from 'docker stats' 21 | * zabbix-docker-convert.py - Python helper script to convert byte calculations (i.e. GB -> B) 22 | 23 | ## Notes: 24 | * Docker 1.7.1 seems to have an issue where it stops responding after so many commands are issued to it. Several workarounds have been attempted but the long term testing has demonstrated that this is still an issue. This issue wasn't present in Docker versions 1.8.x (and later). 25 | * Approximately half of the discovered keys for a container that are available are disabled by default. You may enable these to your taste and needs. The more data you collect the more storage and processing power you will need. 26 | * The "lifetime" setting for discovered containers is 2 days. You may vary this based on your needs through the Zabbix UI. This value only affects the cleanup of containers that are no longer available. 27 | 28 | ## Testing Information: 29 | * This module was tested using CentOS 6.7, CentOS 7.1, and Ubuntu 14.04 agents and Zabbix server 2.0.16, 2.2.11, and 2.4.7 running on CentOS 6.7. The Docker versions were 1.7.1 and 1.9.1 used for testing. 30 | 31 | ## Disclaimer: 32 | * This code is provided without warranty and responsibility rests completely on the end user for testing, etc. The author is not responsible for any production issues in any way. 
33 | * This code is licensed under [GPLv2](http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html). 34 | 35 | ## Note Bene: 36 | ### If you are using this code successfully, please drop me a line at [richard.p.sedlak@gmail.com](mailto:richard.p.sedlak@gmail.com). I'm just curious if anyone is using it successfully. -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/ReleaseNotes.md: -------------------------------------------------------------------------------- 1 | # Release notes 2 | 3 | ## Release 1.0.3 - 12/22/2015 4 | * #28 - Fixed 'Running instances showing OOMKilled set to true' 5 | * #27 - Fixed 'ID from 'docker info' only returning a partial value' 6 | * #15 - Fixed 'Requests for data appears to be overloading 'docker' command' 7 | 8 | ## Release 1.0.2 - 12/21/2015 9 | * #24 - Fixed container memory % report for Ubuntu 10 | * #23 - Fixed false "down" report for Ubuntu 11 | 12 | ## Release 1.0.1 - 12/21/2015 13 | * #20 - Changed collection delay for docker version 14 | * #18 - Implemented WARNING trigger for paused containers 15 | * #17 - Implemented AVERAGE trigger for OMMKilled containers 16 | * #16 - Corrected install.sh problem for overridden directories 17 | * #15 - Implemented value file caching on zabbix-agent host to keep docker from becoming over loaded. 18 | * #12 - Changed "Docker Storage Driver Deferred Removal Enabled" from text to unsigned int (Boolean value). 
19 | 20 | ## Release 1.0.0 - 12/19/2015 21 | * Feature complete 22 | * Supports docker 'inspect' 23 | * Supports docker 'stats' commands 24 | * Supports docker 'info' 25 | * Supports Zabbix LLD 26 | 27 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # argument processing 4 | 5 | CONFIG_PATH=/etc/zabbix/zabbix_agentd.d/ 6 | 7 | if [ "$#" -eq 1 ]; 8 | then CONFIG_PATH=$1 9 | fi 10 | 11 | # Copy the files 12 | cp -f *.py /usr/local/bin 13 | cp -f *.conf $CONFIG_PATH 14 | cp -f *.xml /tmp 15 | 16 | # tell the user some stuff 17 | echo "Python scripts copied to /usr/local/bin" 18 | echo "zabbix-agent configuration files copied to $CONFIG_PATH" 19 | echo "XML Zabbix Templates copied to /tmp" 20 | echo "" 21 | #echo "You will need to restart the zabbix-agent and import the XML template on your Zabbix server" 22 | 23 | # restarting zabbix-agent service 24 | echo "Restarting Zabbix Agent ... 
" 25 | service zabbix-agent restart && echo "Restarted" 26 | 27 | 28 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | tar zcvf ZabbixDocker.tar.gz *.py *.conf *.xml install.sh 4 | 5 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/testtools/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jdeathe/centos-ssh 2 | 3 | MAINTAINER Richard Sedlak 4 | 5 | USER root 6 | 7 | ENV AP /data/app 8 | ENV PATH $PATH:$AP 9 | 10 | RUN yum -y install gcc 11 | 12 | ADD Makefile $AP/ 13 | ADD memkiller.c $AP/ 14 | 15 | WORKDIR $AP 16 | 17 | RUN make clean all 18 | 19 | CMD ["./memkiller"] 20 | 21 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/testtools/Makefile: -------------------------------------------------------------------------------- 1 | 2 | all: memkiller 3 | 4 | new: clean all 5 | 6 | clean: 7 | echo "Cleaning up" 8 | rm -f memkiller 9 | 10 | memkiller: memkiller.c 11 | echo "Compiling memkiller" 12 | cc memkiller.c -o memkiller 13 | 14 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/testtools/memkiller.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #define SIZE 1024L 5 | 6 | int main ( int argc, char** argv ) 7 | { 8 | char* ptr = NULL; 9 | long size = 0; 10 | 11 | while ( 1 ) 12 | { 13 | #ifdef DEBUG 14 | printf("allocating %ld bytes\n",SIZE); 15 | size += SIZE; 16 | #endif 17 | 18 | ptr = (char*) malloc(SIZE); 19 | 20 | #ifdef DEBUG 21 | printf("%ld allocated thus far\n",size); 22 | #endif 23 | } 24 | 25 | return 0; 26 | } 27 | 28 | 29 | 
-------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/testtools/package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | tar zcvf testtools.tar.gz Makefile memkiller.c testrun.sh Dockerfile 4 | 5 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/testtools/testrun.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # first, build our special docker image 4 | docker build --tag="rpsedlak/memkiller" . 5 | 6 | # next, start some standard stuff 7 | docker run -d --name="httpd-1" httpd 8 | docker run -d --name="httpd-2-pause" httpd 9 | docker run -d --name="httpd-3-kill" httpd 10 | docker run -d --name="redis-1" redis:2.8 11 | docker run -d --name="redis-2-pause" redis:2.8 12 | docker run -d --name="redis-3-kill" redis:2.8 13 | 14 | # next, pause a container 15 | docker pause httpd-2-pause 16 | docker pause redis-2-pause 17 | 18 | # next, kill a container 19 | docker kill httpd-3-kill 20 | docker kill redis-3-kill 21 | 22 | # last, run the memkiller image 23 | docker run -d --memory=4M --name="oom-test" rpsedlak/memkiller 24 | 25 | 26 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/userparameter_zabbixdocker.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Zabbix Docker Monitoring 1.0 3 | # 4 | # Author: Richard Sedlak 5 | # 6 | # Github: git@github.com:rpsedlak/zabbix-docker.git 7 | # 8 | ########################################################## 9 | 10 | UserParameter=docker.version, docker -v 11 | 12 | UserParameter=docker.running.centos, ps -ef | grep 'docker -d' | grep -v grep | wc -l 13 | UserParameter=docker.running.ubuntu, ps -ef | grep 'dockerd' | grep -v grep | wc -l 14 | 15 | 
UserParameter=docker.containers.running, docker ps -q | wc -l 16 | 17 | UserParameter=docker.discovery, /usr/local/bin/zabbix-docker-discover.py 18 | 19 | UserParameter=docker.info[*], /usr/local/bin/zabbix-docker-info.py "$1" 20 | 21 | UserParameter=docker.info.boolean[*], /usr/local/bin/zabbix-docker-info.py "$1" | grep -i true | wc -l 22 | 23 | UserParameter=docker.info.convert[*], docker info 2>/dev/null | grep "$1" | cut -f2 -d: | cut -c2- | /usr/local/bin/zabbix-docker-convert.py 24 | 25 | UserParameter=docker.container.inspect[*], /usr/local/bin/zabbix-docker-inspect.py $1 $2 26 | 27 | UserParameter=docker.container.inspect.boolean[*], /usr/local/bin/zabbix-docker-inspect.py $1 $2 | grep -i true | wc -l 28 | 29 | UserParameter=docker.container.stats[*], /usr/local/bin/zabbix-docker-stats.py $1 $2 30 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/zabbix-docker-convert.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | ################################################################# 4 | # 5 | # zabbix-docker-convert-py 6 | # 7 | # A program that converts between K,M,G,T. 
8 | # 9 | # Version: 1.0 10 | # 11 | # Author: Richard Sedlak 12 | # 13 | ################################################################# 14 | 15 | import sys 16 | 17 | def B(b): 18 | return int(float(b)) 19 | 20 | def KB(b): 21 | return int(float(b) * 1024) 22 | 23 | def MB(b): 24 | return int(float(b) * 1024 * 1024) 25 | 26 | def GB(b): 27 | return int(float(b) * 1024 * 1024 * 1024) 28 | 29 | def TB(b): 30 | return int(float(b) * 1024 * 1024 * 1024 * 1024) 31 | 32 | options = { 33 | 'k':KB, 34 | 'K':KB, 35 | 'm':MB, 36 | 'M':MB, 37 | 'g':GB, 38 | 'G':GB, 39 | 't':TB, 40 | 'T':TB, 41 | 'b':B, 42 | 'B':B 43 | } 44 | 45 | # 46 | # First read from stdin 47 | # 48 | lines = sys.stdin.readlines(81) 49 | 50 | tokens = lines[0].split(" "); 51 | 52 | c = tokens[1][0] 53 | 54 | print options[c](tokens[0]) 55 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/zabbix-docker-discover.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | ################################################################# 4 | # 5 | # zabbix-docker-discover.py 6 | # 7 | # A program that produces LLD information for Zabbix to 8 | # process Docker instances. 
9 | # 10 | # Version: 1.0 11 | # 12 | # Author: Richard Sedlak 13 | # 14 | ################################################################# 15 | 16 | import subprocess 17 | import json 18 | 19 | strings = subprocess.Popen("docker ps -a", shell=True, stdout=subprocess.PIPE).stdout.readlines() 20 | 21 | l=list() 22 | for i in range(1,len(strings)): 23 | pstring = strings[i].split() 24 | d=dict() 25 | d["{#ZD_ID}"]=pstring[0] 26 | d["{#ZD_IMAGE}"]=pstring[1] 27 | d["{#ZD_NAME}"]=pstring[-1] 28 | l.append(d) 29 | 30 | s_json=dict() 31 | s_json["data"]=l 32 | 33 | print json.dumps(s_json) 34 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/zabbix-docker-info.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import sys 4 | import subprocess 5 | import os 6 | import time 7 | 8 | errorString="***NOT FOUND***" 9 | 10 | def local_run_command(cmd,file): 11 | cmd = cmd + " | tee > " + file 12 | if os.path.isfile(file) == False: 13 | os.system(cmd) 14 | else: 15 | (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(file) 16 | ticks=int(time.time()) 17 | delta=ticks-mtime 18 | if (delta > 60): 19 | os.system(cmd) 20 | 21 | strings = open(file,"r").readlines() 22 | return strings 23 | 24 | def findString(strings,term): 25 | found=-1 26 | ndx=0 27 | maxNdx=len(strings) 28 | while (found==-1) and (ndx=0: 35 | retval=strings[found] 36 | return retval 37 | 38 | def getValue(string): 39 | pos=string.index(":") 40 | return string[pos+2:-1] 41 | 42 | 43 | search_for=sys.argv[1] 44 | 45 | cmd="docker info" 46 | filename="/tmp/zabbix-docker-info.out" 47 | 48 | strings = local_run_command(cmd,filename) 49 | 50 | line=findString(strings,search_for) 51 | 52 | if errorString in line: 53 | print search_for, " ", errorString 54 | else: 55 | print getValue(line) 56 | 57 | 
-------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/zabbix-docker-inspect.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | ################################################################# 4 | # 5 | # zabbix-docker-inspect.py 6 | # 7 | # A program that parses the "docker inspect" values for 8 | # reporting data to Zabbix. 9 | # 10 | # Version: 1.0 11 | # 12 | # Author: Richard Sedlak 13 | # 14 | ################################################################# 15 | 16 | import sys 17 | import subprocess 18 | import os 19 | import time 20 | import json 21 | 22 | ################################################################# 23 | # sys.argv[1] - the instanceID of the docker container 24 | # sys.argv[2] - the JSON value of the key to collect 25 | ################################################################# 26 | 27 | 28 | def local_run_command(cmd,file): 29 | cmd = cmd + " | tee > " + file 30 | if os.path.isfile(file) == False: 31 | os.system(cmd) 32 | else: 33 | (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(file) 34 | ticks=int(time.time()) 35 | delta=ticks-mtime 36 | if (delta > 60): 37 | os.system(cmd) 38 | 39 | strings = open(file,"r").read() 40 | return strings 41 | 42 | cmd="docker inspect " + sys.argv[1] 43 | strings = local_run_command(cmd,"/tmp/zabbix-docker-inspect-"+sys.argv[1]+".out") 44 | 45 | parsed_json = json.loads(strings) 46 | 47 | key_path = sys.argv[2].split('.') 48 | 49 | ptr = parsed_json[0] 50 | 51 | for i in range(0,len(key_path)): 52 | ptr=ptr[key_path[i]] 53 | 54 | print ptr 55 | -------------------------------------------------------------------------------- /Templates/Docker/zabbix-docker-master/zabbix-docker-stats.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | 
################################################################# 4 | # 5 | # zabbix-docker-stats.py 6 | # 7 | # A program that produces information for Zabbix to 8 | # process Docker container statistics. 9 | # 10 | # Version: 1.0 11 | # 12 | # Author: Richard Sedlak 13 | # 14 | ################################################################# 15 | 16 | import sys 17 | import subprocess 18 | import os 19 | import time 20 | 21 | def B(b): 22 | return int(float(b)) 23 | 24 | def KB(b): 25 | return int(float(b) * 1024) 26 | 27 | def MB(b): 28 | return int(float(b) * 1024 * 1024) 29 | 30 | def GB(b): 31 | return int(float(b) * 1024 * 1024 * 1024) 32 | 33 | def TB(b): 34 | return int(float(b) * 1024 * 1024 * 1024 * 1024) 35 | 36 | size_options = { 37 | 'k':KB, 38 | 'K':KB, 39 | 'm':MB, 40 | 'M':MB, 41 | 'g':GB, 42 | 'G':GB, 43 | 't':TB, 44 | 'T':TB, 45 | 'b':B, 46 | 'B':B 47 | } 48 | 49 | 50 | def pcpu(data): 51 | pdata=data.split() 52 | pcpu_data=pdata[1].split('%')[0] 53 | return pcpu_data 54 | 55 | def umem(data): 56 | pdata=data.split('/')[0].split() 57 | value = size_options[pdata[3][0]](pdata[2]) 58 | return value 59 | 60 | def lmem(data): 61 | pdata=data.split('/')[1].split() 62 | value = size_options[pdata[1][0]](pdata[0]) 63 | return value 64 | 65 | def pmem(data): 66 | pdata=data.split('/')[1].split() 67 | pmem_data=pdata[2].split('%')[0] 68 | return pmem_data 69 | 70 | def inet(data): 71 | pdata=data.split('/')[1].split() 72 | value = size_options[pdata[4][0]](pdata[3]) 73 | return value 74 | 75 | def onet(data): 76 | pdata=data.split('/')[2].split() 77 | value = size_options[pdata[1][0]](pdata[0]) 78 | return value 79 | 80 | options = { 81 | 'pcpu':pcpu, 82 | 'umem':umem, 83 | 'lmem':lmem, 84 | 'pmem':pmem, 85 | 'inet':inet, 86 | 'onet':onet 87 | } 88 | 89 | def local_run_command(cmd,file): 90 | cmd = cmd + " | tee > " + file 91 | if os.path.isfile(file) == False: 92 | os.system(cmd) 93 | else: 94 | (mode, ino, dev, nlink, uid, gid, size, atime, mtime, 
ctime) = os.stat(file) 95 | ticks=int(time.time()) 96 | delta=ticks-mtime 97 | if (delta > 60): 98 | os.system(cmd) 99 | 100 | strings = open(file,"r").readlines() 101 | return strings 102 | 103 | 104 | container=sys.argv[1] 105 | key=sys.argv[2] 106 | 107 | cmd="docker stats --no-stream=true " + container 108 | strings = local_run_command(cmd,"/tmp/zabbix-docker-stats-"+container+".out") 109 | 110 | print options[key](strings[1]) 111 | 112 | -------------------------------------------------------------------------------- /Templates/FFmpeg Chunks/chunk.conf: -------------------------------------------------------------------------------- 1 | #Chunk dir size 2 | 3 | UserParameter = chunk.dir.size, sudo du -s /srv/stream_control/shared/hls/live | cut -f1 -------------------------------------------------------------------------------- /Templates/FFmpeg Chunks/ffmpeg_chunks_template.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2017-02-01T13:23:38Z 5 | 6 | 7 | Templates 8 | 9 | 10 | 11 | 75 | 76 | 77 | 78 | Chunk folder size 79 | 900 80 | 200 81 | 0.0000 82 | 100.0000 83 | 1 84 | 1 85 | 0 86 | 1 87 | 0 88 | 0.0000 89 | 0.0000 90 | 0 91 | 0 92 | 0 93 | 0 94 | 95 | 96 | 0 97 | 1 98 | 1A7C11 99 | 0 100 | 2 101 | 0 102 | 103 | Template ffmpeg chunks 104 | chunk.dir.size 105 | 106 | 107 | 108 | 109 | 110 | 111 | -------------------------------------------------------------------------------- /Templates/Graylog2/README.md: -------------------------------------------------------------------------------- 1 | INSTALLATION 2 | 3 | * Create a Graylog2 user with the "reader" role 4 | * copy graylog.conf to /etc/zabbix/zabbix_agentd.d/ 5 | * change username & password to your own in /etc/zabbix/zabbix_agentd.d/graylog.conf 6 | * restart zabbix-agent 7 | * upload XML-file to Zabbix GUI 8 | 9 | -------------------------------------------------------------------------------- /Templates/Graylog2/graylog.conf: 
-------------------------------------------------------------------------------- 1 | UserParameter=graylog.state[*], curl -u USER:PASSWORD -sS http://127.0.0.1/api/system/ | python -m json.tool | grep $1 | awk '{print $NF}' | cut -f1 -d, -------------------------------------------------------------------------------- /Templates/Graylog2/graylog.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2017-06-21T12:03:21Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 215 | 216 | 217 | 218 | {Template Graylog2:net.tcp.service[{$GRAYLOG_PROTOCOL},{$GRAYLOG_URL}].last()}=0 219 | API port is DOWN 220 | 221 | 0 222 | 4 223 | 224 | 0 225 | 226 | 227 | 228 | {Template Graylog2:graylog.state[lb_status].str(alive)}=0 229 | Node is marked as DOWN 230 | 231 | 0 232 | 2 233 | 234 | 0 235 | 236 | 237 | API port is DOWN 238 | {Template Graylog2:net.tcp.service[{$GRAYLOG_PROTOCOL},{$GRAYLOG_URL}].last()}=0 239 | 240 | 241 | 242 | 243 | {Template Graylog2:graylog.state[is_processing].str(false)}=1 244 | Node is not processing 245 | 246 | 0 247 | 3 248 | 249 | 0 250 | 251 | 252 | API port is DOWN 253 | {Template Graylog2:net.tcp.service[{$GRAYLOG_PROTOCOL},{$GRAYLOG_URL}].last()}=0 254 | 255 | 256 | 257 | 258 | {Template Graylog2:graylog.state[lifecycle].str(running)}=0 259 | Node is not running 260 | 261 | 0 262 | 3 263 | 264 | 0 265 | 266 | 267 | API port is DOWN 268 | {Template Graylog2:net.tcp.service[{$GRAYLOG_PROTOCOL},{$GRAYLOG_URL}].last()}=0 269 | 270 | 271 | 272 | 273 | 274 | 275 | Service state 276 | 277 | 278 | 0 279 | Down 280 | 281 | 282 | 1 283 | Up 284 | 285 | 286 | 287 | 288 | 289 | -------------------------------------------------------------------------------- /Templates/IOstat/iostat-collect.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Description: Script for iostat monitoring 3 | # Author: Epikhin Mikhail michael@nomanlab.org 4 | # Revision 1: 
Lesovsky A.V. lesovsky@gmail.com 5 | 6 | SECONDS=$2 7 | TOFILE=$1 8 | IOSTAT="/usr/bin/iostat" 9 | 10 | [[ $# -lt 2 ]] && { echo "FATAL: some parameters not specified"; exit 1; } 11 | 12 | DISK=$($IOSTAT -x 1 $SECONDS | awk 'BEGIN {check=0;} {if(check==1 && $1=="avg-cpu:"){check=0}if(check==1 && $1!=""){print $0}if($1=="Device:"){check=1}}' | tr '\n' '|') 13 | echo $DISK | sed 's/|/\n/g' > $TOFILE 14 | echo 0 -------------------------------------------------------------------------------- /Templates/IOstat/iostat-parse.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Description: Script for disk monitoring 3 | # Author: Epikhin Mikhail michael@nomanlab.org 4 | # Revision 1: Lesovsky A.V. lesovsky@gmail.com 5 | 6 | NUMBER=0 7 | FROMFILE=$1 8 | DISK=$2 9 | METRIC=$3 10 | 11 | [[ $# -lt 3 ]] && { echo "FATAL: some parameters not specified"; exit 1; } 12 | [[ -f "$FROMFILE" ]] || { echo "FATAL: datafile not found"; exit 1; } 13 | 14 | case "$3" in 15 | "rrqm/s") 16 | NUMBER=2 17 | ;; 18 | "wrqm/s") 19 | NUMBER=3 20 | ;; 21 | "r/s") 22 | NUMBER=4 23 | ;; 24 | "w/s") 25 | NUMBER=5 26 | ;; 27 | "rkB/s") 28 | NUMBER=6 29 | ;; 30 | "wkB/s") 31 | NUMBER=7 32 | ;; 33 | "avgrq-sz") 34 | NUMBER=8 35 | ;; 36 | "avgqu-sz") 37 | NUMBER=9 38 | ;; 39 | "await") 40 | NUMBER=10 41 | ;; 42 | "r_await") 43 | NUMBER=11 44 | ;; 45 | "w_await") 46 | NUMBER=12 47 | ;; 48 | "svctm") 49 | NUMBER=13 50 | ;; 51 | "util") 52 | NUMBER=14 53 | ;; 54 | *) echo ZBX_NOTSUPPORTED; exit 1 ;; 55 | esac 56 | 57 | grep -w $DISK $FROMFILE | tail -n +2 | tr -s ' ' |awk -v N=$NUMBER 'BEGIN {sum=0.0;count=0;} {sum=sum+$N;count=count+1;} END {printf("%.2f\n", sum/count);}' -------------------------------------------------------------------------------- /Templates/IOstat/iostat.conf: -------------------------------------------------------------------------------- 1 | # Disk statistics via iostat (sysstat) 2 | # Attention: Second parameter in 
iostat.collect must be less than Timeout option in zabbix_agentd.conf 3 | UserParameter=iostat.discovery,/usr/bin/iostat -d | awk 'BEGIN {check=0;count=0;array[0]=0;} {if(check==1 && $1 != ""){array[count]=$1;count=count+1;}if($1=="Device:"){check=1;}} END {printf("{\n\t\"data\":[\n");for(i=0;i/dev/null 2>&1; then echo 0; else echo 1; fi 2 | UserParameter=jenkins.job.discovery, /usr/bin/python /etc/zabbix/scripts/jenkins/jenkins.job.status.py discovery 3 | UserParameter=job.status[*], /usr/bin/python /etc/zabbix/scripts/jenkins/jenkins.job.status.py status $1 -------------------------------------------------------------------------------- /Templates/Jenkins/jenkins.job.status.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # desc: Jenkins' job monitoring with LLD 3 | 4 | import os 5 | import sys 6 | import time 7 | import json 8 | import baker 9 | import requests 10 | import urllib2 11 | import base64 12 | from configobj import ConfigObj 13 | from datetime import datetime as dt 14 | 15 | try: 16 | config = ConfigObj('/etc/zabbix/scripts/jenkins/config') 17 | HOSTNAME = config.get('hostname') 18 | USERNAME = config.get('username') 19 | PASSWORD = config.get('password') 20 | JENKINS_URL = config.get('jenkins_url') 21 | PREFIX = config.get('prefix',"") 22 | except Exception as E: 23 | print E 24 | sys.exit(1) 25 | 26 | if PREFIX == "": 27 | PREFIX = ["HIGH-DTB","HIGH-BI","DISASTER-DTB","HIGH-SUPPORT"] 28 | 29 | # Discoverying jenkins' job by Zabbix 30 | def _discovery(prefix=""): 31 | jobs = requests.get(JENKINS_URL + '/view/All/api/json', auth=(USERNAME,PASSWORD)) 32 | data = { 'data':[] } 33 | if prefix.lower() == 'all': 34 | for job in jobs.json().get('jobs'): 35 | if job.get('color') != "disabled": 36 | data['data'].append({'{#JOBNAME}' : job.get('name') }) 37 | elif prefix is not None: 38 | for job in jobs.json().get('jobs'): 39 | if job.get('name').upper().startswith(prefix.upper()) and 
job.get('color') != "disabled" : 40 | data['data'].append({'{#JOBNAME}' : job.get('name') }) 41 | return json.dumps(data) 42 | 43 | # Get result status of job 44 | def _status(name="",maxtime=0): 45 | request = urllib2.Request(JENKINS_URL + '/job/' + name + '/lastBuild/api/json') 46 | base64string = base64.b64encode('%s:%s' % (USERNAME, PASSWORD)) 47 | request.add_header("Authorization", "Basic %s" % base64string) 48 | result = urllib2.urlopen(request) 49 | data = json.load(result) 50 | jobStatus = data['result'] 51 | print (jobStatus) 52 | 53 | @baker.command 54 | def discovery(prefix=""): 55 | return _discovery(prefix=prefix) 56 | 57 | @baker.command 58 | def status(name="",maxtime=0): 59 | _status(name=name,maxtime=maxtime) 60 | 61 | if __name__ == "__main__": 62 | baker.run() -------------------------------------------------------------------------------- /Templates/Jenkins/jenkins.job.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2017-12-14T13:10:29Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 169 | 170 | 171 | 172 | {Jenkins Job:jenkins.api.connection.last()}<>0 173 | Jenkins' API is unavaiable 174 | 175 | 0 176 | 3 177 | 178 | 0 179 | 180 | 181 | 182 | 183 | -------------------------------------------------------------------------------- /Templates/Jenkins/requirements.txt: -------------------------------------------------------------------------------- 1 | * Baker==1.3 2 | * requests==2.20.0 3 | * configobj==5.0.6 4 | 5 | * active zabbix-agent configured -------------------------------------------------------------------------------- /Templates/Lm-sensors/lm.sensors.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2017-11-29T12:40:38Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 177 | 178 | 179 | -------------------------------------------------------------------------------- /Templates/Lm-sensors/lmsensors.conf: 
-------------------------------------------------------------------------------- 1 | UserParameter=lmsensors[*],/etc/zabbix/scripts/lmsensors.sh "none" $1 -------------------------------------------------------------------------------- /Templates/Lm-sensors/lmsensors.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #thanks to source: http://wiki.enchtex.info/howto/zabbix/zabbix_lmsensors_monitoring 3 | 4 | #with some versions of devices, you have to run follows: sed -i 's/physical/package/g' 5 | 6 | export LC_ALL="" 7 | export LANG="en_US.UTF-8" 8 | # 9 | if [[ -z $1 || -z $2 ]]; then 10 | ##### DISCOVERY ##### 11 | PROCESSORS=`sensors 2>&1 | awk '{if (tolower($1)~"physical") {counter +=1;}} END {for (i=1; i<=counter; i+=1) printf("CPU%1d\n", i-1); }'` 12 | if [[ -n ${PROCESSORS} ]]; then 13 | JSON="{ \"data\":[" 14 | SEP="" 15 | for CPU in ${PROCESSORS}; do 16 | JSON=${JSON}"$SEP{\"{#CPUNAME}\":\"${CPU}\"}" 17 | SEP=", " 18 | done 19 | JSON=${JSON}"]}" 20 | echo ${JSON} 21 | fi 22 | exit 0 23 | else 24 | ##### PARAMETERS ##### 25 | HOST="$1" 26 | CPUNAME="$2" 27 | TABLE=`sensors 2>&1 | awk '{if (tolower($1)~"physical") { counter +=1; } if (tolower($1)=="core") 28 | { if ($3 > temperature[counter]) temperature[counter] = $3;}} 29 | END {for (i=1; i<=counter; i+=1) printf ( "CPU%1d %2.1f\n", i-1, temperature[i]); }'` 30 | echo "${TABLE}" | awk "/${CPUNAME}/ {print \$2}" | head -n1 31 | # 32 | fi -------------------------------------------------------------------------------- /Templates/Logfiles monitoring/README.md: -------------------------------------------------------------------------------- 1 | Don't forget to add macros {$LOGFILE_PATH} to your host!!!! 2 | By default it looks at /var/log/, but in your case you cloud see to any other directory. 3 | 4 | 5 | Plugin for logfile monitoring. 6 | It finds out files with name like *.log by itself. 
7 | 8 | Then in each file it looks at content of file and trying to catch exception. 9 | If TRUE then trigger notifies you. 10 | 11 | If you don't need to monitor exceptions, just add another one item as you need. 12 | 13 | Don't forget to add macros {$LOGFILE_PATH} to your host!!!! -------------------------------------------------------------------------------- /Templates/Logfiles monitoring/log.monitoring.conf: -------------------------------------------------------------------------------- 1 | UserParameter=discovery.logfile.path[*], /etc/zabbix/scripts/logfile.discovery.py $1 -------------------------------------------------------------------------------- /Templates/Logfiles monitoring/logfile.discovery.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import os 4 | import sys 5 | import json 6 | 7 | logdir = sys.argv[1] 8 | 9 | data = [] 10 | 11 | for (logdir, _, files) in os.walk(logdir): 12 | for f in files: 13 | if f.endswith(".log"): 14 | path = os.path.join(logdir, f) 15 | data.append({'{#LOGFILEPATH}':path}) 16 | jsondata = json.dumps(data) 17 | 18 | print json.dumps({"data": data}) -------------------------------------------------------------------------------- /Templates/Logfiles monitoring/logfile.monitoring.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2018-01-24T16:24:43Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 186 | 187 | 188 | -------------------------------------------------------------------------------- /Templates/MD-RAID/README.txt: -------------------------------------------------------------------------------- 1 | It was borrowed from https://github.com/linuxsquad/zabbix_mdraid 2 | 3 | Zabbix template handles Software RAID (MD) on Linux 4 | ================== 5 | 6 | Design and Implementaion: 7 | ----------------- 8 | 9 | - auto-discovery for all active MDs 10 | - no assumption made about MD name 11 | - 
currently, only two HDD/SSD reported as members of the array 12 | - trigger is constructed to monitor RAID State. 13 | - to avoid flipping, the trigger will fire if state change sustain for 14 | more than one collection cycle 15 | 16 | 17 | TO DO list 18 | ------ 19 | 20 | - indtroduce items: failed device, number of failed devices 21 | - auto-discover array devices 22 | 23 | 24 | Append to zabbix_agentd.conf file 25 | ---------------- 26 | 27 | chmod 755 /etc/zabbix/scripts/zabbix_mdraid.sh 28 | 29 | Note 30 | ---- 31 | **don't forget to add zabbix user to sudoers** 32 | 33 | 34 | Referrence: 35 | ------- 36 | 37 | - https://www.kernel.org/doc/Documentation/md.txt 38 | - http://unix.stackexchange.com/questions/47163/whats-the-difference-between-mdadm-state-active-and-state-clean 39 | - Zabbix LSI RAID template https://www.zabbix.com/wiki/templates/start 40 | 41 | -------------------------------------------------------------------------------- /Templates/MD-RAID/raid.conf: -------------------------------------------------------------------------------- 1 | # Software RAID 2 | 3 | UserParameter=mdraid[*], sudo /etc/zabbix/scripts/zabbix_mdraid.sh -m'$1' -$2'$3' 4 | UserParameter=mdraid.discovery, sudo /etc/zabbix/scripts/zabbix_mdraid.sh -D -------------------------------------------------------------------------------- /Templates/MD-RAID/zabbix_mdraid.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # https://github.com/linuxsquad/zabbix_mdraid/blob/master/zabbix_mdraid.sh 3 | 4 | #echo "OPTIND is now $OPTIND" 5 | while getopts ":Dm:e:s:d:" optname 6 | do 7 | case "$optname" in 8 | "e") 9 | # Extract string values 10 | /sbin/mdadm --detail ${MD_dev} | grep "${OPTARG}" | awk -F":" '{print $2}' | tr -d [[:space:]] 11 | ;; 12 | "s") 13 | # echo "Size of the array" 14 | /sbin/mdadm --detail ${MD_dev} | grep "${OPTARG}" | awk -F":" '{print $2}' | sed -e "s/(.*//" | tr -d [[:space:]] 15 | ;; 16 | "d") 17 | # echo 
"Devices in the array" 18 | /sbin/mdadm --detail ${MD_dev} | tail -n+2 | grep "/dev/" | awk -v x=${OPTARG} '$4 == x {print $5,$6,$7}' 19 | ;; 20 | "m") 21 | # echo "Setting MD RAID" 22 | MD_dev="${OPTARG}" 23 | ;; 24 | "D") 25 | # echo "Discovery" 26 | echo -e "{\n\t\"data\":[" 27 | typeset -i nbLines 28 | typeset -i cntLines=0 29 | nbLines=`cat /proc/mdstat | grep ^md | wc -l` 30 | cat /proc/mdstat | grep ^md | while read line 31 | do 32 | cntLines=${cntLines}+1 33 | MDdev=`echo $line | awk '{print $1}'` 34 | if [ ${cntLines} -eq ${nbLines} ]; then 35 | echo -e "\t{ \"{#MD_DEVICE}\":\t\"/dev/${MDdev}\" }" 36 | else 37 | echo -e "\t{ \"{#MD_DEVICE}\":\t\"/dev/${MDdev}\" }," 38 | fi 39 | done 40 | echo -e "\t]\n}" 41 | ;; 42 | "?") 43 | echo "Unknown option $OPTARG" 44 | ;; 45 | ":") 46 | echo "No argument value for option $OPTARG" 47 | ;; 48 | *) 49 | # Should not occur 50 | echo "Unknown error while processing options" 51 | ;; 52 | esac 53 | # echo "OPTIND is now $OPTIND" 54 | done -------------------------------------------------------------------------------- /Templates/Memcache/README.txt: -------------------------------------------------------------------------------- 1 | mkdir /etc/zabbix/scripts/; chown root:root -R /etc/zabbix/scripts/; chmod 755 /etc/zabbix/scripts/; nano /etc/zabbix/scripts/memcache-stats.sh 2 | 3 | chown root:root /etc/zabbix/scripts/memcache-stats.sh; chmod 755 /etc/zabbix/scripts/memcache-stats.sh; sudo -u zabbix /etc/zabbix/scripts/memcache-stats.sh "none" uptime 4 | 5 | based on: http://wiki.enchtex.info/howto/zabbix/zabbix_memcache_monitoring -------------------------------------------------------------------------------- /Templates/Memcache/memcache-stats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ##### OPTIONS VERIFICATION ##### 3 | if [[ -z "$1" || -z "$2" ]]; then 4 | exit 1 5 | fi 6 | ##### PARAMETERS ##### 7 | RESERVED="$1" 8 | METRIC="$2" 9 | NC="/bin/nc" # or 
/usr/bin/nc 10 | CACHE_TTL="55" 11 | CACHE_FILE="/tmp/zabbix.memcache.cache" 12 | EXEC_TIMEOUT="1" 13 | NOW_TIME=`date '+%s'` 14 | ##### RUN ##### 15 | if [ -s "${CACHE_FILE}" ]; then 16 | CACHE_TIME=`stat -c"%Y" "${CACHE_FILE}"` 17 | else 18 | CACHE_TIME=0 19 | fi 20 | DELTA_TIME=$((${NOW_TIME} - ${CACHE_TIME})) 21 | # 22 | if [ ${DELTA_TIME} -lt ${EXEC_TIMEOUT} ]; then 23 | sleep $((${EXEC_TIMEOUT} - ${DELTA_TIME})) 24 | elif [ ${DELTA_TIME} -gt ${CACHE_TTL} ]; then 25 | echo "" >> "${CACHE_FILE}" # !!! 26 | DATACACHE=`echo -e "stats\nquit" | ${NC} -q2 127.0.0.1 11211` 27 | echo "${DATACACHE}" > "${CACHE_FILE}" # !!! 28 | chmod 640 "${CACHE_FILE}" 29 | fi 30 | # 31 | cat "${CACHE_FILE}" | grep -i "STAT ${METRIC} " | awk '{print $3}' | head -n1 32 | # 33 | exit 0 -------------------------------------------------------------------------------- /Templates/Memcache/memcache.conf: -------------------------------------------------------------------------------- 1 | #Memcached 2 | UserParameter=memcache[*], /etc/zabbix/scripts/memcache-stats.sh "none" $1 -------------------------------------------------------------------------------- /Templates/Microservices/microservice.get.data.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import json 5 | import urllib2 6 | import sys 7 | 8 | service = sys.argv[1] 9 | port = sys.argv[2] 10 | ip = sys.argv[3] 11 | resfile = '/tmp/microservice.' 
+ service + '.status.tmp' 12 | 13 | # Make URL 14 | url = 'http://' + ip + ':' + port + '/status' 15 | 16 | # Read to file 17 | response = urllib2.urlopen(url) 18 | myfile = response.readline() 19 | 20 | with open(resfile, 'w') as f: 21 | f.write(myfile) 22 | -------------------------------------------------------------------------------- /Templates/Microservices/microservice.parse.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import json 5 | import sys 6 | 7 | service = sys.argv[1] 8 | resfile = '/tmp/microservice.' + service + '.status.tmp' 9 | 10 | with open(resfile) as f: 11 | data = json.load(f) 12 | 13 | state = [] 14 | 15 | for key, value in data["dependencies"].items(): 16 | if value == 1: 17 | state.append(key) 18 | 19 | if state: 20 | print (str(state)) 21 | else: 22 | print ('OK') -------------------------------------------------------------------------------- /Templates/Microservices/microservice.status.conf: -------------------------------------------------------------------------------- 1 | UserParameter = microservice.get.data[*], sudo python /etc/zabbix/scripts/microservices/microservice.get.data.py $1 $2 $3 && echo "OK" 2 | UserParameter = microservice.status[*], sudo python /etc/zabbix/scripts/microservices/microservice.parse.py $1 -------------------------------------------------------------------------------- /Templates/Monitoring email sending/aws.ses.test.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2018-05-22T08:31:18Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 81 | 82 | 83 | 84 | {AWS SES Test:test.email.ses.last()}<>1 85 | AWS SES: Sending email issue 86 | 87 | 0 88 | 2 89 | Notrifications for email checking 90 | 0 91 | 92 | 93 | 94 | 95 | -------------------------------------------------------------------------------- /Templates/Monitoring email sending/email.test.php: 
-------------------------------------------------------------------------------- 1 | isSMTP(); 6 | $mail->setFrom('from@example.com', 'Test Subject'); 7 | $mail->addAddress('to@example.com', 'recieverName'); 8 | // Replace smtp_username with your Amazon SES SMTP user name. 9 | $mail->Username = "YOURSESUSER"; 10 | $mail->Password = "YOURSESPASSWORD"; 11 | $mail->SMTPSecure = "ssl"; 12 | $mail->Port = 465; 13 | $mail->From = "from@example.com"; 14 | $mail->FromName = "TestEmail"; 15 | // If you're using Amazon SES in a region other than US West (Oregon), 16 | // replace email-smtp.us-west-2.amazonaws.com with the Amazon SES SMTP 17 | // endpoint in the appropriate region. 18 | //$mail->Host = "email-smtp.eu-west-1.amazonaws.com"; 19 | $mail->Host = "email-smtp.eu-west-1.amazonaws.com"; 20 | $mail->Subject = 'Amazon SES test (SMTP interface accessed using PHP)'; 21 | $mail->Body = 'Sending... OK'; 22 | $mail->SMTPAuth = true; 23 | $mail->SMTPSecure = 'ssl'; 24 | $mail->Port = 465; 25 | $mail->SMTPDebug = 4; 26 | // Tells PHPMailer to send HTML-formatted email 27 | $mail->isHTML(true); 28 | // The alternative email body; this is only displayed when a recipient 29 | // opens the email in a non-HTML email client. The \r\n represents a 30 | // line break. 31 | $mail->AltBody = "Email Test\r\nThis email was sent through the 32 | Amazon SES SMTP interface using the PHPMailer class."; 33 | 34 | if(!$mail->send()) { 35 | echo "Email not sent. " , $mail->ErrorInfo , PHP_EOL; 36 | } else { 37 | echo "Email sent!" , PHP_EOL; 38 | } 39 | ?> -------------------------------------------------------------------------------- /Templates/Monitoring email sending/sending.email.test.conf: -------------------------------------------------------------------------------- 1 | #Check sending emails notification via AWS SES 2 | UserParameter=test.email.ses, /usr/bin/php7.0 /etc/zabbix/scripts/email.monitoring/email.test.php | grep -c "Sending... 
OK" -------------------------------------------------------------------------------- /Templates/MySQL/MySQL Slave/mysql-slave.conf: -------------------------------------------------------------------------------- 1 | #Monitoring MySQL replication 2 | #UserParameter=mysql.slave[*], HOME=/var/lib/zabbix mysql -e "show slave status\G" | awk '/'$1'':'/{gsub(/NULL/,"99999",$$2); print $$2}' | awk '{print $2}' 3 | UserParameter=mysql.slave[*], HOME=/etc/zabbix/ mysql -e "show slave status\G" | awk '/'$1'':'/{gsub(/NULL/,"99999",$$2); print $$2}' | awk '{print $2}' -------------------------------------------------------------------------------- /Templates/MySQL/Template MySQL Backup.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2017-08-28T11:59:53Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 204 | 205 | 206 | 207 | ({Template MySQL Backup:vfs.file.size[/home/lounges/csgl/csgl-bets-latest.sql].last()}-{Template MySQL Backup:vfs.file.size[/home/lounges/csgl/csgl-bets-latest.sql].last(#2)})<=0 and ({Template MySQL Backup:vfs.file.size[/home/lounges/csgl/csgl-bets-latest.sql].last(#2)}-{Template MySQL Backup:vfs.file.size[/home/lounges/csgl/csgl-bets-latest.sql].last(#3)})<=0 208 | MySQL Backup: check csgl-bets backup collecting 209 | 210 | 0 211 | 2 212 | 213 | 0 214 | 215 | 216 | 217 | ({Template MySQL Backup:vfs.file.size[/home/lounges/csgl/csgl-trades-latest.sql].last()}-{Template MySQL Backup:vfs.file.size[/home/lounges/csgl/csgl-trades-latest.sql].last(#2)})<=0 and ({Template MySQL Backup:vfs.file.size[/home/lounges/csgl/csgl-trades-latest.sql].last(#2)}-{Template MySQL Backup:vfs.file.size[/home/lounges/csgl/csgl-trades-latest.sql].last(#3)})<=0 218 | MySQL Backup: check csgl-trades backup collecting 219 | 220 | 0 221 | 2 222 | 223 | 0 224 | 225 | 226 | 227 | ({Template MySQL Backup:vfs.file.size[/home/lounges/d2l/d2l-bets-latest.sql].last()}-{Template MySQL 
Backup:vfs.file.size[/home/lounges/d2l/d2l-bets-latest.sql].last(#2)})<=0 and ({Template MySQL Backup:vfs.file.size[/home/lounges/d2l/d2l-bets-latest.sql].last(#2)}-{Template MySQL Backup:vfs.file.size[/home/lounges/d2l/d2l-bets-latest.sql].last(#3)})<=0 228 | MySQL Backup: check d2l-bets backup collecting 229 | 230 | 0 231 | 2 232 | 233 | 0 234 | 235 | 236 | 237 | ({Template MySQL Backup:vfs.file.size[/home/lounges/d2l/d2l-trades-latest.sql].last()}-{Template MySQL Backup:vfs.file.size[/home/lounges/d2l/d2l-trades-latest.sql].last(#2)})<=0 and ({Template MySQL Backup:vfs.file.size[/home/lounges/d2l/d2l-trades-latest.sql].last(#2)}-{Template MySQL Backup:vfs.file.size[/home/lounges/d2l/d2l-trades-latest.sql].last(#3)})<=0 238 | MySQL Backup: check d2l-trades backup collecting 239 | 240 | 0 241 | 2 242 | 243 | 0 244 | 245 | 246 | 247 | 248 | -------------------------------------------------------------------------------- /Templates/MySQL/mysql.conf: -------------------------------------------------------------------------------- 1 | # Key syntax is mysql.status[variable]. 2 | UserParameter=mysql.status[*],echo "show global status where Variable_name='$1';" | HOME=/etc/zabbix mysql -N | awk '{print $$2}' 3 | 4 | # Key syntax is mysql.size[,,]. 5 | UserParameter=mysql.size[*],bash -c 'echo "select sum($(case "$3" in both|"") echo "data_length+index_length";; data|index) echo "$3_length";; free) echo "data_free";; esac)) from information_schema.tables$([[ "$1" = "all" || ! 
"$1" ]] || echo " where table_schema=\"$1\"")$([[ "$2" = "all"$ 6 | UserParameter=mysql.ping,HOME=/etc/zabbix mysqladmin ping | grep -c alive 7 | UserParameter=mysql.version,mysql -V 8 | -------------------------------------------------------------------------------- /Templates/Nginx/README.txt: -------------------------------------------------------------------------------- 1 | Prepare Nginx - turning on status-module: 2 | 3 | location /status { 4 | stub_status on; 5 | access_log off; 6 | allow 127.0.0.1; 7 | deny all; 8 | } 9 | 10 | 11 | # put shell-script in /etc/zabbix/scripts/ 12 | # chmod 755 ... 13 | 14 | Create file /tmp/status_nginx.tmp with permissions for Zabbix user 15 | 16 | 17 | In template there macroses were described: 18 | 19 | {$NGINX_CON_NUM} - requests 20 | {$NGINX_REQ_NUM} - connection 21 | 22 | You should make the same on Nginx-hosts, and define your own threshold. -------------------------------------------------------------------------------- /Templates/Nginx/nginx-cache/nginx_cache.conf: -------------------------------------------------------------------------------- 1 | #Nginx Cache 2 | 3 | UserParameter=nginx_cache[*],/etc/zabbix/scripts/nginx_cache.sh $1 -------------------------------------------------------------------------------- /Templates/Nginx/nginx-cache/nginx_cache.sh: -------------------------------------------------------------------------------- 1 | # put it in /etc/zabbix/scripts/ 2 | # chmod 755 ... 
3 | #!/bin/bash 4 | 5 | LOGFILE=/var/log/nginx/cache.log 6 | awk '{print $3}' $LOGFILE | sort | uniq -c | sort -r | grep "S1" -i | awk '{print $1}' 7 | 8 | #9 MISS 9 | #6 STALE 10 | #19 BYPASS 11 | #156 – 12 | #127 HIT 13 | 14 | exit 0 15 | 16 | -------------------------------------------------------------------------------- /Templates/Nginx/nginx-performance-time/nginx.performance.time.conf: -------------------------------------------------------------------------------- 1 | UserParameter=nginx.upstream.response.time[*], grep $1 /tmp/nginx.upstream.response.time.tmp | cut -f2 -d= 2 | UserParameter=nginx.request.time[*], grep $1 /tmp/nginx.request.time.tmp | cut -f2 -d= -------------------------------------------------------------------------------- /Templates/Nginx/nginx-performance-time/nginx.request.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Preparing 4 | #put to /etc/zabbix/scripts/nginx.request.time.sh 5 | #add task to cron: * * * * * bash /etc/zabbix/scripts/nginx.request.time.sh 6 | #chmod 744 $RESFILE #for reading by Zabbix-agent 7 | 8 | LOGFILE='/home/log/nginx/access.log' 9 | RESFILE='/tmp/nginx.request.time.tmp' 10 | 11 | CURRENT_TIME=:$(date +"%H:%M" --date 'now - 1 minutes'): # timestamp - 1 muniute = previous 12 | 13 | #min 14 | a=$(grep $CURRENT_TIME $LOGFILE | awk '{print $(NF-3)}' | grep request_time | grep "\." | cut -f2 -d= | sort | head -n 1) 15 | echo "min="${a::6} > $RESFILE #rewrite this file 16 | 17 | #max 18 | a=$(grep $CURRENT_TIME $LOGFILE | awk '{print $(NF-3)}' | grep request_time | grep "\." | cut -f2 -d= | sort | tail -n 1) 19 | echo "max="${a::6} >> $RESFILE 20 | 21 | #percentile 22 | 23 | 24 | #mean 25 | NUMBER=$(grep $CURRENT_TIME $LOGFILE | awk '{print $(NF-3)}' | grep request_time | grep "\." | cut -f2 -d= | sort | wc -l) 26 | SUM=$(grep $CURRENT_TIME $LOGFILE | awk '{print $(NF-3)}' | grep request_time | grep "\." 
| cut -f2 -d= | awk '{s+=$1} END {print s}' | cut -f1 -d.) 27 | MEAN=$(echo - | awk -v n=$NUMBER -v s=$SUM '{ print s / n }') 28 | echo "mean="${MEAN::6} >> $RESFILE -------------------------------------------------------------------------------- /Templates/Nginx/nginx-performance-time/nginx.upstream.time.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Preparing 4 | #put to /etc/zabbix/scripts/nginx.request.time.sh 5 | #add task to cron: * * * * * bash /etc/zabbix/scripts/nginx.upstream.time.sh 6 | #chmod 744 $RESFILE #for reading by Zabbix-agent 7 | 8 | LOGFILE='/var/log/nginx/access.log' 9 | RESFILE='/tmp/nginx.upstream.response.time.tmp' 10 | 11 | CURRENT_TIME=:$(date +"%H:%M" --date 'now - 1 minutes'): # timestamp - 1 muniute = previous 12 | 13 | if grep $CURRENT_TIME $LOGFILE | awk '{print $(NF-1),$NF}' | grep upstream_response_time; then 14 | 15 | #min 16 | a=$(grep $CURRENT_TIME $LOGFILE | awk '{print $(NF-1),$NF}' | grep upstream_response_time | awk '{print $NF}' | grep "\." | sort | head -n 1) 17 | echo "min="${a::6} > $RESFILE #rewrite this file 18 | 19 | #max 20 | a=$(grep $CURRENT_TIME $LOGFILE | awk '{print $(NF-1),$NF}' | grep upstream_response_time | awk '{print $NF}' | grep "\." | sort | tail -n 1) 21 | echo "max="${a::6} >> $RESFILE 22 | 23 | #mean 24 | NUMBER=$(grep $CURRENT_TIME $LOGFILE | awk '{print $(NF-1),$NF}' | grep upstream_response_time | awk '{print $NF}' | grep "\." | sort | wc -l) 25 | SUM=$(grep $CURRENT_TIME $LOGFILE | awk '{print $(NF-1),$NF}' | grep upstream_response_time | awk '{print $NF}' | grep "\." 
| awk '{s+=$1} END {print s}' ) 26 | MEAN=$(echo - | awk -v n=$NUMBER -v s=$SUM '{ print s / n }') 27 | echo "mean="${MEAN::6} >> $RESFILE 28 | 29 | else 30 | 31 | echo -e 'min=0.00\nmax=0.00\nmean=0.00' > $RESFILE #rewrite this file 32 | 33 | fi -------------------------------------------------------------------------------- /Templates/Nginx/nginx-rtmp/nginx-rtmp.conf: -------------------------------------------------------------------------------- 1 | ###Nginx-RTMP Status 2 | UserParameter=nginx.rtmp[*],/etc/zabbix/scripts/nginx-rtmp.sh $1 3 | #Active streams 4 | UserParameter=nginx.rtmp.streams, grep "^ | cut -f1 -d\< | awk '{s+=$1} END {print s}'; else echo 0; fi 7 | -------------------------------------------------------------------------------- /Templates/Nginx/nginx-rtmp/nginx-rtmp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FILESTATUS=/tmp/rtmp_nginx.tmp # chmod 777 ... 4 | #NOWTIME=$(date +%s) 5 | #if [ ! -f $FILESTATUS ] 6 | #then wget http://91.109.202.182/stat -O $FILESTATUS -o /dev/null 7 | #fi 8 | #FILETIME=$(stat -c %Y $FILESTATUS) 9 | #TIME=$(($NOWTIME-$FILETIME)) 10 | #if [ "$TIME" -ge "20" ] 11 | #then 12 | wget http://127.0.1.1/stat -O $FILESTATUS -o /dev/null 13 | #fi 14 | case "$1" in 15 | bytes_in) 16 | grep $1 $FILESTATUS | cut -f2 -d '>' | cut -f1 -d '<' | head -n1 17 | exit 0 18 | ;; 19 | bytes_out) 20 | grep $1 $FILESTATUS | cut -f2 -d '>' | cut -f1 -d '<' | head -n1 21 | exit 0 22 | ;; 23 | accepted) 24 | grep $1 $FILESTATUS | cut -f2 -d '>' | cut -f1 -d '<' 25 | exit 0 26 | ;; 27 | *) 28 | echo "ZBX_UNSUPPORTED" 29 | exit 1 30 | ;; 31 | esac 32 | 33 | exit 0 34 | -------------------------------------------------------------------------------- /Templates/Nginx/nginx-rtmp/stat.xsl: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | RTMP statistics 17 | 18 | 19 | 20 |
21 | Generated by 22 | nginx-rtmp-module , 23 | nginx , 24 | pid , 25 | built   26 | 27 | 28 |
29 | 30 | 31 |
32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 59 | 64 | 71 | 78 | 84 | 85 | 86 |
RTMP#clientsVideoAudioIn bytesOut bytesIn bits/sOut bits/sStateTime
Accepted: codecbits/ssizefpscodecbits/sfreqchan 55 | 56 | 57 | 58 | 60 | 61 | 62 | 63 | 65 | 66 | 67 | 68 | 69 | 70 | 72 | 73 | 74 | 75 | 76 | 77 | 79 | 80 | 81 | 82 | 83 |
87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | live streams 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | vod streams 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | #cccccc 132 | #dddddd 133 | 134 | 135 | 136 | 137 | 138 | var d=document.getElementById('-'); 139 | d.style.display=d.style.display=='none'?'':'none'; 140 | return false 141 | 142 | 143 | 144 | [EMPTY] 145 | 146 | 147 | 148 | 149 | 150 |    151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 |   167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | - 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 | 228 | 229 | 230 | 231 |
IdStateAddressFlash versionPage URLSWF URLDroppedTimestampA-VTime
232 | 233 | 234 |
235 | 236 | 237 | 238 | 239 | 240 | 241 | 242 | 243 | 244 | 245 | d 246 | 247 | 248 | 249 | h 250 | 251 | 252 | 253 | m 254 | 255 | 256 | s 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 | 266 | 267 | 268 | 269 | T 270 | 271 | 272 | G 273 | 274 | 275 | M 276 | 277 | K 278 | 279 | 280 | 281 | b 282 | B 283 | 284 | /s 285 | 286 | 287 | 288 | 289 | 290 | active 291 | idle 292 | 293 | 294 | 295 | 296 | 297 | 298 | publishing 299 | playing 300 | 301 | 302 | 303 | 304 | 305 | 306 | 307 | 308 | #cccccc 309 | #eeeeee 310 | 311 | 312 | 313 | 314 | 315 | 316 | 317 | http://apps.db.ripe.net/search/query.html?searchtext= 318 | 319 | whois 320 | 321 | 322 | 323 | 324 | 325 | 326 | 327 | 328 | 329 | 330 | 331 | 332 | 333 | 334 | 335 | 336 | 337 | 338 | 339 | 340 | 341 | 342 | 343 | 344 | 345 | publishing 346 | 347 | 348 | 349 | active 350 | 351 | 352 | 353 | x 354 | 355 | 356 | -------------------------------------------------------------------------------- /Templates/Nginx/nginx-status.conf: -------------------------------------------------------------------------------- 1 | # put it there /etc/nginx/conf.d/nginx-status.conf 2 | 3 | server { 4 | listen 127.0.0.1:80; 5 | location /nginx_status { 6 | stub_status on; 7 | access_log off; 8 | allow 127.0.0.1; 9 | deny all; 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /Templates/Nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | # Nginx 2 | # UserParameter=nginx_status.reading,curl -s http://127.0.0.1/nginx_status | grep Reading | awk ' {print $2} ' 3 | # UserParameter=nginx_status.active,curl -s http://127.0.0.1/nginx_status | grep Active | awk ' {print $NF} ' 4 | # UserParameter=nginx_status.writing,curl -s http://127.0.0.1/nginx_status | grep Writing | awk ' {print $4} ' 5 | # UserParameter=nginx_status.waiting,curl -s http://127.0.0.1/nginx_status | grep Waiting | awk ' {print $6} ' 6 | # 
UserParameter=nginx_status.accepts,curl -s http://127.0.0.1/nginx_status | grep accepts -A 1 | tail -n 1 | awk ' {print $1} ' 7 | # UserParameter=nginx_status.handled,curl -s http://127.0.0.1/nginx_status | grep handled -A 1 | tail -n 1 | awk ' {print $2} ' 8 | # UserParameter=nginx_status.requests,curl -s http://127.0.0.1/nginx_status | grep requests -A 1 | tail -n 1 | awk ' {print $3} ' 9 | 10 | UserParameter=nginx[*],/etc/zabbix/scripts/nginx_status.sh $1 -------------------------------------------------------------------------------- /Templates/Nginx/nginx_status.sh: -------------------------------------------------------------------------------- 1 | # put it in /etc/zabbix/scripts/ 2 | # chmod 755 ... 3 | #!/bin/bash 4 | 5 | case "$1" in 6 | active) 7 | curl -s http://127.0.0.1/nginx_status | grep Active | awk ' {print $NF} ' 8 | exit 0 9 | ;; 10 | accepts) 11 | curl -s http://127.0.0.1/nginx_status | grep accepts -A 1 | tail -n 1 | awk ' {print $1} ' 12 | ;; 13 | handled) 14 | curl -s http://127.0.0.1/nginx_status | grep handled -A 1 | tail -n 1 | awk ' {print $2} ' 15 | ;; 16 | requests) 17 | curl -s http://127.0.0.1/nginx_status | grep requests -A 1 | tail -n 1 | awk ' {print $3} ' 18 | ;; 19 | reading) 20 | curl -s http://127.0.0.1/nginx_status | grep Reading | awk ' {print $2} ' 21 | ;; 22 | writing) 23 | curl -s http://127.0.0.1/nginx_status | grep Writing | awk ' {print $4} ' 24 | ;; 25 | waiting) 26 | curl -s http://127.0.0.1/nginx_status | grep Waiting | awk ' {print $6} ' 27 | ;; 28 | *) 29 | echo "ZBX_UNSUPPORTED" 30 | exit 1 31 | ;; 32 | esac 33 | 34 | exit 0 35 | 36 | -------------------------------------------------------------------------------- /Templates/OS Linux/READ.ME: -------------------------------------------------------------------------------- 1 | I rewrote network and filesystems' discovery rules because we work with docker and rancher a lot. 
2 | 3 | 4 | You need to add several regular expressions via Zabbix GUI 5 | * Administration > General> then select "Regular expression" 6 | * There should be preinstalled regexp "File systems for discovery": 7 | 8 | ^(btrfs|ext2|ext3|ext4|jfs|reiser|xfs|ffs|ufs|jfs|jfs2|vxfs|hfs|refs|ntfs|fat32|zfs)$ [Result is TRUE] 9 | 10 | * Rewrite regexp "Network interfaces" as follows: 11 | 12 | ^lo$ [Result is FALSE] 13 | ^Software Loopback Interface [Result is FALSE] 14 | ^veth [Result is FALSE] 15 | ^docker [Result is FALSE] 16 | ^br [Result is FALSE] 17 | 18 | * Add new one "Docker filesystems discovery": 19 | 20 | docker [Result is FALSE] 21 | 22 | * ... and another one "Rancher filesystems discovery": 23 | 24 | rancher [Result is FALSE] 25 | 26 | And then import os.linux.xml to your own Zabbix 27 | Filter in "Mounted filesystem discovery" was set up as (A and B) and C -------------------------------------------------------------------------------- /Templates/OVH/README.txt: -------------------------------------------------------------------------------- 1 | just for the record: 2 | in template you need to put your own containerId and container name in item -------------------------------------------------------------------------------- /Templates/OVH/cloud.storage.container.size.py.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | 3 | import json 4 | import ovh 5 | import sys 6 | 7 | # pass as parameter container ID you want to see 8 | containerid = sys.argv[1] 9 | 10 | client = ovh.Client( 11 | endpoint='ovh-eu', # Endpoint of API OVH Europe (List of available endpoints) 12 | application_key='application_key', # Application Key 13 | application_secret='application_secret', # Application Secret 14 | consumer_key='consumer_key', # Consumer Key 15 | ) 16 | 17 | result = client.get('/cloud/project/11463cd19d7e4bf7be1c5a64991f37fd/storage/' + containerid) 18 | 19 | data = json.dumps(result, indent=4) 20 | 
resp = json.loads(data) 21 | 22 | print (resp['storedBytes']) -------------------------------------------------------------------------------- /Templates/OVH/ovh.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2017-10-24T09:44:56Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 81 | 82 | 83 | 84 | OVH cloud storage: YOURCONTAINERNAME container size 85 | 900 86 | 200 87 | 0.0000 88 | 100.0000 89 | 1 90 | 1 91 | 0 92 | 1 93 | 0 94 | 0.0000 95 | 0.0000 96 | 0 97 | 0 98 | 0 99 | 0 100 | 101 | 102 | 0 103 | 1 104 | 0000BB 105 | 0 106 | 2 107 | 0 108 | 109 | OVH 110 | ovh.cloud.storage.container.size[YOURCONTAINERID] 111 | 112 | 113 | 114 | 115 | 116 | 117 | -------------------------------------------------------------------------------- /Templates/OVH/ovh.zabbix.conf: -------------------------------------------------------------------------------- 1 | UserParameter=ovh.cloud.storage.container.size[*], /usr/bin/python /etc/zabbix/scripts/ovh/cloud.storage.container.size.py $1 2 | -------------------------------------------------------------------------------- /Templates/PHP-FPM/README.txt: -------------------------------------------------------------------------------- 1 | Prepare php-fpm: 2 | 3 | in your pool-config add next lines: 4 | 5 | pm.status_path = /php-status 6 | ping.path = /ping 7 | ping.response = 1 8 | 9 | 10 | Prepare Nginx: 11 | 12 | you need to make another location {} in nginx.conf 13 | 14 | location ~ ^/(php-status|ping)$ { 15 | access_log off; 16 | allow 127.0.0.1; 17 | deny all; 18 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 19 | include fastcgi_params; 20 | fastcgi_pass unix:/var/run/php5-fpm.sock; 21 | 22 | } 23 | 24 | nginx -t - check errors in nginx.conf 25 | service nginx reload 26 | 27 | service php-fpm5 reload 28 | 29 | 30 | -------------------------------------------------------------------------------- /Templates/PHP-FPM/php-fpm-status.conf: 
-------------------------------------------------------------------------------- 1 | # put it there /etc/nginx/conf.d/nginx-status.conf 2 | 3 | server { 4 | listen localhost; 5 | location ~ ^/(php-status|ping)$ { 6 | access_log off; 7 | allow 127.0.0.1; 8 | deny all; 9 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 10 | include fastcgi_params; 11 | fastcgi_pass unix:/var/run/php/php7.0-fpm.sock; 12 | } 13 | } -------------------------------------------------------------------------------- /Templates/PHP-FPM/php-fpm.conf: -------------------------------------------------------------------------------- 1 | #PHP-FPM 2 | UserParameter=php.fpm[*], /etc/zabbix/scripts/php-fpm_status.sh $1 3 | UserParameter=php.fpm.status, /usr/bin/php /etc/zabbix/scripts/status.php 2>&1 > /tmp/php_fpm_status.tmp; if grep ^PHPOK$ /tmp/php_fpm_status.tmp -v 1>/dev/null; then echo 1; else echo 0; fi -------------------------------------------------------------------------------- /Templates/PHP-FPM/php-fpm.sh: -------------------------------------------------------------------------------- 1 | # put it in /etc/zabbix/scripts/ 2 | # chmod 755 ... 3 | #!/bin/bash 4 | 5 | FILESTATUS=/tmp/status_php.tmp # chmod 777 ... 6 | FILEPING=/tmp/php_fpm_ping.tmp # chmod 777 ... 7 | 8 | NOWTIME=$(date +%s) 9 | if [ ! 
-f $FILESTATUS ] 10 | then wget http://localhost/php-status -O $FILESTATUS -o /dev/null 11 | fi 12 | FILETIME=$(stat -c %Y $FILESTATUS) 13 | let "TIME = $NOWTIME - $FILETIME" 14 | if [ "$TIME" -ge "20" ] 15 | then 16 | wget http://localhost/php-status -O $FILESTATUS -o /dev/null 17 | fi 18 | 19 | case "$1" in 20 | active_processes) 21 | grep "^active processes" $FILESTATUS | awk '{print $NF}' 22 | ;; 23 | accepted_conn) 24 | grep "^accepted conn" $FILESTATUS | awk '{print $NF}' 25 | ;; 26 | idle_processes) 27 | grep "^idle processes:" $FILESTATUS | awk '{print $NF}' 28 | ;; 29 | listen_queue_len) 30 | grep "^listen queue len" $FILESTATUS | awk '{print $NF}' 31 | ;; 32 | listen_queue) 33 | grep "^listen queue:" $FILESTATUS | awk '{print $NF}' 34 | ;; 35 | max_active_processes) 36 | grep "^max active processes" $FILESTATUS | awk '{print $NF}' 37 | ;; 38 | max_children_reached) 39 | grep "^max children reached" $FILESTATUS | awk '{print $NF}' 40 | ;; 41 | max_listen_queue) 42 | grep "^max listen queue" $FILESTATUS | awk '{print $NF}' 43 | ;; 44 | total_processes) 45 | grep "^total processes" $FILESTATUS | awk '{print $NF}' 46 | ;; 47 | ping) 48 | wget http://localhost/ping -O $FILEPING -o /dev/null 49 | grep "^1" $FILEPING 50 | ;; 51 | 52 | *) 53 | echo "ZBX_UNSUPPORTED" 54 | exit 1 55 | ;; 56 | esac 57 | 58 | exit 0 -------------------------------------------------------------------------------- /Templates/PHP-FPM/status.php: -------------------------------------------------------------------------------- 1 | 4 | -------------------------------------------------------------------------------- /Templates/PostgreSQL/Backup/PostgreSQL backup.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2017-04-21T08:34:58Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 75 | 76 | 77 | 78 | {Template Postgres Backup:postgres.backup.check.last()}<>1 79 | Postgres backup issue 80 | 81 | 0 82 | 3 83 | 84 | 0 85 | 86 | 87 | 88 | 89 
| -------------------------------------------------------------------------------- /Templates/PostgreSQL/Backup/postgres.backup.conf: -------------------------------------------------------------------------------- 1 | UserParameter = postgres.backup.check, if [ -f /home/storage/backup/postgres/dump.failed ]; then echo 0; else echo 1; fi -------------------------------------------------------------------------------- /Templates/PostgreSQL/README.md: -------------------------------------------------------------------------------- 1 | NOTICE!!! 2 | zabbix user in psql must be superuser 3 | after changing configuring psql sometimes it has to restart postgresql service (not reload) 4 | 5 | PostgreSQL monitoring with Zabbix. 6 | This configuration was tested on postgres 10.1 and zabbix-server 3.0 7 | 8 | based on pgCayenne 9 | 10 | There are 2 templates: 11 | * Common: common metrics and discovery databases 12 | * DATABASENAME: tables autodiscovery 13 | So you need to copy database-template for each DB. 
Use template.preparing.sh script: 14 | ./template.preparing.sh mydatabase 15 | then upload to Zabbix gui 16 | 17 | #### INSTALLATIONs 18 | 19 | ``` 20 | git clone https://github.com/igogorevi4/Zabbix-Extensions.git 21 | 22 | mkdir /etc/zabbix/zabbix_agentd.d/ 23 | 24 | cp files/postgresql/postgresql.conf /etc/zabbix/zabbix_agentd.d/ 25 | 26 | nano /etc/zabbix/zabbix_agentd.conf 27 | 28 | Include=/etc/zabbix/zabbix_agentd.d/ 29 | 30 | systemctl restart zabbix-agent.service 31 | ``` 32 | ``` 33 | - create zabbix user in postgres: 34 | 35 | CREATE USER zabbix WITH PASSWORD 'qwerty12345' SUPERUSER; 36 | 37 | - edit access rules in postgres [pg_hba.conf](http://www.postgresql.org/docs/9.6/static/auth-pg-hba-conf.html) 38 | host all zabbix 127.0.0.1/32 trust 39 | - or add file .pgpass to zabbix homedir with content like so: 40 | 41 | host IP:port:database:postges user:password 42 | 127.0.0.1:5432:*:zabbix:qwerty12345 # for all DBs 43 | 44 | ``` 45 | - import XML template into web monitoring and link template with target host; 46 | 47 | - edit template macros, go to the template page and open "Macros" tab: 48 | 49 | - if you want to monitor buffers' and SQL statements executed by a server statistic, you need to add some extensions for all DBs you want observe: 50 | 51 | CREATE EXTENSION pg_stat_statements; 52 | CREATE EXTENSION pg_buffercache; 53 | 54 | PG_CONNINFO - connection settings for zabbix agent connections to the postgres service; 55 | 56 | PG_CONNINFO_STANDBY - connection settings for zabbix agent connections to the postgres service on standby servers, required for streaming replication lag monitoring; 57 | 58 | PG_CACHE_HIT_RATIO - shared buffers cache ratio; 59 | 60 | PG_CHECKPOINTS_REQ_THRESHOLD - threshold for checkpoints which occured by demand; 61 | 62 | PG_CONFLICTS_THRESHOLD - threshold for recovery conflicts trigger; 63 | 64 | PG_CONN_IDLE_IN_TRANSACTION - threshold for connections which is idle in transaction state; 65 | 66 | PG_CONN_TOTAL_PCT - the 
percentage of the total number of connections to the maximum allowed (max_connections); 67 | 68 | PG_CONN_WAITING - threshold for connections which is in waiting state; 69 | 70 | PG_DATABASE_SIZE_THRESHOLD - threshold for database size; 71 | 72 | PG_DEADLOCKS_THRESHOLD - threshold for deadlock conflicts trigger; 73 | 74 | PG_LONG_QUERY_THRESHOLD - threshold for long transactions trigger; 75 | 76 | PG_PING_THRESHOLD_MS - threshold for postgres service response; 77 | 78 | PG_SR_LAG_BYTE - threshold in bytes for streaming replication lag between server and discovered standby servers; 79 | 80 | PG_SR_LAG_SEC - threshold in seconds for streaming replication lag between server and discovered standby servers; 81 | 82 | PG_UPTIME_THRESHOLD - threshold for service uptime. 83 | 84 | - add additional items into template if required. 85 | 86 | #### Graphs description 87 | - PostgreSQL bgwriter - information about buffers, how much allocated and written. 88 | - PostgreSQL buffers - general information about shared buffers; how much cleaned, dirtied, used and total. 89 | - PostgreSQL checkpoints - checkpoints and write/sync time during chckpoints. 90 | - PostgreSQL connections - connection info (idle, active, waiting, idle in transaction). 91 | - PostgreSQL service response - service response, average query time (pg_stat_statements required). 92 | - PostgreSQL summary db stats: block hit/read - information about how much blocks read from disk or cache. 93 | - PostgreSQL summary db stats: events - commits and rollbacks, recovery conflicts and deadlocks. 94 | - PostgreSQL summary db stats: temp files - information about allocated temporary files. 95 | - PostgreSQL summary db stats: tuples - how much tuples inserted/deleted/updated/fetched/returned. 96 | - PostgreSQL transactions - max execution time for active/idle/waiting/prepared transactions. 97 | - PostgreSQL uptime - cache hit ratio and uptime. 
98 | - PostgerSQL write-ahead log - information about amount of WAL write and WAL segments count. 99 | - PostgreSQL database size - per-database graph with database size. 100 | - PostgreSQL table read stat - information about how much block of table or index readden from disk or cache (per-table). 101 | - PostgreSQL table rows - how much tuples inserted/updated/deleted per second (per-table). 102 | - PostgreSQL table scans - sequential/index scans andhow much rows returned by this scans (per-table). 103 | - PostgreSQL table size - table and table's indexes size (per-table). 104 | - PostgreSQL streaming replication lag with standby - streaming replication between master and standby in bytes and seconds (per-standby). 105 | 106 | #### Known issues: 107 | - Supported PostgreSQL version is 9.6 108 | - Strongly recommended install pg_buffercache and pg_stat_statements extensions into monitored database. 109 | - Table low-level discovery require manual specifies a list of tables to find, otherwise LLD generate many items (21 item per table). 
-------------------------------------------------------------------------------- /Templates/PostgreSQL/docker.postgres.master.conf: -------------------------------------------------------------------------------- 1 | UserParameter=postgres.who.is.master, docker exec -t pgpool psql -h 127.0.0.1 -t -U postgres -d replication_db -c "SELECT name FROM repmgr_pg_cluster.repl_nodes WHERE type='master';" # | grep [a-z,0123456789] -------------------------------------------------------------------------------- /Templates/PostgreSQL/postgresql.slave.conf: -------------------------------------------------------------------------------- 1 | #UserParameter = pgsql.slave.delay[*], psql -qAtX $1 -c "SELECT CASE WHEN pg_last_xlog_receive_location() = pg_last_xlog_replay_location() THEN 0 ELSE EXTRACT (EPOCH FROM now() - pg_last_xact_replay_timestamp()) END AS log_delay" # up to v9.6 2 | UserParameter = pgsql.slave.delay[*], psql -qAtX $1 -c "SELECT CASE WHEN pg_last_wal_receive_lsn() = pg_last_wal_replay_lsn() THEN 0 ELSE EXTRACT (EPOCH FROM now() - pg_last_xact_replay_timestamp()) END AS log_delay" # for v10+ 3 | UserParameter = pgsql.slave.runnig, ps aux | grep postgres | grep "wal receiver process" -c -------------------------------------------------------------------------------- /Templates/PostgreSQL/postgresql.slave.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2017-12-04T13:18:43Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 123 | 124 | 125 | 126 | {Template PostgreSQL Slave:pgsql.slave.runnig.nodata(300)}=1 or 127 | {Template PostgreSQL Slave:pgsql.slave.runnig.last()}=0 128 | PSQL: Replication isn't working on {HOST.NAME} 129 | 130 | 0 131 | 3 132 | 133 | 0 134 | 135 | 136 | 137 | 138 | 139 | PostgreSQL: Slave delay 140 | 900 141 | 200 142 | 0.0000 143 | 100.0000 144 | 1 145 | 1 146 | 0 147 | 1 148 | 0 149 | 0.0000 150 | 0.0000 151 | 0 152 | 0 153 | 0 154 | 0 155 | 156 | 157 | 0 158 | 1 159 | 1A7C11 160 | 0 161 
| 2 162 | 0 163 | 164 | Template PostgreSQL Slave 165 | pgsql.slave.delay[{$PG_CONNINFO}] 166 | 167 | 168 | 169 | 170 | 171 | 172 | -------------------------------------------------------------------------------- /Templates/PostgreSQL/template.preparing.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DB_NAME=$1 4 | 5 | TEMPLATE='postgresql.DATABASENAME.xml' 6 | 7 | DB_TEMPLATE=postgresql.$DB_NAME.xml 8 | 9 | cp $TEMPLATE $DB_TEMPLATE 10 | 11 | sed -i "s/DATABASENAME/$DB_NAME/g" $DB_TEMPLATE 12 | 13 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.auth 3 | *.gz 4 | *~ 5 | *.swp 6 | *.log 7 | .cache 8 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | services: 3 | - rabbitmq 4 | python: 5 | - "2.7" 6 | 7 | before_install: ./tests/setup_auth.sh 8 | 9 | script: 10 | - py.test 11 | - ./scripts/rabbitmq/list_rabbit_queues.sh 12 | - ./scripts/rabbitmq/list_rabbit_nodes.sh 13 | - ./scripts/rabbitmq/list_rabbit_shovels.sh 14 | - ./scripts/rabbitmq/rabbitmq-status.sh server message_stats_deliver_get 15 | - ./scripts/rabbitmq/rabbitmq-status.sh check_aliveness 16 | notifications: 17 | email: 18 | recipients: 19 | - mcintoshj@gmail.com 20 | on_failure: always 21 | on_success: change 22 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, and 10 | distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by the copyright 13 | owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all other entities 16 | that control, are controlled by, or are under common control with that entity. 17 | For the purposes of this definition, "control" means (i) the power, direct or 18 | indirect, to cause the direction or management of such entity, whether by 19 | contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the 20 | outstanding shares, or (iii) beneficial ownership of such entity. 21 | 22 | "You" (or "Your") shall mean an individual or Legal Entity exercising 23 | permissions granted by this License. 24 | 25 | "Source" form shall mean the preferred form for making modifications, including 26 | but not limited to software source code, documentation source, and configuration 27 | files. 28 | 29 | "Object" form shall mean any form resulting from mechanical transformation or 30 | translation of a Source form, including but not limited to compiled object code, 31 | generated documentation, and conversions to other media types. 32 | 33 | "Work" shall mean the work of authorship, whether in Source or Object form, made 34 | available under the License, as indicated by a copyright notice that is included 35 | in or attached to the work (an example is provided in the Appendix below). 36 | 37 | "Derivative Works" shall mean any work, whether in Source or Object form, that 38 | is based on (or derived from) the Work and for which the editorial revisions, 39 | annotations, elaborations, or other modifications represent, as a whole, an 40 | original work of authorship. 
For the purposes of this License, Derivative Works 41 | shall not include works that remain separable from, or merely link (or bind by 42 | name) to the interfaces of, the Work and Derivative Works thereof. 43 | 44 | "Contribution" shall mean any work of authorship, including the original version 45 | of the Work and any modifications or additions to that Work or Derivative Works 46 | thereof, that is intentionally submitted to Licensor for inclusion in the Work 47 | by the copyright owner or by an individual or Legal Entity authorized to submit 48 | on behalf of the copyright owner. For the purposes of this definition, 49 | "submitted" means any form of electronic, verbal, or written communication sent 50 | to the Licensor or its representatives, including but not limited to 51 | communication on electronic mailing lists, source code control systems, and 52 | issue tracking systems that are managed by, or on behalf of, the Licensor for 53 | the purpose of discussing and improving the Work, but excluding communication 54 | that is conspicuously marked or otherwise designated in writing by the copyright 55 | owner as "Not a Contribution." 56 | 57 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf 58 | of whom a Contribution has been received by Licensor and subsequently 59 | incorporated within the Work. 60 | 61 | 2. Grant of Copyright License. 62 | 63 | Subject to the terms and conditions of this License, each Contributor hereby 64 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, 65 | irrevocable copyright license to reproduce, prepare Derivative Works of, 66 | publicly display, publicly perform, sublicense, and distribute the Work and such 67 | Derivative Works in Source or Object form. 68 | 69 | 3. Grant of Patent License. 
70 | 71 | Subject to the terms and conditions of this License, each Contributor hereby 72 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, 73 | irrevocable (except as stated in this section) patent license to make, have 74 | made, use, offer to sell, sell, import, and otherwise transfer the Work, where 75 | such license applies only to those patent claims licensable by such Contributor 76 | that are necessarily infringed by their Contribution(s) alone or by combination 77 | of their Contribution(s) with the Work to which such Contribution(s) was 78 | submitted. If You institute patent litigation against any entity (including a 79 | cross-claim or counterclaim in a lawsuit) alleging that the Work or a 80 | Contribution incorporated within the Work constitutes direct or contributory 81 | patent infringement, then any patent licenses granted to You under this License 82 | for that Work shall terminate as of the date such litigation is filed. 83 | 84 | 4. Redistribution. 
85 | 86 | You may reproduce and distribute copies of the Work or Derivative Works thereof 87 | in any medium, with or without modifications, and in Source or Object form, 88 | provided that You meet the following conditions: 89 | 90 | You must give any other recipients of the Work or Derivative Works a copy of 91 | this License; and 92 | You must cause any modified files to carry prominent notices stating that You 93 | changed the files; and 94 | You must retain, in the Source form of any Derivative Works that You distribute, 95 | all copyright, patent, trademark, and attribution notices from the Source form 96 | of the Work, excluding those notices that do not pertain to any part of the 97 | Derivative Works; and 98 | If the Work includes a "NOTICE" text file as part of its distribution, then any 99 | Derivative Works that You distribute must include a readable copy of the 100 | attribution notices contained within such NOTICE file, excluding those notices 101 | that do not pertain to any part of the Derivative Works, in at least one of the 102 | following places: within a NOTICE text file distributed as part of the 103 | Derivative Works; within the Source form or documentation, if provided along 104 | with the Derivative Works; or, within a display generated by the Derivative 105 | Works, if and wherever such third-party notices normally appear. The contents of 106 | the NOTICE file are for informational purposes only and do not modify the 107 | License. You may add Your own attribution notices within Derivative Works that 108 | You distribute, alongside or as an addendum to the NOTICE text from the Work, 109 | provided that such additional attribution notices cannot be construed as 110 | modifying the License. 
111 | You may add Your own copyright statement to Your modifications and may provide 112 | additional or different license terms and conditions for use, reproduction, or 113 | distribution of Your modifications, or for any such Derivative Works as a whole, 114 | provided Your use, reproduction, and distribution of the Work otherwise complies 115 | with the conditions stated in this License. 116 | 117 | 5. Submission of Contributions. 118 | 119 | Unless You explicitly state otherwise, any Contribution intentionally submitted 120 | for inclusion in the Work by You to the Licensor shall be under the terms and 121 | conditions of this License, without any additional terms or conditions. 122 | Notwithstanding the above, nothing herein shall supersede or modify the terms of 123 | any separate license agreement you may have executed with Licensor regarding 124 | such Contributions. 125 | 126 | 6. Trademarks. 127 | 128 | This License does not grant permission to use the trade names, trademarks, 129 | service marks, or product names of the Licensor, except as required for 130 | reasonable and customary use in describing the origin of the Work and 131 | reproducing the content of the NOTICE file. 132 | 133 | 7. Disclaimer of Warranty. 134 | 135 | Unless required by applicable law or agreed to in writing, Licensor provides the 136 | Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, 137 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, 138 | including, without limitation, any warranties or conditions of TITLE, 139 | NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are 140 | solely responsible for determining the appropriateness of using or 141 | redistributing the Work and assume any risks associated with Your exercise of 142 | permissions under this License. 143 | 144 | 8. Limitation of Liability. 
145 | 146 | In no event and under no legal theory, whether in tort (including negligence), 147 | contract, or otherwise, unless required by applicable law (such as deliberate 148 | and grossly negligent acts) or agreed to in writing, shall any Contributor be 149 | liable to You for damages, including any direct, indirect, special, incidental, 150 | or consequential damages of any character arising as a result of this License or 151 | out of the use or inability to use the Work (including but not limited to 152 | damages for loss of goodwill, work stoppage, computer failure or malfunction, or 153 | any and all other commercial damages or losses), even if such Contributor has 154 | been advised of the possibility of such damages. 155 | 156 | 9. Accepting Warranty or Additional Liability. 157 | 158 | While redistributing the Work or Derivative Works thereof, You may choose to 159 | offer, and charge a fee for, acceptance of support, warranty, indemnity, or 160 | other liability obligations and/or rights consistent with this License. However, 161 | in accepting such obligations, You may act only on Your own behalf and on Your 162 | sole responsibility, not on behalf of any other Contributor, and only if You 163 | agree to indemnify, defend, and hold each Contributor harmless for any liability 164 | incurred by, or claims asserted against, such Contributor by reason of your 165 | accepting any such warranty or additional liability. 166 | 167 | END OF TERMS AND CONDITIONS 168 | 169 | APPENDIX: How to apply the Apache License to your work 170 | 171 | To apply the Apache License to your work, attach the following boilerplate 172 | notice, with the fields enclosed by brackets "[]" replaced with your own 173 | identifying information. (Don't include the brackets!) The text should be 174 | enclosed in the appropriate comment syntax for the file format. 
We also 175 | recommend that a file or class name and description of purpose be included on 176 | the same "printed page" as the copyright notice for easier identification within 177 | third-party archives. 178 | 179 | Copyright [yyyy] [name of copyright owner] 180 | 181 | Licensed under the Apache License, Version 2.0 (the "License"); 182 | you may not use this file except in compliance with the License. 183 | You may obtain a copy of the License at 184 | 185 | http://www.apache.org/licenses/LICENSE-2.0 186 | 187 | Unless required by applicable law or agreed to in writing, software 188 | distributed under the License is distributed on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 190 | See the License for the specific language governing permissions and 191 | limitations under the License. 192 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/README.ja.md: -------------------------------------------------------------------------------- 1 | rabbitmq-zabbix 2 | ======================= 3 | 4 | Zabbix 経由で RabbitMQ キューとサーバーをモニタする為のテンプレート。 5 | 6 | ## なぜこれを作ったか: 7 | SNMP プラグインは公式にサポートされているプラグインではないので、rabbitmqctl ベースのモニタは大変遅いので。 8 | 9 | ## どんなもの: 10 | python のスクリプト群、Zabbix テンプレート、及び自動発見の為の関連データ。 11 | 12 | ## 使い方: 13 | 1. /etc/zabbix/ フォルダにファイルをインストールし、Zabbix がアクセスできるパーミッションに変える。 14 | 2. 設定を行う(下記参照) 15 | 3. テンプレートを Zabbix サーバーにインポートする。 16 | 4. zabbix_sender がインストールされていることを確認する。 17 | 5. 注意: プロセスがタイムアウトしていないか確認する。処理に3秒以上かかるようなデータやキューが RabbitMQ 内にあるとこの現象が起きることがある。(以下、翻訳省略) 18 | 6. 
ローカルの zabbix agent を再起動する。 19 | 20 | ## 設定: 21 | オプションで .rab.auth ファイルを scripts/ ディレクトリ以下に作成しても良い。このファイルでデフォルトパラメータを変更することができる。フォーマットは `変数=値` で、1行に1つ記述できる。デフォルト値は次の通り: 22 | 23 | USERNAME=guest 24 | PASSWORD=guest 25 | CONF=/etc/zabbix/zabbix_agent.conf 26 | 27 | このファイルにフィルタを追加することもでき、そうするとどのキューをモニタするかを厳格化できる。このアイテムは JSONでエンコードされた文字列である。使用するのが柔軟になるようなフォーマットになっている。フィルタするために、単一のオブジェクトまたはオブジェクトのリストを設定することもできる。使用可能なキーは、status, node, name, consumers, vhost, durable, 28 | exclusive_consumer_tag, auto_delete, memory, policy である。 29 | 30 | 例えば次のフィルタは全ての durable なキューを見つける: 31 | `FILTER='{"durable": true}'` 32 | 33 | 指定した vhost に対する durable なキューのみに絞る場合は以下の様なフィルタになる: 34 | `FILTER='{"durable": true, "vhost": "mine"}'` 35 | 36 | キュー名のリストを出すには、以下のようなフィルタを設定: 37 | `FILTER='[{"name": "mytestqueuename"}, {"name": "queue2"}]'` 38 | 39 | 将来、フィルタは正規表現や除外したキューを指定できるように改善されるかもしれない。 40 | 41 | ## 変更 42 | * zabbix_sender を用いてデータをリクエストするように更新。(以下翻訳略) 43 | * オブジェクト一覧を扱えるようにフィルタを更新。 44 | 45 | ## 未来のアイデア 46 | 結果をローカルにキャッシュする機能(けど RabbitMQ を殺しすぎるかも) 47 | アイデアがアレばお気軽に mcintoshj@gmail.com までメールください。 48 | 49 | 50 | ## Definite kudos to some of the other developers around the web. In particular, 51 | * Python Scripts: https://github.com/kmcminn/rabbit-nagios 52 | * Base idea for the Rabbit template: https://github.com/alfss/zabbix-rabbitmq 53 | * Also need to thank Lewis Franklin https://github.com/brolewis for his contributions! 54 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/README.md: -------------------------------------------------------------------------------- 1 | rabbitmq-zabbix 2 | ======================= 3 | [![Build Status](https://travis-ci.org/jasonmcintosh/rabbitmq-zabbix.svg?branch=master)](https://travis-ci.org/jasonmcintosh/rabbitmq-zabbix) 4 | 5 | Template and checks to monitor rabbitmq queues and server via Zabbix. 
6 | 7 | ## SOURCE: 8 | https://github.com/jasonmcintosh/rabbitmq-zabbix 9 | 10 | ## WHY: 11 | Because the SNMP plugin isn't an officially supported plugin, and rabbitmqctl based monitors are REALLY slow in comparison. 12 | 13 | ## WHAT: 14 | Set of python scripts, zabbix template, and associated data to do autodiscovery 15 | 16 | ## HOW: 17 | 1. Install the files into /etc/zabbix/ folder, change permissions to Zabbix. 18 | 2. Setup configuration (see below) 19 | 3. Import the template to your zabbix server 20 | 4. Make sure zabbix_sender is installed 21 | 5. **WARNING** Watch your process timeout. I hit an issue with the amount of data and queues in rabbit where processing the results took longer than 3 seconds - that's the default timeout for the agent to kill a process. If I can switch to a file based push instead of calling send for each item, this will hopefully reduce the time to send even further 22 | 6. Restart the local zabbix agent 23 | 24 | 25 | ## CONFIGURATION: 26 | **Basic security recommendation** 27 | ``` 28 | When setting up a monitoring system, a general rule is that you should not to use guest. 29 | Guest is an admin account with full permissions. A basic suggestion is to setup a read 30 | only account who can access the management API. Make sure that account is READ ONLY. With 31 | one caveat - the monitoring user should be able execute the aliveness-test api. That might mean 32 | needing a slightly different set of permissions or pre-creation of the aliveness check queues. 33 | IF using guest a warning - it can only access RabbitMQ management via localhost so you will 34 | need to set HOSTNAME=localhost 35 | ``` 36 | 37 | You should create a `.rab.auth` file in the `scripts/rabbitmq` directory. This file allows you to change default parameters. 
The format is `VARIABLE=value`, one per line: 38 | The default values are as follows: 39 | 40 | USERNAME=guest 41 | PASSWORD=guest 42 | CONF=/etc/zabbix/zabbix_agent.conf 43 | LOGLEVEL=INFO 44 | LOGFILE=/var/log/zabbix/rabbitmq_zabbix.log 45 | PORT=15672 46 | 47 | You can also add a filter in this file to restrict which queues are monitored. 48 | This item is a JSON-encoded string. The format provides some flexibility for 49 | its use. You can either provide a single object or a list of objects to filter. 50 | The available keys are: status, node, name, consumers, vhost, durable, 51 | exclusive_consumer_tag, auto_delete, memory, policy 52 | 53 | For example, the following filter could find all the durable queues: 54 | `FILTER='{"durable": true}'` 55 | 56 | To only use the durable queues for a given vhost, the filter would be: 57 | `FILTER='{"durable": true, "vhost": "mine"}'` 58 | 59 | To supply a list of queue names, the filter would be: 60 | `FILTER='[{"name": "mytestqueuename"}, {"name": "queue2"}]'` 61 | 62 | To debug any potential issues, make sure the log directory exists and can be written to by zabbix, then set LOGLEVEL=DEBUG in the .rab.auth file and you'll get quite verbose output 63 | 64 | ### Macros 65 | 66 | You can adjust the values for the critical and warning levels for the amount of messages by changing the following macros: 67 | 68 | - RABBIT_QUEUE_MESSAGES_CRIT Defines the critical value for the amount of messages in a queue. It is set to 200000 messages per default 69 | - RABBIT_QUEUE_MESSAGES_WARN Defines the warning value for the amount of messages in a queue. It is set to 100000 messages per default 70 | 71 | ## Low level discovery of queues, including GLOBAL REGULAR EXPRESSIONS: 72 | `https://www.zabbix.com/documentation/3.0/manual/regular_expressions` 73 | The low level discovery, which is what determines what queues to be monitored, requires with the existing template that a filter be defined as a global regular expression. 
You can modify the template to do it in other ways, e.g. with a host level macro (NOT TESTED), or override it per host. Or any number of methods. But without a filter, NO queues will be discovered, JUST server level items will show up, and your checks will fail. 74 | 75 | At some point the filters may be improved to include regular expressions or "ignore these queues" 76 | 77 | ## CHANGES 78 | * Updated to use zabbix_sender to push data on request to an item request. This is similar to how the FromDual MySQL Zabbix stuff works and the concept was pulled from their templates. 79 | * Updated the filters to handle a list of objects 80 | 81 | 82 | ## Ideas for the future? 83 | Add a local cache of the results (may be overkill for RabbitMQ). 84 | Feel free to submit changes or ideas - mcintoshj@gmail.com 85 | 86 | Repo: 87 | https://github.com/jasonmcintosh/rabbitmq-zabbix 88 | 89 | ## Definite kudos to some of the other developers around the web. In particular, 90 | * Python Scripts: https://github.com/kmcminn/rabbit-nagios 91 | * Base idea for the Rabbit template: https://github.com/alfss/zabbix-rabbitmq 92 | * Also need to thank Lewis Franklin https://github.com/brolewis for his contributions! 93 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/scripts/rabbitmq/.rab.auth: -------------------------------------------------------------------------------- 1 | USERNAME=guest 2 | PASSWORD=guest 3 | CONF=/etc/zabbix/zabbix_agentd.conf 4 | HOSTNAME=localhost 5 | NODE=rabbit@stage 6 | PORT=15672 7 | FILTER='{"durable": true}' 8 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/scripts/rabbitmq/list_rabbit_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # https://github.com/jasonmcintosh/rabbitmq-zabbix 4 | # 5 | cd "$(dirname "$0")" 6 | . 
.rab.auth 7 | 8 | if [[ -z "$HOSTNAME" ]]; then 9 | HOSTNAME=`hostname` 10 | fi 11 | if [[ -z "$NODE" ]]; then 12 | NODE=`hostname` 13 | fi 14 | 15 | ./api.py --username=$USERNAME --password=$PASSWORD --check=list_nodes --filter="$FILTER" --conf=$CONF --hostname=$HOSTNAME --node="$NODE" --loglevel=${LOGLEVEL} --logfile=${LOGFILE} --port=$PORT --protocol=$PROTOCOL 16 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/scripts/rabbitmq/list_rabbit_queues.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # https://github.com/jasonmcintosh/rabbitmq-zabbix 4 | # 5 | cd "$(dirname "$0")" 6 | . .rab.auth 7 | 8 | if [[ -z "$HOSTNAME" ]]; then 9 | HOSTNAME=`hostname` 10 | fi 11 | if [[ -z "$NODE" ]]; then 12 | NODE=`hostname` 13 | fi 14 | 15 | ./api.py --username=$USERNAME --password=$PASSWORD --check=list_queues --filter="$FILTER" --conf=$CONF --hostname=$HOSTNAME --node="$NODE" --loglevel=${LOGLEVEL} --logfile=${LOGFILE} --port=$PORT --protocol=$PROTOCOL 16 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/scripts/rabbitmq/list_rabbit_shovels.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # https://github.com/jasonmcintosh/rabbitmq-zabbix 4 | # 5 | cd "$(dirname "$0")" 6 | . 
.rab.auth 7 | 8 | if [[ -z "$HOSTNAME" ]]; then 9 | HOSTNAME=`hostname` 10 | fi 11 | if [[ -z "$NODE" ]]; then 12 | NODE=`hostname` 13 | fi 14 | 15 | 16 | ./api.py --username=$USERNAME --password=$PASSWORD --check=list_shovels --filter="$FILTER" --hostname=$HOSTNAME --node="$NODE" --conf=$CONF --loglevel=${LOGLEVEL} --logfile=${LOGFILE} --port=$PORT --protocol=$PROTOCOL 17 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/scripts/rabbitmq/rabbitmq-status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # https://github.com/jasonmcintosh/rabbitmq-zabbix 4 | # 5 | #UserParameter=rabbitmq[*],<%= zabbix_script_dir %>/rabbitmq-status.sh 6 | cd "$(dirname "$0")" 7 | 8 | . .rab.auth 9 | 10 | TYPE_OF_CHECK=$1 11 | METRIC=$2 12 | NODE=$3 13 | 14 | if [[ -z "$HOSTNAME" ]]; then 15 | HOSTNAME=`hostname` 16 | fi 17 | if [[ -z "$NODE" ]]; then 18 | NODE=`hostname` 19 | fi 20 | #rabbitmq[queues] 21 | #rabbitmq[server,disk_free] 22 | #rabbitmq[check_aliveness] 23 | 24 | # This assumes that the server is going to then use zabbix_sender to feed the data BACK to the server. 
Right now, I'm doing that 25 | # in the python script 26 | 27 | ./api.py --hostname=$HOSTNAME --username=$USERNAME --password=$PASSWORD --check=$TYPE_OF_CHECK --metric=$METRIC --node="$NODE" --filters="$FILTER" --conf=$CONF --loglevel=${LOGLEVEL} --logfile=${LOGFILE} --port=$PORT --protocol=$PROTOCOL 28 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/tests/setup_auth.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | echo "Setting up rabbit auth file" 3 | 4 | cat<scripts/rabbitmq/.rab.auth 5 | USERNAME=guest 6 | PASSWORD=guest 7 | 8 | LOGFILE=/tmp/rabbitmq_zabbix.log 9 | LOGLEVEL=DEBUG 10 | 11 | HOSTNAME=localhost 12 | PORT=15672 13 | 14 | EOF 15 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/tests/test_basic_return.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | sys.path.append(os.path.abspath("scripts/rabbitmq") ) 4 | from api import * 5 | 6 | import io 7 | import pytest 8 | 9 | def test_output_default_help_message(capsys): 10 | with pytest.raises(SystemExit): 11 | main() 12 | out, err = capsys.readouterr() 13 | assert "At least one check should be specified" in err 14 | 15 | debugoutput = "" 16 | def test_queue_check(capsys, monkeypatch): 17 | current_args = sys.argv 18 | sys.argv = ["prog", "--check","list_queues"] 19 | def mockloggingconfig(filename, level, format): 20 | assert filename == "/var/log/zabbix/rabbitmq_zabbix.log" 21 | def mockdebug(somestring): 22 | global debugoutput 23 | debugoutput = debugoutput + somestring + "####" 24 | monkeypatch.setattr(logging, "basicConfig", mockloggingconfig) 25 | monkeypatch.setattr(logging, "debug", mockdebug) 26 | monkeypatch.setattr(logging, "info", mockdebug) 27 | 28 | with pytest.raises(Exception): 29 | main() 30 | out, err = capsys.readouterr() 31 | assert "Started trying to process 
data" in debugoutput 32 | assert "Issue a rabbit" in debugoutput 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /Templates/RabbitMQ/zabbix_agentd.d/rabbitmq.conf: -------------------------------------------------------------------------------- 1 | UserParameter=rabbitmq.discovery_queues,/etc/zabbix/scripts/rabbitmq/list_rabbit_queues.sh 2 | UserParameter=rabbitmq.discovery_shovels,/etc/zabbix/scripts/rabbitmq/list_rabbit_shovels.sh 3 | UserParameter=rabbitmq.discovery_nodes,/etc/zabbix/scripts/rabbitmq/list_rabbit_nodes.sh 4 | UserParameter=rabbitmq[*],/etc/zabbix/scripts/rabbitmq/rabbitmq-status.sh $1 $2 $3 5 | -------------------------------------------------------------------------------- /Templates/Redis/README.md: -------------------------------------------------------------------------------- 1 | 0) change server IP on redis-stats.py 2 | 3 | 1) Put redis.conf into your zabbix_agentd.conf config subdirectory (like: /etc/zabbix/zabbix_agentd.d/). 4 | 5 | 2) Change script name in redis.conf to use redis-stats.py . Redis server params can be passed to the python script as arguments e.g.: 6 | redis-stats.py localhost -p 6379 -a mypassword 7 | 8 | 3) Change your zabbix_agentd.conf config so it will include this file: 9 | Include=/etc/zabbix/zabbix_agentd.d/ 10 | 11 | 4) In working dir (/etc/zabbix/scripts) do: 12 | 13 | For use python verson script: 14 | 15 | pip install redis 16 | chmod +x redis-stats.py 17 | 18 | 5) Import redis.xml into zabbix in Tepmplate section web gui. 19 | There is a macros in template: {$REDIS_HOSTNAME} - 'localhost' by default. 20 | Change it in Zabbix GUI for server, if you connect to redis using other hostname. 
-------------------------------------------------------------------------------- /Templates/Redis/redis-stats.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #https://github.com/blacked/zbx_redis_template/blob/master/zbx_redis_stats.py 3 | 4 | import sys, redis, json, re, struct, time, socket, argparse 5 | 6 | parser = argparse.ArgumentParser(description='Zabbix Redis status script') 7 | parser.add_argument('redis_hostname',nargs='?') 8 | parser.add_argument('metric',nargs='?') 9 | parser.add_argument('db',default='none',nargs='?') 10 | parser.add_argument('-p','--port',dest='redis_port',action='store',help='Redis server port',default=6379,type=int) 11 | parser.add_argument('-a','--auth',dest='redis_pass',action='store',help='Redis server pass',default=None) 12 | args = parser.parse_args() 13 | 14 | zabbix_host = '127.0.0.1' # Zabbix Server IP 15 | zabbix_port = 10051 # Zabbix Server Port 16 | 17 | # Name of monitored server like it shows in zabbix web ui display 18 | redis_hostname = args.redis_hostname if args.redis_hostname else socket.gethostname() 19 | 20 | class Metric(object): 21 | def __init__(self, host, key, value, clock=None): 22 | self.host = host 23 | self.key = key 24 | self.value = value 25 | self.clock = clock 26 | 27 | def __repr__(self): 28 | result = None 29 | if self.clock is None: 30 | result = 'Metric(%r, %r, %r)' % (self.host, self.key, self.value) 31 | else: 32 | result = 'Metric(%r, %r, %r, %r)' % (self.host, self.key, self.value, self.clock) 33 | return result 34 | 35 | def send_to_zabbix(metrics, zabbix_host='127.0.0.1', zabbix_port=10051): 36 | result = None 37 | j = json.dumps 38 | metrics_data = [] 39 | for m in metrics: 40 | clock = m.clock or ('%d' % time.time()) 41 | metrics_data.append(('{"host":%s,"key":%s,"value":%s,"clock":%s}') % (j(m.host), j(m.key), j(m.value), j(clock))) 42 | json_data = ('{"request":"sender data","data":[%s]}') % (','.join(metrics_data)) 43 | 
data_len = struct.pack(' 5 | # 6 | 7 | # Discovery 8 | UserParameter=redis.discovery, /etc/zabbix/scripts/redis-stats.py localhost list_key_space_db 9 | 10 | # Return Redis statistics 11 | UserParameter=redis[*], /etc/zabbix/scripts/redis-stats.py $1 $2 $3 12 | -------------------------------------------------------------------------------- /Templates/SMART/README.md: -------------------------------------------------------------------------------- 1 | #based on https://github.com/v-zhuravlev/zbx-smartctl 2 | 3 | #Description 4 | This is the template for Zabbix providing SMART monitoring for HDD using smartctl utility. 5 | *main* branch has the templates for Zabbix 3.0, 2.4 and 2.2. Only devices with SMART enabled will be discovered. 6 | 7 | #Installation: 8 | ##Linux/BSD/Mac OSX: 9 | - Make sure that smartmontools utils are installed: 10 | - install the script smartctl-disks-discovery.pl in /etc/zabbix/scripts/ 11 | - test the script by running it. You should receive JSON object in the script output 12 | - add the following permissions into /etc/sudoers: 13 | ``` 14 | zabbix ALL= (ALL) NOPASSWD: /usr/sbin/smartctl,/etc/zabbix/scripts/smartctl-disks-discovery.pl,/etc/zabbix/scripts/smart.thresh.check.sh 15 | ``` 16 | Add the following lines in zabbix_agentd.conf file: 17 | ``` 18 | #############SMARTMON 19 | #UserParameter=uHDD[*], sudo smartctl -A $1| grep -i "$2"| tail -1| awk '{print $10}' 20 | UserParameter=uHDD[*], sudo smartctl -A $1| grep -i "$2"| tail -1| cut -c 88-|cut -f1 -d' ' 21 | UserParameter=uHDD.model.[*],sudo smartctl -i $1 |grep -i "Device Model"| cut -f2 -d: |tr -d " " 22 | UserParameter=uHDD.sn.[*],sudo smartctl -i $1 |grep -i "Serial Number"| cut -f2 -d: |tr -d " " 23 | UserParameter=uHDD.health.[*],sudo smartctl -H $1 |grep -i "test"| cut -f2 -d: |tr -d " " 24 | #UserParameter=uHDD.errorlog.[*],sudo smartctl -l error $1 | grep -i "ATA Error Count" | cut -f2 -d: |tr -d " "); if [[ ! 
#!/bin/bash
# Report how far a SMART attribute's current (normalized) value sits above its
# failure threshold: VALUE - THRESH, taken from columns 4 and 6 of
# `smartctl -A` output. A result <= 0 means the attribute has failed.
#
# Usage: smart.thresh.check.sh <disk> <attribute>
#
# Add the zabbix user to /etc/sudoers to execute this script without sudo:
# zabbix ALL= (ALL) NOPASSWD: /usr/sbin/smartctl, /etc/zabbix/scripts/smart.thresh.check.sh

# NOTE: $DISK is intentionally left unquoted below - discovery may pass a
# multi-word device spec such as "/dev/bus/0 -d megaraid,01" that must
# word-split into separate smartctl arguments.
DISK=$1
ATTRIBUTE=$2

# Query smartctl once and reuse the row for both columns, so VALUE and THRESH
# are guaranteed to come from the same sample (the original ran smartctl twice).
ROW=$(sudo smartctl -A $DISK | grep -i "$ATTRIBUTE" | tail -1)

# 10#... forces base-10 arithmetic; raw SMART values can have leading zeros,
# which bash would otherwise parse as octal.
VALUE=$((10#$(echo "$ROW" | awk '{ print $4 }')))
THRESH=$((10#$(echo "$ROW" | awk '{ print $6 }')))

echo $((VALUE - THRESH))
#!/usr/bin/perl

# Zabbix low-level discovery of disks via smartmontools.
# Must be run as root. Emits a JSON object of the form
#   {"data":[{"{#DISKNAME}":"/dev/sda -d scsi","{#SMART_ENABLED}":"1"}, ...]}
# Duplicate device paths pointing at the same physical disk (same serial
# number) are reported only once.

$first = 1;

# add path if needed into $smartctl_cmd
$smartctl_cmd = "smartctl";

my @disks;
if ($^O eq 'darwin') {    # MAC OSX: smartctl --scan is unreliable, glob devices
    while (glob('/dev/disk*')) {
        if ($_ =~ /\/(disk+[0-9])$/) { push @disks, $1; }
    }
}
else {
    for (`$smartctl_cmd --scan-open`) {
        # splitting lines like "/dev/sda -d scsi # /dev/sda, SCSI device"
        my @device = split / /, $_;
        # Keep the full "device -d type" spec so SMART works not only for
        # /dev/sd* but for RAID passthrough like "/dev/bus/0 -d megaraid,01".
        # ($device[0], not @device[0]: single-element slices are a misuse.)
        my $disk = "$device[0] $device[1] $device[2]";
        push @disks, $disk;
    }
}

print "{\n";
print "\t\"data\":[\n\n";

my @serials;
DISKLOOP: foreach my $disk (@disks) {
    $smart_enabled = 0;
    chomp($disk);

    foreach $line (`$smartctl_cmd -i $disk`) {
        # Some disks report "Serial Number" and some "Serial number"
        if ($line =~ /^Serial (N|n)umber: +(.+)$/) {
            my $serial = $2;
            # Exact string comparison: the previous `grep /$serial/` regex
            # match could false-positive on substrings or blow up on
            # regex metacharacters in the serial.
            if (grep { $_ eq $serial } @serials) {
                # same physical disk already discovered under another path
                next DISKLOOP;
            }
            push @serials, $serial;
        } elsif ($line =~ /^SMART.+?: +(.+)$/) {
            if ($1 =~ /Enabled/) {
                $smart_enabled = 1;
            }
            # if SMART is disabled then try to enable it (also offline tests etc);
            # use $smartctl_cmd here too so a custom path keeps working
            elsif ($1 =~ /Disabled/) {
                foreach (`$smartctl_cmd -s on -o on -S on $disk`) {
                    if (/SMART Enabled/) { $smart_enabled = 1; }
                }
            }
        }
    }

    print ",\n" if not $first;
    $first = 0;
    print "\t\t{\n";
    print "\t\t\t\"{#DISKNAME}\":\"$disk\",\n";
    print "\t\t\t\"{#SMART_ENABLED}\":\"$smart_enabled\"\n";
    print "\t\t}";
}

print "\n\t]\n";
print "}\n";
smartctl -A $1| grep -i "$2"| tail -1| awk '{print $10}' 3 | UserParameter=uHDD[*], sudo smartctl -A $1| grep -i "$2"| tail -1| cut -c 88-|cut -f1 -d' ' 4 | UserParameter=uHDD.model.[*],sudo smartctl -i $1 |grep -i "Device Model"| cut -f2 -d: |tr -d " " 5 | UserParameter=uHDD.sn.[*],sudo smartctl -i $1 |grep -i "Serial Number"| cut -f2 -d: |tr -d " " 6 | UserParameter=uHDD.health.[*],sudo smartctl -H $1 |grep -i "test"| cut -f2 -d: |tr -d " " 7 | #UserParameter=uHDD.errorlog.[*],sudo smartctl -l error $1 | grep -i "ATA Error Count" | cut -f2 -d: |tr -d " "); if [[ ! -z "$a" ]]; then echo $a; else echo 0; fi 8 | UserParameter=uHDD.errorlog.[*],sudo smartctl -l error $1 |grep -i "ATA Error Count"| cut -f2 -d: |tr -d " " 9 | UserParameter=uHDD.discovery,sudo /etc/zabbix/scripts/smartctl-disks-discovery.pl 10 | UserParameter=uHDD.thresh.check[*], sudo /etc/zabbix/scripts/smart.thresh.check.sh -------------------------------------------------------------------------------- /Templates/SSL Certificates/check-ssl-expire.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # 4 | # source https://gist.github.com/crashdump/5683952 5 | # 6 | 7 | __author__ = "Adrien Pujol - http://www.crashdump.fr/" 8 | __copyright__ = "Copyright 2013, Adrien Pujol" 9 | __license__ = "Mozilla Public License" 10 | __version__ = "0.3" 11 | __email__ = "adrien.pujol@crashdump.fr" 12 | __status__ = "Development" 13 | __doc__ = "Check a TLS certificate validity." 
def pyssl_check_hostname(cert, hostname):
    ''' Return True if the certificate matches hostname, False otherwise.

    cert -- peer certificate dict as returned by SSLSocket.getpeercert();
            only the 'subjectAltName' entries of type 'DNS' are considered.
    hostname -- the hostname we connected to.

    Bug fix: the original returned False on the FIRST non-matching SAN
    entry, so a certificate whose first SAN differed from the hostname was
    always rejected. All entries are now examined before giving up.
    '''
    if 'subjectAltName' not in cert:
        # No SAN section at all (e.g. empty dict from an unverified
        # handshake) -- cannot validate the name.
        return False
    for typ, val in cert['subjectAltName']:
        if typ != 'DNS':
            continue
        if val.startswith('*.'):
            # Wildcard: '*.example.org' matches one label, so compare
            # against the hostname with its first label stripped. Guard
            # against dot-less hostnames (the original raised IndexError).
            parts = hostname.split('.', 1)
            if len(parts) == 2 and val[2:] == parts[1]:
                return True
        elif val == hostname:
            # Exact hostname entry
            return True
    return False
def pyssl_check_expiration(cert):
    ''' Return the number of days before the certificate expires.

    cert -- peer certificate dict as returned by SSLSocket.getpeercert().
    Returns False when the certificate is already expired (or expires
    within the current day), or when it carries no 'notAfter' field
    (the original implicitly returned None in that case).
    Exits the process if the date cannot be parsed.
    '''
    if 'notAfter' not in cert:
        return False
    try:
        expire_date = datetime.strptime(cert['notAfter'],
                                        "%b %d %H:%M:%S %Y %Z")
    except ValueError:
        # Narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt; only a parse failure should abort.
        exit_error(1, 'Certificate date format unknown.')
    expire_in = expire_date - datetime.now()
    if expire_in.days > 0:
        return expire_in.days
    return False
126 | 127 | print pyssl_check_expiration(cert) 128 | 129 | sock = ssl_sock.unwrap() 130 | 131 | except ssl.SSLError as e: 132 | exit_error(1, e) 133 | 134 | # If handled by pyOpenSSL module 135 | else: 136 | try: 137 | ctx = SSL.Context(SSL.TLSv1_METHOD) 138 | ctx.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT, 139 | pyopenssl_check_callback) 140 | ctx.load_verify_locations(CA_CERTS) 141 | 142 | ssl_sock = SSL.Connection(ctx, sock) 143 | ssl_sock.set_connect_state() 144 | ssl_sock.set_tlsext_host_name(HOST) 145 | ssl_sock.do_handshake() 146 | 147 | x509 = ssl_sock.get_peer_certificate() 148 | x509name = x509.get_subject() 149 | if x509name.commonName != HOST: 150 | print 'Error: Hostname does not match!' 151 | 152 | ssl_sock.shutdown() 153 | 154 | except SSL.Error as e: 155 | exit_error(1, e) 156 | 157 | sock.close() 158 | 159 | 160 | if __name__ == "__main__": 161 | main() -------------------------------------------------------------------------------- /Templates/SSL Certificates/check-ssl-expire.template.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2017-08-07T10:05:20Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 84 | 85 | 86 | 87 | {Template SSL Cert Expire:ssl.cert.expire[{$SSL.SITE},{$SSL.PORT}].nodata(86500)}=1 88 | SSL certificate on {$SSL.SITE}. No new data in the last 24h 89 | 90 | 0 91 | 2 92 | No data received for > 24h. Check the script. 
93 | 0 94 | 95 | 96 | 97 | {Template SSL Cert Expire:ssl.cert.expire[{$SSL.SITE},{$SSL.PORT}].last(0)}<1 98 | SSL certificate on {$SSL.SITE} expired 99 | 100 | 0 101 | 5 102 | 103 | 0 104 | 105 | 106 | 107 | {Template SSL Cert Expire:ssl.cert.expire[{$SSL.SITE},{$SSL.PORT}].last(0)}<30 108 | SSL certificate on {$SSL.SITE} expires in less than 30 days ({ITEM.LASTVALUE} left) 109 | 110 | 0 111 | 3 112 | 113 | 0 114 | 115 | 116 | SSL certificate on {$SSL.SITE} expired 117 | {Template SSL Cert Expire:ssl.cert.expire[{$SSL.SITE},{$SSL.PORT}].last(0)}<1 118 | 119 | 120 | 121 | 122 | {Template SSL Cert Expire:ssl.cert.expire[{$SSL.SITE},{$SSL.PORT}].last(0)}<50 123 | SSL certificate on {$SSL.SITE} expires in less than 50 days ({ITEM.LASTVALUE} left) 124 | 125 | 0 126 | 1 127 | 128 | 0 129 | 130 | 131 | SSL certificate on {$SSL.SITE} expires in less than 30 days ({ITEM.LASTVALUE} left) 132 | {Template SSL Cert Expire:ssl.cert.expire[{$SSL.SITE},{$SSL.PORT}].last(0)}<30 133 | 134 | 135 | 136 | 137 | 138 | -------------------------------------------------------------------------------- /Templates/SSL Certificates/ssl.cert.conf: -------------------------------------------------------------------------------- 1 | #UserParameter=ssl.cert.expire, /etc/zabbix/scripts/check-ssl-expire.py $(grep Hostname /etc/zabbix/zabbix_agentd.conf | grep ^# -v | cut -f2 -d=) 2 | #UserParameter=ssl.cert.expire[*], python /etc/zabbix/scripts/check-ssl-expire.py -p $2 $1 2>/dev/null 3 | UserParameter=ssl.cert.expire[*], /etc/zabbix/scripts/ssl-cert-check.sh -s $1 -p $2 -n | awk '{print $NF}' | cut -f2 -d= -------------------------------------------------------------------------------- /Templates/Tarantool/README.txt: -------------------------------------------------------------------------------- 1 | based on https://github.com/PushOk/tarantool_zabbix 2 | In /etc/zabbix/zabbix_agentd.conf follows: 3 | UnsafeUserParameters=1 4 | This allows special symbols in UserParameters: \ ' " ` * ? 
[ ] { } ~ $ ! & ; ( ) < > | # @ 5 | 6 | 7 | and set in Zabbix GUI macros to your server: 8 | {$TARANTOOLCTL} like so user:password@localhost:3301 -------------------------------------------------------------------------------- /Templates/Tarantool/tarantool-params.conf: -------------------------------------------------------------------------------- 1 | UserParameter=tarantool-stats[*],/etc/zabbix/scripts/tarantool/tarantool-stats.sh $1 $2 $3 2 | UserParameter=tarantool-slab[*],/etc/zabbix/scripts/tarantool/tarantool-slab.sh $1 $2 3 | -------------------------------------------------------------------------------- /Templates/Tarantool/tarantool-slab.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "box.slab.info()" | tarantoolctl connect $1 2>/dev/null |grep "$2:" | head -n 1| awk -F':' '{print $2}' |sed -e"s/[ |\%]//g" 4 | -------------------------------------------------------------------------------- /Templates/Tarantool/tarantool-stats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "box.stat()" | tarantoolctl connect $1 2>/dev/null |grep $2 -A 2 |grep $3| awk -F':' '{print $2}' |sed -e"s/ //g" 4 | -------------------------------------------------------------------------------- /Templates/WebSockets/README.txt: -------------------------------------------------------------------------------- 1 | monitoring web-socket's connection -------------------------------------------------------------------------------- /Templates/WebSockets/web.socket.check.conf: -------------------------------------------------------------------------------- 1 | UserParameter=web.socket.check[*], /etc/zabbix/scripts/web.socket.check.py $1 -------------------------------------------------------------------------------- /Templates/WebSockets/web.socket.check.py: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/python
# Check that a secure WebSocket connection to wss://<host> can be
# established. Prints 1 on success, 0 on any failure; the value is
# consumed by the zabbix item web.socket.check[*].

import sys
from websocket import create_connection

host = sys.argv[1]

# Open a connection, send a message, read the reply, close. Any exception
# (DNS failure, refused connection, TLS error, timeout) counts as "down".
try:
    ws = create_connection("wss://%s" % (host,))
    ws.send("ping")
    ws.recv()
    ws.close()
    connection_state = 1
except Exception:
    connection_state = 0

# print(...) works under both Python 2 and 3 (the original bare
# `print connectionState` statement was Python-2 only).
print(connection_state)

WebSocket Test

66 | 67 |
68 | -------------------------------------------------------------------------------- /Templates/WebSockets/websocket.template.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 3.0 4 | 2017-05-11T08:53:02Z 5 | 6 | 7 | Templates/Custom 8 | 9 | 10 | 11 | 75 | 76 | 77 | 78 | {Websocket Check:web.socket.check[api.looch.tv/chat?user=1].last()}<>1 79 | Check api.chat websocket 80 | 81 | 1 82 | 3 83 | 84 | 0 85 | 86 | 87 | 88 | 89 | --------------------------------------------------------------------------------