├── .gitignore ├── LICENSE.md ├── README.md ├── db ├── README.md ├── backup.sh ├── move_items.py ├── rename.sh └── restore.sh ├── docs ├── requirements.md ├── zbx_changeMultipleTriggers.md ├── zbx_clone.md ├── zbx_deleteMonitors.md ├── zbx_deleted-linux.md ├── zbx_deleted.md ├── zbx_discovery-manager.md ├── zbx_historyGet.md └── zbx_hostgroupOrganizer.md ├── odbc ├── README.md └── test_odbc_connection.sh ├── requirements.txt ├── zbx_changeMultipleTriggers.py ├── zbx_clone.py ├── zbx_deleteMonitors.py ├── zbx_deleted-linux.py ├── zbx_deleted.py ├── zbx_discovery-manager.py ├── zbx_historyGet.py └── zbx_hostgroupOrganizer.py /.gitignore: -------------------------------------------------------------------------------- 1 | # git ls-files --others --exclude-from=.git/info/exclude 2 | # Lines that start with '#' are comments. 3 | # For a project mostly in C, the following would be a good set of 4 | # exclude patterns (uncomment them if you want to use them): 5 | # *.[oa] 6 | # *~ 7 | # OS generated files # 8 | ###################### 9 | .DS_Store 10 | .DS_Store? 11 | ._* 12 | .Spotlight-V100 13 | .Trashes 14 | ehthumbs.db 15 | Thumbs.db 16 | # Python compiled 17 | *.pyc 18 | *.pyo 19 | scripts/tmp 20 | .idea/ 21 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2016, Globo.com (http://globo.com) and the [contributors on GitHub](https://github.com/globocom/zabbix-scripts) All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 
10 | 11 | * Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 14 | 15 | * Neither the name of the copyright holder nor the names of its 16 | contributors may be used to endorse or promote products derived from 17 | this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Zabbix Scripts 2 | 3 | A collection of scripts to ease Zabbix administration 4 | 5 | ## Free software 6 | > BSD license 7 | 8 | This repo contains scripts and their documentation in regards ease maintenance of Zabbix operation. 9 | 10 | ## Requisites 11 | Its recommended to work inside a [virtualenv](http://docs.python-guide.org/en/latest/dev/virtualenvs/). 12 | 13 | [PIP](https://pip.pypa.io/en/stable/installing/) is also required. 14 | 15 | Read [requirements.txt](docs/requirements.md) for more info. 
16 | 17 | 18 | ## Docs 19 | [requirements.txt](docs/requirements.md) 20 | 21 | [zbx_changeMultipleTriggers.py](docs/zbx_changeMultipleTriggers.md) 22 | 23 | [zbx_clone.py](docs/zbx_clone.md) 24 | 25 | [zbx_deleted-linux.py](docs/zbx_deleted-linux.md) 26 | 27 | [zbx_deleted.py](docs/zbx_deleted.md) 28 | 29 | [zbx_deleteMonitors.py](docs/zbx_deleteMonitors.md) 30 | 31 | [zbx_discovery-manager.py](docs/zbx_discovery-manager.md) 32 | 33 | [zbx_historyGet.py](docs/zbx_historyGet.md) 34 | 35 | [zbx_hostgroupOrganizer.py](docs/zbx_hostgroupOrganizer.md) 36 | 37 | ## Todo 38 | > Improve docs 39 | -------------------------------------------------------------------------------- /db/README.md: -------------------------------------------------------------------------------- 1 | # **Zabbix-Scripts::DB** 2 | --- 3 | This path contains database related scripts. 4 | 5 | All scripts assumes you are running on your database server and have 'root' access. Changes may be required for your environment. 6 | 7 | ## backup.sh 8 | --- 9 | > This script creates a database backup from Zabbix 2.4 configuration only. 10 | > 11 | > It's very light (~1min for 500G database) because it does not backup data tables (history* and trends*), only their structures. 12 | > 13 | >### Credits 14 | >> Author: Ricardo Santos (rsantos at gmail.com) 15 | >> 16 | >> http://zabbixzone.com/zabbix/backuping-only-the-zabbix-configuration/ 17 | > 18 | >### Usage 19 | > ```shell 20 | > $ sh backup.sh your.database.address databasename 21 | > ``` 22 | 23 | ## restore.sh 24 | --- 25 | > Based on the backup.sh, this one restores the database. 
26 | > 27 | >> Looks on the backupdir and selects the newest backup avaiable 28 | >> Creates database 29 | >> Restore backup 30 | >> Remove *any* partition it may have 31 | > 32 | > ### Credits 33 | >> Author: Filipe Paternot (fpaternot at corp.globo.com) 34 | > 35 | > ### Usage 36 | > ```shell 37 | > $ sh restore.sh database_name 38 | > ``` 39 | > 40 | 41 | ## rename.sh 42 | --- 43 | > Similar to restore, this one is mostly used to keep multiple copies of same database. 44 | > 45 | > We use it mostly to keep live backup's from different development environmnents. 46 | > 47 | > 48 | > ### Credits 49 | >> Author: Filipe Paternot (fpaternot at corp.globo.com) 50 | > 51 | > ### Usage 52 | > ```shell 53 | > $ rename.sh database_name 54 | > ``` 55 | 56 | 57 | ## move_items.py 58 | --- 59 | > `Still under development.` 60 | > 61 | > This one intends to revert data loss of specific items by making inserts to trends or trends_uint table (we ignore history, for now at least) based on a live backup database. 62 | > 63 | > It assumes you for whatever reason lost something in currenct live database and has a backup to recover from, but cant simply overwrite all database and want to recover only some slice of data you missed. 64 | > 65 | > ### Overall flow 66 | > 1. Connects to Zabbix API to find hostid's 67 | > 68 | > 1. Connects to current database and searches for: 69 | > 1. Given a hostid, lists current items, filtering for desired key_ pattern 70 | > 71 | > 1. Connects to backup database and searches for: 72 | > 1. Same hostid and the same key_'s 73 | > 74 | > 1. With the match list of all itemid's: 75 | > 1. Select all data from trends or trends_uint for each itemid 76 | > 1. Creates bulk inserts, 1000 values each, and writes them to .sql file 77 | > 1. Each file has up to 1M values (1k lines) 78 | > 79 | > 1. Next step then is for you/your DBA to import this scripts to current database. Should be harmless, but if your DB is busy, better have someone watching the task. 
80 | > 81 | > ### Usage 82 | > ```shell 83 | > $ python move_items.py --help 84 | > $ python move_items.py --url=http://localhost --zuser=admin --zpass=zabbix --dbuser=zabbix --dbpassword='' --dbbkphost=BKPHOST --dblivehost=localhost --verbose --loglevel=INFO 85 | > ``` 86 | > 87 | > ### TODO 88 | > * Args to filter the item key_'s 89 | > * Args to filter hostgroupid 90 | > 91 | -------------------------------------------------------------------------------- /db/backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # zabbix-mysql-backupconf.sh 4 | # v0.6 - 20161122 Find out what tables exist and copy them all, except known 5 | # data tables; they are structure copied only. This is most 6 | # useful between Zabbix versions. 7 | # v0.5 - 20160416 Support migrated to 3.0, auth into database moved 8 | # to --default-extra-file 9 | # Changed from user/pass to extra-file auth method 10 | # v0.4 - 20120302 Incorporated mysqldump options suggested by Jonathan Bayer 11 | # v0.3 - 20120206 Backup of Zabbix 1.9.x / 2.0.0, removed unnecessary use of 12 | # variables (DATEBIN etc) for commands that use to be in $PATH 13 | # v0.2 - 20111105 14 | # 15 | # Configuration Backup for Zabbix 2.0+ w/MySQL 16 | # 17 | # Original author: Ricardo Santos (rsantos at gmail.com) 18 | # http://zabbixzone.com/zabbix/backuping-only-the-zabbix-configuration/ 19 | # 20 | # modified by Jens Berthold, 2012 21 | # 22 | # Thanks for suggestions from: 23 | # - Oleksiy Zagorskyi (zalex) 24 | # - Petr Jendrejovsky 25 | # - Jonathan Bayer 26 | # 27 | 28 | # 29 | # mysql config 30 | # 31 | DBHOST="$1" 32 | EXTRADIR='/opt/zabbix_keys/db' 33 | EXTRAFILE="${EXTRADIR}/${DBHOST}" 34 | DBNAME="$2" 35 | # following will store the backup in a subdirectory of the current directory 36 | MAINDIR="`dirname \"$0\"`" 37 | DUMPDIR="/opt/zabbix/backup/${DBHOST}/`date +%Y%m%d-%H%M`" 38 | TMPDIR="/dev/shm" 39 | 40 | usage(){ 41 | echo "backup.sh DBHOST DBNAME" 
42 | echo "note: DBHOST must be a filename inside ${EXTRADIR}" 43 | } 44 | 45 | if [ ! -x /usr/bin/mysqldump ]; then 46 | echo "mysqldump not found." 47 | exit 1 48 | fi 49 | if [ ! -f $EXTRAFILE ] || [ "x$EXTRAFILE" == 'x' ]; then 50 | echo "extrafile not found." 51 | usage 52 | exit 1 53 | fi 54 | 55 | # tables with large data 56 | DATATABLES=( acknowledges alerts auditlog auditlog_details events event_recovery event_tag \ 57 | history history_log history_str history_text history_uint housekeeper \ 58 | problem problem_tag sessions trends trends_uint ) 59 | 60 | DUMPFILE="${DUMPDIR}/zbx-conf-bkup-`date +%Y%m%d-%H%M`.sql.gz" 61 | DUMPFILETMP="${TMPDIR}/zbx-conf-bkup-`date +%Y%m%d-%H%M`.sql" 62 | [ ! -f "${DUMPDIR}" ] && mkdir -p "${DUMPDIR}" 63 | >"${DUMPFILETMP}" 64 | >"${DUMPFILE}" 65 | 66 | TABLES=`mysql --defaults-extra-file=${EXTRAFILE} ${DBNAME} -e 'show tables;' | tail -n +2` 67 | # configtables, loop through all tables and copy them.. 68 | for table in ${TABLES}; do 69 | found=0; # ignore large tables like history and stuff 70 | for x in ${DATATABLES[*]}; do 71 | [ "$x" == "$table" ] && found=1; 72 | done 73 | [ $found -eq 1 ] && continue; 74 | echo "Backuping config table ${table}" 75 | mysqldump --defaults-extra-file=${EXTRAFILE} --set-gtid-purged=OFF \ 76 | ${DBNAME} --tables ${table} >>"${DUMPFILETMP}" 77 | done 78 | 79 | # datatables, with history we dont need 80 | for table in ${DATATABLES[*]}; do 81 | echo "Backuping data table ${table}" 82 | mysqldump --defaults-extra-file=${EXTRAFILE} --set-gtid-purged=OFF \ 83 | --no-data ${DBNAME} --tables ${table} >>"${DUMPFILETMP}" 84 | done 85 | 86 | echo "Compressing file" 87 | gzip -c "${DUMPFILETMP}" > "${DUMPFILE}" 88 | rm "${DUMPFILETMP}" 89 | 90 | echo 91 | echo "Backup Completed - ${DUMPDIR}" 92 | #echo "Hit ENTER" 93 | #read 94 | -------------------------------------------------------------------------------- /db/move_items.py: 
#!/bin/env python
# Copyright (c) 2016, Globo.com
# This file is part of globocom/zabbix-scripts
# (see https://github.com/globocom/zabbix-scripts).
# License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause
"""Recover lost trends data by copying it from a backup Zabbix database.

Flow:
 1. Query the Zabbix API for the hosts of a hostgroup.
 2. For each host, list its items in both the live and the backup DB and
    match them by (key_, status, value_type).
 3. Dump the backup's trends/trends_uint rows for every matched item into
    bulk-INSERT .sql files (~1k values per INSERT, ~1k INSERTs per file),
    rewritten with the *live* itemid, for a DBA to replay.
"""
import json
import os
import sys
from argparse import ArgumentParser

import pymysql
from logprint import LogPrint
from progressbar import ProgressBar, Percentage, ETA, ReverseBar, RotatingMarker, Timer
from pyzabbix import ZabbixAPI

parser = ArgumentParser(description='Script to copy items history from one database to another')
parser.add_argument('--url', dest='url', help='Zabbix frontend address')
parser.add_argument('--zuser', dest='zabbixuser', help='Zabbix frontend user')
parser.add_argument('--zpassword', dest='zabbixpassword', help='Zabbix frontend password')

parser.add_argument('--dbuser', dest='dbuser', help='Database user')
parser.add_argument('--dbpassword', dest='dbpassword', help='Database password')
parser.add_argument('--dbbkphost', dest='dbbkphost', help='Backup database to get trends data from')
parser.add_argument('--dblivehost', dest='dblivehost', help='Production database to get items from')

parser.add_argument('--no-verbose', dest='verbose', action='store_false', help='Don\'t show any logs on screen')
parser.add_argument('--verbose', dest='verbose', action='store_true')
parser.set_defaults(verbose=False)
parser.add_argument('--loglevel', dest='loglevel', default='ERROR', help='Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL')
args = parser.parse_args()

tmp_dir = '../tmp'
# Cache of the host/item match list, so reruns skip the expensive discovery.
move_items_file = '%s/move_items.txt' % tmp_dir

TIMEOUT = 30.0
LOGFILE = '/tmp/%s.log' % os.path.basename(sys.argv[0])
logger = LogPrint(echo=args.verbose, logfile=LOGFILE, loglevel=args.loglevel.upper())

# Connects to zabbix api to get hostid's, verify itemid's and item type (maybe?)
try:
    zapi = ZabbixAPI(args.url, timeout=TIMEOUT)
    zapi.login(args.zabbixuser, args.zabbixpassword)
except Exception as e:  # 'except X, e' was Python-2-only syntax
    logger.error('Unable to login to Zabbix: %s' % (e))
    sys.exit(1)

if not os.path.exists(tmp_dir):
    os.makedirs(tmp_dir)

# Connects to production database (read and write)
try:
    dblive = pymysql.connect(host=args.dblivehost, port=3306, user=args.dbuser,
                             passwd=args.dbpassword, db='zabbix_staging')
except Exception as e:
    logger.error('Unable to login to LIVE database (%s): %s' % (args.dblivehost, e))
    sys.exit(1)

# Connection to backup database, where our backup history lies
try:
    dbbkp = pymysql.connect(host=args.dbbkphost, port=3306, user=args.dbuser,
                            passwd=args.dbpassword, db='zabbix')
except Exception as e:
    logger.error('Unable to login to BACKUP database (%s): %s' % (args.dbbkphost, e))
    sys.exit(1)


def getItems(hostgroups=('138',)):
    '''
    Search for all hosts inside hostgroups.
    Identify all items present in both production and bkp databases.

    Returns the Zabbix host list, each host extended with an 'itens' list:
    [{"hostid": "17906", "name": "HOST", "itens": [
        {"itemid": 3663516, "status": 0, "value_type": 3,
         "bkpitemid": 3409700, "key_": "ltmNodeAddrStatServerCurConns[...]"},
        ...]}, ...]
    '''
    logger.info('Discovering hostid\'s and item\'s')
    # Get hostid's from Zabbix
    hosts = zapi.host.get(output=['hostid', 'name'], groupids=list(hostgroups))

    # Parameterized query: no manual quoting/escaping of the LIKE patterns.
    # Column order matters: getTrends/main index the tuples positionally.
    itemsQuery = ('SELECT itemid,key_,status,value_type FROM items '
                  'WHERE hostid=%s AND (key_ LIKE %s OR key_ LIKE %s OR key_ LIKE %s)')
    patterns = ('ltmNodeAddr%', 'ltmVirtualServStatClient%', 'ltmVsStatusAvailState%')

    for host in hosts:
        logger.debug('Working on host: %s' % host)
        queryArgs = (int(host['hostid']),) + patterns
        curdblive.execute(itemsQuery, queryArgs)
        liveItems = curdblive.fetchall()
        curdbbkp.execute(itemsQuery, queryArgs)
        bkpItems = curdbbkp.fetchall()

        if len(liveItems) != len(bkpItems):
            logger.warning('Different number of items on prod and backup:')
            logger.warning('mysql results for live: %d' % len(liveItems))
            logger.warning('mysql results for backup: %d' % len(bkpItems))

        host['itens'] = []
        # For each current item, find its match on backup (same key_,
        # status and value_type). Only matched pairs are recorded: the old
        # code appended unconditionally, attaching a stale/last bkpitemid
        # when no match existed (and crashed when the backup set was empty).
        for item in liveItems:
            for bkpitem in bkpItems:
                if bkpitem[1] == item[1] and bkpitem[2] == item[2] and bkpitem[3] == item[3]:
                    host['itens'].append({'itemid': item[0], 'key_': item[1],
                                          'status': item[2], 'value_type': item[3],
                                          'bkpitemid': bkpitem[0]})
                    break
            else:
                logger.warning('No backup match for item %s (host %s)' % (item[1], host['name']))
    logger.info('Hostid\'s and item\'s discovered.')
    return hosts


def getTrends(hostname, item):
    '''
    Get trends data for a given item's backup itemid.

    Returns {'table': 'trends'|'trends_uint'|None,
             'values': tuple of (itemid, clock, num, value_min, value_avg, value_max)}.
    'table' is None for value types that have no trends table (the old code
    raised KeyError in that case).
    '''
    logger.debug('Geting trends data: %s:%s' % (hostname, item['key_']))
    values = {'table': None, 'values': ()}
    if item['value_type'] == 3:
        values['table'] = 'trends_uint'
    elif item['value_type'] == 0:
        values['table'] = 'trends'
    else:
        # Only numeric (float/uint) items keep trends; skip anything else.
        return values
    # Table name comes from the whitelist above; itemid is parameterized.
    valuesQuery = ('SELECT itemid,clock,num,value_min,value_avg,value_max '
                   'FROM %s WHERE itemid=%%s' % values['table'])
    curdbbkp.execute(valuesQuery, (int(item['bkpitemid']),))
    values['values'] = curdbbkp.fetchall()
    return values


def createSQL(table, values, name='insert'):
    '''
    Generate the SQL insert files, breaking each INSERT to up to ~1k values
    and up to ~1k INSERTs per file (~1M values total for each SQL file).

    Files are named '<name>.sql.<seq>' inside tmp_dir.
    '''
    logger.info('Generating SQL file')
    queryInsert = 'INSERT INTO %s (itemid,clock,num,value_min,value_avg,value_max) VALUES' % table
    i = 0  # Controls the progress bar
    x = 0  # Controls number of inserts in one line
    y = 0  # Controls number of lines in one file
    z = 0  # Controls number of file name
    sqlInsert = ''
    valuesLen = len(values)
    logger.debug('Total itens for %s: %d' % (name, valuesLen))

    if valuesLen > 0:
        bar = ProgressBar(maxval=valuesLen,
                          widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
        for value in values:
            i += 1
            x += 1
            if x != 1:  # First value of a statement has no leading comma
                sqlInsert = '%s,%s' % (sqlInsert, value)
            else:
                sqlInsert = value
            if y >= 1000:  # If there is more than 1k lines, write to new file
                z += 1
                y = 0
            if x >= 1000 or i == valuesLen:  # Statement full, or list finished: flush
                sqlFile = '%s.sql.%d' % (name, z)
                fileAppend(f=sqlFile, content='%s %s;\n' % (queryInsert, sqlInsert))
                x = 0
                y += 1
                sqlInsert = ''
            if args.loglevel.upper() != 'DEBUG':  # Dont print progressbar if in debug mode
                bar.update(i)
        bar.finish()
    else:
        logger.warning('No values received')


def fileAppend(f=None, content=None):
    '''
    Append content to the given file name inside tmp_dir.
    Returns True on success, False when either argument is missing.
    '''
    if not (f and content):
        logger.warning('Error when writing new file: no data received')
        return False
    path = '%s/%s' % (tmp_dir, f)
    with open(path, 'a') as handle:
        handle.write(content)
    return True


def main():
    '''
    Controls general flow of operations.
    '''
    # If it exists, use the cached data of hosts and items
    if os.path.isfile(move_items_file):
        with open(move_items_file) as infile:
            hosts = json.load(infile)
        logger.info('Cache loaded from file (%s)' % move_items_file)
    else:
        hosts = getItems()
        with open(move_items_file, 'w') as outfile:
            json.dump(hosts, outfile)
        logger.info('Cache written to file (%s)' % move_items_file)

    for host in hosts:
        logger.info('Geting trends data of host: %s' % host['name'])
        host['trends'] = list()
        host['trends_uint'] = list()
        if len(host['itens']) > 0:
            bar = ProgressBar(maxval=len(host['itens']),
                              widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
            i = 0
            for item in host['itens']:
                temp = getTrends(hostname=host['name'], item=item)
                i += 1
                if args.loglevel.upper() != 'DEBUG':
                    bar.update(i)
                if temp['table'] is None:
                    logger.warning('Unknown value type for item: %s' % item['key_'])
                    continue
                # Rows are rewritten with the LIVE itemid so the generated
                # inserts attach the recovered data to the current item.
                rows = ['(%d, %d, %d, %d, %d, %d)' %
                        (int(item['itemid']), int(value[1]), int(value[2]),
                         int(value[3]), int(value[4]), int(value[5]))
                        for value in temp['values']]
                host[temp['table']].extend(rows)
            bar.finish()
        # Send in blocks of up to ~1M values to generate the SQL files.
        # Both tables are flushed independently: the old elif silently
        # dropped trends_uint whenever a host also had float trends.
        if len(host['trends']) > 0:
            createSQL(table='trends', values=host['trends'], name=host['name'])
        if len(host['trends_uint']) > 0:
            createSQL(table='trends_uint', values=host['trends_uint'], name=host['name'])
        if not (host['trends'] or host['trends_uint']):
            logger.warning('No data from %s found to be sent.' % host['name'])


# Start DB connection
curdblive = dblive.cursor()
curdbbkp = dbbkp.cursor()
main()
7 | # License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause 8 | # 9 | # Based on 10 | # http://stackoverflow.com/questions/67093/how-do-i-quickly-rename-a-mysql-database-change-schema-name 11 | 12 | user='root' 13 | date; 14 | db="$1"; 15 | if [ "x$db" == "x" ]; then 16 | echo "rename-database.sh DATABASE"; 17 | exit 1; 18 | fi 19 | newdb="${db}_`date +%Y%m%d_%H%M`"; 20 | 21 | echo "Renaming from ${db} to ${newdb}"; 22 | echo "" 23 | 24 | echo "Clear history and trends tables of ${db}"; 25 | mysql -u${user} $db -e "TRUNCATE history; TRUNCATE history_log; TRUNCATE history_str; TRUNCATE history_text; TRUNCATE history_uint; TRUNCATE trends; TRUNCATE trends_uint;"; 26 | 27 | echo "Import old db into new database"; 28 | mysql -u${user} -e "CREATE DATABASE ${newdb};"; 29 | time mysqldump -u${user} --opt --single-transaction --skip-lock-tables --extended-insert=FALSE ${db} | mysql -u${user} -D ${newdb} > /dev/null; 30 | 31 | echo "Remove old database: ${db}"; 32 | mysql -u${user} -e "DROP DATABASE ${db};"; 33 | 34 | 35 | echo 'Finished'; 36 | date; 37 | -------------------------------------------------------------------------------- /db/restore.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Copyright (c) 2016, Globo.com 4 | # This file is part of globocom/zabbix-scripts 5 | # (see https://github.com/globocom/zabbix-scripts). 6 | # License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause 7 | 8 | 9 | DBHOST="$1" 10 | EXTRADIR='/opt/zabbix_keys/db' 11 | EXTRAFILE="${EXTRADIR}/${DBHOST}" 12 | DBNAME="$2" 13 | TMPDIR="/dev/shm" 14 | usage() { 15 | echo "import.sh DBHOST DBNAME" 16 | echo "note: DBHOST must be a filename inside ${EXTRADIR}" 17 | } 18 | if [ ! 
-r "${EXTRAFILE}" ]; then 19 | echo "no extra-file found at ${EXTRADIR}" 20 | usage 21 | exit 1; 22 | fi 23 | if [ "x${DBNAME}" == "x" ]; then 24 | echo "no database name found" 25 | usage 26 | exit 1; 27 | fi 28 | date; 29 | path="/opt/zabbix_backup/zabbix/master.database.suaempresa.com/"; 30 | i=`ls $path|tail -n1`; 31 | echo "Found $i. Coping to curr dir.."; 32 | sql="zbx-conf-bkup-${i}.sql"; 33 | cd $TMPDIR; 34 | cp ${path}/${i}/${sql}.gz .; 35 | [ ! -f "${sql}" ] && echo "Ungziping.." && gzip -d ${sql}.gz; 36 | if [ -f "${sql}" ]; then 37 | echo "Cleaning target database" 38 | DEL=`echo "select '/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;' union select concat('drop table ',table_schema,'.',table_name,';') from information_schema.tables where table_schema = '$DBNAME' union select '/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;'" | mysql --defaults-extra-file=${EXTRAFILE};`; 39 | time mysql --defaults-extra-file=${EXTRAFILE} ${DBNAME} -e "$DEL"; 40 | echo "Importing to mysql database ${DBNAME} (${sql}).."; 41 | time mysql --defaults-extra-file=${EXTRAFILE} ${DBNAME} < ${sql}; 42 | echo "Removing partitions"; 43 | time mysql --defaults-extra-file=${EXTRAFILE} ${DBNAME} -e "ALTER TABLE \`history\` REMOVE PARTITIONING; ALTER TABLE \`history_log\` REMOVE PARTITIONING; ALTER TABLE \`history_str\` REMOVE PARTITIONING; ALTER TABLE \`history_text\` REMOVE PARTITIONING; ALTER TABLE \`history_uint\` REMOVE PARTITIONING; ALTER TABLE \`trends\` REMOVE PARTITIONING; ALTER TABLE \`trends_uint\` REMOVE PARTITIONING;" 44 | rm -f ${sql}; 45 | else 46 | echo "Failed to find ${sql}"; 47 | cd $OLDPWD; 48 | date; 49 | exit 1; 50 | fi 51 | echo "Finished.."; 52 | cd $OLDPWD; 53 | date; 54 | -------------------------------------------------------------------------------- /docs/requirements.md: -------------------------------------------------------------------------------- 1 | # requirements.txt 2 | List pip dependencies for all 
scripts. 3 | 4 | ## Install without virtualenv 5 | ```sh 6 | $ pip install --requirement requirements.txt 7 | ``` 8 | 9 | ## Install with virtualenv 10 | ```sh 11 | $ mk virtualenv zabbix-scripts 12 | $ pip install --requirement requirements.txt 13 | ``` -------------------------------------------------------------------------------- /docs/zbx_changeMultipleTriggers.md: -------------------------------------------------------------------------------- 1 | # zbx_changeMultipleTriggers.py 2 | Change status of multiple triggers inside multiple hosts 3 | 4 | # Usage: 5 | ```sh 6 | usage: zbx_changeMultipleTriggers.py [-h] [--url URL] [--user USER] 7 | [--password PASSWORD] [--no-verbose] 8 | [--verbose] [--no-run] [--run] --status 9 | STATUS [--loglevel LOGLEVEL] 10 | 11 | Change status of multiple triggers in multiple Zabbix Hosts. 12 | 13 | optional arguments: 14 | -h, --help show this help message and exit 15 | --url URL Zabbix server address 16 | --user USER Zabbix user 17 | --password PASSWORD Zabbix password 18 | --no-verbose Dont show any logs on screen 19 | --verbose 20 | --no-run Dont perform any operation 21 | --run Work 22 | --status STATUS Status to change trigger to. [0|1] 23 | --loglevel LOGLEVEL Debug level. 
DEBUG/INFO/WARNING/ERROR/CRITICAL 24 | ``` 25 | 26 | # TODO 27 | > - Hosts and triggers should be in a separate file 28 | -------------------------------------------------------------------------------- /docs/zbx_clone.md: -------------------------------------------------------------------------------- 1 | # zbx_clone.py 2 | 3 | 4 | # Usage: 5 | ```sh 6 | usage: zbx_clone.py [-h] [--url URL] [--user USER] [--password PASSWORD] 7 | [--no-verbose] [--verbose] [--loglevel LOGLEVEL] [--hosts] 8 | [--no-hosts] [--proxy] [--no-proxy] [--proxy-local] 9 | [--no-proxy-local] [--discovery] [--no-discovery] [--mail] 10 | [--no-mail] 11 | 12 | Changes all passive proxies to active 13 | 14 | optional arguments: 15 | -h, --help show this help message and exit 16 | --url URL Zabbix server address 17 | --user USER Zabbix user 18 | --password PASSWORD Zabbix password 19 | --no-verbose Don't show any logs on screen 20 | --verbose 21 | --loglevel LOGLEVEL Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL 22 | --hosts Disable all hosts inside zabbix 23 | --no-hosts Keep the state as is, from all hosts 24 | --proxy Change all proxies to active 25 | --no-proxy Dont change proxy mode 26 | --proxy-local Change all passive proxies to localhost 27 | --no-proxy-local Dont change passive proxies address 28 | --discovery Disable all network discovery rules 29 | --no-discovery Keep state of all network discovery rules 30 | --mail Change email source address to zabbix- 31 | AMBIENTE@suaempresa.com 32 | --no-mail Keep current email source address 33 | ``` 34 | 35 | # TODO 36 | > - Hosts and triggers should be in a separate file 37 | -------------------------------------------------------------------------------- /docs/zbx_deleteMonitors.md: -------------------------------------------------------------------------------- 1 | # zbx_deleteMonitors.py 2 | Call Globo custom method deleteMonitors to remove a given hostname or, `evilly` remove all hosts from a given hostgroup. USE with **`CAUTION`**! 
3 | 4 | # Usage: 5 | ```sh 6 | usage: zbx_deleteMonitors.py [-h] --url URL --user USER --password PASSWORD 7 | [--no-verbose] [--verbose] [--loglevel LOGLEVEL] 8 | [--hostname HOSTNAME] [--groupname GROUPNAME] 9 | [--no-run] [--run] 10 | 11 | This script removes all hosts from a given hostgroup id. It can also remove a 12 | host by its name. 13 | 14 | optional arguments: 15 | -h, --help show this help message and exit 16 | --url URL Zabbix server address 17 | --user USER Zabbix user 18 | --password PASSWORD Zabbix password 19 | --no-verbose Dont show any logs on screen 20 | --verbose 21 | --loglevel LOGLEVEL Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL 22 | --hostname HOSTNAME Host name to be removed 23 | --groupname GROUPNAME 24 | Hostgroup name to be cleaned (all hosts DELETED)! USE 25 | WITH CAUTION 26 | --no-run Dont remove anything, just count (works only with 27 | hostgroup) 28 | --run Remove every match (works only with hostgroup) 29 | ``` 30 | 31 | # TODO 32 | > - Nothing yet! 33 | -------------------------------------------------------------------------------- /docs/zbx_deleted-linux.md: -------------------------------------------------------------------------------- 1 | # zbx_deleted-linux.py 2 | Identify each Linux host inside \_DELETED\_ hostgroup that looks like a linux server, connect's and then disable service _SNMPD_. Also, disable the host in Zabbix. 3 | 4 | This prevents network discovery from rediscovering the device and false-alarms. 5 | 6 | # Usage: 7 | ```sh 8 | usage: zbx_deleted-linux.py [-h] [--url URL] [--user USER] 9 | [--password PASSWORD] [--no-verbose] [--verbose] 10 | [--loglevel LOGLEVEL] [--sshkey SSHKEY] 11 | [--groupid GROUPID] 12 | 13 | This script connects to each server that looks like LINUX and stops its snmpd, 14 | preventing removed hosts from beeing rediscovered. 
15 | 16 | optional arguments: 17 | -h, --help show this help message and exit 18 | --url URL Zabbix server address 19 | --user USER Zabbix user 20 | --password PASSWORD Zabbix password 21 | --no-verbose Dont show any logs on screen 22 | --verbose 23 | --loglevel LOGLEVEL Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL 24 | --sshkey SSHKEY SSH Key to be used 25 | --groupid GROUPID Groupid to be checked. Default: 72 26 | ``` 27 | 28 | # TODO 29 | > - Some more testing 30 | > - Improve output 31 | -------------------------------------------------------------------------------- /docs/zbx_deleted.md: -------------------------------------------------------------------------------- 1 | # zbx_deleted.py 2 | Remove hosts that have been too long on hostgroup called \_DELETED\_ and hosts that have no timestamp. 3 | 4 | # Usage: 5 | ```sh 6 | usage: zbx_deleted.py [-h] --url URL --user USER --password PASSWORD 7 | [--no-verbose] [--verbose] [--loglevel LOGLEVEL] 8 | [--max-age MAX_AGE] [--no-run] [--run] [--no-matches] 9 | [--matches] 10 | 11 | Script used to remove hosts older than --max-age days from hostgroup _DELETED_ 12 | (hostgroupid=72) 13 | 14 | optional arguments: 15 | -h, --help show this help message and exit 16 | --url URL Zabbix server address 17 | --user USER Zabbix user 18 | --password PASSWORD Zabbix password 19 | --no-verbose Dont show any logs on screen 20 | --verbose 21 | --loglevel LOGLEVEL Debug level. 
DEBUG/INFO/WARNING/ERROR/CRITICAL 22 | --max-age MAX_AGE Max age in days for host to be in there 23 | --no-run Dont remove any host, just count 24 | --run Remove all hosts that expired 25 | --no-matches Dont remove any host that has no prefix 26 | --matches Remove all hosts that has no prefix 27 | ``` 28 | 29 | # TODO 30 | > - Use hostgroupname instead of hostgroupid 31 | -------------------------------------------------------------------------------- /docs/zbx_discovery-manager.md: -------------------------------------------------------------------------------- 1 | # zbx\_discovery-manager.py 2 | 3 | 4 | # Usage: 5 | ```sh 6 | usage: zbx_discovery-manager.py [-h] --url URL --user USER --password PASSWORD 7 | [--no-verbose] [--verbose] 8 | [--loglevel LOGLEVEL] [--fake FAKE] 9 | [--no-move] [--move] 10 | 11 | Create discovery rules for all necessary networks for Globo.com 12 | 13 | optional arguments: 14 | -h, --help show this help message and exit 15 | --url URL Zabbix server address 16 | --user USER Zabbix user 17 | --password PASSWORD Zabbix password 18 | --no-verbose Don't show any logs on screen 19 | --verbose 20 | --loglevel LOGLEVEL Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL 21 | --fake FAKE Just fake the execution and simulate the result (BETA) 22 | --no-move Manage only new vlans. Existing ones will not be moved 23 | between proxies (Better) 24 | --move 25 | ``` -------------------------------------------------------------------------------- /docs/zbx_historyGet.md: -------------------------------------------------------------------------------- 1 | # zbx_historyGet.py 2 | Collect data from whole hostgroups filtering by specific item inside all hosts. 3 | 4 | Assumes all items have the same delta and data_type. 
5 | 6 | # Usage: 7 | ```sh 8 | usage: zbx_historyGet.py [-h] --url URL --user USER --password PASSWORD 9 | [--no-verbose] [--verbose] --group GROUP --item ITEM 10 | [--loglevel LOGLEVEL] 11 | 12 | Collect history data from selected items 13 | 14 | optional arguments: 15 | -h, --help show this help message and exit 16 | --url URL Zabbix server address 17 | --user USER Zabbix user 18 | --password PASSWORD Zabbix password 19 | --no-verbose Dont show any logs on screen 20 | --verbose 21 | --group GROUP Hostgroup name with hosts to look for 22 | --item ITEM Item name inside each host of hostgroup 23 | --loglevel LOGLEVEL Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL 24 | ``` 25 | 26 | # TODO 27 | > - Maybe allow multiple hostgroups and or item name 28 | -------------------------------------------------------------------------------- /docs/zbx_hostgroupOrganizer.md: -------------------------------------------------------------------------------- 1 | # zbx\_hostgroupOrganizer.py 2 | 3 | 4 | # Usage: 5 | ```sh 6 | usage: zbx_hostgroupOrganizer.py [-h] [--url URL] [--user USER] 7 | [--password PASSWORD] [--no-verbose] 8 | [--verbose] [--loglevel LOGLEVEL] 9 | 10 | Creates a hostgroup for each proxy, and adds all hosts monitored by it. Also, 11 | interacts with all hosts in Operacao organizing it. 12 | 13 | optional arguments: 14 | -h, --help show this help message and exit 15 | --url URL Zabbix server address 16 | --user USER Zabbix user 17 | --password PASSWORD Zabbix password 18 | --no-verbose Dont show any logs on screen 19 | --verbose 20 | --loglevel LOGLEVEL Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL 21 | ``` 22 | 23 | # TODO 24 | > - Nothing yet? 
25 | -------------------------------------------------------------------------------- /odbc/README.md: -------------------------------------------------------------------------------- 1 | # **Zabbix-Scripts::ODBC** 2 | --- 3 | 4 | 5 | ## test_odbc_connection.sh 6 | --- 7 | > Script to test connection for each DSN found in the odbc.ini configuration file 8 | > 9 | > For each connection error, your DSN will be included in the file /tmp/error_conn_odbc.txt 10 | > 11 | >## Requisites 12 | > * isql 13 | > * ODBC drivers for your DBMS 14 | > 15 | >### Credits 16 | >> Author: Janssen Lima (janssen.lima at corp.globo.com) 17 | > 18 | >### Usage 19 | > ```shell 20 | > $ sh test_odbc_connection.sh 21 | > ``` 22 | 23 | > ### TODO 24 | > * Any suggestion? Pull request 25 | > 26 | -------------------------------------------------------------------------------- /odbc/test_odbc_connection.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) 2019, Globo.com 3 | # This file is part of globocom/zabbix-scripts 4 | # (see https://github.com/globocom/zabbix-scripts). 5 | # License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause 6 | 7 | prep_arq() 8 | { 9 | file="/tmp/dsn.txt" 10 | user_conn="usr_zabbix" 11 | 12 | cp /etc/odbc.ini $file 13 | 14 | sed -i '/^\[/!d' $file 15 | 16 | sed -i 's/\]//' $file 17 | sed -i 's/\[//' $file 18 | 19 | echo "quit" > /tmp/quit.sql 20 | } 21 | 22 | 23 | test_conn() 24 | { 25 | prep_arq 26 | 27 | for dsn in $(cat $file); 28 | do 29 | echo "Testing connection using DSN $dsn" 30 | 31 | /usr/bin/isql $dsn $user_conn < /tmp/quit.sql 32 | 33 | if [ "$?" -ne 0 ]; then 34 | echo "Connection failed!!!" 35 | echo $dsn >> /tmp/error_conn_odbc.txt 36 | else 37 | echo "Success in connection!!!" 38 | fi 39 | 40 | echo 41 | sleep 5 42 | done 43 | } 44 | 45 | which isql > /dev/null 46 | if [ "$?" -ne 0 ]; then 47 | echo "isql binary not found." 
#!/usr/bin/python
# Copyright (c) 2018, Globo.com
# This file is part of globocom/zabbix-scripts
# (see https://github.com/globocom/zabbix-scripts).
# License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause
"""Change the status of multiple triggers on multiple Zabbix hosts.

Edit the ``lista`` variable below with the host/trigger-name pairs to
change, then run with ``--run --status [0|1]`` to apply. Without
``--run`` the script only logs what it would change.
"""
from os import path
from sys import argv, exit
from pyzabbix import ZabbixAPI
from argparse import ArgumentParser
from progressbar import ProgressBar, Percentage, ETA, ReverseBar, RotatingMarker, Timer
from logprint import LogPrint

parser = ArgumentParser(description = 'Change status of multiple triggers in multiple Zabbix Hosts.')
parser.add_argument('--url', dest = 'url', help = 'Zabbix server address')
parser.add_argument('--user', dest = 'user', help = 'Zabbix user')
parser.add_argument('--password', dest = 'password', help = 'Zabbix password')
parser.add_argument('--no-verbose', dest = 'verbose', action = 'store_false', help = 'Dont show any logs on screen')
parser.add_argument('--verbose', dest = 'verbose', action = 'store_true')
parser.set_defaults(verbose=False)
parser.add_argument('--no-run', dest = 'run', action = 'store_false', help = 'Dont perform any operation')
parser.add_argument('--run', dest = 'run', action = 'store_true', help = 'Work')
parser.set_defaults(run=False)
parser.add_argument('--status', dest = 'status', type = int, required = True, help = 'Status to change trigger to. [0|1]')
parser.add_argument('--loglevel', dest = 'loglevel', default = 'ERROR', help = 'Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL')
args = parser.parse_args()

TIMEOUT = 5.0
LOGFILE = '/tmp/%s.log' % path.basename(argv[0])
logger = LogPrint(echo=args.verbose, logfile=LOGFILE, loglevel=args.loglevel.upper())

# Trigger status: 0 = enabled, 1 = disabled.
# BUG FIX: the original only logged the error and kept running with an
# invalid status; abort instead.
if args.status not in (0, 1):
    logger.error('--status should be either 0[enabled] or 1[disabled]')
    exit(1)

try:
    zapi = ZabbixAPI(args.url, timeout=TIMEOUT)
    zapi.login(args.user, args.password)
except Exception as e:
    # Narrowed from a bare except; include the reason in the log.
    logger.error('Unable to login: {0}. Check your credentials.'.format(e))
    exit(1)

# Host/trigger-name pairs whose triggers will be changed. Edit as needed.
lista = [
    { 'host': 'HOST A', 'trigger': 'eth0' },
    { 'host': 'HOST B', 'trigger': 'eth0' },
]
maintenance_triggers_ids = []

# Resolve each host/trigger pair into the matching trigger ids.
for host in lista:
    h = zapi.host.get(output=['hostid', 'name'], search={'name': host['host']})
    if not h:
        logger.warning('Host {0} not found!'.format(host['host']))
        continue
    triggers = zapi.trigger.get(output=['description', 'triggerid'], hostids=[h[0]['hostid']], expandDescription=1, search={'description': ': {0}'.format(host['trigger'])})
    logger.info('Found {0} triggers for host {1}'.format(len(triggers), host['host']))
    logger.print_json(triggers)
    for t in triggers:
        maintenance_triggers_ids.append(t['triggerid'])

i = 0
logger.info('Found {0} triggers'.format(len(maintenance_triggers_ids)))
bar = ProgressBar(maxval=len(maintenance_triggers_ids), widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
for t in maintenance_triggers_ids:
    if args.run:
        out = zapi.trigger.update(triggerid=t, status=args.status)
        i += 1
        bar.update(i)
    else:
        logger.warning('Should change triggerid {0} to status {1}'.format(t, args.status))
bar.finish()

zapi.user.logout()
logger.info('Done!!')
#!/usr/bin/python
# Copyright (c) 2016, Globo.com
# This file is part of globocom/zabbix-scripts
# (see https://github.com/globocom/zabbix-scripts).
# License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause
"""Prepare a freshly cloned Zabbix database for test/staging use.

Optionally disables hosts and network discovery rules, switches passive
proxies to active mode (or points them at localhost) and rewrites the
e-mail media type source address, so the clone cannot interfere with
the production environment.
"""
from __future__ import division
from os import path
from sys import argv, exit
from progressbar import ProgressBar, Percentage, ETA, ReverseBar, RotatingMarker, Timer
from pyzabbix import ZabbixAPI
from argparse import ArgumentParser
from math import ceil
from logprint import LogPrint

parser = ArgumentParser(description = 'Changes all passive proxies to active')

parser.add_argument('--url', dest = 'url', help = 'Zabbix server address')
parser.add_argument('--user', dest = 'user', help = 'Zabbix user')
parser.add_argument('--password', dest = 'password', help = 'Zabbix password')
parser.add_argument('--no-verbose', dest = 'verbose', action = 'store_false', help = 'Don\'t show any logs on screen')
parser.add_argument('--verbose', dest = 'verbose', action = 'store_true')
parser.set_defaults(verbose=False)
parser.add_argument('--loglevel', dest = 'loglevel', default = 'ERROR', help = 'Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL')

parser.add_argument('--hosts', dest = 'hosts', action = 'store_true', help = 'Disable all hosts inside zabbix')
parser.add_argument('--no-hosts', dest = 'hosts', action = 'store_false', help = 'Keep the state as is, from all hosts')
# BUG FIX: was set_defaults(host=True) — a typo that left args.hosts unset
# (None), so the hosts step silently never ran by default.
parser.set_defaults(hosts=True)
parser.add_argument('--proxy', dest = 'proxy', action = 'store_true', help = 'Change all proxies to active')
parser.add_argument('--no-proxy', dest = 'proxy', action = 'store_false', help = 'Dont change proxy mode')
parser.set_defaults(proxy=False)
parser.add_argument('--proxy-local', dest = 'proxy_local', action = 'store_true', help = 'Change all passive proxies to localhost')
parser.add_argument('--no-proxy-local', dest = 'proxy_local', action = 'store_false', help = 'Dont change passive proxies address')
parser.set_defaults(proxy_local=False)
parser.add_argument('--discovery', dest = 'discovery', action = 'store_true', help = 'Disable all network discovery rules')
parser.add_argument('--no-discovery', dest = 'discovery', action = 'store_false', help = 'Keep state of all network discovery rules')
parser.set_defaults(discovery=True)
parser.add_argument('--mail', dest = 'mail', action = 'store_true', help = 'Change email source address to zabbix-AMBIENTE@suaempresa.com')
parser.add_argument('--no-mail', dest = 'mail', action = 'store_false', help = 'Keep current email source address')
parser.set_defaults(mail=True)
args = parser.parse_args()

TIMEOUT = 180.0
LOGFILE = '/tmp/%s.log' % path.basename(argv[0])
logger = LogPrint(echo=args.verbose, logfile=LOGFILE, loglevel=args.loglevel.upper())

try:
    zapi = ZabbixAPI(args.url, timeout=TIMEOUT)
    zapi.login(args.user, args.password)
except Exception as e:
    logger.error('Unable to login: %s' % (e))
    exit(1)

# Hostgroups (by id) whose hosts are re-enabled after disabling everything.
groupids = [ 4 ]  # Zabbix Servers

def hosts_disable_all():
    """Disable every enabled host (status 0 -> 1), in blocks of 1000."""
    logger.info('Disabling all hosts, in blocks of 1000')
    # BUG FIX: 'status' is not a text field, so search={'status': 0} did not
    # restrict the result; filter returns only the enabled hosts.
    hosts = zapi.host.get(output=['hostid'], filter={'status': 0})
    # BUG FIX: parentheses were misplaced (ceil(len(hosts))/1000+1); this is
    # the real number of 1000-host blocks, with a minimum of 1 for the bar.
    maxval = max(1, int(ceil(len(hosts) / 1000)))
    bar = ProgressBar(maxval=maxval, widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
    done = 0
    while hosts:
        block = hosts[:1000]
        del hosts[:1000]
        zapi.host.massupdate(hosts=block, status=1)
        done += 1
        bar.update(done)
    bar.finish()
    logger.info('Done')
    return

def hosts_enable_selected():
    """Re-enable hosts (status 1 -> 0) belonging to the whitelisted groupids."""
    logger.info('Enabling selected hosts')
    # filter (not search) — see hosts_disable_all.
    hosts = zapi.host.get(output=['hostid'], groupids=groupids, filter={'status': 1})
    zapi.host.massupdate(hosts=hosts, status=0)
    logger.info('Done')
    return

def proxy_passive_to_active():
    """Switch every passive proxy (status 6) to active (status 5)."""
    logger.info('Change all proxys to active')
    proxys = zapi.proxy.get(output=['proxyid', 'host'], filter={'status': 6})
    if not proxys:
        logger.info('Done')
        return
    bar = ProgressBar(maxval=len(proxys), widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
    logger.echo = False  # keep the progress bar readable
    for pos, proxy in enumerate(proxys, start=1):
        zapi.proxy.update(proxyid=proxy['proxyid'], status=5)
        logger.debug('Changed from passive to active proxy: %s' % (proxy['host']))
        bar.update(pos)
    bar.finish()
    logger.echo = True
    logger.info('Done')
    return

def proxy_passive_to_localhost():
    """Point every passive proxy's interface at localhost (127.0.0.1)."""
    logger.info('Change all passive proxys to localhost')
    proxys = zapi.proxy.get(output=['proxyid', 'host'], filter={'status': 6}, selectInterface='extend')
    if not proxys:
        logger.info('Done')
        return
    bar = ProgressBar(maxval=len(proxys), widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
    logger.echo = False
    for pos, proxy in enumerate(proxys, start=1):
        params = {
            'proxyid': proxy['proxyid'],
            'status': 6,  # remains passive; only the address changes
            'interface': {
                'interfaceid': proxy['interface']['interfaceid'],
                'dns': 'localhost',
                'ip': '127.0.0.1'
            }
        }
        zapi.proxy.update(**params)
        logger.debug('Proxy changed to localhost: %s' % (proxy['host']))
        bar.update(pos)
    bar.finish()
    logger.echo = True
    logger.info('Done')
    return

def discovery_disable_all(status=0):
    """Disable every enabled network discovery rule.

    Rule status: 0 = enabled, 1 = disabled. ``status`` is only used in the
    log messages; kept for backward compatibility with existing callers.
    """
    logger.info('Disabling all network discoveries')
    drules = zapi.drule.get(output=['druleid', 'iprange', 'name', 'proxy_hostid', 'status'],
                            selectDChecks='extend', filter={'status': 0})
    if not drules:
        logger.info('Done')
        return
    bar = ProgressBar(maxval=len(drules), widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
    logger.echo = False
    for pos, rule in enumerate(drules, start=1):
        out = zapi.drule.update(druleid=rule['druleid'], iprange=rule['iprange'],
                                name=rule['name'], dchecks=rule['dchecks'], status=1)
        if out:
            logger.debug('\tNew status: %s (%s) --> %d' % (rule['name'], out['druleids'], status))
        else:
            # BUG FIX: the failure branch dereferenced out['druleids'] on a
            # falsy `out`, which would raise instead of logging the failure.
            logger.warning('\tFAILED to change status: %s --> %d' % (rule['name'], status))
        bar.update(pos)
    bar.finish()
    logger.echo = True
    logger.info('Done')
    return

def mail_src():
    """Rewrite the e-mail media type source address, derived from --url.

    Assumes --url contains 'zabbix.' (e.g. http://zabbix.test.example.com
    yields zabbix@zabbix.test.example.com).
    """
    email = 'zabbix@zabbix.%s' % args.url.split('zabbix.')[1]
    logger.info('Updating source of email address to %s' % email)
    out = zapi.mediatype.update(mediatypeid=1, smtp_email=email)
    logger.info('Done')


if args.proxy:
    proxy_passive_to_active()

if args.hosts:
    hosts_disable_all()
    hosts_enable_selected()

if args.discovery:
    discovery_disable_all()

if args.proxy_local:
    proxy_passive_to_localhost()

if args.mail:
    mail_src()
zapi.user.logout()
#!/usr/bin/python
# Copyright (c) 2016, Globo.com
# This file is part of globocom/zabbix-scripts
# (see https://github.com/globocom/zabbix-scripts).
# License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause
"""Remove hosts from Zabbix, by exact host name or by hostgroup name.

With --groupname, every host in the group is removed (only when --run is
given; --no-run just lists them). Removal goes through the custom
zapi.globo.deleteMonitors API method.
"""
from os import path
from sys import argv, exit
from logprint import LogPrint
from pyzabbix import ZabbixAPI
from argparse import ArgumentParser
from progressbar import ProgressBar, Percentage, ETA, ReverseBar, RotatingMarker, Timer

parser = ArgumentParser(description = 'This script removes all hosts from a given hostgroup id. It can also remove a host by its name.')
parser.add_argument('--url', dest = 'url', required = True, help = 'Zabbix server address')
parser.add_argument('--user', dest = 'user', required = True, help = 'Zabbix user')
parser.add_argument('--password', dest = 'password', required = True, help = 'Zabbix password')
parser.add_argument('--no-verbose', dest = 'verbose', action = 'store_false', help = 'Don\'t show any logs on screen')
parser.add_argument('--verbose', dest = 'verbose', action = 'store_true')
parser.set_defaults(verbose=False)
parser.add_argument('--loglevel', dest = 'loglevel', default = 'ERROR', help = 'Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL')
parser.add_argument('--hostname', dest = 'hostname', help = 'Host name to be removed')
parser.add_argument('--groupname', dest = 'groupname', help = 'Hostgroup name to be cleaned (all hosts DELETED)! USE WITH CAUTION')
parser.add_argument('--no-run', dest = 'run', action = 'store_false', help = 'Don\'t remove anything, just count (works only with hostgroup)')
parser.add_argument('--run', dest = 'run', action = 'store_true', help = 'Remove every match (works only with hostgroup)')
# BUG FIX: args.run had no default (None); make the safe choice explicit.
parser.set_defaults(run=False)

args = parser.parse_args()

TIMEOUT = 30.0
LOGFILE = "/tmp/%s.log" % path.basename(argv[0])
logger = LogPrint(echo=args.verbose, logfile=LOGFILE, loglevel=args.loglevel.upper())

if not args.hostname and not args.groupname:
    logger.error('You MUST use at least one of --hostname or --groupname.')
    exit(1)

try:
    zapi = ZabbixAPI(args.url, timeout=TIMEOUT)
    zapi.login(args.user, args.password)
except Exception as e:
    logger.error("Unable to login: %s" % (e))
    exit(1)

def deleteHostByName(hostname):
    """Remove a single host (and its monitors) by exact host name."""
    logger.print_json(zapi.globo.deleteMonitors(host=hostname))
    return

def deleteHostsByHostgroup(groupname):
    """Remove every host that belongs to the named hostgroup.

    Honors --no-run: in that case the matching hosts are only listed.
    """
    hostgroup = zapi.hostgroup.get(output=['groupid'], filter={'name': groupname})
    if len(hostgroup) != 1:
        logger.error('Hostgroup not found: %s\n\tFound this: %s' % (groupname, hostgroup))
        # BUG FIX: previously fell through and crashed with IndexError below.
        exit(1)
    groupid = int(hostgroup[0]['groupid'])
    hosts = zapi.host.get(output=['name', 'hostid'], groupids=groupid)
    total = len(hosts)
    logger.info('Hosts found: %d' % (total))
    if args.run:
        bar = ProgressBar(maxval=total, widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
        logger.echo = False  # progress bar and echo don't mix
        for x, host in enumerate(hosts, start=1):
            bar.update(x)
            logger.debug('(%d/%d) >> Removing >> %s' % (x, total, host))
            out = zapi.globo.deleteMonitors(host['name'])
        bar.finish()
        logger.echo = True
    else:
        logger.info('No host removed due to --no-run arg. Full list of hosts:')
        for host in hosts:
            logger.info('%s' % host['name'])
    return


if args.hostname:
    deleteHostByName(hostname=args.hostname)
    # BUG FIX: log out before exiting (logout was unreachable before).
    zapi.user.logout()
    exit(0)

if args.groupname:
    deleteHostsByHostgroup(groupname=args.groupname)
zapi.user.logout()
exit(0)
#!/usr/bin/python
# Copyright (c) 2016, Globo.com
# This file is part of globocom/zabbix-scripts
# (see https://github.com/globocom/zabbix-scripts).
# License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause
"""Disable hosts from a hostgroup and stop snmpd on the Linux-looking ones.

For every still-enabled host in --groupid, the host is disabled in Zabbix
and, when its name suggests a Linux server, snmpd is stopped over SSH so
the host is not rediscovered by the SNMP discovery rules.
"""
import commands
import string
from os import path
from sys import argv, exit
from logprint import LogPrint
from pyzabbix import ZabbixAPI
from argparse import ArgumentParser

parser = ArgumentParser(description = 'This script connects to each server that looks like LINUX and stops its snmpd, preventing removed hosts from beeing rediscovered.')
parser.add_argument('--url', dest = 'url', help = 'Zabbix server address')
parser.add_argument('--user', dest = 'user', help = 'Zabbix user')
parser.add_argument('--password', dest = 'password', help = 'Zabbix password')
parser.add_argument('--no-verbose', dest = 'verbose', action = 'store_false', help = 'Dont show any logs on screen')
parser.add_argument('--verbose', dest = 'verbose', action = 'store_true')
parser.set_defaults(verbose=False)
parser.add_argument('--loglevel', dest = 'loglevel', default = 'ERROR', help = 'Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL')
parser.add_argument('--sshkey', dest = 'sshkey', help = 'SSH Key to be used')
parser.add_argument('--groupid', dest = 'groupid', default = '72', help = 'Groupid to be checked. Default: 72')
args = parser.parse_args()

# BUG FIX: TIMEOUT was referenced at login but never defined (NameError).
TIMEOUT = 30.0
LOGFILE = "/tmp/%s.log" % path.basename(argv[0])
logger = LogPrint(echo=args.verbose, logfile=LOGFILE, loglevel=args.loglevel.upper())
try:
    zapi = ZabbixAPI(args.url, timeout=TIMEOUT)
    zapi.login(args.user, args.password)
except Exception as e:
    logger.error("Unable to login: %s" % (e))
    exit(1)

# Hosts from the target hostgroup that are still enabled (status 0).
hosts = zapi.host.get(groupids=[ args.groupid ], selectInterfaces='extend', output=['name', 'hostid'], filter={"status": 0})

for host in hosts:
    logger.debug("Doing host %s" % host['name'])
    ok = 0
    for ip in host['interfaces']:
        if ip['main'] == '1':
            # Disable the host in Zabbix regardless of the SSH outcome.
            zapi.host.update(hostid=host['hostid'], status=1)
            # Name-based heuristic for "looks like one of our Linux servers"
            # (cheaper than querying the linked templates per host).
            if ( 'lf' in host['name'] or 'ls' in host['name'] or 'lb' in host['name'] ):
                if args.sshkey:
                    # BUG FIX: the key and the address were swapped in the
                    # format args (the IP went to -i and the key to root@).
                    exe = "ssh -i %s root@%s \"/etc/init.d/snmpd stop\"" % (args.sshkey, ip['ip'])
                else:
                    exe = "ssh root@%s \"/etc/init.d/snmpd stop\"" % (ip['ip'])
                (status, out) = commands.getstatusoutput(exe)
                # BUG FIX: failure was logged unconditionally and `ok` was
                # never set, so every host was reported as failed.
                if status == 0:
                    ok = 1
                else:
                    logger.warning("Failed on %s" % ip['ip'])
    if ok == 0:
        logger.warning("Failed for %s" % host['name'])
zapi.user.logout()
#!/usr/bin/python
# Copyright (c) 2016, Globo.com
# This file is part of globocom/zabbix-scripts
# (see https://github.com/globocom/zabbix-scripts).
# License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause
"""Remove hosts that stayed too long in the _DELETED_ hostgroup (id 72).

Hosts are expected to carry a _YYMMDD...-style timestamp in their name;
hosts older than --max-age days are removed. Hosts without a timestamp
are removed only when --matches is given. Nothing is removed without --run.
"""
import re
import commands
import string
from os import path
from sys import argv, exit
from datetime import datetime
from pyzabbix import ZabbixAPI, ZabbixAPIException
from argparse import ArgumentParser
from progressbar import ProgressBar, Percentage, ETA, ReverseBar, RotatingMarker, Timer
from logprint import LogPrint

parser = ArgumentParser(description = 'Script used to remove hosts older than --max-age days from hostgroup _DELETED_ (hostgroupid=72)')
parser.add_argument('--url', dest = 'url', required=True, help = 'Zabbix server address')
parser.add_argument('--user', dest = 'user', required=True, help = 'Zabbix user')
parser.add_argument('--password', dest = 'password', required=True, help = 'Zabbix password')
parser.add_argument('--no-verbose', dest = 'verbose', action = 'store_false', help = 'Dont show any logs on screen')
parser.add_argument('--verbose', dest = 'verbose', action = 'store_true')
parser.set_defaults(verbose=True)
parser.add_argument('--loglevel', dest = 'loglevel', default = 'ERROR', help = 'Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL')
parser.add_argument('--max-age', dest = 'max_age', default = 31, help = 'Max age in days for host to be in there')
parser.add_argument('--no-run', dest = 'run', action = 'store_false', help = 'Dont remove any host, just count')
parser.add_argument('--run', dest = 'run', action = 'store_true', help = 'Remove all hosts that expired')
parser.add_argument('--no-matches', dest = 'matches', action = 'store_false', help = 'Dont remove any host that has no prefix')
parser.add_argument('--matches', dest = 'matches', action = 'store_true', help = 'Remove all hosts that has no prefix')
args = parser.parse_args()

TIMEOUT = 30.0
LOGFILE = "/tmp/%s.log" % path.basename(argv[0])
logger = LogPrint(echo=args.verbose, logfile=LOGFILE, loglevel=args.loglevel.upper())

try:
    zapi = ZabbixAPI(args.url, timeout=TIMEOUT)
    zapi.login(args.user, args.password)
except Exception as e:
    logger.error("Unable to login: %s" % (e))
    exit(1)

def remove_hosts(hosts_list):
    """Delete every host in hosts_list, showing a progress bar.

    Factors out the loop that was duplicated for both removal passes.
    """
    total = len(hosts_list)
    bar = ProgressBar(maxval=total, widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
    logger.echo = False  # progress bar and echo don't mix
    for x, host in enumerate(hosts_list, start=1):
        bar.update(x)
        logger.debug("(%d/%d) >> Removing >> %s" % (x, total, host))
        out = zapi.host.delete(host['hostid'])
    bar.finish()
    logger.echo = args.verbose

hosts = zapi.host.get(output=[ "name", "hostid" ], groupids=[ 72 ])
hosts_exclude = []
hosts_no_match = []
date_curr = datetime.now()

# Split hosts into "timestamped and expired" vs. "no timestamp at all".
for host in hosts:
    matchObj = re.search(r'_(\d{6})\d+_', host['name'], re.M | re.I)
    if matchObj:
        # BUG FIX: '20%d' % int(...) dropped leading zeros (e.g. '091231'
        # became '2091231'), breaking '%Y%m%d'; keep the matched text as-is.
        host_date = datetime.strptime('20' + matchObj.group(1), '%Y%m%d')
        timediff = (date_curr - host_date).days
        if timediff >= int(args.max_age):
            host['timediff'] = timediff
            hosts_exclude.append(host)
    else:
        logger.debug("No matches for host: %s" % host)
        hosts_no_match.append(host)

# Pass 1: hosts whose timestamp expired.
total = len(hosts_exclude)
logger.info("Hosts to remove: %d" % total)
if args.run and total > 0:
    remove_hosts(hosts_exclude)

# Pass 2: hosts without a timestamp (only with --matches).
total = len(hosts_no_match)
logger.info("Other hosts without timestamp to remove: %d" % total)
if args.run and total > 0 and args.matches:
    remove_hosts(hosts_no_match)

if not args.run:
    logger.warning("Not removing.. script ran with --no-run")
# BUG FIX: log out on both paths (logout was skipped when --run was given).
zapi.user.logout()
exit(0)
5 | # License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause 6 | from socket import gethostbyname 7 | from types import DictType 8 | from sys import exit 9 | from pyzabbix import ZabbixAPI 10 | from argparse import ArgumentParser 11 | from subprocess import Popen, PIPE # For zabbix reload 12 | from netaddr import IPNetwork, IPAddress # For matching ip and network 13 | from logprint import LogPrint 14 | from netdiscovery import NetworkGet 15 | 16 | parser = ArgumentParser(description = 'Create discovery rules for all necessary networks for Globo.com') 17 | parser.add_argument('--url', dest = 'url', required = True, help = 'Zabbix server address') 18 | parser.add_argument('--user', dest = 'user', required = True, help = 'Zabbix user') 19 | parser.add_argument('--password', dest = 'password', required = True, help = 'Zabbix password') 20 | parser.add_argument('--no-verbose', dest = 'verbose', action = 'store_false', help = 'Don\'t show any logs on screen') 21 | parser.add_argument('--verbose', dest = 'verbose', action = 'store_true') 22 | parser.set_defaults(verbose=False) 23 | parser.add_argument('--loglevel', dest = 'loglevel', default = 'ERROR', help = 'Debug level. DEBUG/INFO/WARNING/ERROR/CRITICAL') 24 | parser.add_argument('--fake', dest = 'fake', default = 0, help = 'Just fake the execution and simulate the result (BETA)') 25 | parser.add_argument('--no-move', dest = 'move', action = 'store_false', help = 'Manage only new vlans. Existing ones will not be moved between proxies (Better)') 26 | parser.add_argument('--move', dest = 'move', action = 'store_true') 27 | parser.set_defaults(move=False) 28 | 29 | # CUSTOM VARIABLES 30 | TIMEOUT = 30.0 #ZabbixAPI timeout 31 | # Grupos excluidos de operacoes no zabbix, de discovery 32 | forbiden_groups = ( '_DELETED', 'Cloud', 'Template', 'Zabbix' ) 33 | # Redes que sao removidas forcadamente. 
Nao ha discovery automatico dessas, abaixo 34 | networks_blacklist = {} 35 | networks_blacklist['rj'] = [ '10.0.0.0/16' ] 36 | networks_blacklist['sp'] = [ '10.1.0.0/16' ] 37 | DNS_SUFIX = 'suaempresa.com' # Concatenado nas buscas de ip/dns 38 | ZABBIX_SERVERS = { 39 | 'http://zabbix.suaempresa.com' : [ '10.0.1.1', '10.0.1.2' ], 40 | 'http://zabbix2.suaempresa.com' : [ 'localhost' ], 41 | } 42 | 43 | # OTHER VARIABLES 44 | args = parser.parse_args() 45 | fake = args.fake 46 | LOGFILE = '/tmp/zbx_discovery-manager.log' 47 | logger = LogPrint(echo=args.verbose, logfile=LOGFILE, loglevel=args.loglevel.upper()) 48 | loglevels = { 49 | 'CRITICAL' : 50, 50 | 'ERROR' : 40, 51 | 'WARNING' : 30, 52 | 'INFO' : 20, 53 | 'DEBUG' : 10 54 | } 55 | 56 | try: 57 | zapi = ZabbixAPI(args.url,timeout=TIMEOUT) 58 | zapi.login(args.user,args.password) 59 | except Exception, e: 60 | logger.error('Unable to login: {0}'.format(e)) 61 | exit(1) 62 | 63 | # CUSTOM FUNCTIONS 64 | def discovery_checks(): 65 | """ 66 | retornar o formato json com os discovery checks 67 | essa entrada eh manual 68 | """ 69 | dchecks = [ 70 | { 'uniq': '1', 'snmp_community': '{$SNMP_COMMUNITY}', 'type': '11', 'ports': '161', 'key_': 'sysName.0' }, 71 | { 'uniq': '0', 'snmp_community': '{$SNMP_COMMUNITY}', 'type': '11', 'ports': '161', 'key_': 'sysDescr.0' }, 72 | { 'uniq': '0', 'snmp_community': '{$SNMP_COMMUNITY}', 'type': '11', 'ports': '161', 'key_': 'sysContact.0' }, 73 | { 'uniq': '0', 'snmp_community': '{$SNMP_COMMUNITY}', 'type': '11', 'ports': '161', 'key_': 'MIB-Dell-10892::chassisModelName.1' }, 74 | { 'uniq': '0', 'snmp_community': '{$SNMP_COMMUNITY}', 'type': '11', 'ports': '161', 'key_': 'LSI-MegaRAID-SAS-MIB::productName.0' }, 75 | { 'uniq': '0', 'snmp_community': '{$SNMP_COMMUNITY}', 'type': '11', 'ports': '161', 'key_': 'F5-BIGIP-SYSTEM-MIB::sysProductName.0' }, 76 | { 'uniq': '0', 'snmp_community': '{$SNMP_COMMUNITY}', 'type': '11', 'ports': '161', 'key_': 'ENTITY-MIB::entPhysicalAlias.155',}, 
77 | { 'uniq': '0', 'snmp_community': '{$SNMP_COMMUNITY}', 'type': '11', 'ports': '161', 'key_': 'CPQSINFO-MIB::cpqSiProductName.0', }, 78 | ] 79 | return dchecks 80 | def discovery_rule(ip_range,proxy_hostid,proxy_name): 81 | """ 82 | criar/atualizar a regra dentro do zabbix, passando apenas o range de ip 83 | ignora o primeiro ip e os 2 ultimos 84 | aceita de /19 ate /32 85 | """ 86 | rule_name = 'SNMP_auto - {0}'.format(ip_range) 87 | ip = IPNetwork(ip_range) 88 | ip_list = list(ip) 89 | ip_range_small = '' 90 | if ip.prefixlen >= 30: 91 | ip_range_small = ip_range 92 | elif ip.prefixlen >= 24 and ip.prefixlen < 30: 93 | ip_range_small = '{0}-{1}'.format(str(ip_list[2]), str(ip_list[-4]).split('.')[3]) 94 | elif ip.prefixlen > 18 and ip.prefixlen < 24: 95 | nets = list(ip.subnet(24)) 96 | for index in range(nets.__len__()): 97 | ip_list = list(IPNetwork(nets[index])) 98 | if index == 0: # Primeiro 99 | ip_range_small = '{0}-{1}'.format(str(ip_list[2]), str(ip_list[-1]).split('.')[3]) 100 | elif index == (nets.__len__()-1): # Ultimo 101 | ip_range_small = '{0},{1}-{2}'.format(ip_range_small,str(ip_list[2]), str(ip_list[-4]).split('.')[3]) 102 | else: # Do meio 103 | ip_range_small = '{0},{1}-{2}'.format(ip_range_small,str(ip_list[1]), str(ip_list[-1]).split('.')[3]) 104 | else: 105 | logger.warning('Tamanho de rede invalido: {0}'.format(ip_range)) 106 | dchecks = discovery_checks() 107 | out={ 'druleids': 'fake data' } 108 | exists = zapi.drule.get(output=['name'],filter={'name': rule_name}) 109 | params = { 'name': rule_name, 'iprange': ip_range_small, 'delay': '86400', 'proxy_hostid': proxy_hostid, 'dchecks': dchecks, 'status': 1 } 110 | if exists.__len__() == 1: 111 | query = { 112 | 'output':[ 'druleids' ], 113 | 'search': { 'name': rule_name } 114 | } 115 | params['druleid'] = zapi.drule.get(**query)[0]['druleid'] 116 | if not args.move: 117 | del params['proxy_hostid'] 118 | if not fake: 119 | out = zapi.drule.update(**params) 120 | elif exists.__len__() == 
0: 121 | # Nao existe a regra de discovery ainda.. 122 | if not fake: 123 | out = zapi.drule.create(**params) #fecha zapi.drule.create 124 | else: 125 | logger.error('Too many discovery rules for {0}: {1}'.format(rule_name,exists.__len__())) 126 | 127 | if ( out or fake ): 128 | logger.debug('\t{0} --> rule {1} ({2})'.format(proxy_name,rule_name,out['druleids'])) 129 | else: 130 | logger.error('\tFAILED:\t{0} --> rule {1} ({2})'.format(proxy_name,rule_name,out['druleids'])) 131 | return 132 | def discovery_rule_per_proxy(): 133 | """ 134 | Distribuo as redes a serem descobertas nos proxies. 135 | """ 136 | weight_per_proxy = {} 137 | for local in network_ranges: 138 | weight_per_proxy[local] = ( network_ranges[local]['total_weight'] / len(proxies[local]) ) 139 | logger.debug('\tCada proxy do {0} devera ter {1} \'pesos\' de discovery'.format(local,weight_per_proxy[local])) 140 | 141 | # Busco as redes ja existentes e aloco os ranges nos proxies.. 142 | # Ignorarei la na frente esses ranges, para evitar realocacao 143 | drules_cache = discovery_rules() 144 | 145 | # Marco as redes como usadas, aquelas que ja estao alocadas 146 | if ( args.move == False): 147 | for rule in drules_cache: 148 | local = network_find(rule['iprange']) 149 | if ( local ): 150 | network_ranges[local][rule['iprange']]['used'] = True 151 | for i in proxies[local]: 152 | if ( i['proxyid'] == rule['proxy_hostid'] ): 153 | if ( i.get('total_weight', False) ): 154 | i['total_weight'] += network_ranges[local][rule['iprange']]['weight'] 155 | else: 156 | i['total_weight'] = network_ranges[local][rule['iprange']]['weight'] 157 | i['ranges'].append(rule['iprange']) 158 | break 159 | 160 | # Aloco as redes aos proxies com menor peso atribuido 161 | for local in network_ranges: 162 | for x in network_ranges[local]: 163 | if ( (str(x) == 'disabled') or (str(x) == 'total_weight') ): 164 | continue 165 | if ( type(network_ranges[local].get(x)) is DictType): 166 | if ( 
network_ranges[local].get(x).get('used', False) == True ): 167 | continue 168 | tmp_weight = int(network_ranges[local].get(x).get('weight')) 169 | proxy = proxies_low_weight(local) 170 | proxy['total_weight'] += tmp_weight 171 | logger.debug( '{0} --> add network {1} --> weight {2} --> acumulado {3}'.format(proxy['host'],x,tmp_weight,proxy['total_weight']) ) 172 | network_ranges[local].get(x)['used'] = True 173 | proxy['ranges'].append(x) 174 | proxies[local].sort(key=lambda d: d['host']) 175 | return 176 | def proxies_low_weight(local): 177 | """ 178 | Retorna o proxyid do proxy com o menor peso atribuido. 179 | Em caso de empate, a ordem do loop prevalece. 180 | """ 181 | proxies[local].sort(key=lambda d: d['total_weight']) 182 | for proxy in proxies[local]: 183 | return proxy 184 | def network_find(net): 185 | """ 186 | Busco por rede em qualquer local 187 | Retorno o local 188 | """ 189 | for local in network_ranges: 190 | if ( type(network_ranges[local].get(net)) is DictType ): 191 | return local 192 | return False 193 | def discovery_rules(): 194 | """ 195 | Retorna a lista de discovery rules existentes 196 | """ 197 | query = { 'output': 'extend', 'search': { 'name': 'SNMP_auto' } } 198 | return zapi.drule.get(**query) 199 | def discovery_change_status(status=0): 200 | """ 201 | Alterar status de todos os discoveries *auto* 202 | Status 0 = enable 203 | Status 1 = disable 204 | """ 205 | dchecks = discovery_checks() 206 | query = { 'output': [ 'druleid', 'iprange', 'name', 'proxy_hostid', 'status' ], 'search': { 'name': 'SNMP_auto' } } 207 | druleids = zapi.drule.get(**query) 208 | for i in druleids: 209 | if ( int(i['status']) == status ): 210 | logger.debug('Discovery rule already set at: {0} <==> {1}'.format(i['name'],status)) 211 | continue 212 | query = { 213 | 'name': i['name'], 'druleid': i['druleid'], 'iprange': i['iprange'], 214 | 'proxy_hostid': i['proxy_hostid'], 'dchecks': dchecks, 'status': status 215 | } 216 | if not fake: 217 | out = 
zapi.drule.update(**query) 218 | if out or fake: 219 | logger.debug('\tNew status: {0} ({1}) --> {2}'.format(i['name'],out['druleids'],status)) 220 | else: 221 | logger.warning('\tFAILED to change status: {0} ({1}) --> {2}'.format(i['name'],out['druleids'],status)) 222 | def network_api_get_ranges(): 223 | """ 224 | Conectar na network api e capturar as redes de BE 225 | -- 'be', 'producao', 'core/densidade' 226 | """ 227 | 228 | netapi = NetworkGet() 229 | for local in networks_blacklist: 230 | for x in networks_blacklist[local]: 231 | netapi.networks_blacklist[local].append(x) 232 | netapi.getNetworkAPI() 233 | return netapi.getNetwork() 234 | def zabbix_server_get(): 235 | """ 236 | 'Descobre' qual o zabbix server do ambiente 237 | A API nao possui essa informacao 238 | """ 239 | if ( args.url in ZABBIX_SERVERS.keys() ): 240 | return options[args.url] 241 | else: 242 | logger.error('ERROR: No zabbix_server know to reload for {0}.'.format(args.url)) 243 | def zabbix_server_reload(): 244 | """ 245 | Efetuar o reload no zabbix server 246 | Necessario tratar os dois possiveis servers!!! 247 | """ 248 | zabbix_servers = zabbix_server_get() 249 | for zabbix_server in zabbix_servers: 250 | print 'Reloading zabbix_server on {0}'.format(zabbix_server) 251 | cmd = 'ssh -l root {0} \'/etc/init.d/zabbix_server reload\''.format(zabbix_server) # Chamo o alias de reload do zabbix_server 252 | if not fake: 253 | p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) 254 | out, err = p.communicate() 255 | if ( (out.rstrip() == 'Reloading Zabbix Server: command sent successfully') or fake ): 256 | info_message = 'Reloaded: ', out.rstrip() 257 | info(info_message) 258 | else: 259 | break # Por enquanto ignoro os erros.. provavelmente o zbx_server nao esta rodando 260 | logger.error('ERROR: Failed to reload {0}. 
Message: {1}'.format(zabbix_server, out.rstrip())) 261 | return 262 | def proxy_distribute_rules(): 263 | """ 264 | Pegar os resultados ja quase consolidados e aplica-los 265 | A lista proxies['ranges'] deve ser considerada, para cada proxy 266 | Necessario pegar os dados do proxy e range, p/ usar no drule! 267 | """ 268 | dchecks = discovery_checks() 269 | for local in proxies: 270 | for x in proxies[local]: 271 | for i in x['ranges']: 272 | discovery_rule(i,x['proxyid'],x['host']) 273 | def proxies_get(): 274 | """ 275 | Conectar na API do zabbix e recuperar os proxies (host e proxyid) 276 | possiveis p/ uso 277 | """ 278 | proxies = {} 279 | proxies_search = {} 280 | #proxies_search['NOME NO NETDISCOVERY'] = 'NOME DO PROXY NO ZABBIX' 281 | proxies_search['rj'] = 'RJ' 282 | proxies_search['sp'] = 'SP' 283 | proxies_search['rj1'] = 'RJ' # compatibilidade entre nomes 284 | tmp_proxies = {} 285 | for local in proxies_search: 286 | proxies[local] = zapi.proxy.get(output=['host','proxyid'],search={'host': proxies_search[local]},selectHosts='extend') 287 | tmp_proxies[local] = [] 288 | for proxy in proxies[local]: 289 | proxy['newhosts'] = list() 290 | proxy['total_weight'] = 0 291 | proxy['ranges'] = list() 292 | if ( '-BE' in proxy['host'] ): 293 | tmp_proxies[local].append(proxy) 294 | del proxies,proxies_search 295 | if ( len(tmp_proxies) > 0 ): 296 | return tmp_proxies 297 | else: 298 | logger.error('\tFAILED: no proxies found, looking for \'{0}\''.format(proxies_search)) 299 | def proxy_decide_hosts(ip,hostid,host): 300 | """ 301 | Baseado no ip do host informado, decido para qual proxy ele deve ser direcionado 302 | utilizando o proxies(index)['ranges'] 303 | ip precisa de pelo menos 7 chars (1.1.1.1 = 7) 304 | """ 305 | if ( hostid.isspace() or host.isspace() or (len(ip) < 6 )): 306 | logger.error('Failed on proxy_decide_hosts({0},{1},{2}). 
Invalid params'.format(ip,hostid,host)) 307 | for local in proxies: 308 | for x in proxies[local]: 309 | for y in x['ranges']: 310 | if IPAddress(ip) in IPNetwork(y): 311 | logger.debug('ip match found for {0} ==> {1} ==> {2}'.format(x['host'],ip,y)) 312 | x['newhosts'].append(hostid) 313 | return 314 | logger.debug('No network found for host {0} with ip {1}'.format(host,ip)) 315 | return 316 | def hosts_get_all(): 317 | """ 318 | Identificar todos os hosts a serem migrados 319 | """ 320 | query = { 321 | 'output': [ 'name', 'host', 'hostid', 'proxy_hostid' ], 322 | 'selectInterfaces': [ 'dns', 'useip', 'ip', 'type', 'main' ], 323 | 'selectGroups': [ 'groupid', 'name' ], 324 | } 325 | hosts = zapi.host.get(**query) 326 | 327 | for host in hosts: 328 | stop = False 329 | for group in host['groups']: 330 | for forbiden_group in forbiden_groups: 331 | if ( forbiden_group in group['name'] ): 332 | stop = True 333 | break 334 | if (stop): 335 | logger.debug('Grupo {0} nao permitido'.format(group['name'])) 336 | continue 337 | 338 | for interface in host['interfaces']: 339 | if ( int(interface['main']) == 1 ): 340 | if ( int(interface['useip']) == 1): #Se for por IP 341 | ip = interface['ip'] 342 | else: #Se for por DNS, descubro qual o IP 343 | if ( DNS_SUFIX in interface['dns'] ): 344 | dns = interface['dns'] 345 | elif ('localhost' in interface['dns']): 346 | dns = 'localhost' 347 | else: 348 | dns = interface['dns'] + DNS_SUFIX 349 | 350 | # Como nao sei qual o ip, resolvo para poder decidir que rede usar 351 | if ( (dns) and (dns != DNS_SUFIX) ): 352 | try: 353 | ip = gethostbyname(dns) 354 | except: 355 | logger.print_json(host) 356 | logger.debug('Could not resolv {0}'.format(dns)) 357 | stop = True 358 | break 359 | else: 360 | logger.print_json(host) 361 | logger.debug('Could not determine the dns correctly') 362 | stop = True 363 | break 364 | if ( (IPAddress(ip) in IPNetwork('127.0.0.1/8')) or (IPAddress(ip) in IPNetwork('10.31.0.0/24')) ): 365 | stop = True 
366 | logger.debug('IP {0} nao permitido'.format(ip)) 367 | break 368 | if ( not stop ): 369 | proxy_decide_hosts(ip, host['hostid'], host['host']) 370 | def hosts_to_proxies(): 371 | """ 372 | Mover os hosts em proxies[proxy]['newhosts'] para o novo proxy 373 | Usar massupdate p/ isso, isso eh CRITICO 374 | Caso contrario podemos perder relacionamento de host com proxy 375 | ou perder muita performance 376 | """ 377 | for local in proxies: 378 | for proxy in proxies[local]: 379 | out = { 'newhosts': 'fake data' } 380 | if not fake: 381 | query = { 382 | 'proxy_hostid': proxy['proxyid'], 383 | 'hosts': [ { 'hostid': x } for x in proxy['newhosts'] ] 384 | } 385 | out = zapi.host.massupdate(**query) 386 | if out or fake: 387 | logger.debug('\tSUCCESS: updated proxy with {0} hosts: {1}'.format(len(proxy['newhosts']),proxy['host'])) 388 | if ( loglevels[args.loglevel.upper()] < loglevels['INFO'] ): 389 | logger.debug('Detailed hosts at proxy {0}'.format(proxy['host'])) 390 | logger.print_json(out) 391 | else: 392 | logger.warning('\tFAILED: updated proxy with {0} hosts: {1}'.format(len(proxy['newhosts']),proxy['host'])) 393 | 394 | #1) Identificar redes via network api 395 | logger.info('1) Descobrir as redes:') 396 | network_ranges = network_api_get_ranges() 397 | for local in network_ranges: 398 | logger.info('Achei {0} redes para {1}'.format((len(network_ranges[local]) -2),local) ) # diminuo dois para ignorar o disabled e total_weight 399 | 400 | #2) Identificar proxies disponiveis 401 | logger.info('2) Descobrir os proxies cadastrados:') 402 | proxies = proxies_get() 403 | for local in proxies: 404 | #logger.print_json(proxies[local]) 405 | logger.info('Achei {0} proxies para {1}'.format(len(proxies[local]),local)) 406 | 407 | #2.1) Calcular a distribuicao das redes por proxy 408 | #2.2) Atribuir peso de quantidade maxima de hosts por rede, e calcular assim por proxy 409 | discovery_rule_per_proxy() 410 | logger.debug('2.1) Total de ranges por proxy:') 411 | if ( 
loglevels[args.loglevel.upper()] < loglevels['INFO'] ): 412 | for local in proxies: 413 | for x in proxies[local]: 414 | logger.debug('\tRanges p/ {0} = {1}'.format(x['host'],len(x['ranges']))) 415 | 416 | #3) Distribuir de fato os discoveries nos proxies 417 | logger.info('3) Realocar e desabilitar todas as regras de discovery para os proxies') 418 | proxy_distribute_rules() 419 | 420 | #4) Identificar os hosts que devem ser migrados de proxy 421 | logger.info('4) Identificar os hosts que devem ser migrados de proxy') 422 | if ( args.move ): 423 | hosts_get_all() 424 | else: 425 | logger.info('\t--no-move utilizado. Nao necessario.') 426 | 427 | #5) Migrar esses hosts 428 | logger.info('5) Migrar esses hosts') 429 | if ( args.move ): 430 | hosts_to_proxies() 431 | else: 432 | logger.info('\t--no-move utilizado. Nao necessario.') 433 | 434 | #6) Sync dos dados (reload) 435 | logger.info('6) Reload zabbix server e esperar pelo sync dos proxies') 436 | logger.info('\tfuncao desligada') 437 | #zabbix_server_reload() 438 | 439 | #7) Ativar os discoveries 440 | logger.info('7) Ativar todos os discoveries') 441 | discovery_change_status(0) 442 | 443 | #8) Mostrar resumo dos pesos dos proxies 444 | logger.info('8) Resumo dos proxies') 445 | logger.info('\tLocal || Proxy\t\t|| Redes || Peso') 446 | for local in proxies: 447 | for prx in proxies[local]: 448 | logger.info('\t{0} || {1} || {2} || {3}'.format(local,prx['host'],len(prx['ranges']),prx['total_weight'])) 449 | 450 | #9) Finish! 451 | logger.debug('finished.. debug log') 452 | logger.print_json(proxies) 453 | 454 | logger.info('Terminei.. 
gastei alguns segundos preciosos..') 455 | zapi.user.logout() 456 | exit(0) 457 | -------------------------------------------------------------------------------- /zbx_historyGet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # Copyright (c) 2016, Globo.com 3 | # This file is part of globocom/zabbix-scripts 4 | # (see https://github.com/globocom/zabbix-scripts). 5 | # License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause 6 | from os import path 7 | from sys import argv, exit 8 | from pyzabbix import ZabbixAPI 9 | from argparse import ArgumentParser 10 | from time import localtime,mktime 11 | from logprint import LogPrint 12 | 13 | parser = ArgumentParser(description = 'Collect history data from selected items') 14 | parser.add_argument('--url', required = True, dest = 'url', help = 'Zabbix server address') 15 | parser.add_argument('--user', required = True, dest = 'user', help = 'Zabbix user') 16 | parser.add_argument('--password', required = True, dest = 'password', help = 'Zabbix password') 17 | parser.add_argument('--no-verbose', dest = 'verbose', action = 'store_false', help = 'Dont show any logs on screen') 18 | parser.add_argument('--verbose', dest = 'verbose', action = 'store_true') 19 | parser.set_defaults(verbose=False) 20 | parser.add_argument('--group', required = True, dest = 'group', help = 'Hostgroup name with hosts to look for') 21 | parser.add_argument('--item', required = True, dest = 'item', help = 'Item name inside each host of hostgroup') 22 | parser.add_argument('--loglevel', dest = 'loglevel', default = 'ERROR', help = 'Debug level. 
DEBUG/INFO/WARNING/ERROR/CRITICAL') 23 | args = parser.parse_args() 24 | 25 | TIMEOUT = 15.0 26 | LOGFILE = "/tmp/%s.log" % path.basename(argv[0]) 27 | logger = LogPrint(echo=args.verbose, logfile=LOGFILE, loglevel=args.loglevel.upper()) 28 | 29 | try: 30 | zapi = ZabbixAPI(args.url,timeout=TIMEOUT) 31 | zapi.login(args.user,args.password) 32 | except Exception, e: 33 | logger.error("Unable to login: %s" % (e)) 34 | exit(1) 35 | 36 | groupids = zapi.hostgroup.get(output=['groupid'],search={'name': args.group }) 37 | 38 | itens = zapi.item.get(output=['name','itemid','value_type','delay'],groupids=[x['groupid'] for x in groupids], 39 | search={'name': args.item },filter={'status': 0, 'state': 0}, 40 | selectHosts=['name'],sortorder='ASC',sortfield='itemid') 41 | value_type = itens[0]['value_type'] 42 | time_from = mktime(localtime()) - int(itens[0]['delay']) - 15 43 | 44 | history = zapi.history.get(output='extend',history=value_type,itemids=[x['itemid'] for x in itens], 45 | time_from=time_from) 46 | 47 | def get_last_history(itemid,history): 48 | lastclock = int(0) 49 | for h in history: 50 | if h['itemid'] == itemid and lastclock == 0: 51 | value = h['value'] 52 | lastclock = h['clock'] 53 | elif h['itemid'] == itemid and h['clock'] > lastclock: 54 | value = h['value'] 55 | lastclock = h['clock'] 56 | return { 'value': value, 'clock': lastclock } 57 | 58 | 59 | for item in itens: 60 | x = get_last_history(item['itemid'],history) 61 | logger.info("Host {0}, itemid {1}, value {2}, clock {3}".format(item['hosts'][0]['name'],item['itemid'],x['value'],x['clock'])) 62 | 63 | logger.info("Fim") 64 | zapi.user.logout() 65 | -------------------------------------------------------------------------------- /zbx_hostgroupOrganizer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # Copyright (c) 2016, Globo.com 3 | # This file is part of globocom/zabbix-scripts 4 | # (see 
https://github.com/globocom/zabbix-scripts). 5 | # License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause 6 | from types import UnicodeType 7 | from os import path 8 | from sys import argv, exit 9 | from pyzabbix import ZabbixAPI 10 | from argparse import ArgumentParser 11 | from logprint import LogPrint 12 | 13 | parser = ArgumentParser(description = 'Creates a hostgroup for each proxy, and adds all hosts monitored by it. Also, interacts with all hosts in Operacao organizing it.') 14 | parser.add_argument('--url', dest = 'url', help = 'Zabbix server address') 15 | parser.add_argument('--user', dest = 'user', help = 'Zabbix user') 16 | parser.add_argument('--password', dest = 'password', help = 'Zabbix password') 17 | parser.add_argument('--no-verbose', dest = 'verbose', action = 'store_false', help = 'Dont show any logs on screen') 18 | parser.add_argument('--verbose', dest = 'verbose', action = 'store_true') 19 | parser.set_defaults(verbose=False) 20 | parser.add_argument('--loglevel', dest = 'loglevel', default = 'ERROR', help = 'Debug level. 
DEBUG/INFO/WARNING/ERROR/CRITICAL') 21 | args = parser.parse_args() 22 | 23 | TIMEOUT = 5.0 24 | LOGFILE = '/tmp/%s.log' % path.basename(argv[0]) 25 | logger = LogPrint(echo=args.verbose, logfile=LOGFILE, loglevel=args.loglevel.upper()) 26 | HGS = [ 'Operacao', 'Operacao::Servico' ] 27 | 28 | try: 29 | zapi = ZabbixAPI(args.url,timeout=TIMEOUT) 30 | zapi.login(args.user,args.password) 31 | except Exception, e: 32 | logger.error('Unable to login: {0}'.format(e)) 33 | exit(1) 34 | 35 | def hg_cache(): 36 | return zapi.hostgroup.get(output=['name']) 37 | 38 | def hg_search(name): 39 | for hgx in hg_names: 40 | if ( hgx['name'] == name ): 41 | return hgx['groupid'] 42 | return False 43 | 44 | def hg_find(name): 45 | ret = [] 46 | for hgx in hg_names: 47 | if ( name in hgx['name'] ): 48 | ret.append(hgx['groupid']) 49 | if (len(ret) > 0): 50 | return(ret) 51 | else: 52 | return(False) 53 | 54 | def api_validate(json_in): 55 | if ( type(json_in.get('message')) is UnicodeType ): 56 | if ( json_in['message'] == 'Invalid params.' ): 57 | logger.error('Invalid params. 
Check query below.') 58 | logger.print_json(json_in) 59 | return False 60 | return True 61 | 62 | def hg_massupdate(hostsJson,hostgroupName=False,hostgroupId=False): 63 | if hostgroupId: 64 | hostgroupid = hostgroupId 65 | hostgroupMessage = 'id {0}'.format(hostgroupId) 66 | else: 67 | hostgroupid = hg_search(hostgroupName) 68 | hostgroupMessage = hostgroupName 69 | query = { 'groups': [ { 'groupid': hostgroupid } ], 'hosts': hostsJson } 70 | logger.debug(query) 71 | try: 72 | out = zapi.hostgroup.massupdate(**query) 73 | if ( api_validate(out) ): 74 | logger.info('Updated hostgroups: {0}'.format(hostgroupMessage)) 75 | else: 76 | logger.warning('Error when updating hosts: {0}'.format(out['message'])) 77 | except Exception, e: 78 | logger.warning('API Exception when updating hosts: {0}'.format(e)) 79 | 80 | def hg_cleangroup(hostgroupId=None): 81 | hostids = zapi.host.get(groupids=hostgroupId,output=['hostid']) 82 | zapi.hostgroup.massremove(groupids=hostgroupId,hostids=[ x['hostid'] for x in hostids ]) 83 | return 84 | 85 | def operacao(): 86 | # sanity check (avoid zabbix api errors when overwriting Operacao hostgroup) 87 | hg_operacao = hg_search('Operacao') 88 | if not hg_operacao: 89 | logger.error('Hostgroup not found') 90 | return 91 | hosts_operacao = zapi.host.get(output=['hostid'],selectGroups=['groupid','name'],groupids=[ hg_operacao ]) 92 | for host in hosts_operacao: 93 | if host['groups'].__len__() == 1: 94 | logger.warning('Hostid {0} is only at hostgroup {1}({2})'.format(host['hostid'],host['groups'][0]['name'],host['groups'][0]['groupid'])) 95 | 96 | for HG in HGS: 97 | logger.info('Starting \'{0}\'...'.format(HG)) 98 | hgs_operacao = hg_find('{0}::'.format(HG)) 99 | hosts_operacao = zapi.host.get(output=['hostid'],groupids=[ x for x in hgs_operacao ]) 100 | hg_massupdate(hostsJson=hosts_operacao,hostgroupName=HG) 101 | return 102 | 103 | def proxy(): 104 | proxies = zapi.proxy.get(output=['host'],sortfield=['host']) 105 | for p in proxies: 106 | 
hg_name = 'Zabbix::Proxy::{0}'.format(p['host']) 107 | logger.info('Starting \'{0}\''.format(hg_name)) 108 | hg_proxy = hg_search(hg_name) 109 | if not hg_proxy: 110 | logger.debug('Creating hostgroup: {0}'.format(hg_name)) 111 | out = zapi.hostgroup.create(name=hg_name) 112 | logger.print_json(out) 113 | hg_proxy = out['groupids'][0] 114 | hosts_proxy = zapi.host.get(output=['hostid'],proxyids=[p['proxyid']]) 115 | hg_cleangroup(hostgroupId=hg_proxy) 116 | hg_massupdate(hostsJson=hosts_proxy,hostgroupId=hg_proxy) 117 | return 118 | 119 | try: 120 | hg_names = hg_cache() 121 | operacao() 122 | pass 123 | except Exception, e: 124 | logger.error('Failed to organize operacao hostgroups: {0}'.format(e)) 125 | 126 | try: 127 | hg_names = hg_cache() 128 | proxy() 129 | except Exception, e: 130 | logger.error('Failed to organize proxy hostgroups: {0}'.format(e)) 131 | 132 | logger.info('Fim') 133 | zapi.user.logout() 134 | --------------------------------------------------------------------------------