├── AWS └── snapshot_report.sh ├── LICENSE ├── MHA ├── dead_master.sh ├── external.sh └── switch.sh ├── PARTITION_MGMT ├── README ├── pdb-parted └── pdb-parted-MariaDB-el9 ├── README.md ├── alter_progress.sh ├── archiver ├── README ├── archive.sh └── ok2drop.sh ├── backup ├── README ├── backup_schedule ├── check_backups.sh ├── etc_default_backup_full ├── etc_default_backup_incr ├── pdb_backup_status.sh ├── pdb_cons_backup.sh ├── pdb_full_backup.sh ├── pdb_full_enc_backup.sh ├── pdb_incr_backup.sh ├── restore.sh └── workdir.tar ├── backup_manager ├── INSTALL ├── README ├── backup_manager └── config.txt ├── bakasql ├── BUGS_AND_LIMITATIONS ├── CHANGELOG ├── README ├── bakasql.cgi ├── bakasql.conf ├── bakautils.c ├── images │ ├── bakasql.png │ ├── bb.png │ └── favicon.png ├── screenshot.pdf └── version.h ├── binlog_parser ├── README ├── binlog_parser.c └── binlog_tracker.sh ├── buffer_warmer.sh ├── bump_autoinc.sh ├── change_binlog_path.sh ├── compress_binlog.sh ├── dbgranter ├── README └── granter.cgi ├── findmax.sh ├── general_logger.sh ├── genlogfilter.c ├── innodb_sampler ├── parser_55.c ├── parser_56.sh └── sampler.sh ├── innodb_watcher.sh ├── ioping-0.8-PZ ├── Makefile ├── README ├── changelog ├── ioping ├── ioping.1 ├── ioping.c ├── ioping.man ├── ioping.o ├── ioping.spec └── version ├── magic_change_master.sh ├── migrate2innodb.c ├── move_grants.sh ├── mysql_case_insensitive_replace.sql ├── nagios ├── mysql_check_max_connections.sh └── mysql_check_partitions.sh ├── osc_progress.sh ├── parallel_import ├── Config ├── README ├── run.sh └── single.sh ├── pz-arrayperf.sh ├── pz-maria-multi-skip.sh ├── pz-slave-monitor.sh ├── pzdd.sh ├── remaster.sh ├── rotate_slow_logs.sh ├── rtm ├── README ├── binspector.c └── rtm ├── show_grants.sh ├── simple_warmup.sh ├── table_growth ├── README ├── pz-schema-growth.sh ├── pz-table-growth.sh ├── run_and_mail.sh └── sample.sh ├── truelag.sh ├── tungsten_osc_babysitter.sh └── zrm_print.sh /AWS/snapshot_report.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # reports about snapshots for volumes mounted on the instance 4 | # assumes interesting volumes are tagged with $TAG 5 | # rpizzi@blackbirdit.com 6 | # 7 | TAG="Cluster" 8 | # 9 | yesterday=$(date --date yesterday "+%Y-%m-%d") 10 | today=$(date --date today "+%Y-%m-%d") 11 | instance_id=$(wget -qO- http://169.254.169.254/latest/meta-data/instance-id) 12 | for vol_id in $(ec2-describe-volumes --filter attachment.instance-id=$instance_id | grep ^VOLUME | cut -f 2) 13 | do 14 | thistag=$(ec2-describe-volumes --filter attachment.instance-id=$instance_id --filter volume-id=$vol_id | grep ^TAG | fgrep "$TAG" | cut -f 5) 15 | [ "$thistag" = "" ] && continue # assuming root disk since untagged 16 | last=$(ec2-describe-snapshots -o self --filter "volume-id=$vol_id" --filter "status=completed" | cut -f 5 | sort -r | head -1 | cut -d"T" -f 1) 17 | case "$last" in 18 | '') last="NEVER"; status="ERROR";; 19 | $yesterday) last="YESTERDAY"; status="OK";; 20 | $today) last="TODAY"; status="OK";; 21 | *) status="ERROR";; 22 | esac 23 | printf "%s: last successful snapshot for %s %s was %s\n" "$status" "$vol_id" "($thistag)" "$last" 24 | done 25 | exit 0 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014-2016 Rick Pizzi 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, 8 | this list of conditions and the following disclaimer. 

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/MHA/dead_master.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# MHA script to perform manual master failover when master is dead
# usage: dead_master.sh <cluster-id> <dead master host>
# rpizzi@blackbirdit.com
#
if [ $# -ne 2 ]
then
	echo "usage: $0 [cluster-id] [dead master]"
	exit 1
fi
# the cluster must have its MHA config file under /etc
if [ ! -f "/etc/mha_$1.cnf" ]
then
	echo "cluster-id $1 not found"
	exit 1
fi
# the dead master must be one of the hosts configured for this cluster
check=$(grep "^hostname=$2\$" "/etc/mha_$1.cnf")
if [ "$check" = "" ]
then
	echo "specified master $2 not configured for this cluster"
	exit 1
fi
masterha_master_switch --master_state=dead --conf="/etc/mha_$1.cnf" --dead_master_host="$2"
exit 0
--------------------------------------------------------------------------------
/MHA/external.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# MHA custom script for online master failover:
# sets the old master read-only when MHA issues the "stop" command
# rpizzi@blackbirdit.com
#
# "$@" (not $*) so arguments containing spaces survive word splitting
for arg in "$@"
do
	option=$(echo "$arg" | cut -d"=" -f 1)
	# -f 2- keeps values that themselves contain an "=" (e.g. passwords)
	value=$(echo "$arg" | cut -d"=" -f 2-)
	case "$option" in
		'--command') command="$value";;
		'--orig_master_ip') orig_ip="$value";;
		'--new_master_ip') new_ip="$value";;
		'--orig_master_user') orig_user="$value";;
		'--new_master_user') new_user="$value";;
		# MHA backslash-escapes special characters in passwords; strip the escapes
		'--orig_master_password') orig_password=$(echo "$value" | tr -d "[\\\]");;
		'--new_master_password') new_password=$(echo "$value" | tr -d "[\\\]");;
	esac
done
case "$command" in
	'stop') echo "===> setting read_only=ON on current master"
		mysql -u "$orig_user" -h "$orig_ip" -p"$orig_password" -e "set global read_only=ON"
		;;
	'start') ;;
	*) echo "unknown command $command"; exit 1;;
esac
exit 0
--------------------------------------------------------------------------------
/MHA/switch.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# MHA script to perform manual master failover
# rpizzi@blackbirdit.com
#
if [ $# -ne 2 ]
then
	echo "usage: $0 [cluster-id] [new master]"
	exit 1
fi
if [ !
-f /etc/mha_$1.cnf ] 12 | then 13 | echo "cluster-id $1 not found" 14 | exit 1 15 | fi 16 | check=$(grep "^hostname=$2\$" /etc/mha_$1.cnf) 17 | if [ "$check" = "" ] 18 | then 19 | echo "candidate master $2 not configured" 20 | exit 1 21 | fi 22 | masterha_master_switch --master_state=alive --conf=/etc/mha_$1.cnf --new_master_host=$2 --orig_master_is_new_slave 23 | exit 0 24 | -------------------------------------------------------------------------------- /PARTITION_MGMT/README: -------------------------------------------------------------------------------- 1 | Partition management tool originally developed by PalominoDB, inc. 2 | that I have adopted myself since few years. 3 | 4 | For el9 (Rocky 9, etc) and later, on MariaDB, please use the el9 version of the script, 5 | which leverages the DBD::MariaDB module instead of the old DBD::mysql one, which does not 6 | work anymore. 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | A suite of shell scripts and C programs that I wrote to help with my everyday tasks 2 | as a MySQL DBA. 3 | -------------------------------------------------------------------------------- /alter_progress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # script that detects an ALTER table being executed by the replication SQL thread 4 | # and prints a pretty progress bar with an ETA every minute 5 | # riccardo.pizzi@rumbo.com 6 | # 7 | echo -n "Password: " 8 | stty -echo 9 | read password 10 | stty echo 11 | echo 12 | table=$(echo "select concat(db, '.', substring_index(substring_index(info, ' ', -4), ' ', 1)) from processlist where left(info, 5) = 'alter'" | mysql -ANr -p$password information_schema) 13 | if [ "$table" = "" ] 14 | then 15 | echo "No alter table coming via replication at this time. Exiting." 
16 | exit 1 17 | fi 18 | echo -n "Counting rows in $table, please stand by... " 19 | count=$(echo "select count(*) from $table" | mysql -ANr -p$password) 20 | echo "done." 21 | prev_rows=0 22 | left=0 23 | while true 24 | do 25 | rows=$(echo "select rows_read from information_schema.processlist where left(info, 5) = 'alter'" | mysql -ANr -p$password) 26 | [ "$rows" = "" ] && break 27 | perc=$(printf "%2.2f\n" $(echo "scale=4; $rows / $count * 100" | bc)) 28 | if [ $prev_rows -gt 0 ] 29 | then 30 | rows_done=$(($rows - $prev_rows)) 31 | left=$(echo "scale=2; ($count - $rows) / $rows_done * 60" | bc) 32 | fi 33 | prev_rows=$rows 34 | echo "select ' 5 10 15 20 25 30 35 40 45 50 55 60 65 70 75 80 85 90 95 100' as progress union select concat(if($perc > 5, 'XXX', '___'), if($perc > 10, 'XXX', '___'), if($perc > 15, 'XXX', '___'), if($perc > 20, 'XXX', '___'), if($perc > 25, 'XXX', '___'), if($perc > 30, 'XXX', '___'), if($perc > 35, 'XXX', '___'), if($perc > 40, 'XXX', '___'), if($perc > 45, 'XXX', '___'), if($perc > 50, 'XXX', '___'), if($perc > 55, 'XXX', '___'), if($perc > 60, 'XXX', '___'), if($perc > 65, 'XXX', '___'), if($perc > 70, 'XXX', '___'), if($perc > 75, 'XXX', '___'), if($perc > 80, 'XXX', '___'), if($perc > 85, 'XXX', '___'), if($perc > 90, 'XXX', '___'), if($perc > 95, 'XXX', '___'), '____ ', $perc, '% ETA: ', IF($left > 0, SEC_TO_TIME($left), '--'))" | mysql -ANr -p$password 35 | sleep 60 36 | done 37 | echo "ALTER is over." 38 | exit 0 39 | -------------------------------------------------------------------------------- /archiver/README: -------------------------------------------------------------------------------- 1 | A couple bash scripts to automate archiviation of tables. 2 | These can be used to automatically archive old partitions before dropping them. 
 3 | 4 | In order to archive a table, the following requirements must be met: 5 | - table must be partitioned by range 6 | - primary key must be an auto increment integer (more precisely, it must start with an auto increment integer) 7 | - you need to use the same retention in these scripts and in the partition management script 8 | 9 | There are two scripts: 10 | - archive.sh is the actual archiving script 11 | - ok2drop.sh is a script that checks that the partition has been successfully archived, and exits with zero on success 12 | 13 | The archiving system is based on the retention that you have set. 14 | On the 1st day of each month (assuming you have monthly partitions) the archiving script will detect that there is a 15 | partition that needs to be archived; this is computed based on the set retention. For example, if you have a retention of 16 | three months, it will look for the partition that has data older than 3 months. 17 | After checking in the destination (archive) table that the data isn't there yet, it will archive it using pt-archiver. 18 | On the following days the archiving script will detect that the partition has already been archived and will just exit, 19 | until the first day of the next month. 20 | 21 | PLEASE NOTE: 22 | 23 | - origin and destination servers are defined at the top of the scripts 24 | - the schema name used on the destination server is set to the origin schema name. Trivial to change this if you need to 25 | 26 | Partition management should be handled separately, please see the pdb-parted utility that you can find on my GitHub page. 27 | The supplied script ok2drop.sh can be used in conjunction with pdb-parted (or your partition management script of choice) 28 | to check whether the partition has been successfully archived and therefore can be safely dropped. See example below. 29 | 30 | IMPORTANT: all checks are made based on the MAX value for the auto increment PK. These scripts will not work if you have
32 | 33 | USAGE 34 | 35 | archive.sh source_schema source_table pk_name partitioning_column destination_table optional_where 36 | 37 | source_schema schema of source table on origin server 38 | source_table name of source table on origin server 39 | pk_name name of the auto increment column which is part of your primary key 40 | partitioning_column name of the column you used to partition by range 41 | destination_table name of the destination table on destination server, may be same as source_table 42 | optional_where a double quote enclosed optional where clause to filter rows to archive 43 | 44 | eg. 45 | 46 | # without any filter, same table name to different server (set inside script) 47 | ./archive.sh stats REQUEST_XML ID REQ_DATE REQUEST_XML 48 | # with filter, different destination table name 49 | ./archive.sh stats REQUEST_XML ID REQ_DATE REQUEST_XML_ARCHIVED "AND SOURCE_ENGINE IN ('HIKER', 'ONEWAY', 'FUSION')" 50 | 51 | 52 | EXAMPLE USAGE with pdb-parted 53 | 54 | # partition add 55 | /usr/local/dba/sbin/pdb-parted --add --interval m +6m h=10.10.5.128,D=source_schema,t=source_table,u=partman,p=secret 56 | # partition drop 57 | /localhome/dbadm/ARCHIVER/bin/ok2drop.sh source_schema source_table destination_table "where condition" && /usr/local/dba/sbin/pdb-parted --drop -12m h=10.10.5.128,D=source_schema,t=source_table,u=partman,p=secret 58 | -------------------------------------------------------------------------------- /archiver/archive.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # main archiver script - v. 
1.03 4 | # assumes: 5 | # - table is partitioned by range 6 | # - (leftmost part of) primary key is an auto_increment integer 7 | # requires: 8 | # - latest version of percona toolkit (pt-archiver >= 3.0.4) 9 | # - partition boundary is monday if weekly 10 | # 11 | SOURCE=localhost # source MySQL server 12 | USER=mariadbadmin # user on source server 13 | PASS= # MySQL password for such user, leave blank to use dot file 14 | DEST= # destination MySQL server; if empty, target must be archiving path 15 | RETENTION=15 # retention as defined in partition management scripts, MUST match 16 | RETENTION_UNIT="week" # retention unit: week, month, year 17 | # 18 | if [ $# -lt 5 ] 19 | then 20 | echo "usage: $0 " 21 | exit 1 22 | fi 23 | KEY=$3 24 | EXTRA=$6 25 | if [ "$DEST" = "" ] 26 | then 27 | if [ ! -d $5 ] 28 | then 29 | echo "$5 is not a valid folder" 30 | exit 1 31 | fi 32 | [ ! -d $5/$(date +%Y) ] && mkdir $5/$(date +%Y) 33 | else 34 | TARGET=$5 35 | fi 36 | [ "$PASS" != "" ] && password="-p $PASS" 37 | partition_function=$(echo "SHOW CREATE TABLE $1.$2" | mysql -ANr -u$USER $password -h $SOURCE 2>/dev/null| fgrep "PARTITION BY RANGE" | tr -d '`') 38 | if [ $(echo "$partition_function" | fgrep -c " DIV ") -eq 1 ] 39 | then 40 | partition_key_type="UNIX_TIMESTAMP" 41 | div=$(echo "$partition_function" | sed -re "s/(.*)Fecha(.*)\)/\2/") 42 | else 43 | partition_key_type=$(echo "$partition_function" | cut -d"(" -f 2) 44 | fi 45 | case "${partition_key_type^^}" in 46 | 'UNIX_TIMESTAMP') partition_filter="FROM_UNIXTIME";; 47 | 'TO_DAYS') partition_filter="FROM_DAYS";; 48 | *) echo "$1.$2: doesn't look like a partitioned table!"; exit 1;; 49 | esac 50 | ref_date=$(date "+%Y-%m-%d") 51 | if [ "$RETENTION_UNIT" = "week" ] 52 | then 53 | # make sure we archive based on monday 54 | if [ $(date +%w) -ne 1 ] 55 | then 56 | ref_date=$(date -d "last monday" "+%Y-%m-%d") 57 | fi 58 | fi 59 | last_partition_name=$(echo "SELECT PARTITION_NAME FROM information_schema.PARTITIONS where 
PARTITION_NAME is not null AND TABLE_NAME = '$2' and TABLE_SCHEMA='$1' AND TABLE_ROWS > 0 AND PARTITION_DESCRIPTION < $partition_key_type('$ref_date' - interval $RETENTION $RETENTION_UNIT)" | mysql -ANr -u$USER $password -h $SOURCE 2>/dev/null| tail -1) 60 | if [ "$last_partition_name" = "" ] 61 | then 62 | echo "$1.$2: no partition to archive." 63 | exit 0 64 | fi 65 | echo "$1.$2: archive up to partition: $last_partition_name" 66 | if [ "$EXTRA" = "" ] 67 | then 68 | last_key_to_archive=$(echo "select MAX($KEY) from $2 partition($last_partition_name)" | mysql -ANr -u$USER $password -h $SOURCE $1 2>/dev/null) 69 | else 70 | last_key_to_archive=$(echo "select $KEY from $2 partition($last_partition_name) where $EXTRA order by 1 desc limit 1" | mysql -ANr -u$USER $password -h $SOURCE $1 2>/dev/null) 71 | fi 72 | echo "$1.$2: last $KEY value in $last_partition_name: $last_key_to_archive" 73 | if [ "$DEST" != "" ] 74 | then 75 | last_archive_key=$(echo "select MAX($KEY) from $TARGET" | mysql -ANr -u$USER -p$PASS -h $DEST $1 2>/dev/null) 76 | else 77 | last_archive_key=$(tail -1 $5/$SOURCE/$1/$2/*_$last_partition_name.sql 2>/dev/null | cut -d"(" -f 2 | cut -d"," -f 1) 78 | [ "$last_archive_key" = "" ] && last_archive_key=0 79 | fi 80 | echo "$1.$2: last $KEY value in archive: $last_archive_key" 81 | if [ "$last_archive_key" = "$last_key_to_archive" ] 82 | then 83 | echo "$1.$2: already archived, nothing to do." 84 | exit 0 85 | fi 86 | if [ $last_archive_key -gt $last_key_to_archive ] 87 | then 88 | echo "$1.$2: something is wrong, archive has higher $KEY value than source!" 89 | exit 1 90 | fi 91 | source_charset=$(echo "SHOW CREATE TABLE $1.$2" | mysql -ANr -u$USER $password -h $SOURCE | fgrep "DEFAULT CHARSET" | cut -d"=" -f 4 | cut -d " " -f 1 2>/dev/null) 92 | [ "$PASS" != "" ] && pt_password="p=$PASS," 93 | echo "$1.$2: archiving started" 94 | if [ "$DEST" = "" ] 95 | then 96 | [ ! 
-d $5/$SOURCE/$1/$2 ] && mkdir -p $5/$SOURCE/$1/$2 97 | if [ "$EXTRA" = "" ] 98 | then 99 | echo "$1.$2: executing: mysqldump --single-transaction --skip-extended-insert --compact -u$USER -p***** --where=\"$KEY <= $last_key_to_archive\" $1 $2 " 100 | time mysqldump --single-transaction --skip-extended-insert --compact -u$USER $password --where="$KEY <= $last_key_to_archive" $1 $2 > $5/$SOURCE/$1/$2/$(date +%Y%m%d)_$last_partition_name.sql 101 | else 102 | # need fixed pt-archiver --no-version-check --no-delete --limit 10000 --txn-size 10000 --statistics --source h=$SOURCE,u=$USER,${pt_password}D=$1,t=$2,A=$source_charset --file $arcfile --where "$KEY <= $last_key_to_archive AND $EXTRA" 103 | fi 104 | else 105 | if [ "$EXTRA" = "" ] 106 | then 107 | # need fixed pt-archiver --no-version-check --no-delete --limit 10000 --txn-size 10000 --statistics --source h=$SOURCE,u=$USER,${pt_password}D=$1,t=$2,A=$source_charset --dest h=$DEST,t=$TARGET,u=$USER,p=$PASS,S=/db/data/mysql.sock --where "$KEY <= $last_key_to_archive" 108 | else 109 | # need fixed pt-archiver --no-version-check --no-delete --limit 10000 --txn-size 10000 --statistics --source h=$SOURCE,u=$USER,${pt_password}D=$1,t=$2,A=$source_charset --dest h=$DEST,t=$TARGET,u=$USER,p=$PASS,S=/db/data/mysql.sock --where "$KEY <= $last_key_to_archive AND $EXTRA" 110 | fi 111 | fi 112 | status=$? 113 | echo "$1.$2: archiving completed with status $status." 114 | exit $status 115 | 116 | -------------------------------------------------------------------------------- /archiver/ok2drop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # checks whether retention needs (and can) applied to a partitioned table 4 | # you can use the exit status to run the table dropping script, e.g. 
pdb-parted, see examples 5 | # 6 | SOURCE=localhost # source MySQL server 7 | USER=mariadbadmin # MySQL user for archiviation (requires SELECT on source tables, INSERT on dest tables) 8 | PASS= # MySQL password for such user 9 | DEST= # destination MySQL server, or mount point for archive files 10 | RETENTION=16 # retention as defined in partition management scripts, MUST match 11 | RETENTION_UNIT="week" # retention unit: week, month, year 12 | # 13 | if [ $# -lt 4 ] 14 | then 15 | echo "usage: $0
" 16 | exit 1 17 | fi 18 | KEY=$3 19 | TARGET=$4 20 | EXTRA=$5 21 | [ "$PASS" != "" ] && password="-p $PASS" 22 | partition_function=$(echo "SHOW CREATE TABLE $1.$2" | mysql -ANr -u$USER $password -h $SOURCE 2>/dev/null| fgrep "PARTITION BY RANGE" | tr -d '`') 23 | if [ $(echo "$partition_function" | fgrep -c " DIV ") -eq 1 ] 24 | then 25 | partition_key_type="UNIX_TIMESTAMP" 26 | div=$(echo "$partition_function" | sed -re "s/(.*)Fecha(.*)\)/\2/") 27 | else 28 | partition_key_type=$(echo "$partition_function" | cut -d"(" -f 2) 29 | fi 30 | case "${partition_key_type^^}" in 31 | 'UNIX_TIMESTAMP') partition_filter="FROM_UNIXTIME";; 32 | 'TO_DAYS') partition_filter="FROM_DAYS";; 33 | *) echo "$1.$2: doesn't look like a partitioned table!"; exit 1;; 34 | esac 35 | last_partition_name=$(echo "SELECT PARTITION_NAME FROM information_schema.PARTITIONS where PARTITION_NAME is not null AND TABLE_NAME = '$2' and TABLE_SCHEMA='$1' AND TABLE_ROWS > 0 AND PARTITION_DESCRIPTION <= $partition_key_type(current_date - interval $RETENTION $RETENTION_UNIT)" | mysql -ANr -u$USER $password -h $SOURCE 2>/dev/null | tail -1) 36 | if [ "$last_partition_name" = "" ] 37 | then 38 | echo "$1.$2: nothing to do, exiting" 39 | exit 1 40 | fi 41 | echo "$1.$2: checking status of last partition that should be in archive, $last_partition_name ..." 
42 | if [ "$EXTRA" = "" ] 43 | then 44 | last_key_to_archive=$(echo "select MAX($KEY) from $2 partition($last_partition_name)" | mysql -ANr -u$USER $password -h $SOURCE $1 2>/dev/null) 45 | else 46 | last_key_to_archive=$(echo "select $KEY from $2 partition($last_partition_name) where $EXTRA order by 1 desc limit 1" | mysql -ANr -u$USER $password -h $SOURCE $1 2>/dev/null) 47 | fi 48 | [ "$last_key_to_archive" = "" ] && exit 1 49 | echo "$1.$2: Last $KEY value in $last_partition_name: $last_key_to_archive" 50 | if [ "$DEST" = "" ] 51 | then 52 | last_archive_key=$(tail -1 $4/$SOURCE/$1/$2/*_$last_partition_name.sql 2>/dev/null | cut -d"(" -f 2 | cut -d"," -f 1) 53 | [ "$last_archive_key" = "" ] && last_archive_key="not found" 54 | echo "$1.$2: Last $KEY value from $4/$SOURCE/$1/$2/*_$last_partition_name.sql: $last_archive_key" 55 | else 56 | last_archive_key=$(echo "select MAX($KEY) from $TARGET" | mysql -ANr -u$USER $password -h $DEST $1 2>/dev/null) 57 | echo "$1.$2: Last $KEY value for table $TARGET in archive: $last_archive_key" 58 | fi 59 | if [ "$last_archive_key" = "$last_key_to_archive" ] 60 | then 61 | echo "$1.$2: partition $last_partition_name was successfully archived, OK to drop" 62 | exit 0 63 | else 64 | echo "$1.$2: partition $last_partition_name was not archived properly, NOT OK to proceed" 65 | exit 1 66 | fi 67 | -------------------------------------------------------------------------------- /backup/README: -------------------------------------------------------------------------------- 1 | check_backups.sh: creates a report email about backup status for your clusters 2 | pdb_backup_status.sh: invoked from backup server from check_backups.sh 3 | pdb_cons_backup.sh: creates consolidated incremental backup (eg daily) 4 | pdb_full_backup.sh: creates full backup 5 | pdb_incr_backup.sh: creates incremental backup (eg hourly) 6 | restore.sh: automatically restores latest full and all available consolidated and incrementals 7 | etc_default_backup_full: 
full backup config file, tailor and install in /etc/default/backup_full 8 | etc_default_backup_incr: incr backup config file, tailor and install in /etc/default/backup_incr 9 | workdir.tar: unpack in your root as root user (I mean /, not /root). Used during backup for tmp files 10 | 11 | Miscellaneous notes: 12 | - scripts need to run as root user 13 | - you need to set up ssh equivalence between slaves taking backup and archive server 14 | - remember to unpack workdir.tar, remove old LSN files in there 15 | - purge user requires if you use Percona server and innodb page tracking; requires SUPER 16 | - consolidated and incremental backups are optional; see example cron schedule 17 | - to use automatic restore script, just copy over, to the server to be restored, the ssh key to access the archive server and the config file for full backup from the slave which originally took the backup. The script will automatically connect to archive server, and stream the backups over ssh, feeding to "xtrabackup --prepare" as appropriate. If it completes, your restore is ready to be fired. 

--------------------------------------------------------------------------------
/backup/backup_schedule:
--------------------------------------------------------------------------------
0 7 * * 0 root /usr/local/sbin/pdb_full_backup.sh
0 0 * * * root /usr/local/sbin/pdb_cons_backup.sh
1 * * * * root /usr/local/sbin/pdb_incr_backup.sh
--------------------------------------------------------------------------------
/backup/check_backups.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# checks for proper completion of all backups
# polls each backup host over ssh (the remote side runs pdb_backup_status.sh,
# which prints "lastfull|lastincr|lastdump" dates) and prints an OK/ERROR report
# rpizzi@palominodb.com
#
DOMAIN=
#DUMPS_ENABLED=1
SERVERLIST="cluster1:10.17.54.10 cluster2:10.17.53.34 cluster3:10.17.5.17"
#
#
# optional argument overrides the built-in server list
# (was assigned to an unused lowercase "serverlist" variable, so the
# override silently did nothing)
[ "$1" != "" ] && SERVERLIST="$1"
yesterday=$(date -d yesterday +%y%m%d)
today=$(date +%y%m%d)
for server in $SERVERLIST
do
	dbname=$(echo "$server" | cut -d":" -f 1)
	host=$(echo "$server" | cut -d":" -f 2)
	echo "Checking $dbname backups on $host$DOMAIN"
	# strip remote comment lines; grep fails (non-zero) when ssh produced no output
	status=$(ssh -Tq "$host$DOMAIN" | grep -v "^#")
	if [ $? -ne 0 ]
	then
		echo "Connection error"
		continue
	fi
	lastfull=$(echo "$status" | cut -d"|" -f 1)
	lastincr=$(echo "$status" | cut -d"|" -f 2)
	lastdump=$(echo "$status" | cut -d"|" -f 3)
	ok=0
	# a full backup taken today or yesterday is considered healthy
	case "$lastfull" in
		'') lastfull='NEVER';;
		$today) lastfull='TODAY'; ok=1;;
		$yesterday) lastfull='YESTERDAY'; ok=1;;
	esac
	printf "%-8s %-8s last successful full backup was %s\n" $([ $ok -eq 1 ] && echo OK || echo ERROR) $dbname $lastfull
	ok=0
	# incrementals run at least daily, so anything older than today is an error
	case "$lastincr" in
		'') lastincr='NEVER';;
		$today) lastincr='TODAY'; ok=1;;
		$yesterday) lastincr='YESTERDAY';;
	esac
	printf "%-8s last successful incr backup was %s\n" $([ $ok -eq 1 ] && echo OK || echo ERROR) $lastincr
	# dump checking is optional; enable by uncommenting DUMPS_ENABLED above
	if [ -n "$DUMPS_ENABLED" ]
	then
		ok=0
		case "$lastdump" in
			'') lastdump='NEVER';;
			$today) lastdump='TODAY'; ok=1;;
			$yesterday) lastdump='YESTERDAY';;
		esac
		printf "%-8s last successful dump taken %s\n" $([ $ok -eq 1 ] && echo OK || echo ERROR) $lastdump
	fi
done
--------------------------------------------------------------------------------
/backup/etc_default_backup_full:
--------------------------------------------------------------------------------
remote_server=archive-server.yourdomain.com
remote_path=/home/backup
remote_user=backup
local_path=/backup
db_user=backup
db_pass=
parallelism=50
xtrabackup=xtrabackup_56
purge_user=purge
purge_pass=
--------------------------------------------------------------------------------
/backup/etc_default_backup_incr:
--------------------------------------------------------------------------------
remote_server=archive-server.yourdomain.com
remote_path=/home/backup
remote_user=backup
local_path=/backup
db_user=backup
db_pass=
parallelism=50
xtrabackup=xtrabackup_56
--------------------------------------------------------------------------------
/backup/pdb_backup_status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # script called from archive server via ssh 3 | # 4 | lastfull=$(cat /var/log/mysql_full_backup.log 2>/dev/null | fgrep -i ": completed OK" | tail -1 | cut -d " " -f 1) 5 | lastincr=$(cat /var/log/mysql_incremental_backup.log 2>/dev/null | fgrep -i ": completed OK" | tail -1 | cut -d " " -f 1) 6 | lastdump=$(cat /var/log/mysql_dump.log 2>/dev/null | fgrep -i ": completed OK" | tail -1 | cut -d " " -f 1) 7 | echo "# Backup status on $(hostname | cut -d"." -f1):" 8 | echo "$lastfull|$lastincr|$lastdump" 9 | exit 0 10 | -------------------------------------------------------------------------------- /backup/pdb_cons_backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # takes an incremental based on last consolidated backup 4 | # and removes hourly incrementals afterwards 5 | # rpizzi@blackbirdit.com 6 | # 7 | DEFAULTS=/etc/default/backup_full 8 | host=$(hostname | cut -d"." 
-f 1) 9 | 10 | remote_server=$(grep "^remote_server=" $DEFAULTS | cut -d"=" -f 2) 11 | remote_path=$(grep "^remote_path=" $DEFAULTS | cut -d"=" -f 2) 12 | remote_user=$(grep "^remote_user=" $DEFAULTS | cut -d"=" -f 2) 13 | local_path=$(grep "^local_path=" $DEFAULTS | cut -d"=" -f 2) 14 | db_user=$(grep "^db_user=" $DEFAULTS | cut -d"=" -f 2) 15 | db_pass=$(grep "^db_pass=" $DEFAULTS | cut -d"=" -f 2) 16 | thread_count=$(grep "^parallelism=" $DEFAULTS | cut -d"=" -f 2) 17 | xtrabackup=$(grep "^xtrabackup=" $DEFAULTS | cut -d"=" -f 2) 18 | 19 | conf_file=/etc/my.cnf 20 | socket=/var/lib/mysql/mysql.sock 21 | lockfile=$local_path/cons_inprogress.lock 22 | full_lockfile=$local_path/FS_inprogress.lock 23 | incr_lockfile=$local_path/incr_inprogress.lock 24 | local_err=/tmp/pdbfullbckerr.$$ 25 | 26 | backup_path=$local_path/incremental/ 27 | log_file=/var/log/backup_cons.log 28 | lsn_dir=$local_path/lsn_cons 29 | 30 | [ -f $lockfile ] && exit 1 # already in progress 31 | [ -f $full_lockfile ] && exit 0 # full in progress, no consolidation today 32 | while true 33 | do 34 | [ ! -f $incr_lockfile ] && break 35 | sleep 60 # incr running? 
wait 36 | done 37 | 38 | trap 'rm -f $lockfile $local_err' 0 39 | touch $lockfile 40 | 41 | yesterpath=$(date -d "1 day ago" "+%Y/%m/%d") 42 | 43 | file_time=$(date "+%Y%m%d_%H%M") 44 | backup_file="consolidated.$file_time.xbs.gz" 45 | date_path=$(date "+%Y/%m/%d") 46 | 47 | 48 | ssh -q $remote_user@$remote_server "mkdir -p $remote_path/$date_path" 49 | innobackupex --incremental --slave-info --no-lock --no-timestamp --parallel=$thread_count --ibbackup=/usr/bin/$xtrabackup --socket=$socket --user=$db_user --password=$db_pass --defaults-file=$conf_file --stream=xbstream --extra-lsndir=$lsn_dir --incremental-basedir=$lsn_dir $backup_path 2>>$log_file | ssh -c arcfour128 -q $remote_user@$remote_server "gzip > $remote_path/$date_path/delta_$backup_file" 2>>$local_err 50 | status=0 51 | if [ -s $local_err ] 52 | then 53 | echo "STREAMING ERROR DETECTED!" 54 | cat $local_err 55 | status=1 56 | else 57 | cp $lsn_dir/xtrabackup_checkpoints $local_path/lsn_incr # next incr starts from this backup 58 | # remove hourly incremental(s) taken before the consolidation 59 | ssh -q $remote_user@$remote_server "rm -f $remote_path/$date_path/delta_inc*gz" 60 | # remove previous day's hourly incrementals 61 | ssh -q $remote_user@$remote_server "rm $remote_path/$yesterpath/delta_inc*gz" 62 | fi >> $log_file 63 | exit $status 64 | -------------------------------------------------------------------------------- /backup/pdb_full_backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # full backup script 4 | # rpizzi@blackbirdit.com 5 | # 6 | DEFAULTS=/etc/default/backup_full 7 | remote_server=$(grep "^remote_server=" $DEFAULTS | cut -d"=" -f 2) 8 | remote_path=$(grep "^remote_path=" $DEFAULTS | cut -d"=" -f 2) 9 | remote_user=$(grep "^remote_user=" $DEFAULTS | cut -d"=" -f 2) 10 | local_path=$(grep "^local_path=" $DEFAULTS | cut -d"=" -f 2) 11 | db_user=$(grep "^db_user=" $DEFAULTS | cut -d"=" -f 2) 12 | db_pass=$(grep 
"^db_pass=" $DEFAULTS | cut -d"=" -f 2) 13 | thread_count=$(grep "^parallelism=" $DEFAULTS | cut -d"=" -f 2) 14 | xtrabackup=$(grep "^xtrabackup=" $DEFAULTS | cut -d"=" -f 2) 15 | purge_user=$(grep "^purge_user=" $DEFAULTS | cut -d"=" -f 2) 16 | purge_pass=$(grep "^purge_pass=" $DEFAULTS | cut -d"=" -f 2) 17 | 18 | conf_file=/etc/my.cnf 19 | socket=/var/lib/mysql/mysql.sock 20 | lockfile=$local_path/FS_inprogress.lock 21 | inc_lockfile=$local_path/inc_inprogress.lock 22 | local_err=/tmp/pdbfullbckerr.$$ 23 | 24 | backup_path=$local_path/full/ 25 | log_file=/var/log/backup_full.log 26 | lsn_dir=$local_path/lsn_incr 27 | lsn_dir_cons=$local_path/lsn_cons 28 | 29 | 30 | [ -f $lockfile ] && exit 1 # already in progress 31 | while [ -f $inc_lockfile ] 32 | do 33 | sleep 600 # wait for incremental to complete 34 | done 35 | trap 'rm -f $lockfile $local_err' 0 36 | touch $lockfile 37 | 38 | # get last LSN to purge tracking files later 39 | last_lsn=$(fgrep -h last_lsn /backup/lsn*/xtrabackup_checkpoints | tr -d "[ ]" | cut -d"=" -f 2 | sort -n | tail -1) 40 | echo $last_lsn > /backup/full/last_lsn 41 | 42 | file_time=$(date "+%Y%m%d_%H%M") 43 | backup_file="$file_time.xbs.gz" 44 | date_path=$(date "+%Y/%m/%d") 45 | [ ! -d $lsn_path ] && mkdir $lsn_path 46 | [ ! -d $lsn_dir_cons ] && mkdir -p $lsn_dir_cons 47 | 48 | ssh -q $remote_user@$remote_server "mkdir -p $remote_path/$date_path" 49 | innobackupex --no-version-check --parallel=$thread_count --slave-info --ibbackup=/usr/bin/$xtrabackup --socket=$socket --user=$db_user --password=$db_pass --tmpdir=$local_path/tmp --defaults-file=$conf_file --stream=xbstream --extra-lsndir=$lsn_dir $backup_path 2>>$log_file | ssh -c arcfour128 -q $remote_user@$remote_server "gzip > $remote_path/$date_path/FS_$backup_file" 2>>$local_err 50 | 51 | status=0 52 | if [ -s $local_err ] 53 | then 54 | echo "STREAMING ERROR DETECTED!" 
55 | cat $local_err 56 | status=1 57 | else 58 | cp $lsn_dir/xtrabackup_checkpoints $lsn_dir_cons # starting point for consolidated backups 59 | echo "PURGE CHANGED_PAGE_BITMAPS BEFORE $(expr $last_lsn + 1)" | mysql -u $purge_user -h localhost -p$purge_pass 60 | fi >> $log_file 61 | exit $status 62 | -------------------------------------------------------------------------------- /backup/pdb_full_enc_backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/ksh 2 | # 3 | # full backup with parallel encryption 4 | # rpizzi@blackbirdit.com 5 | # 6 | ulimit -n 8192 7 | CHUNK=2048 # blocks 8 | # 9 | DEFAULTS=/etc/default/backup_full 10 | remote_server=$(grep "^remote_server=" $DEFAULTS | cut -d"=" -f 2) 11 | remote_path=$(grep "^remote_path=" $DEFAULTS | cut -d"=" -f 2) 12 | remote_user=$(grep "^remote_user=" $DEFAULTS | cut -d"=" -f 2) 13 | local_path=$(grep "^local_path=" $DEFAULTS | cut -d"=" -f 2) 14 | db_user=$(grep "^db_user=" $DEFAULTS | cut -d"=" -f 2) 15 | db_pass=$(grep "^db_pass=" $DEFAULTS | cut -d"=" -f 2) 16 | thread_count=$(grep "^parallelism=" $DEFAULTS | cut -d"=" -f 2) 17 | xtrabackup=$(grep "^xtrabackup=" $DEFAULTS | cut -d"=" -f 2) 18 | purge_user=$(grep "^purge_user=" $DEFAULTS | cut -d"=" -f 2) 19 | purge_pass=$(grep "^purge_pass=" $DEFAULTS | cut -d"=" -f 2) 20 | enc_key=$(grep "^enc_key=" $DEFAULTS | cut -d"=" -f 2) 21 | 22 | ENC_CMD="openssl enc -aes-256-cbc -pass file:$enc_key" 23 | 24 | conf_file=/etc/my.cnf 25 | socket=/var/lib/mysql/mysql.sock 26 | lockfile=$local_path/FS_inprogress.lock 27 | inc_lockfile=$local_path/inc_inprogress.lock 28 | local_err=/tmp/pdbfullbckerr.$$ 29 | key=/root/.ssh/mysql_backup 30 | 31 | 32 | backup_path=$local_path/full/ 33 | log_file=/var/log/mysql_full_backup.log 34 | lsn_dir=$local_path/lsn_incr 35 | 36 | [ -f $lockfile ] && exit 1 # already in progress 37 | while [ -f $inc_lockfile ] 38 | do 39 | sleep 600 # wait for incremental to complete 40 | done 41 | trap 'rm 
-f $lockfile $local_err' 0 42 | touch $lockfile 43 | 44 | # get last LSN to purge tracking file later 45 | last_lsn=$(fgrep -h last_lsn /backup/lsn_*/xtrabackup_checkpoints | tr -d "[ ]" | cut -d"=" -f 2 | sort -n | tail -1) 46 | 47 | file_time=$(date "+%Y%m%d_%H%M") 48 | backup_file="$file_time.xbs.gz" 49 | date_path=$(date "+%Y/%m/%d") 50 | [ ! -d $lsn_path ] && mkdir $lsn_path 51 | 52 | ssh -q -i $key $remote_user@$remote_server "mkdir -p $remote_path/$date_path" 53 | 54 | # encryption slots setup 55 | $ENC_CMD | ssh -q -i $key $remote_user@$remote_server "cat > $remote_path/$date_path/FS_${backup_file}_piece0" |& 56 | exec {s0}>&p 57 | $ENC_CMD | ssh -q -i $key $remote_user@$remote_server "cat > $remote_path/$date_path/FS_${backup_file}_piece1" |& 58 | exec {s1}>&p 59 | $ENC_CMD | ssh -q -i $key $remote_user@$remote_server "cat > $remote_path/$date_path/FS_${backup_file}_piece2" |& 60 | exec {s2}>&p 61 | $ENC_CMD | ssh -q -i $key $remote_user@$remote_server "cat > $remote_path/$date_path/FS_${backup_file}_piece3" |& 62 | exec {s3}>&p 63 | $ENC_CMD | ssh -q -i $key $remote_user@$remote_server "cat > $remote_path/$date_path/FS_${backup_file}_piece4" |& 64 | exec {s4}>&p 65 | #$ENC_CMD | ssh -q -i $key $remote_user@$remote_server "cat > $remote_path/$date_path/FS_${backup_file}_piece5" |& 66 | #exec {s5}>&p 67 | #$ENC_CMD | ssh -q -i $key $remote_user@$remote_server "cat > $remote_path/$date_path/FS_${backup_file}_piece6" |& 68 | #exec {s6}>&p 69 | #$ENC_CMD | ssh -q -i $key $remote_user@$remote_server "cat > $remote_path/$date_path/FS_${backup_file}_piece7" |& 70 | #exec {s7}>&p 71 | #$ENC_CMD | ssh -q -i $key $remote_user@$remote_server "cat > $remote_path/$date_path/FS_${backup_file}_piece8" |& 72 | #exec {s8}>&p 73 | #$ENC_CMD | ssh -q -i $key $remote_user@$remote_server "cat > $remote_path/$date_path/FS_${backup_file}_piece9" |& 74 | #exec {s9}>&p 75 | 76 | innobackupex --no-version-check --parallel=$thread_count --slave-info --ibbackup=/usr/bin/$xtrabackup 
--socket=$socket --user=$db_user --password=$db_pass --tmpdir=/mnt/storage1/tmp --defaults-file=$conf_file --stream=xbstream --extra-lsndir=$lsn_dir $backup_path 2>>$log_file | pigz | while true 77 | do 78 | [[ $(dd count=$CHUNK 2>&1 >&$s0) == *0+0* ]] && break 79 | [[ $(dd count=$CHUNK 2>&1 >&$s1) == *0+0* ]] && break 80 | [[ $(dd count=$CHUNK 2>&1 >&$s2) == *0+0* ]] && break 81 | [[ $(dd count=$CHUNK 2>&1 >&$s3) == *0+0* ]] && break 82 | [[ $(dd count=$CHUNK 2>&1 >&$s4) == *0+0* ]] && break 83 | # [[ $(dd count=$CHUNK 2>&1 >&$s5) == *0+0* ]] && break 84 | # [[ $(dd count=$CHUNK 2>&1 >&$s6) == *0+0* ]] && break 85 | # [[ $(dd count=$CHUNK 2>&1 >&$s7) == *0+0* ]] && break 86 | # [[ $(dd count=$CHUNK 2>&1 >&$s8) == *0+0* ]] && break 87 | # [[ $(dd count=$CHUNK 2>&1 >&$s9) == *0+0* ]] && break 88 | done 2>>$local_err 89 | 90 | status=0 91 | if [ -s $local_err ] 92 | then 93 | echo "STREAMING ERROR DETECTED!" 94 | cat $local_err 95 | status=1 96 | else 97 | echo "PURGE CHANGED_PAGE_BITMAPS BEFORE $(expr $last_lsn + 1)" | mysql -u $purge_user -h localhost -p$purge_pass 98 | fi >> $log_file 99 | exit $status 100 | -------------------------------------------------------------------------------- /backup/pdb_incr_backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # incremental backup script 4 | # rpizzi@blackbirdit.com 5 | # 6 | DEFAULTS=/etc/default/backup_incr 7 | host=$(hostname | cut -d"." 
-f 1) 8 | 9 | remote_server=$(grep "^remote_server=" $DEFAULTS | cut -d"=" -f 2) 10 | remote_path=$(grep "^remote_path=" $DEFAULTS | cut -d"=" -f 2) 11 | remote_user=$(grep "^remote_user=" $DEFAULTS | cut -d"=" -f 2) 12 | local_path=$(grep "^local_path=" $DEFAULTS | cut -d"=" -f 2) 13 | db_user=$(grep "^db_user=" $DEFAULTS | cut -d"=" -f 2) 14 | xtrabackup=$(grep "^xtrabackup=" $DEFAULTS | cut -d"=" -f 2) 15 | db_pass=$(grep "^db_pass=" $DEFAULTS | cut -d"=" -f 2) 16 | thread_count=$(grep "^parallelism=" $DEFAULTS | cut -d"=" -f 2) 17 | 18 | conf_file=/etc/my.cnf 19 | socket=/var/lib/mysql/mysql.sock 20 | lockfile=$local_path/inc_inprogress.lock 21 | full_lockfile=$local_path/FS_inprogress.lock 22 | cons_lockfile=$local_path/cons_inprogress.lock 23 | local_err=/tmp/pdbincrbckerr.$$ 24 | 25 | backup_path=$local_path/incr/ 26 | log_file=/var/log/backup_incr.log 27 | lsn_dir=$local_path/lsn_incr 28 | 29 | [ -f $lockfile ] && exit 1 # already in progress 30 | [ -f $full_lockfile ] && exit 0 # full in progress 31 | [ -f $cons_lockfile ] && exit 0 # consolidation in progress 32 | 33 | trap 'rm -f $lockfile $local_err' 0 34 | touch $lockfile 35 | 36 | file_time=$(date "+%Y%m%d_%H%M") 37 | backup_file="inc.$file_time.xbs.gz" 38 | date_path=$(date "+%Y/%m/%d") 39 | [ ! -d $lsn_path ] && mkdir $lsn_path 40 | 41 | ssh -q $remote_user@$remote_server "mkdir -p $remote_path/$date_path" 42 | innobackupex --slave-info --no-version-check --incremental --no-lock --no-timestamp --parallel=$thread_count --ibbackup=/usr/bin/$xtrabackup --socket=$socket --user=$db_user --password=$db_pass --tmpdir=$local_path/tmp --defaults-file=$conf_file --stream=xbstream --extra-lsndir=$lsn_dir --incremental-basedir=$lsn_dir $backup_path 2>>$log_file | ssh -c arcfour128 -q $remote_user@$remote_server "gzip > $remote_path/$date_path/delta_$backup_file" 2>>$local_err 43 | status=0 44 | if [ -s $local_err ] 45 | then 46 | echo "STREAMING ERROR DETECTED!" 
47 | cat $local_err 48 | status=1 49 | fi >> $log_file 50 | exit $status 51 | -------------------------------------------------------------------------------- /backup/restore.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # backup restore script 4 | # v 2.30 Jan 2015 5 | # riccardo.pizzi@rumbo.com 6 | # looks at default files to find archive server, etc 7 | # then hunts for more recent full backup and restores up to latest hourly incremental 8 | # 9 | # optionally, you can pass a second parameter which is the path to a specific full backup file 10 | # and it will restore that one instead of the most recent one, and all available incrementals for that day 11 | # 12 | # parameters: 13 | # $1 = datadir (where you want to restore the backup) -- mandatory 14 | # $2 = /path/to/FS_file (restore this full backup and not the most recent available) 15 | # 16 | # restore sequence 17 | # 18 | # 1 - full (latest one found on archive server) 19 | # 2 - daily deltas (consolidated backups) 20 | # 3 - hourly deltas (hourly incrementals) 21 | # 22 | # 23 | DEFAULTS=/etc/default/backup_full 24 | KEY=/localhome/dbadm/.ssh/id_rsa 25 | MEMORY=48GB 26 | # if you want to skip incrementals set the following to y 27 | FULL_ONLY=n 28 | # 29 | if [ $# -lt 1 ] 30 | then 31 | echo "usage: $0 [ full backup ]" 32 | exit 1 33 | fi 34 | if [ $# -eq 2 ] 35 | then 36 | full="$2" 37 | fi 38 | if [ "${1:0:1}" != "/" ] 39 | then 40 | echo "datadir must start with \"/\"" 41 | exit 1 42 | fi 43 | if [ -d $1 ] 44 | then 45 | echo "datadir $1 must not exist" 46 | exit 1 47 | fi 48 | mkdir -p $1 49 | if [ $? 
-ne 0 ] 50 | then 51 | echo "error creating datadir $1" 52 | exit 1 53 | fi 54 | log=/tmp/restore.log 55 | tmpf=/tmp/restore.$$ 56 | trap 'rm -f $tmpf' 0 57 | remote_server=$(grep "^remote_server=" $DEFAULTS | cut -d"=" -f 2) 58 | remote_path=$(grep "^remote_path=" $DEFAULTS | cut -d"=" -f 2) 59 | remote_user=$(grep "^remote_user=" $DEFAULTS | cut -d"=" -f 2) 60 | xtrabackup=$(grep "^xtrabackup=" $DEFAULTS | cut -d"=" -f 2) 61 | if [ "$full" = "" ] 62 | then 63 | full=$(ssh -q -i $KEY $remote_user@$remote_server "find $remote_path -type f -name FS\* ! -mmin -60 " | sort | tail -1) 64 | if [ "$full" = "" ] 65 | then 66 | echo "unable to autodetect last full backup" 67 | exit 1 68 | fi 69 | fi 70 | incrbase=$(dirname $full) 71 | offset=$(echo $remote_path | tr -s "[/]" "[\n]" | wc -l) 72 | fulldate=$(echo $full | cut -d"/" -f $(expr 1 + $offset)-$(expr 3 + $offset)) 73 | if [ "$FULL_ONLY" != "y" ] 74 | then 75 | consolidated=$(ssh -q -i $KEY $remote_user@$remote_server "find $incrbase -type f -name delta_consolidated\* -newer $full ! -mmin -60 | sort") 76 | incrementals=$(ssh -q -i $KEY $remote_user@$remote_server "find $incrbase -type f -name delta_inc\* -newer $full ! -mmin -30" | sort) 77 | n_cons=$(echo $consolidated | wc -w) 78 | n_incr=$(echo $incrementals | wc -w) 79 | else 80 | n_cons=0 81 | n_incr=0 82 | fi 83 | echo "*** Phase I -- Full Backup" 84 | echo "**** Copying full backup dated $fulldate" 85 | ssh -q -i $KEY $remote_user@$remote_server cat $full | zcat | xbstream -x -C $1 86 | if [ $? -ne 0 ] 87 | then 88 | echo "error copying full backup, aborting" 89 | exit 1 90 | fi 91 | echo "**** Processing full backup" 92 | $xtrabackup --defaults-file=$1/backup-my.cnf --prepare --use-memory=$MEMORY --apply-log-only --target-dir=$1 > $log 2>&1 93 | if [ $? 
-ne 0 ] 94 | then 95 | echo "error processing full backup, aborting" 96 | echo 97 | cat $log 98 | exit 1 99 | fi 100 | echo "*** Phase II -- Consolidated Daily Backups" 101 | if [ $n_cons -gt 0 ] 102 | then 103 | echo "**** Processing $n_cons consolidated backup(s)" 104 | last_cons=$(echo $consolidated | tr "[ ]" "[\n]" | tail -1 | cut -d "." -f 2) 105 | for piece in $consolidated 106 | do 107 | name=$(echo $piece | cut -d "." -f 2) 108 | echo "**** Copying $name" 109 | mkdir -p $1/consolidated/$name 110 | ssh -q -i $KEY $remote_user@$remote_server zcat $piece | xbstream -x -C $1/consolidated/$name 111 | if [ $? -ne 0 ] 112 | then 113 | echo "error copying consolidated $name, aborting" 114 | exit 1 115 | fi 116 | done 117 | for piece in $(ls $1/consolidated) 118 | do 119 | echo "**** Applying consolidated $piece" 120 | $xtrabackup --defaults-file=$1/backup-my.cnf --prepare --use-memory=$MEMORY --apply-log-only --target-dir=$1 --incremental-dir=$1/consolidated/$piece > $log 2>&1 121 | if [ $? -ne 0 ] 122 | then 123 | echo "error applying consolidated $piece, aborting" 124 | echo 125 | cat $log 126 | exit 1 127 | fi 128 | done 129 | fi 130 | echo "*** Phase III -- Incremental Hourly Backups" 131 | if [ $n_incr -gt 0 ] 132 | then 133 | echo "**** Processing $n_incr incremental backup(s)" 134 | last_incr=$(echo $incrementals | tr "[ ]" "[\n]" | tail -1 | cut -d "." -f 2) 135 | for piece in $incrementals 136 | do 137 | name=$(echo $piece | cut -d "." -f 2) 138 | echo "**** Copying $name" 139 | mkdir -p $1/incrementals/$name 140 | # originally one pipeline, but xbstream has a bug and does not notice 141 | # broken pipes... 142 | ssh -q -i $KEY $remote_user@$remote_server cat $piece | zcat > $1/incrementals/$name/piece.xbs 2>$tmpf 143 | if [ $? -ne 0 ] 144 | then 145 | echo "error transferring incremental $name: $(cat $tmpf)" 146 | if [ "$name" = "$last_incr" ] 147 | then 148 | echo "maybe interrupted backup, but " 149 | echo "this piece was needed for binlog position. 
aborting." 150 | echo "suggestion: remove it and rerun restore". 151 | exit 1 152 | fi 153 | echo "assuming interrupted backup, skipping this piece. be careful." 154 | rm -r $1/incrementals/$name 155 | continue 156 | fi 157 | cat $1/incrementals/$name/piece.xbs | xbstream -x -C $1/incrementals/$name 158 | if [ $? -ne 0 ] 159 | then 160 | echo "error extracting incremental $name, aborting" 161 | exit 1 162 | fi 163 | rm $1/incrementals/$name/piece.xbs 164 | done 165 | for piece in $(ls $1/incrementals) 166 | do 167 | echo "**** Applying incremental $piece" 168 | $xtrabackup --defaults-file=$1/backup-my.cnf --prepare --use-memory=$MEMORY --apply-log-only --target-dir=$1 --incremental-dir=$1/incrementals/$piece > $log 2>&1 169 | if [ $? -ne 0 ] 170 | then 171 | echo "error applying incremental $piece, aborting" 172 | echo 173 | cat $log 174 | exit 1 175 | fi 176 | done 177 | fi 178 | echo "*** Phase IV -- Final preparation" 179 | $xtrabackup --defaults-file=$1/backup-my.cnf --prepare --use-memory=$MEMORY --target-dir=$1 > $log 2>&1 180 | if [ $? -ne 0 ] 181 | then 182 | echo "error during final prepare, aborting" 183 | echo 184 | cat $log 185 | exit 1 186 | fi 187 | # get latest information about binlog pos, partitions and table fmt from last backup piece 188 | if [ "$last_incr" != "" ] 189 | then 190 | cp $1/incrementals/$last_incr/xtrabackup_binlog_info $1 191 | cp $1/incrementals/$last_incr/xtrabackup_slave_info $1 192 | cd $1/incrementals/$last_incr 193 | for f in $(find . -name \*frm -o -name \*par | cut -d "/" -f 2-) 194 | do 195 | cp $1/incrementals/$last_incr/$f $1/$f 196 | done 197 | else 198 | if [ "$last_cons" != "" ] 199 | then 200 | cp $1/consolidated/$last_cons/xtrabackup_binlog_info $1 201 | cp $1/consolidated/$last_cons/xtrabackup_slave_info $1 202 | cd $1/consolidated/$last_cons 203 | for f in $(find . 
-name \*frm -o -name \*par | cut -d "/" -f 2-) 204 | do 205 | cp $1/consolidated/$last_cons/$f $1/$f 206 | done 207 | fi 208 | fi 209 | echo "*** Phase V -- Cleanup" 210 | rm -rf $1/consolidated $1/incrementals 211 | echo "*** Done. Don't forget to change ownership of files on $1 before starting mysqld." 212 | exit 0 213 | -------------------------------------------------------------------------------- /backup/workdir.tar: -------------------------------------------------------------------------------- 1 | backup/0000755000000000000000000000000012344345101011016 5ustar rootrootbackup/lsn_cons/0000755000000000000000000000000012257004376012645 5ustar rootrootbackup/lsn_cons/xtrabackup_checkpoints0000644000000000000000000000015112344202325017312 0ustar rootrootbackup_type = incremental 2 | from_lsn = 17038307913 3 | to_lsn = 17107047991 4 | last_lsn = 17107047991 5 | compact = 0 6 | backup/lsn_incr/0000700000000000000000000000000012257004375012623 5ustar rootrootbackup/lsn_incr/xtrabackup_checkpoints0000644000000000000000000000015112344344663017317 0ustar rootrootbackup_type = incremental 7 | from_lsn = 17139781207 8 | to_lsn = 17142859986 9 | last_lsn = 17142859986 10 | compact = 0 11 | backup/lsn/0000755000000000000000000000000012262563325011623 5ustar rootrootbackup/incr/0000755000000000000000000000000012250434263011755 5ustar rootrootbackup/full/0000755000000000000000000000000012264445601011767 5ustar rootrootbackup/full/last_lsn0000644000000000000000000000001412342547561013531 0ustar rootroot16779472788 12 | backup/tmp/0000755000000000000000000000000012344344663011632 5ustar rootroot -------------------------------------------------------------------------------- /backup_manager/INSTALL: -------------------------------------------------------------------------------- 1 | INSTALLATION INSTRUCTIONS 2 | ========================= 3 | 4 | To install, follow these steps: 5 | 6 | 1) become root user 7 | 2) run backup_manager build-config, this will create a sample 
config file in /etc/mariadb 8 | 3) tweak configuration to adapt it to your system, see comments in the file for help 9 | 4) create an user for backup manager like this: GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT ON *.* TO 'backup'@'localhost' IDENTIFIED BY .... (10.5 onwards: add SLAVE MONITOR to the set) 10 | 5) run a manual full backup to test the installation - if any package is missing the script will ask you to install it 11 | 6) schedule the backups using a crontab file in /etc/cron.d/backup_manager, add this line in crontab file at top: PATH=/usr/bin:/bin:/usr/local/sbin 12 | 13 | To read documentation for the tool run backup_manager without any parameter. 14 | 15 | NOTE: on multi-cpu machines please set pigz -p parameter accordingly, this will cut on backup time sensibly. 16 | 17 | -------------------------------------------------------------------------------- /backup_manager/README: -------------------------------------------------------------------------------- 1 | 2 | MariaDB Backup Manager - a backup-tool-agnostic backup and restore script for MariaDB 3 | (c) 2018-2025 Rick Pizzi (pizzi@leopardus.com) 4 | 5 | Highlights: 6 | 7 | - supports full backups, incremental backups, binary logs backups, logical backups, 8 | snapshots backups (ZFS only) 9 | - backup methods currently supported: xtrabackup, mariabackup, mysqldump, mydumper, zfs snapshots 10 | - configurable encryption and compression methods 11 | - browsable backup inventory 12 | - can restore locally or to remote server via ssh 13 | - automatic and effortless restore to specific point-in-time 14 | - automatic rebuild of replicas from last backup, including replication 15 | - automatic restore test 16 | - automatic purging of old backups, with smart purge option 17 | - email notifications 18 | - 3rd party notifications support e.g. 
slack 19 | - workload friendly when using mariabackup (if backup lock can't be obtained in 30 seconds, 20 | give up instead of locking the workload waiting) 21 | - filesystem capacity check (no more filesystem full due to backups) 22 | 23 | Run backup_manager without options to access the online documentation and 24 | see config file comments for more details! 25 | 26 | -------------------------------------------------------------------------------- /backup_manager/config.txt: -------------------------------------------------------------------------------- 1 | ######################################################################## 2 | # 3 | # MariaDB Backup Manager configuration file 4 | # 5 | ######################################################################## 6 | # 7 | # SERVER TO BACK UP 8 | # MariaDB host, user and password for server to back up 9 | # Note: these can also be passed as environment variables 10 | # 11 | backup_host=localhost # or env var BACKUP_HOST 12 | backup_port=3306 # or env var BACKUP_PORT 13 | backup_user=root # or env var BACKUP_USER 14 | backup_password=root # or env var BACKUP_PASSWORD 15 | backup_socket=/var/lib/mysql/mysql.sock # or env var BACKUP_SOCKET 16 | # 17 | # 18 | ######################################################################## 19 | # 20 | # TARGET DIRECTORY 21 | # where to store backups 22 | # 23 | target_directory=/mnt/backup 24 | # 25 | # date_format: 26 | # format of date for target directory folder creation 27 | # Note: changing this may break some functionalities at this time 28 | # 29 | #date_format=%Y-%m-%d 30 | # 31 | # 32 | # minimum free space on target directory required to run a full backup 33 | # 34 | free_space_percentage=20 35 | # 36 | # 37 | ######################################################################## 38 | # 39 | # COMPRESSION AND ENCRYPTION 40 | # 41 | # compression options - 42 | # if commented out, no compression will take place 43 | # 44 | #compressor=pigz -p 2 45 | 
#uncompressor=pigz -p 2 -dc 46 | # 47 | # encryption options - 48 | # script will generate a cryptkey and export it in the environment 49 | # with name "enc_key", this can be used by several different encryption 50 | # utilities, eg. openssl. The cryptkey is then saved in the database. 51 | # if commented out, no encryption will take place 52 | # 53 | #encryptor=openssl enc -aes-256-cbc -pass env:enc_key 54 | #unencryptor=openssl enc -d -aes-256-cbc -pass env:enc_key 55 | # 56 | ######################################################################## 57 | # 58 | # PURGE OPTIONS 59 | # purge_days: how many days to keep full backups (for purge) 60 | # 61 | # smart purge will keep daily full backups for last 7 days, weekly backups 62 | # for last month and monthly backups up to the value of smart_purge_months; 63 | # weekly and monthly backups are those taken on sunday 64 | # 65 | purge_days=15 66 | # 67 | # 68 | #smart_purge=1 69 | #smart_purge_months=3 70 | # 71 | # whether to purge broken backups immediately - not recommended 72 | # 73 | #purge_incomplete_backups=1 74 | # 75 | # set cloud_storage to 1 to tell the purge function that backups are 76 | # stored in the cloud so no local filesystem purge operations will be attempted 77 | # 78 | #cloud_storage=0 79 | # 80 | ######################################################################## 81 | # 82 | # BACKUP STATUS NOTIFICATION 83 | # a webhook script can be called after backup completes 84 | # can be used for backup status notifications e.g. via Slack 85 | # script will be called with following parameters: 86 | # - backup tool (mariabackup,mysqldump,...) 
87 | # - backup level (0=full,1=incr,2=dump,3=binlog) 88 | # - backup exit status (0=success,1=failure) 89 | # additionally, customer name will be available, if defined, 90 | # in environment variable BACKUP_LABEL 91 | # 92 | #webhook_script=/usr/local/bin/slack.sh 93 | # 94 | ######################################################################## 95 | # 96 | # EMAIL NOTIFICATIONS 97 | # will send an email after backup completes, requires mailx 98 | # and proper setup of the server's mail subsystem 99 | # 100 | # if commented out, no notification happens. if you want failure notifications, 101 | # please set failure_notify. If you also want success notifications (for 102 | # full backups - sends also a pretty inventory in the email body) then set 103 | # success_notify as well (can be a different email address) 104 | # 105 | #failure_notify=root@localhost 106 | # 107 | #success_notify=root@localhost 108 | # 109 | # name of customer, to properly tag email subjects in notifications 110 | # 111 | notify_label=Rick's lab 112 | # 113 | # 114 | ######################################################################## 115 | # 116 | # CALLING OUT URLs 117 | # you can define one "before backup" URL and one "after backup" URL 118 | # 119 | #callout_url_before=https://www.mariadb.com/before 120 | #callout_url_after=https://www.mariadb.com/after 121 | # 122 | ######################################################################## 123 | # 124 | # MISC OPTIONS 125 | # 126 | # parallelism: 127 | # for mariabackup and xtrabackup, how many tablespaces to stream at a time 128 | # for mydumper, how many tables to dump at a time 129 | # 130 | #parallelism=4 131 | # 132 | # 133 | # save master position when using mysqldump - will cause slave to be stopped 134 | # for entire backup duration... 
135 | # 136 | #master_position=1 137 | # 138 | # include galera info in backup taken with xtrabackup or mariabackup 139 | # Warning: do not enable if galera not running, as the backup will fail 140 | # 141 | #galera_info=1 142 | # 143 | # restore directory to use as target for automatic restore tests 144 | # (backup_manager restore test command) - if not specified will 145 | # try and use backup folder (target_directory) 146 | # Note: the space is released at the end of the test. 147 | # 148 | #restore_test_directory=/mnt/restore 149 | # 150 | # 151 | # time out backup after a preset amount of time, can be used when the backup 152 | # tool in use likes to freeze from time to time. unit is minutes and 153 | # default is no timeout 154 | # 155 | #backup_timeout=360 156 | # 157 | # 158 | # kill long running queries taking longer than X seconds after locking all 159 | # tables at end of backup - works for {maria,xtra}backup and mydumper 160 | # backup user needs the PROCESS and SUPER privileges for this to work 161 | # 162 | #kill_query_time=300 163 | # 164 | # increase max number of open files 165 | # (this value is set at OS level and passed to mariabackup and mariadbd) 166 | # 167 | #open_files_limit=100000 168 | # 169 | # path to MariaDB server executable 170 | # (used during point-in-time restore) 171 | # 172 | #server_path=/usr/sbin/mysqld 173 | -------------------------------------------------------------------------------- /bakasql/BUGS_AND_LIMITATIONS: -------------------------------------------------------------------------------- 1 | KNOWN BUGS AND LIMITATIONS: 2 | 3 | - the tool will not support changing (part of) a primary key with an update statement, as rollback code will be incorrect 4 | - if, in an update statement, the where condition contains one of the columns touched by the update, rollback may be incorrect; BakaSQL will try to fix simple cases for you but remember to always check rollback code generated in the dry run phase 5 | 
-------------------------------------------------------------------------------- /bakasql/CHANGELOG: -------------------------------------------------------------------------------- 1 | 1.9 2 | 3 | - major code overhaul in order to improve speed, this version is at least 3x faster than 1.8 4 | - speed hack option (under advanced options) to gain even more speed (up to 5x for updates) - see README 5 | - periodic commit (under advanced options) to commit in the middle, every N statements (useful to reduce transaction size) 6 | -------------------------------------------------------------------------------- /bakasql/README: -------------------------------------------------------------------------------- 1 | BAKASQL 2 | 3 | a tool written in bash and C that can be used to execute DML against a MySQL database in a somewhat safe manner. 4 | 5 | What you get: 6 | 7 | - automatic rollback scripts to revert the changes the query will apply 8 | - automatic check for PK and/or index use (avoid locking table scans!!) 9 | - dry run capability (check query and compute rollback statements without modifying the data) 10 | - log of everything executed against the database 11 | - transaction consistency: all queries are run within the same transaction so an all-or-nothing modification is enforced 12 | - last_insert_id and variables are supported (see examples) 13 | 14 | Installation notes: 15 | 16 | - the system user running this script must have proper grants on the databases, from the host the CGI script runs from 17 | - the script runs under apache httpd, put it in the cgi-bin directory (usually /var/www/cgi-bin) and configure your web server accordingly 18 | - you need to compile the bakautils.c program (gcc -O3 -o bakautils bakautils.c) and place it according to the path at the top of the CGI script. 19 | - place the images from the images folder in your /var/www/html folder 20 | 21 | WATCH OUT: this is early stage software. Bugs are there. 
Make sure to always check the rollback code in dry run mode before you run the actual changes. 22 | 23 | EXAMPLE USAGE WITH VARIABLES: 24 | 25 | set @myvar1 = (select val from rick.t1 where val=12121 and id = 1021); 26 | set @myvar2 = (select val from rick.t1 where id = 1385); 27 | set @myvar3 = (select 666); 28 | insert into t1 (val) values (@myvar1); 29 | insert into t1 (val) values (@last_insert_id), (@myvar2), (@myvar3); 30 | 31 | Advanced options: 32 | 33 | - speed hacks enables some... speed hacks, at the expense of some features; namely, variables are not substituted, UTF-8 invalid chars are not filtered out, and output will be terse (so it will be harder to locate the row with the error, although still possible) 34 | - periodic commit will commit the transation periodically every N statements, this can be used for very large transactions (eg large number of identical updates) - normally shouldn't be used, as in this case, the all-or-nothing modification is not enforced 35 | 36 | 37 | 38 | Feedbacks welcome! 
39 | Enjoy 40 | -------------------------------------------------------------------------------- /bakasql/bakasql.conf: -------------------------------------------------------------------------------- 1 | db01 2 | db02 3 | db03 4 | -------------------------------------------------------------------------------- /bakasql/bakautils.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "version.h" 8 | 9 | static void rollback_args(); 10 | static void get_columns(); 11 | static void index_in_use(); 12 | static void pretty_print(); 13 | static void insert_vars(); 14 | static void check_pk_use(); 15 | static void verify_column_names(); 16 | static void total_query_count(); 17 | static void array_idx(); 18 | 19 | static char *u_replace_in_string(); 20 | static char *u_replace_in_string_eol(); 21 | static int u_blank_string(); 22 | static int u_quotes_in_string(); 23 | static int u_string_is_number(); 24 | static int u_words_in_string(); 25 | static int u_check_if_match(); 26 | static int u_quote_open(); 27 | 28 | static int debug = 0; 29 | 30 | main(argc, argv) 31 | int argc; 32 | char **argv; 33 | { 34 | if (argc < 2) { 35 | printf("bakautils package for BakaSQL %s\n", VERSION); 36 | exit(0); 37 | } 38 | if (!strcmp(argv[1], "rollback_args")) { 39 | rollback_args(argc, argv); 40 | exit(0); 41 | } 42 | if (!strcmp(argv[1], "get_columns")) { 43 | get_columns(argc, argv); 44 | exit(0); 45 | } 46 | if (!strcmp(argv[1], "index_in_use")) { 47 | index_in_use(argc, argv); 48 | exit(0); 49 | } 50 | if (!strcmp(argv[1], "pretty_print")) { 51 | pretty_print(argc, argv); 52 | exit(0); 53 | } 54 | if (!strcmp(argv[1], "array_idx")) { 55 | array_idx(argc, argv); 56 | exit(0); 57 | } 58 | if (!strcmp(argv[1], "total_query_count")) { 59 | total_query_count(argc, argv); 60 | exit(0); 61 | } 62 | if (!strcmp(argv[1], "insert_vars")) { 63 | insert_vars(argc, argv); 64 | 
exit(0); 65 | } 66 | if (!strcmp(argv[1], "check_pk_use")) { 67 | check_pk_use(argc, argv); 68 | exit(0); 69 | } 70 | if (!strcmp(argv[1], "verify_column_names")) { 71 | verify_column_names(argc, argv); 72 | exit(0); 73 | } 74 | fprintf(stderr, "Unknown function %s\n", argv[1]); 75 | exit(1); 76 | } 77 | 78 | /* 79 | ARGS: 80 | 2 > column list 81 | */ 82 | 83 | static void rollback_args(argc, argv) 84 | int argc; 85 | char **argv; 86 | { 87 | char *b, *p, *r; 88 | int c=0; 89 | 90 | if (argc < 3) { 91 | fprintf(stderr, "%s: not enough arguments\n", argv[1]); 92 | exit(1); 93 | } 94 | r = strdup(argv[2]); 95 | b = r; 96 | printf("CONCAT("); 97 | for (p = r; *p; p++) { 98 | switch(*p) { 99 | case 0x20: 100 | case 0x09: 101 | case 0x0a: 102 | *p = 0x00; 103 | if (!u_blank_string(b)) { 104 | if (c) 105 | printf(",',',"); 106 | printf("'%s= ', IF(%s IS NOT NULL, QUOTE(%s),'NULL')", b, b, b); 107 | c++; 108 | } 109 | b = p + 1; 110 | break; 111 | } 112 | } 113 | if (!u_blank_string(b)) { 114 | if (c) 115 | printf(",',',"); 116 | printf("'%s= ', IF(%s IS NOT NULL, QUOTE(%s),'NULL')", b, b, b); 117 | } 118 | printf(")\n"); 119 | } 120 | 121 | /* 122 | ARGS: 123 | 2 > update set arguments 124 | */ 125 | 126 | static void get_columns(argc, argv) 127 | int argc; 128 | char **argv; 129 | { 130 | char *b, *p, *r; 131 | int qo=0; 132 | 133 | if (argc < 3) { 134 | fprintf(stderr, "%s: not enough arguments\n", argv[1]); 135 | exit(1); 136 | } 137 | r = strdup(argv[2]); 138 | b = r; 139 | for (p = r; *p; p++) { 140 | switch(*p) { 141 | case '\'': 142 | if (p == r || *(p - 1) != 0x5c) { 143 | if (qo) 144 | qo=0; 145 | else 146 | qo=1; 147 | } 148 | break; 149 | case ',': 150 | if (!qo) 151 | b = p + 1; 152 | break; 153 | case '=': 154 | *p = 0x00; 155 | if (!qo) 156 | printf("%s ", b); 157 | b = p + 1; 158 | break; 159 | } 160 | } 161 | printf("\n"); 162 | } 163 | 164 | /* 165 | ARGS: 166 | 2 > cardinfo file 167 | 3 > cols_used 168 | 4 > min_req_cardinality 169 | */ 170 | 171 | 
static void index_in_use(argc, argv) 172 | int argc; 173 | char **argv; 174 | { 175 | char *b, *p, *r, *idx_col, *idx_name; 176 | FILE *f; 177 | char buf[256]; 178 | int i, idx_seq, idx_card, idx_size, mc = 0, msz=0, min_card, done = 0, ocard; 179 | char oidx[64]; 180 | char ocol[64]; 181 | unsigned char sc; 182 | int mdebug=0, good_one = 0; 183 | 184 | if (argc < 5) { 185 | fprintf(stderr, "%s: not enough arguments\n", argv[1]); 186 | exit(1); 187 | } 188 | r = strdup(argv[3]); 189 | min_card = atoi(argv[4]); 190 | if ((f = fopen(argv[2], "r")) == NULL) { 191 | if (errno == ENOENT) 192 | return; 193 | fprintf(stderr, "%s: fopen: %s: error %d\n", argv[1], argv[2], errno); 194 | exit(1); 195 | } 196 | *oidx = 0x00; 197 | while (fgets(buf, 255, f)) { 198 | *(buf + strlen(buf) - 1) = 0x00; 199 | b = buf; 200 | i=0; 201 | for (p = buf; *p; p++) { 202 | if (*p == 0x09) { 203 | *p = 0x00; 204 | switch(i) { 205 | case 0: 206 | idx_col = strdup(b); 207 | break; 208 | case 1: 209 | idx_seq = atoi(b); 210 | break; 211 | case 2: 212 | idx_card = atoi(b); 213 | break; 214 | case 3: 215 | idx_name = strdup(b); 216 | break; 217 | } 218 | b = p + 1; 219 | i++; 220 | } 221 | } 222 | idx_size = atoi(b); 223 | if (mdebug) 224 | printf("INDEX: col %s seq %d card %d name %s size=%d\n", idx_col, idx_seq, idx_card, idx_name, idx_size); 225 | if (strcasecmp(idx_name, oidx)) { 226 | if (msz) { 227 | if (mdebug) { 228 | printf("Found usable index %s\n", oidx); 229 | } 230 | done++; 231 | idx_name = oidx; 232 | idx_card = ocard; 233 | idx_col = ocol; 234 | break; 235 | } 236 | mc=0; 237 | strcpy(oidx, idx_name); 238 | strcpy(ocol, idx_col); 239 | ocard = idx_card; 240 | } 241 | b = r; 242 | for (p = r; *p; p++) { 243 | switch(*p) { 244 | case 0x20: 245 | case 0x09: 246 | case 0x0a: 247 | sc = *p; 248 | *p = 0x00; 249 | if (u_check_if_match(b, idx_col, NULL, 0)) { 250 | if (idx_seq > 1) { 251 | if (mc < idx_seq -1) { 252 | if (mdebug) 253 | printf("Matched but out of seq\n"); 254 | } 255 | 
else 256 | mc++; 257 | } 258 | else 259 | mc++; 260 | if (mc == idx_seq) { 261 | msz++; 262 | if (mdebug) 263 | printf("POSITIVE MATCH for %s, %d of %d\n", idx_name, msz, idx_size); 264 | good_one++; 265 | if (msz == idx_size) { 266 | if (mdebug) 267 | printf("FULL MATCH for %s\n", idx_name); 268 | done++; 269 | } 270 | } 271 | } 272 | *p = sc; 273 | b = p + 1; 274 | break; 275 | } 276 | if (done) 277 | break; 278 | } 279 | if (u_check_if_match(b, idx_col, NULL, 0)) { 280 | if (idx_seq > 1) { 281 | if (mc < idx_seq -1) { 282 | if (mdebug) 283 | printf("Matched but out of seq\n"); 284 | } 285 | else 286 | mc++; 287 | } 288 | else 289 | mc++; 290 | if (mc == idx_seq) { 291 | msz++; 292 | if (mdebug) 293 | printf("POSITIVE MATCH for %s, %d of %d\n", idx_name, msz, idx_size); 294 | good_one++; 295 | if (msz == idx_size) { 296 | if (mdebug) 297 | printf("FULL MATCH for %s\n", idx_name); 298 | done++; 299 | } 300 | } 301 | } 302 | if (done) 303 | break; 304 | } 305 | fclose(f); 306 | if (done || msz) { 307 | if (mdebug) 308 | printf("Satisfying index found %s.\n", idx_name); 309 | if (strcmp(idx_name, "PRIMARY")) { 310 | if (idx_card < min_card) { 311 | printf("NOTICE: index %s (on %s) has very low cardinality, and will be skipped. 
Enable ninja mode to use it regardless.\n", idx_name, idx_col); 312 | if (good_one) 313 | exit(0); 314 | } 315 | else 316 | exit(0); 317 | } 318 | else 319 | exit(0); 320 | } 321 | exit(1); 322 | } 323 | 324 | /* 325 | ARGS: 326 | 2 > string to print 327 | */ 328 | 329 | static void pretty_print(argc, argv) 330 | int argc; 331 | char **argv; 332 | { 333 | int wc = 0; 334 | char *b, *p, *r; 335 | unsigned char sc; 336 | 337 | if (argc < 2) { 338 | fprintf(stderr, "%s: not enough arguments\n", argv[1]); 339 | exit(1); 340 | } 341 | r = strdup(argv[2]); 342 | b = r; 343 | for (p = r; *p; p++) { 344 | switch(*p) { 345 | case 0x20: 346 | case 0x09: 347 | case 0x0a: 348 | sc = *p; 349 | *p = 0x00; 350 | printf("%s", b); 351 | if (wc++ == 15) { 352 | printf("
"); 353 | wc=0; 354 | } 355 | else 356 | printf(" "); 357 | *p = sc; 358 | b = p + 1; 359 | break; 360 | } 361 | } 362 | printf("%s", b); 363 | } 364 | 365 | /* 366 | ARGS: 367 | 2 > where condition 368 | 3 > string to search for 369 | */ 370 | 371 | static void array_idx(argc, argv) 372 | int argc; 373 | char **argv; 374 | { 375 | char *b, *p, *r; 376 | unsigned char sc; 377 | int idx = 0; 378 | int qo = 0; 379 | 380 | if (argc < 3) { 381 | fprintf(stderr, "%s: not enough arguments\n", argv[1]); 382 | exit(1); 383 | } 384 | r = strdup(argv[2]); 385 | b = r; 386 | for (p = r; *p; p++) { 387 | switch(*p) { 388 | case 0x20: 389 | case 0x09: 390 | case 0x0a: 391 | sc = *p; 392 | *p = 0x00; 393 | if (u_quote_open(b)) { 394 | if (qo) 395 | qo=0; 396 | else 397 | qo=1; 398 | } 399 | if (!qo && !u_blank_string(b)) { 400 | if (u_check_if_match(b, argv[3], NULL, 0)) { 401 | printf("%d\n", idx); 402 | return; 403 | } 404 | } 405 | idx++; 406 | *p = sc; 407 | b = p + 1; 408 | break; 409 | } 410 | } 411 | if (u_quote_open(b)) { 412 | if (qo) 413 | qo=0; 414 | else 415 | qo=1; 416 | } 417 | if (!qo && !u_blank_string(b)) { 418 | if (u_check_if_match(b, argv[3], NULL, 0)) { 419 | printf("%d\n", idx); 420 | return; 421 | } 422 | } 423 | printf("-1\n"); 424 | } 425 | 426 | /* 427 | ARGS: 428 | 2 > query filename 429 | */ 430 | 431 | static void total_query_count(argc, argv) 432 | int argc; 433 | char **argv; 434 | { 435 | char *p; 436 | int q = 0; 437 | int c=0; 438 | FILE *f; 439 | char *wb; 440 | struct stat s; 441 | 442 | if (argc < 2) { 443 | fprintf(stderr, "%s: not enough arguments\n", argv[1]); 444 | exit(1); 445 | } 446 | if (stat(argv[2], &s) == -1) { 447 | fprintf(stderr, "%s: stat : %s: error %d\n", argv[1], argv[2], errno); 448 | exit(1); 449 | } 450 | if ((wb = calloc(s.st_size, 1)) == NULL) { 451 | fprintf(stderr, "out of memory\n"); 452 | exit(1); 453 | } 454 | if ((f = fopen(argv[2], "r")) != NULL) { 455 | fread(wb, s.st_size, 1, f); 456 | fclose(f); 457 | } 458 | 
for (p = wb; *p; p++) { 459 | if (*p == 0x27) { 460 | if (p > wb && *(p - 1) != 0x5c) 461 | q = !q; 462 | } 463 | if (*p == ';' && !q) 464 | c++; 465 | } 466 | printf("%d\n", c); 467 | } 468 | 469 | /* 470 | ARGS: 471 | 2 > where condition 472 | 3 > columns list 473 | */ 474 | 475 | static void verify_column_names(argc, argv) 476 | int argc; 477 | char **argv; 478 | { 479 | char *b, *p, *r, *i; 480 | unsigned char sc; 481 | static char cl[1048576]; 482 | static char *ign_list = "= IS NOT NULL LIKE AND OR > < BETWEEN <= >= IN ( ) , <>"; 483 | 484 | if (argc < 4) { /* needs argv[2] (where condition) and argv[3] (columns list) */ 485 | fprintf(stderr, "%s: not enough arguments\n", argv[1]); 486 | exit(1); 487 | } 488 | r = u_replace_in_string(argv[2], "=", " = "); 489 | r = u_replace_in_string(r, "(", " ( "); 490 | r = u_replace_in_string(r, ")", " ) "); 491 | r = u_replace_in_string(r, ",", " , "); 492 | if (debug) 493 | printf("verify_column: input string 1 --%s--\n", r); 494 | r = u_replace_in_string(r, "`.`.", "."); 495 | if (debug) 496 | printf("verify_column: input string 2 --%s--\n", r); 497 | r = u_replace_in_string(r, "`", " "); 498 | b = r; 499 | i = strdup(ign_list); 500 | if (debug) 501 | printf("verify_column: input string --%s--\n", r); 502 | for (p = r; *p; p++) { 503 | switch(*p) { 504 | case 0x20: 505 | case 0x09: 506 | case 0x0a: 507 | sc = *p; 508 | *p = 0x00; 509 | if (!u_blank_string(b)) 510 | if (!u_quote_open(r)) 511 | if (*b != '@') 512 | if (!u_string_is_number(b)) 513 | if (!u_quotes_in_string(b)) 514 | if (!u_check_if_match(b, i, NULL, 0)) 515 | if (!u_check_if_match(b, argv[3], cl, 0)) { 516 | printf("%s\n", b); 517 | exit(1); 518 | } 519 | *p = sc; 520 | b = p + 1; 521 | break; 522 | } 523 | } 524 | if (!u_blank_string(b)) 525 | if (!u_quote_open(r)) 526 | if (*b != '@') 527 | if (!u_string_is_number(b)) 528 | if (!u_quotes_in_string(b)) 529 | if (!u_check_if_match(b, i, NULL, 0)) 530 | if (!u_check_if_match(b, argv[3], cl, 0)) { 531 | printf("%s\n", b); 532 | exit(1); 533 | } 534 | printf("%s\n", cl); 
535 | } 536 | 537 | /* 538 | ARGS: 539 | 2 > where condition 540 | 3 > primary key 541 | */ 542 | 543 | static void check_pk_use(argc, argv) 544 | int argc; 545 | char **argv; 546 | { 547 | char *b, *p, *r; 548 | int c = 0, n; 549 | 550 | if (argc < 4) { /* needs argv[2] (where condition) and argv[3] (primary key) */ 551 | fprintf(stderr, "%s: not enough arguments\n", argv[1]); 552 | exit(1); 553 | } 554 | n = u_words_in_string(argv[3]); 555 | r = u_replace_in_string(argv[2], ", ", ","); 556 | b = r; 557 | for (p = r; *p; p++) { 558 | switch(*p) { 559 | case 0x20: 560 | case 0x09: 561 | case 0x0a: 562 | *p = 0x00; 563 | if (!u_blank_string(b)) 564 | c += u_check_if_match(b, argv[3], NULL, 1); 565 | b = p + 1; 566 | break; 567 | } 568 | } 569 | if (!u_blank_string(b)) 570 | c += u_check_if_match(b, argv[3], NULL, 1); 571 | printf("%d\n", c == n); 572 | } 573 | 574 | static int u_check_if_match(s, ml, cl, sp) 575 | char *s, *ml; 576 | char *cl; 577 | int sp; // special case 578 | { 579 | char *b, *p, *dn; 580 | 581 | if (debug) 582 | printf("Checking --%s--\n", s); 583 | b = ml; 584 | dn = strdup(s); 585 | if (sp) { 586 | if ((p = strchr(dn, '=')) != NULL) 587 | if (strlen(dn) > 1) 588 | *p = 0x00; 589 | s = dn; 590 | if ((p = strrchr(dn, '.')) != NULL) 591 | s = ++p; 592 | } 593 | for (p = ml; *p; p++) { 594 | switch(*p) { 595 | case 0x20: 596 | case 0x09: 597 | case 0x0a: 598 | *p = 0x00; 599 | if (debug) 600 | printf("Comparing %s with %s\n", s, b); 601 | if (!strcasecmp(s, b)) { 602 | *p = 0x20; 603 | if (debug) 604 | printf("MATCH\n"); 605 | if (cl) { 606 | strcat(cl, s); 607 | strcat(cl, " "); 608 | } 609 | free(dn); 610 | return(1); 611 | } 612 | *p = 0x20; 613 | b = ++p; 614 | break; 615 | } 616 | } 617 | if (debug) 618 | printf("Comparing %s with %s\n", s, b); 619 | if (!strcasecmp(s, b)) { 620 | if (debug) 621 | printf("MATCH\n"); 622 | if (cl) { 623 | strcat(cl, s); 624 | strcat(cl, " "); 625 | } 626 | free(dn); 627 | return(1); 628 | } 629 | free(dn); 630 | return(0); 631 | } 632 | 633 | /* 634 | ARGS: 635 | 2 > 
$vars_tmpf 636 | 3 > query 637 | 4 > $last_id 638 | */ 639 | 640 | #define MAX_VAR_VAL_LENGTH 65536 641 | 642 | static void insert_vars(argc, argv) 643 | int argc; 644 | char **argv; 645 | { 646 | FILE *f; 647 | char buf[MAX_VAR_VAL_LENGTH], replace_this[64], replace_with[MAX_VAR_VAL_LENGTH]; 648 | char *r, *p, *var_name, *var_value; 649 | 650 | if (argc < 4) { 651 | fprintf(stderr, "%s: not enough arguments\n", argv[1]); 652 | exit(1); 653 | } 654 | if (argc == 5) 655 | r = u_replace_in_string(argv[3], "@last_insert_id", argv[4]); 656 | else 657 | r = argv[3]; 658 | if ((f = fopen(argv[2], "r")) == NULL) { 659 | if (errno == ENOENT) { // file may be non existent if no vars in use 660 | printf("%s\n", r); 661 | return; 662 | } 663 | fprintf(stderr, "%s: fopen: %s: error %d\n", argv[1], argv[2], errno); 664 | exit(1); 665 | } 666 | while (fgets(buf, MAX_VAR_VAL_LENGTH - 1, f)) { 667 | *(buf + strlen(buf) - 1) = 0x00; 668 | if ((p = strchr(buf, 0x09)) == NULL) { 669 | fprintf(stderr, "%s: %s: format error\n", argv[1], argv[2]); 670 | exit(1); 671 | } 672 | *p = 0x00; 673 | var_name = buf; 674 | var_value = ++p; 675 | sprintf(replace_this, "@%s,", var_name); 676 | sprintf(replace_with, "'%s',", var_value); 677 | r = u_replace_in_string(r, replace_this, replace_with); 678 | sprintf(replace_this, "@%s)", var_name); 679 | sprintf(replace_with, "'%s')", var_value); 680 | r = u_replace_in_string(r, replace_this, replace_with); 681 | sprintf(replace_this, "@%s ", var_name); 682 | sprintf(replace_with, "'%s' ", var_value); 683 | r = u_replace_in_string(r, replace_this, replace_with); 684 | sprintf(replace_this, "@%s", var_name); // fine riga 685 | sprintf(replace_with, "'%s'", var_value); 686 | r = u_replace_in_string_eol(r, replace_this, replace_with); 687 | sprintf(replace_this, "@%s+", var_name); 688 | sprintf(replace_with, "'%s'+", var_value); 689 | r = u_replace_in_string(r, replace_this, replace_with); 690 | sprintf(replace_this, "@%s-", var_name); 691 | 
sprintf(replace_with, "'%s'-", var_value); 692 | r = u_replace_in_string(r, replace_this, replace_with); 693 | } 694 | fclose(f); 695 | printf("%s\n", r); 696 | } 697 | 698 | /* 699 | search in s1 for s2 and replace with s3 700 | */ 701 | 702 | static char *u_replace_in_string(s1, s2, s3) 703 | char *s1, *s2, *s3; 704 | { 705 | char *b, *p, *p2, *p3; 706 | 707 | if ((b = malloc(strlen(s1) *16)) == NULL) { 708 | fprintf(stderr, "out of memory!\n"); 709 | exit(1); 710 | } 711 | memset(b, 0x00, strlen(s1) * 16); 712 | p2 = b; 713 | for (p = s1; *p; p++) { 714 | if (!strncasecmp(p, s2, strlen(s2))) { 715 | for (p3 = s3; *p3; p3++) 716 | *p2++ = *p3; 717 | p += strlen(s2) - 1; 718 | } 719 | else 720 | *p2++ = *p; 721 | } 722 | return(b); 723 | } 724 | 725 | static char *u_replace_in_string_eol(s1, s2, s3) 726 | char *s1, *s2, *s3; 727 | { 728 | char *b, *p, *p2, *p3; 729 | 730 | if ((b = malloc(strlen(s1) * 16)) == NULL) { 731 | fprintf(stderr, "out of memory!\n"); 732 | exit(1); 733 | } 734 | memset(b, 0x00, strlen(s1) * 16); 735 | p2 = b; 736 | for (p = s1; *p; p++) { 737 | if (!strncasecmp(p, s2, strlen(s2))) { 738 | if (*(p + strlen(s2)) == 0x00) { 739 | for (p3 = s3; *p3; p3++) 740 | *p2++ = *p3; 741 | break; 742 | } 743 | else 744 | *p2++ = *p; 745 | } 746 | else 747 | *p2++ = *p; 748 | } 749 | return(b); 750 | } 751 | 752 | static int u_blank_string(s) 753 | char *s; 754 | { 755 | char *p; 756 | 757 | for (p = s; *p; p++) { 758 | switch(*p) { 759 | case 0x20: 760 | case 0x09: 761 | case 0x0a: 762 | break; 763 | default: 764 | return(0); 765 | } 766 | } 767 | return(1); 768 | } 769 | 770 | static int u_quotes_in_string(s) 771 | char *s; 772 | { 773 | char *p; 774 | 775 | for (p = s; *p; p++) 776 | if (*p == 0x27) 777 | return(1); 778 | return(0); 779 | } 780 | 781 | static int u_words_in_string(s) 782 | char *s; 783 | { 784 | int c = 0; 785 | char *b, *p; 786 | unsigned char sc; 787 | 788 | b = s; 789 | for (p = s; *p; p++) { 790 | switch(*p) { 791 | case 0x20: 
792 | case 0x09: 793 | case 0x0a: 794 | sc = *p; 795 | *p = 0x00; 796 | if (!u_blank_string(b)) 797 | c++; 798 | *p = sc; 799 | b = ++p; 800 | break; 801 | } 802 | } 803 | if (!u_blank_string(b)) 804 | c++; 805 | return(c); 806 | } 807 | 808 | static int u_quote_open(s) 809 | char *s; 810 | { 811 | char *p; 812 | int c = 0; 813 | 814 | for (p = s; *p; p++) { 815 | if (*p == 0x27) { 816 | //if (p == s) 817 | //continue; 818 | if (p == s || *(p - 1) != 0x5c) 819 | c++; 820 | } 821 | } 822 | return(c%2); 823 | } 824 | 825 | static int u_string_is_number(s) 826 | char *s; 827 | { 828 | char *p; 829 | 830 | for (p = s; *p; p++) { 831 | switch(*p) { 832 | case '0': 833 | case '1': 834 | case '2': 835 | case '3': 836 | case '4': 837 | case '5': 838 | case '6': 839 | case '7': 840 | case '8': 841 | case '9': 842 | case '.': 843 | case '-': 844 | break; 845 | default: 846 | return(0); 847 | } 848 | } 849 | return(1); 850 | } 851 | -------------------------------------------------------------------------------- /bakasql/images/bakasql.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RickPizzi/pztools/827473bf0e2d71dcfdb3f4b94b29a38500796137/bakasql/images/bakasql.png -------------------------------------------------------------------------------- /bakasql/images/bb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RickPizzi/pztools/827473bf0e2d71dcfdb3f4b94b29a38500796137/bakasql/images/bb.png -------------------------------------------------------------------------------- /bakasql/images/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RickPizzi/pztools/827473bf0e2d71dcfdb3f4b94b29a38500796137/bakasql/images/favicon.png -------------------------------------------------------------------------------- /bakasql/screenshot.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/RickPizzi/pztools/827473bf0e2d71dcfdb3f4b94b29a38500796137/bakasql/screenshot.pdf -------------------------------------------------------------------------------- /bakasql/version.h: -------------------------------------------------------------------------------- 1 | #define VERSION "1.9.x" 2 | -------------------------------------------------------------------------------- /binlog_parser/README: -------------------------------------------------------------------------------- 1 | A small C program and wrapper script that will trace any DELETE happening 2 | on a master server into a separate table. Useful to track rows deleted 3 | that often go unnoticed when populating a BI platform using the usual 4 | methods. 5 | 6 | The binary logs are examined at regular intervals and any DELETE that involves 7 | one of the configured tables is tracked into a separate table, including 8 | the table schema and name and the where statement. 9 | 10 | If using with galera cluster, install and run the script and program on 11 | all nodes, it will only do its work on the node that is current master. 
12 | 13 | HTH 14 | 15 | Rick 16 | -------------------------------------------------------------------------------- /binlog_parser/binlog_parser.c: -------------------------------------------------------------------------------- 1 | # 2 | # binlog_parser.c - ROW format compatible 3 | # Rick Pizzi - pizzi@leopardus.com 4 | # 5 | # compile with: gcc -O3 -o binlog_parser binlog_parser.c 6 | # 7 | # galera cluster compatible - must run on all nodes, will only 8 | # track deletes that happen on the master node 9 | # 10 | # conf file is just a list of schema qualified table names 11 | # that you want to track 12 | 13 | 14 | /* 15 | create tracking table as follows: 16 | 17 | create table deletions ( 18 | id bigint non null auto_increment priamry key, 19 | binlog_name varchar(100), 20 | event_date datetime, 21 | schema_name varchar (100), 22 | table_name varchar(100), 23 | where_condition varchar(200), 24 | key idx1(schema_name, table_name) 25 | ); 26 | 27 | */ 28 | 29 | #define _GNU_SOURCE 30 | #include 31 | #include 32 | #include 33 | #include 34 | 35 | #define CONFIG_FILE "/etc/binlog_parser.conf" 36 | #define TRACKING_SCHEMA "bi_tracking" 37 | #define TRACKING_TABLE "deletions" 38 | #define MAX_TABLES 100 39 | 40 | #define TIMESTAMP_ENTRY "SET TIMESTAMP=" 41 | 42 | #define DELETE_ENTRY "# DELETE FROM " 43 | #define MAP_ENTRY "Table_map: " 44 | #define SERVER_ID "server id" 45 | 46 | static char *escape(); 47 | static void load_config(); 48 | static int table_configured(); 49 | void exit(); 50 | 51 | struct FilterT { 52 | char *table_schema; 53 | char *table_name; 54 | }; 55 | 56 | static struct FilterT filtered_tables[MAX_TABLES]; 57 | 58 | 59 | int main(argc, argv) 60 | int argc; 61 | char **argv; 62 | { 63 | char *p; 64 | char *q; 65 | char *z; 66 | char *k, *k2; 67 | time_t last_ts; 68 | char serverid[4096]; 69 | char where[4096]; 70 | char buf[8192]; 71 | char st[256]; 72 | char table[256], schema[256]; 73 | int delete = 0; 74 | 75 | if (argc != 2) { 76 | 
fprintf(stderr, "usage: binlog_parser \n"); 77 | exit(1); 78 | } 79 | load_config(); 80 | memset(serverid,0x00,sizeof(serverid)); 81 | while(fgets(buf, sizeof(buf) - 1, stdin)) { 82 | *(buf + strlen(buf) - 1) = 0x00; 83 | // printf("DEBUG: %s\n", buf); 84 | if (!strncasecmp(buf, TIMESTAMP_ENTRY, strlen(TIMESTAMP_ENTRY))) { 85 | if ((p = strchr(buf, '/')) == NULL) 86 | fprintf(stderr, "unparsable timestamp\n"); 87 | else 88 | *p = 0x00; 89 | last_ts = (time_t) atol(buf + strlen(TIMESTAMP_ENTRY)); 90 | continue; 91 | } 92 | 93 | 94 | 95 | if (!strncasecmp(buf, DELETE_ENTRY, strlen(DELETE_ENTRY))) { 96 | if ((p = strcasestr(buf, "where ")) != NULL) { 97 | strcpy(where, escape(p + 6)); 98 | delete++; 99 | continue; 100 | } 101 | else { 102 | fprintf(stderr, "Unexpected error looking for where\n"); 103 | exit(1); 104 | } 105 | } 106 | 107 | 108 | 109 | 110 | if ((k = strcasestr(buf, "server id")) != NULL) { 111 | strcpy(serverid, k + 10); 112 | if ((k2 = strchr(serverid, ' ')) != NULL) 113 | *k2 = 0x00; 114 | } 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | if (delete && (q = strcasestr(buf, MAP_ENTRY))!= NULL) { 124 | if ((z = strchr(q + 11, ' ')) != NULL) { 125 | *z = 0x00; 126 | memset(st, 0x00, 256); 127 | z=st; 128 | for (p = q + 11; *p; p++) 129 | if (*p != '`') 130 | *z++ = *p; 131 | if ((p = strchr(st, '.')) != NULL) { 132 | *p=0x00; 133 | strcpy(schema, st); 134 | strcpy(table, ++p); 135 | } 136 | else { 137 | fprintf(stderr, "Unexpected error parsing schema table\n"); 138 | exit(1); 139 | } 140 | if (table_configured(schema, table)) 141 | printf("INSERT INTO %s.%s values (NULL,'%s', FROM_UNIXTIME(%u), '%s', '%s', '%s'); // galeraserverid=%s\n", 142 | TRACKING_SCHEMA, TRACKING_TABLE, argv[1],(unsigned int)last_ts, schema, table, where, serverid); 143 | delete--; 144 | 145 | } 146 | else { 147 | fprintf(stderr, "Unexpected error looking for end of table map\n"); 148 | exit(1); 149 | } 150 | } 151 | } 152 | } 153 | 154 | 155 | static char *escape(s) 156 | 
char *s; 157 | { 158 | static char buf[8192]; 159 | char *p, *sp; 160 | 161 | memset(buf, 0x00, sizeof(buf)); 162 | sp = buf; 163 | for (p = s; *p; p++) { 164 | if (*p == '\'' || *p == '\\') 165 | *sp++ = '\\'; 166 | *sp++ = *p; 167 | } 168 | return(buf); 169 | } 170 | 171 | static void load_config() 172 | { 173 | FILE *f; 174 | char buf[1024]; 175 | char *p; 176 | int i = 0; 177 | 178 | if ((f = fopen(CONFIG_FILE, "r")) == NULL) { 179 | perror(CONFIG_FILE); 180 | exit(1); 181 | } 182 | while (fgets(buf, 1023, f)) { 183 | if (i == MAX_TABLES) { 184 | fprintf(stderr, "More than %d tables configured, ignoring the extra ones\n", MAX_TABLES); 185 | break; 186 | } 187 | *(buf + strlen(buf) - 1) = 0x00; 188 | if ((p = strchr(buf, '.')) == NULL) { 189 | fprintf(stderr, "Syntax error near line '%s'\n", buf); 190 | exit(1); 191 | } 192 | else 193 | *p = 0x00; 194 | filtered_tables[i].table_schema = strdup(buf); 195 | filtered_tables[i++].table_name = strdup(++p); 196 | } 197 | fclose(f); 198 | } 199 | 200 | static int table_configured(s, n) 201 | char *s; 202 | char *n; 203 | { 204 | int i; 205 | 206 | for (i = 0; i < MAX_TABLES; i ++) { 207 | if (filtered_tables[i].table_name == NULL) 208 | break; 209 | if (!strcasecmp(s, filtered_tables[i].table_schema) && !strcasecmp(n, filtered_tables[i].table_name)) 210 | return(1); 211 | } 212 | return(0); 213 | } 214 | -------------------------------------------------------------------------------- /binlog_parser/binlog_tracker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # tracks selected delete statements in binary logs, saving them in an auxiliary table 4 | # should be run from crontab every 15 minutes or so. 
it relies on a small C program called binlog_parser 5 | # config file in /etc/binlog_parser.conf - see C program for more details 6 | # 7 | # pizzi@leopardus.com 8 | # 9 | HOME=/localhome/dbadm 10 | TRACKING_USER=delete_tracker 11 | TRACKING_PASSWORD=tr4ck3r@BI 12 | LOCKFILE=/tmp/binlog_tracker.pid 13 | 14 | 15 | server_id=($(echo "show variables like 'server_id'" | mysql -Ar -f -u $TRACKING_USER -p$TRACKING_PASSWORD | grep server_id | awk '{print $2}')) 16 | 17 | search_serverid="galeraserverid="$server_id 18 | #echo "### $search_serverid" 19 | 20 | trap 'rm -f $LOCKFILE' 0 21 | # 22 | if [ -f $LOCKFILE ] 23 | then 24 | kill -0 $(cat $LOCKFILE) && exit 1 25 | fi 26 | echo $$ > $LOCKFILE 27 | watermark=$HOME/.deletetracker_watermark 28 | [ ! -f $watermark ] && touch -d "1 year ago" $watermark 29 | for binlog in $(find $(dirname $(grep ^log_bin /etc/my.cnf | cut -d"=" -f2)) -type f -name $(basename $(grep ^log_bin /etc/my.cnf | cut -d"=" -f2)).[0-9]\* ! -mmin -10 -newer $watermark -print | sort) 30 | do 31 | echo "BEGIN;" 32 | mysqlbinlog --base64-output=decode-rows -vv $binlog | $HOME/bin/binlog_parser_new_row $(basename $binlog) | grep "$search_serverid" 33 | echo "COMMIT;" 34 | touch -r $binlog $watermark 35 | done | tee /tmp/mysql_debug.log | mysql -A -f -u $TRACKING_USER -p$TRACKING_PASSWORD 2>/dev/null 36 | exit 0 37 | -------------------------------------------------------------------------------- /buffer_warmer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # buffer pool warmer 4 | # picks traffic from one server and replicates it to another one, checking for 5 | # number of new slow queries and acting upon it 6 | # 7 | # when max $target new slow queries are recorded on target server for $minok consecutive 8 | # times, the target buffer pool is considered warm. 
9 | pt-query-digest is killed and restarted at every iteration because otherwise a single 10 | long running query would delay the iteration and weaken script's effectiveness 11 | # 12 | # rpizzi@blackbirdit.com 13 | # 14 | SRC_DSN="h=1.2.3.4,u=root,p=XXXX" 15 | DSTUSER=root 16 | DSTPASS=XXXXX 17 | DSTHOST=localhost 18 | # 19 | # 20 | target=0 # how many new slow queries are acceptable to mark one iteration OK (max) 21 | minok=3 # number of consecutive OK iterations to end script (DB is warm) 22 | grace=36 # number of initial iterations where we don't check anything 23 | max_time=600 # exit after this time has elapsed, in seconds, even if DB not warm 24 | # 25 | dst_dsn="h=$DSTHOST,u=$DSTUSER,p=$DSTPASS" 26 | lastsq=0 27 | ok=0 28 | start=$(date +%s) 29 | timelimit=$(expr $start + $max_time) # honor the configurable limit instead of a hard-coded 600 30 | while true 31 | do 32 | /usr/bin/pt-query-digest --processlist $SRC_DSN --interval 1 --filter '$event->{arg} =~ m/^SELECT/i' --execute $dst_dsn >/dev/null 2>&1 & 33 | pid=$! 34 | echo "spawned pt-query-digest pid $pid" 35 | sq=$(mysqladmin -h $DSTHOST -u $DSTUSER -p$DSTPASS status | cut -d " " -f 12) # use configured target credentials; $PASS was undefined 36 | echo "current slow query counter value $sq" 37 | if [ $grace -gt 0 ] 38 | then 39 | grace=$(expr $grace - 1) 40 | echo "sleeping 5 seconds (grace period)" 41 | else 42 | if [ $lastsq -gt 0 ] 43 | then 44 | diff=$(expr $sq - $lastsq) 45 | echo "found $diff new slow queries" 46 | if [ $diff -le $target ] 47 | then 48 | ok=$(expr $ok + 1) 49 | if [ $ok -eq $minok ] 50 | then 51 | break 52 | else 53 | echo target $target met, $ok/$minok 54 | fi 55 | else 56 | echo target is $target, not met 57 | ok=0 58 | fi 59 | fi 60 | lastsq=$sq 61 | echo "sleeping 5 seconds" 62 | fi 63 | sleep 5 64 | echo "killing query digest pid $pid" 65 | kill -9 $pid # -9 needed here 66 | curtime=$(date +%s) 67 | if [ $curtime -gt $timelimit ] 68 | then 69 | echo "time limit of $max_time seconds reached, exiting" 70 | exit 0 71 | fi 72 | done 2>/dev/null 73 | echo "target of $target met $ok/$minok - done" 74 | 
exit 0 75 | -------------------------------------------------------------------------------- /bump_autoinc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # alter all tables in specified schemas, bumping the auto_increment value by $BUMP percent 4 | # rpizzi@blackbirdit.com 5 | # 6 | SCHEMAS="schema1 schema2" 7 | BUMP=0.25 8 | # 9 | schema_list=$(for s in $SCHEMAS; do echo "'"$s"'";done | tr "[\n]" "[,]" | sed -e "s/,$//g") 10 | bump=$(echo "scale=3; ($BUMP + 100) / 100" | bc) 11 | QUERY="select distinct c.table_schema, c.table_name from columns c left join key_column_usage k on c.table_schema = k.table_schema and c.table_name = k.table_name and c.column_name = k.column_name where c.TABLE_SCHEMA IN ($schema_list) and c.COLUMN_KEY <> '' and c.DATA_TYPE IN ('bigint', 'int', 'mediumint', 'smallint', 'tinyint') and c.extra = 'auto_increment' and k.ordinal_position is not null group by c.table_schema, c.table_name, c.column_name" 12 | IFS=" 13 | " 14 | echo "-- altering all tables on $SCHEMAS" 15 | echo "-- bumping autoincrement value by $BUMP%" 16 | echo "--" 17 | for row in $(echo "$QUERY" | mysql -N -r information_schema) 18 | do 19 | schema=$(echo $row | cut -f 1) 20 | table=$(echo $row | cut -f 2) 21 | ai_val=$(echo "show create table $schema.$table\G" | mysql -A -N -r | fgrep "AUTO_INCREMENT=" | cut -d"=" -f 3 | cut -d" " -f 1) 22 | [ "$ai_val" = "" ] && ai_val=0 23 | new_val=$(echo "$ai_val * $bump + 1" | bc | cut -d"." 
-f 1) 24 | echo "-- on $schema.$table current value $ai_val" 25 | echo "ALTER TABLE $schema.$table AUTO_INCREMENT=$new_val;" 26 | done 27 | echo 1>&2 28 | exit 0 29 | 30 | -------------------------------------------------------------------------------- /change_binlog_path.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # script that changes the path where the binlogs are being written without stopping the server or the service, 4 | # by using a symbolic link 5 | # 6 | # (C) pizzi@leopardus.com Sept-2016 7 | # 8 | # Step 1: configure the variable below to point to new path 9 | # Step 2: run the script as root, after ensuring that the binlog being written has just been opened, because you need some 10 | # time for next step. 11 | # Step 3: issue a FLUSH BINARY LOGS on the server, it will start writing the binlogs in the new path 12 | # 13 | # Note: slave(s) may be affected by the operation; to recover, just issue a STOP SLAVE; START SLAVE and that will clear the error 14 | # 15 | # 16 | NEW_BINLOG_DIR=/db/binlog2 17 | # 18 | binlog_dir=$(dirname $(grep "log_bin" /etc/my.cnf | cut -d "=" -f 2 | tr -d " ")) 19 | binlog_base=$(basename $(grep "log_bin" /etc/my.cnf | cut -d "=" -f 2 | tr -d " ")) 20 | curbin=$(cat $binlog_dir/$binlog_base.index | tail -1 | cut -d "." 
-f 2) 21 | touch $NEW_BINLOG_DIR/$binlog_base.$curbin 22 | mv $binlog_dir ${binlog_dir}_orig 23 | ln -s $NEW_BINLOG_DIR $binlog_dir 24 | exit 0 25 | -------------------------------------------------------------------------------- /compress_binlog.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # automatic compression of MySQL binlog files 4 | # riccardo.pizzi@lastminute.com 5 | # 6 | # 7 | BINLOG_DIR=/storage/binlog 8 | COMPRESSOR=pigz 9 | COMPRESS_OPTIONS="-p 4" 10 | COMPRESS_EXTENSION=gz 11 | MIN_AGE_FOR_COMPRESSION=60 12 | REPL_USER= 13 | REPL_PASSWORD= 14 | MASTER_HOST=localhost 15 | # 16 | tmpf=/tmp/compress_binlog.lock 17 | echo "Starting $(date)" 18 | if [ -f $tmpf ] 19 | then 20 | echo "Already in progress, exiting" 21 | exit 0 22 | fi 23 | trap 'rm -f $tmpf' 0 24 | touch $tmpf 25 | base=$(basename $(grep ^log_bin /etc/my.cnf | cut -d"=" -f 2)) 26 | min_pos=1000000 27 | sc=0 28 | for slave in $(echo "select substring_index(host, ':', 1) from information_schema.processlist where command = 'binlog dump'" | mysql -ANr -h $MASTER_HOST -u $REPL_USER -p$REPL_PASSWORD 2>/dev/null) 29 | do 30 | if [ -s /tmp/slave_$slave.pos ] # check if there is an uploaded position first 31 | then 32 | sp=$(cat /tmp/slave_$slave.pos) 33 | else 34 | sp=$(echo "show slave status\G" | mysql -Ar -h $slave -u $REPL_USER -p$REPL_PASSWORD 2>/dev/null | fgrep " Master_Log_File" | cut -d ":" -f 2) 35 | fi 36 | if [ "$sp" != "" ] 37 | then 38 | echo "* slave $slave last downloaded binlog $sp" 39 | [ ${sp#*.} -lt $min_pos ] && min_pos=${sp#*.} 40 | sc=$((sc+1)) 41 | fi 42 | done 43 | if [ $sc -eq 0 ] 44 | then 45 | echo "* no slaves detected" 46 | else 47 | echo "* last downloaded file of most lagging slave: $min_pos" 48 | fi 49 | cd $BINLOG_DIR 50 | for f in $(find $BINLOG_DIR -name $base.[0-9]\* -mmin +$MIN_AGE_FOR_COMPRESSION | sort) 51 | do 52 | [ ${f#*.} -ge $min_pos ] && break 53 | if [[ $(file -b $f) == "MySQL"* ]] 54 | 
then 55 | echo "> compressing $(basename $f)" 56 | $COMPRESSOR $COMPRESS_OPTIONS $f 57 | mv $f.$COMPRESS_EXTENSION $f 58 | fi 59 | done 60 | echo "Completed $(date)" 61 | -------------------------------------------------------------------------------- /dbgranter/README: -------------------------------------------------------------------------------- 1 | DBGranter 2 | 3 | a CGI script written in bash that can be used to quickly edit basic grants for a MySQL user. 4 | 5 | The user using this tool must have proper grants on the database from the host the CGI script runs from (GRANT OPTION). 6 | 7 | The script runs under apache httpd, put it in the cgi-bin directory and configure accordingly. 8 | 9 | WATCH OUT: this is early stage software. Bugs are there. 10 | 11 | Feedbacks welcome! 12 | Enjoy 13 | -------------------------------------------------------------------------------- /dbgranter/granter.cgi: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | VERSION="0.4.5" 4 | # 5 | # NOTICE: CONFIG FILE 6 | # Config file /etc/dbgranter.conf should contain the following: 7 | # user=tool_user 8 | # password=tool_password 9 | # this user needs SELECT privileges on all schemas from the machine the tool runs on 10 | # 11 | service_user=$(grep ^user /etc/dbgranter.conf | cut -d"=" -f 2) 12 | service_password=$(grep ^password /etc/dbgranter.conf | cut -d"=" -f 2) 13 | # 14 | closing_tags="" 15 | genprivs="USAGE|FILE|PROCESS" 16 | lgrant="" 17 | rgrant="" 18 | grants="" 19 | changes=0 20 | tmpf=/tmp/dbgranter.$$ 21 | trap 'rm -f $tmpf' 0 22 | set -f 23 | vpn_mask="$(echo $REMOTE_ADDR | cut -d"." 
-f 1-3).%" 24 | # 25 | 26 | unescape_input() 27 | { 28 | echo "$1" | sed -e "s/%40/@/g" -e "s/%21/!/g" -e "s/%60/\`/g" -e "s/+/ /g" -e "s/%3D/=/g" -e "s/%2B/+/g" -e "s/%3B/;/g" -e "s/%27/'/g" -e "s/%3A/:/g" -e "s/%28/(/g" -e "s/%29/)/g" -e "s/%2C/,/g" -e "s/%23/#/g" -e "s/%22/\"/g" -e "s/%3C//g" -e "s/%26/\&/g" -e "s/%7B/{/g" -e "s/%7D/}/g" -e "s/%5B/[/g" -e "s/%5D/]/g" -e "s/%5C/\\\/g" -e "s/%25/%/g" -e "s/%7C/|/g" -e "s/%09/ /g" -e "s/%7E/~/g" -e "s/%80/\\x80/g" -e "s/%81/\\x81/g" -e "s/%82/\\x82/g" -e "s/%83/\\x83/g" -e "s/%84/\\x84/g" -e "s/%85/\\x85/g" -e "s/%86/\\x86/g" -e "s/%87/\\x87/g" -e "s/%88/\\x88/g" -e "s/%89/\\x89/g" -e "s/%8A/\\x8A/g" -e "s/%8B/\\x8B/g" -e "s/%8C/\\x8C/g" -e "s/%8D/\\x8D/g" -e "s/%8E/\\x8E/g" -e "s/%8F/\\x8F/g" -e "s/%90/\\x90/g" -e "s/%91/\\x91/g" -e "s/%92/\\x92/g" -e "s/%93/\\x93/g" -e "s/%94/\\x94/g" -e "s/%95/\\x95/g" -e "s/%96/\\x96/g" -e "s/%97/\\x97/g" -e "s/%98/\\x98/g" -e "s/%99/\\x99/g" -e "s/%9A/\\x9A/g" -e "s/%9B/\\x9B/g" -e "s/%9C/\\x9C/g" -e "s/%9D/\\x9D/g" -e "s/%9E/\\x9E/g" -e "s/%9F/\\x9F/g" -e "s/%A0/\\xA0/g" -e "s/%A1/\\xA1/g" -e "s/%A2/\\xA2/g" -e "s/%A3/\\xA3/g" -e "s/%A4/\\xA4/g" -e "s/%A5/\\xA5/g" -e "s/%A6/\\xA6/g" -e "s/%A7/\\xA7/g" -e "s/%A8/\\xA8/g" -e "s/%A9/\\xA9/g" -e "s/%AA/\\xAA/g" -e "s/%AB/\\xAB/g" -e "s/%AC/\\xAC/g" -e "s/%AD/\\xAD/g" -e "s/%AE/\\xAE/g" -e "s/%AF/\\xAF/g" -e "s/%B0/\\xB0/g" -e "s/%B1/\\xB1/g" -e "s/%B2/\\xB2/g" -e "s/%B3/\\xB3/g" -e "s/%B4/\\xB4/g" -e "s/%B5/\\xB5/g" -e "s/%B6/\\xB6/g" -e "s/%B7/\\xB7/g" -e "s/%B8/\\xB8/g" -e "s/%B9/\\xB9/g" -e "s/%BA/\\xBA/g" -e "s/%BB/\\xBB/g" -e "s/%BC/\\xBC/g" -e "s/%BD/\\xBD/g" -e "s/%BE/\\xBE/g" -e "s/%BF/\\xBF/g" -e "s/%C0/\\xC0/g" -e "s/%C1/\\xC1/g" -e "s/%C2/\\xC2/g" -e "s/%C3/\\xC3/g" -e "s/%C4/\\xC4/g" -e "s/%C5/\\xC5/g" -e "s/%C6/\\xC6/g" -e "s/%C7/\\xC7/g" -e "s/%C8/\\xC8/g" -e "s/%C9/\\xC9/g" -e "s/%CA/\\xCA/g" -e "s/%CB/\\xCB/g" -e "s/%CC/\\xCC/g" -e "s/%CD/\\xCD/g" -e "s/%CE/\\xCE/g" -e "s/%CF/\\xCF/g" -e "s/%D0/\\xD0/g" -e 
"s/%D1/\\xD1/g" -e "s/%D2/\\xD2/g" -e "s/%D3/\\xD3/g" -e "s/%D4/\\xD4/g" -e "s/%D5/\\xD5/g" -e "s/%D6/\\xD6/g" -e "s/%D7/\\xD7/g" -e "s/%D8/\\xD8/g" -e "s/%D9/\\xD9/g" -e "s/%DA/\\xDA/g" -e "s/%DB/\\xDB/g" -e "s/%DC/\\xDC/g" -e "s/%DD/\\xDD/g" -e "s/%DE/\\xDE/g" -e "s/%DF/\\xDF/g" -e "s/%E0/\\xE0/g" -e "s/%E1/\\xE1/g" -e "s/%E2/\\xE2/g" -e "s/%E3/\\xE3/g" -e "s/%E4/\\xE4/g" -e "s/%E5/\\xE5/g" -e "s/%E6/\\xE6/g" -e "s/%E7/\\xE7/g" -e "s/%E8/\\xE8/g" -e "s/%E9/\\xE9/g" -e "s/%EA/\\xEA/g" -e "s/%EB/\\xEB/g" -e "s/%EC/\\xEC/g" -e "s/%ED/\\xED/g" -e "s/%EE/\\xEE/g" -e "s/%EF/\\xEF/g" -e "s/%F0/\\xF0/g" -e "s/%F1/\\xF1/g" -e "s/%F2/\\xF2/g" -e "s/%F3/\\xF3/g" -e "s/%F4/\\xF4/g" -e "s/%F5/\\xF5/g" -e "s/%F6/\\xF6/g" -e "s/%F7/\\xF7/g" -e "s/%F8/\\xF8/g" -e "s/%F9/\\xF9/g" -e "s/%FA/\\xFA/g" -e "s/%FB/\\xFB/g" -e "s/%FC/\\xFC/g" -e "s/%FD/\\xFD/g" -e "s/%FE/\\xFE/g" -e "s/%FF/\\xFF/g" -e "s/'/\\\'/g" 29 | } 30 | 31 | post_checks() 32 | { 33 | post_error=0 34 | if [ "$host" = "" ] 35 | then 36 | display "Please specify a server" 1 37 | post_error=1 38 | return 39 | fi 40 | err=$(mysqladmin -u "$service_user" -p"$service_password" -h"$host" ping 2>&1 | fgrep error | cut -d":" -f2-) 41 | if [ "$err" != "" ] 42 | then 43 | display "$err" 1 44 | post_error=1 45 | return 46 | fi 47 | if [ "$user" = "" ] 48 | then 49 | display "Please specify your user" 1 50 | post_error=1 51 | return 52 | fi 53 | if [ "$user" = "root" ] 54 | then 55 | display "Root access is not allowed" 1 56 | post_error=1 57 | return 58 | fi 59 | if [ "$password" = "" ] 60 | then 61 | display "Please specify your password" 1 62 | post_error=1 63 | return 64 | fi 65 | password=$(unescape_input "$password") 66 | pass_ok=$(echo "select if(password('$password') = password, 1, 0) from mysql.user where user = '$user'" | mysql -ANr -u "$service_user" -p"$service_password" -h"$host" 2>/dev/null) 67 | if [ $pass_ok -eq 0 ] 68 | then 69 | display "username/password combination is incorrect" 1 70 | post_error=1 71 | 
return 72 | fi 73 | can_grant=$(echo "select if(Grant_priv = 'Y', 1, 0) from mysql.user where user = '$user'" | mysql -ANr -u "$user" -p"$password" -h"$host" 2>/dev/null) 74 | [ "$can_grant" = "" ] && can_grant=0 75 | 76 | if [ "$(echo "show variables like 'read_only'" | mysql -ANr -u "$service_user" -p"$service_password" -h"$host" 2>&1 | cut -f 2)" != "OFF" ] 77 | then 78 | if [ $can_grant -eq 1 ] 79 | then 80 | display "This instance is READ ONLY" 1 81 | post_error=1 82 | return 83 | fi 84 | fi 85 | if [ "$lgrant" = "" ] 86 | then 87 | display "Please specify the grant" 0 88 | post_error=2 89 | return 90 | fi 91 | if [ $can_grant -eq 0 -a "$lgrant" != "$user" ] 92 | then 93 | display "You are allowed to see our own grants only" 1 94 | post_error=2 95 | return 96 | fi 97 | rgrant=$(unescape_input "$rgrant") 98 | if [ "$rgrant" = "" ] 99 | then 100 | display "Please specify the grant" 0 101 | post_error=2 102 | return 103 | fi 104 | if [ "$schema" = "" ] 105 | then 106 | display "Please specify schema name" 0 107 | post_error=2 108 | return 109 | fi 110 | res=$(echo "show databases like '$schema'" | mysql -ANr -u "$service_user" -p"$service_password" -h"$host" 2>&1) 111 | if [ "$res" != "$schema" ] 112 | then 113 | display "No such schema $schema" 1 114 | post_error=2 115 | return 116 | fi 117 | grants=$(echo "show grants for '$lgrant'@'$rgrant'" | mysql -ANr -u "$service_user" -p"$service_password" -h"$host" 2>/dev/null | tr -d "[\`]" | egrep "$genprivs") 118 | if [ "$grants" = "" ] 119 | then 120 | display "No USAGE for '$lgrant'@'$rgrant'" 1 121 | post_error=2 122 | return 123 | fi 124 | grants=$(echo "show grants for '$lgrant'@'$rgrant'" | mysql -ANr -u "$service_user" -p"$service_password" -h"$host" 2>/dev/null | tr -d "[\`]" | fgrep "ON $schema.") 125 | if [ "$grants" = "" ] 126 | then 127 | display "WARNING: no grants found for '$lgrant'@'$rgrant'" 2 128 | fi 129 | } 130 | 131 | display() { 132 | escaped=$(echo "$1" | sed -e "s/\%/%%/g") 133 | case "$2" in 
134 | 0) message="$message$escaped
";; 135 | 1) message="$messageERROR: $escaped
";; 136 | 2) message="$message$escaped
";; 137 | 3) message="$message$escaped
";; 138 | esac 139 | } 140 | 141 | show_form() 142 | { 143 | printf "DBGranter version: $VERSION

" 144 | printf "\n" "$vpn_mask" 145 | printf "
\n" 146 | printf "
\n" 147 | printf "\n" 148 | printf "\n" 149 | printf "\n" 150 | printf "\n" 151 | printf "\n" 167 | printf "\n" 168 | printf "\n" 169 | printf "
Host:
User:
Password:
 
\n" 152 | printf "
\n" "$1" 153 | printf "\n" 154 | printf "\n" 155 | if [ $can_grant -eq 1 ] 156 | then 157 | printf "\n" "$lgrant" "$rgrant" 158 | else 159 | printf "\n" "$user" "$vpn_mask" 160 | fi 161 | printf "\n" 162 | printf "\n" 163 | printf "\n" "$SCRIPT_NAME" 165 | printf "
 
Grant:@
Grant:@
Schema:
 
\n" 164 | printf "START OVER
\n" 166 | printf "
 
$message
\n" 170 | } 171 | 172 | format() 173 | { 174 | s="" 175 | u="" 176 | i="" 177 | d="" 178 | if [[ $2 == *"SELECT"* ]] 179 | then 180 | s=" CHECKED" 181 | [ $can_grant -eq 1 ] && printf "\n" "$1" 182 | fi 183 | if [[ $2 == *"INSERT"* ]] 184 | then 185 | i=" CHECKED" 186 | [ $can_grant -eq 1 ] && printf "\n" "$1" 187 | fi 188 | if [[ $2 == *"UPDATE"* ]] 189 | then 190 | u=" CHECKED" 191 | [ $can_grant -eq 1 ] && printf "\n" "$1" 192 | fi 193 | if [[ $2 == *"DELETE"* ]] 194 | then 195 | d=" CHECKED" 196 | [ $can_grant -eq 1 ] && printf "\n" "$1" 197 | fi 198 | [ $can_grant -eq 0 ] && disabled=" disabled=\"disabled\"" || disabled="" 199 | printf "S \n" "$1" "$s" "$disabled" 200 | printf "I \n" "$1" "$i""$disabled" 201 | printf "U \n" "$1" "$u""$disabled" 202 | printf "D \n" "$1" "$d""$disabled" 203 | } 204 | 205 | generate_grants() 206 | { 207 | [ $can_grant -eq 0 ] && return 208 | t=$1 209 | s=$2 210 | os=$3 211 | i=$4 212 | oi=$5 213 | u=$6 214 | ou=$7 215 | d=$8 216 | od=$9 217 | grant="" 218 | [ $os -eq 0 -a $s -eq 1 ] && grant="$grant, SELECT" 219 | [ $oi -eq 0 -a $i -eq 1 ] && grant="$grant, INSERT" 220 | [ $ou -eq 0 -a $u -eq 1 ] && grant="$grant, UPDATE" 221 | [ $od -eq 0 -a $d -eq 1 ] && grant="$grant, DELETE" 222 | if [ "$grant" ] 223 | then 224 | cg=$(echo $grant | sed -e "s/,//") 225 | printf "GRANT %s ON %s TO '%s'@'%s';
\n" $cg $t $lgrant $rgrant | mysql -ANr -u "$user" -p"$password" -h"$host" 2>/dev/null 226 | display "Granted $cg on $t" 2 227 | changes=1 228 | fi 229 | grant="" 230 | [ $os -eq 1 -a $s -eq 0 ] && grant="$grant, SELECT" 231 | [ $oi -eq 1 -a $i -eq 0 ] && grant="$grant, INSERT" 232 | [ $ou -eq 1 -a $u -eq 0 ] && grant="$grant, UPDATE" 233 | [ $od -eq 1 -a $d -eq 0 ] && grant="$grant, DELETE" 234 | if [ "$grant" ] 235 | then 236 | cg=$(echo $grant | sed -e "s/,//") 237 | printf "REVOKE %s ON %s FROM '%s'@'%s';
\n" $cg $t $lgrant $rgrant | mysql -ANr -u "$user" -p"$password" -h"$host" 2>/dev/null 238 | display "Revoked $cg on $t" 3 239 | changes=1 240 | fi 241 | } 242 | 243 | parse_grants() 244 | { 245 | # display "$1" 0 246 | ( 247 | IFS=" 248 | " 249 | for row in $1 250 | do 251 | IFS=" " 252 | ga=($row) 253 | idx=0 254 | for w in ${ga[@]} 255 | do 256 | [ "${ga[$idx]}" = "ON" ] && break 257 | idx=$((idx + 1)) 258 | done 259 | idx=$((idx - 1)) 260 | what=$(echo ${ga[@]:1:$idx} | tr -d "[ ]") 261 | [ "$what" = "ALLPRIVILEGES" ] && continue 262 | w=$(($idx + 2)) 263 | tschema=$(echo ${ga[$w]} | cut -d "." -f 1) 264 | table=$(echo ${ga[$w]} | cut -d "." -f 2) 265 | echo "$tschema $table $what" 266 | done 267 | echo "$schema * .none" 268 | echo "select concat('$schema', ' ', table_name, ' .none') from information_schema.tables where table_schema = '$schema'" | mysql -ANr -u "$service_user" -p"$service_password" -h"$host" 2>/dev/null 269 | ) | sort > $tmpf 270 | printf "\n" 271 | printf "\n" 272 | printf "\n" $lgrant $rgrant 273 | printf "\n" 274 | prev01="" 275 | prev2="" 276 | for row in $(cat $tmpf) 277 | do 278 | IFS=" " 279 | ga=($row) 280 | gn=$((${#ga[@]} - 2)) 281 | if [ "${ga[0]}.${ga[1]}" != "$prev01" ] 282 | then 283 | [ "$prev01" != "" ] && printf "\n" "$prev01" "$(format $prev01 $prev2)" 284 | fi 285 | prev01="${ga[0]}.${ga[1]}" 286 | prev2="${ga[2]}" 287 | done 288 | printf "\n" "$prev01" "$(format $prev01 $prev2)" 289 | printf "\n" 290 | [ $can_grant -eq 1 ] && printf "
 
$schema grants for '%s'@'%s'
 
%s%s
%s%s
 
\n" 291 | printf "
\n" 292 | } 293 | 294 | printf "Content-Type: text/html; charset=utf-8\n\n" 295 | printf "\n" 296 | printf "\n" 297 | printf "DBGranter v%s\n" "$VERSION" 298 | printf "\n" 299 | printf "\n" 300 | printf "\n" 301 | case "$REQUEST_METHOD" in 302 | 'GET');; 303 | 'POST');; 304 | *) printf "Unsupported HTTP method $REQUEST_METHOD"; 305 | printf "$closing_tags\n" 306 | exit 0;; 307 | esac 308 | if [ "$REQUEST_METHOD" = "POST" ] 309 | then 310 | post=1 311 | IFS=" 312 | " 313 | pname="" 314 | for row in $(tr "[&]" "[\n]") 315 | do 316 | name=$(echo $row | cut -d"=" -f 1) 317 | value=$(echo $row | cut -d"=" -f 2) 318 | value=$(unescape_input "$value") 319 | case "$name" in 320 | 'user') user="$value";; 321 | 'password') password="$value";; 322 | 'host') host="$value";; 323 | 'lgrant') lgrant="$value";; 324 | 'rgrant') rgrant="$value";; 325 | 'schema') schema="$value";; 326 | *) 327 | if [ "$pname" != "$name" ] 328 | then 329 | if [ "$pname" != "" ] 330 | then 331 | generate_grants $pname $s $os $i $oi $u $ou $d $od 332 | fi 333 | s=0 334 | os=0 335 | i=0 336 | oi=0 337 | u=0 338 | ou=0 339 | d=0 340 | od=0 341 | fi 342 | case "$value" in 343 | 'select') s=1;; 344 | 'oselect') os=1;; 345 | 'insert') i=1;; 346 | 'oinsert') oi=1;; 347 | 'update') u=1;; 348 | 'oupdate') ou=1;; 349 | 'delete') d=1;; 350 | 'odelete') od=1;; 351 | esac 352 | pname=$name 353 | ;; 354 | esac 355 | done 356 | generate_grants $pname $s $os $i $oi $u $ou $d $od 357 | fi 358 | if [ $post -eq 1 ] 359 | then 360 | post_checks 361 | case $post_error in 362 | 0) show_form block 363 | [ $changes -eq 0 ] && parse_grants "$grants" 364 | ;; 365 | 1) show_form none;; 366 | 2) show_form block;; 367 | esac 368 | else 369 | if [ "$service_user" = "" -o "$service_password" = "" ] 370 | then 371 | echo "Please ensure config file exists and contains required information" 372 | exit 0 373 | fi 374 | show_form none 375 | fi 376 | printf "\n" 377 | printf "$closing_tags\n" 378 | exit 0 379 | 
-------------------------------------------------------------------------------- /findmax.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # rpizzi@palominodb.com 4 | # 5 | QUERY="select distinct c.table_schema, c.table_name, c.column_name, c.data_type, if (c.column_type LIKE '%unsigned', 'unsigned', '') as uns, if (c.extra = 'auto_increment', 'autoinc', '') as autoinc, min(k.ordinal_position) from columns c left join key_column_usage k on c.table_schema = k.table_schema and c.table_name = k.table_name and c.column_name = k.column_name where c.TABLE_SCHEMA NOT IN ('information_schema','mysql') and c.COLUMN_KEY <> '' and c.DATA_TYPE IN ('bigint', 'int', 'mediumint', 'smallint', 'tinyint') and k.ordinal_position is not null group by c.table_schema, c.table_name, c.column_name" 6 | #QUERY="select distinct c.column_name, c.table_schema, c.table_name, c.data_type, if (c.column_type LIKE '%unsigned', 'unsigned', '') as uns, if (c.extra = 'auto_increment', 'autoinc', '') as autoinc, k.ordinal_position from columns c left join key_column_usage k on c.table_schema = k.table_schema and c.table_name = k.table_name and c.column_name = k.column_name where c.TABLE_SCHEMA NOT IN ('information_schema','mysql') and c.COLUMN_KEY <> '' and c.DATA_TYPE IN ('bigint', 'int', 'mediumint', 'smallint', 'tinyint') and k.ordinal_position is not null" 7 | #QUERY="select c.column_name, c.table_schema, c.table_name, c.data_type, if (c.column_type LIKE '%unsigned', 'unsigned', '') as uns, if (c.extra = 'auto_increment', 'autoinc', '') as autoinc, c.column_type from columns c left join key_column_usage k on c.table_schema = k.table_schema and c.table_name = k.table_name and c.column_name = k.column_name where c.TABLE_SCHEMA NOT IN ('information_schema','mysql') and c.COLUMN_KEY <> '' and c.DATA_TYPE IN ('bigint', 'int', 'mediumint', 'smallint', 'tinyint') and k.ordinal_position = 1" 8 | # 9 | [ "$1" = "" ] && echo "usage: $0 dbhost" && 
exit 1 10 | IFS=" 11 | " 12 | echo "SCHEMA NAME,TABLE NAME,COLUMN NAME,COLUMN TYPE,UNSIGNED?,AUTOINC?,MAX VALUE,COLUMN LIMIT,%FULL" 13 | for row in $(echo "$QUERY" | mysql -h $1 -N -r information_schema) 14 | do 15 | schema=$(echo $row | cut -f 1) 16 | table=$(echo $row | cut -f 2) 17 | col=$(echo $row | cut -f 3) 18 | type=$(echo $row | cut -f 4) 19 | unsigned=$(echo $row | cut -f 5) 20 | autoinc=$(echo $row | cut -f 6) 21 | keypos=$(echo $row | cut -f 7) 22 | if [ $keypos -ne 1 ] 23 | then 24 | # not listed in information_schema.key_column_usage as leftmost part of a key 25 | # still, it could be indexed. let's see... 26 | echo "show create table $schema.$table" | mysql -h $1 -A -N -r | grep "^ KEY" | fgrep -q "(\`$col\`)" 27 | if [ $? -ne 0 ] 28 | then 29 | echo 1>&2 30 | echo "Warning: not checking $schema.$table.$col because it would be a table scan, but it should be checked" 1>&2 31 | continue 32 | fi 33 | fi 34 | case "$type" in 35 | 'tinyint') [ $unsigned ] && limit=255 || limit=127;; 36 | 'smallint') [ $unsigned ] && limit=65535 || limit=32767;; 37 | 'mediumint') [ $unsigned ] && limit=16777215 || limit=8388607;; 38 | 'int') [ $unsigned ] && limit=4294967295 || limit=2147483647;; 39 | 'bigint') [ $unsigned ] && limit=18446744073709551615 || limit=9223372036854775807;; 40 | *) echo "error $type"; exit 1;; 41 | esac 42 | echo -n "." 
1>&2 43 | max=$(echo "select max(\`$col\`) from $schema.$table" | mysql -h $1 -A -N -r) 44 | [ "$max" = "NULL" ] && max=0 # empty table 45 | perc=$(echo "scale=2;$max * 100 / $limit" | bc) 46 | echo $schema","$table","$col","$type","$unsigned","$autoinc","$max","$limit","$perc 47 | done 48 | echo 1>&2 49 | exit 0 50 | -------------------------------------------------------------------------------- /general_logger.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # general logger - enable general log and handle daily files 4 | # rick.pizzi@mariadb.com 5 | # tweak folders below then run daily at midnight from cron 6 | # 7 | BASEDIR=/usr/local/mariadb/columnstore/mysql 8 | BINDIR=$BASEDIR/bin 9 | LOG_STORAGE=$BASEDIR/rdba 10 | # 11 | chown mysql:mysql $LOG_STORAGE 12 | echo "set global general_log_file='$LOG_STORAGE/general.log'; set global general_log=ON;" | $BINDIR/mysql -A 13 | logname=general_$(date +%Y-%m-%d -d "today - 1 day").log 14 | mv $LOG_STORAGE/general.log $LOG_STORAGE/$logname 2>/dev/null 15 | echo "flush general logs" | $BINDIR/mysql -A 16 | gzip $LOG_STORAGE/$logname 2>/dev/null 17 | exit 0 18 | -------------------------------------------------------------------------------- /genlogfilter.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | int main(argc, argv) 8 | int argc; 9 | char **argv; 10 | { 11 | char buf[16384]; 12 | char *p; 13 | int t; 14 | int c=0; 15 | FILE *f = NULL; 16 | char fn[32]; 17 | 18 | if (argc != 2) { 19 | fprintf(stderr, "Usage: %s \n", argv[0]); 20 | exit(1); 21 | } 22 | if (chdir(argv[1]) != 0) { 23 | perror(argv[1]); 24 | exit(1); 25 | } 26 | while(fgets(buf, sizeof(buf) - 1, stdin)) { 27 | c++; 28 | printf("%d\n", c); fflush(stdout); 29 | *(buf + strlen(buf) -1) = 0x00; 30 | if (!strncmp(buf + 8, " Query", 6) || !strncmp(buf + 8, " Connect", 8) || 31 | 
!strncmp(buf + 8, " Quit", 5) || !strncmp(buf + 8, " Init DB", 8)) { 32 | *(buf + 8) = 0x00; 33 | t = atoi(buf + 2); 34 | printf("Thread %d\n", t); 35 | sprintf(fn, "%d.thread", t); 36 | if (f) 37 | fclose(f); 38 | if ((f = fopen(fn, "a")) == NULL) { 39 | perror(fn); 40 | exit(1); 41 | } 42 | *(buf + 8) = 0x20; 43 | fprintf(f, "%s\n", buf); 44 | continue; 45 | } 46 | if (!strncmp(buf + 22, " Query", 6) || !strncmp(buf + 22, " Connect", 8) || 47 | !strncmp(buf + 22, " Quit", 5) || !strncmp(buf + 22, " Init DB", 8)) { 48 | *(buf + 22) = 0x00; 49 | t = atoi(buf + 16); 50 | printf("Thread %d\n", t); 51 | sprintf(fn, "%d.thread", t); 52 | if (f) 53 | fclose(f); 54 | if ((f = fopen(fn, "a")) == NULL) { 55 | perror(fn); 56 | exit(1); 57 | } 58 | *(buf + 22) = 0x20; 59 | fprintf(f, "%s\n", buf); 60 | continue; 61 | } 62 | if (f) 63 | fprintf(f, "%s\n", buf); 64 | } 65 | if (f) 66 | fclose(f); 67 | exit(0); 68 | } 69 | -------------------------------------------------------------------------------- /innodb_sampler/parser_55.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | // parser for 5.5 Percona 6 | // rpizzi@blackbirdit.com 7 | // compile with: gcc -O3 -o parser parser55.c 8 | // feed collected sample(s) into the executable via stdin pipe 9 | 10 | #define VERSION "1.0 for Percona 5.5" 11 | #define LOCK_WARNING 1000 12 | #define LOCK_CRITICAL 10000 13 | #define RUNTIME_WARNING 3 14 | #define RUNTIME_CRITICAL 10 15 | 16 | static void print_transaction(); 17 | 18 | struct Transaction { 19 | char id[16]; 20 | char status[256]; 21 | char from[64]; 22 | char user[64]; 23 | char query[16384]; 24 | char lock_info[16384]; 25 | int running_time; 26 | int rows_locked; 27 | }; 28 | 29 | static int all_transactions, lock_info; 30 | 31 | int main(argc, argv) 32 | int argc; 33 | char **argv; 34 | { 35 | char buf[1024], buf2[256]; 36 | char *p; 37 | char timestamp[24]; 38 | int c, sc = 0, tr = 0; 39 | int 
queries, views, transactions, history; 40 | struct Transaction t; 41 | int opt; 42 | 43 | while ((opt = getopt(argc, argv, "alhv")) != -1) { 44 | switch (opt) { 45 | case 'a': 46 | all_transactions++; 47 | break; 48 | case 'l': 49 | lock_info++; 50 | break; 51 | default: 52 | fprintf(stderr, "InnoDB parser %s\n", VERSION); 53 | fprintf(stderr, "by rpizzi@blackbirdit.com\n\n"); 54 | fprintf(stderr, "Usage: %s [-a] [-l]\n", argv[0]); 55 | fprintf(stderr, "\t-a show all transactions even those without running queries\n"); 56 | fprintf(stderr, "\t-l show transaction lock information when available\n"); 57 | exit(1); 58 | break; 59 | } 60 | } 61 | while (fgets(buf, 1023, stdin)) { 62 | *(buf + strlen(buf) - 1) = 0x00; 63 | if (strstr(buf, "INNODB MONITOR OUTPUT") && strstr(buf, "END OF") == NULL) { 64 | *(buf + 16) = 0x00; 65 | printf("Sample #%d, %s\n", ++sc, buf); 66 | tr=0; 67 | continue; 68 | } 69 | if (strstr(buf, "queries inside InnoDB")) { 70 | *(strchr(buf, 'q')) = 0x00; 71 | queries = atoi(buf); 72 | continue; 73 | } 74 | if (strstr(buf, "read views open inside InnoDB")) { 75 | *(strchr(buf, 'r')) = 0x00; 76 | views = atoi(buf); 77 | continue; 78 | } 79 | if (strstr(buf, "transactions active inside InnoDB")) { 80 | *(strchr(buf, 't')) = 0x00; 81 | transactions = atoi(buf); 82 | continue; 83 | } 84 | if (strstr(buf, "History list length")) { 85 | history = atoi(buf + 19); 86 | printf("Queries: %d Views: %d Transactions: %d History len: %d\n", 87 | queries, views, transactions, history); 88 | continue; 89 | } 90 | if (!strncmp(buf, "---TRANSACTION ", 15)) { 91 | if (tr) { 92 | print_transaction(&t); 93 | } 94 | else 95 | printf("--------------------------------------------------------------\n"); 96 | tr = 1; 97 | memset(&t, 0x00, sizeof(struct Transaction)); 98 | p = strchr(buf + 15, ','); 99 | *p = 0x00; 100 | strcpy(t.id, buf + 15); 101 | strcpy(t.status, p +1); 102 | *p = ','; 103 | if ((p = strstr(buf, "ACTIVE "))) { 104 | *(strchr(p + 7, ' ')) = 0x00; 105 | 
t.running_time = atoi(p + 7); 106 | } 107 | continue; 108 | } 109 | if (strstr(buf, "MySQL thread id")) { 110 | strtok(buf, ","); 111 | while ((p = strtok(NULL, ",")) != NULL) 112 | strcpy(buf2, p); 113 | strtok(buf2, " "); 114 | c = 0; 115 | while ((p = strtok(NULL, " ")) != NULL) { 116 | switch(c++) { 117 | case 2: 118 | strcpy(t.from, p); 119 | break; 120 | case 3: 121 | strcpy(t.user, p); 122 | break; 123 | } 124 | } 125 | continue; 126 | } 127 | if (!strncmp(buf, "TABLE LOCK ", 11) || !strncmp(buf, "RECORD LOCKS ", 13) || strstr(buf, "TOO MANY LOCKS PRINTED FOR THIS TRX")) { 128 | if (tr) { 129 | strcat(t.lock_info, buf); 130 | strcat(t.lock_info, "\n"); 131 | } 132 | continue; 133 | } 134 | if (strstr(buf, "row lock(s)")) { 135 | strtok(buf, ","); 136 | c=0; 137 | while ((p = strtok(NULL, ",")) != NULL) { 138 | switch(c++) { 139 | case 1: 140 | *(strchr(p + 1, ' ')) = 0x00; 141 | t.rows_locked = atoi(p); 142 | break; 143 | } 144 | } 145 | continue; 146 | } 147 | if (strstr(buf, "mysql tables in use")) 148 | continue; 149 | if (strstr(buf, "Trx read view will not see trx")) 150 | continue; 151 | if (tr && !strcmp(buf, "----------------------------")) { 152 | print_transaction(&t); 153 | tr=0; 154 | continue; 155 | } 156 | if (tr) { 157 | strcat(t.query, buf); 158 | strcat(t.query, "\n"); 159 | continue; 160 | } 161 | // printf("NOT INTERESTING -> %s\n", buf); 162 | } 163 | exit(0); 164 | } 165 | 166 | static void print_transaction(t) 167 | struct Transaction *t; 168 | { 169 | char *status; 170 | 171 | if (!strlen(t->query) && !all_transactions) 172 | return; 173 | if (strstr(t->query, "show engine innodb status") && !all_transactions) 174 | return; 175 | status = "OK"; 176 | if (t->running_time >= RUNTIME_WARNING) 177 | status = "RT_WARNING"; 178 | if (t->running_time >= RUNTIME_CRITICAL) 179 | status = "RT_CRITICAL"; 180 | if (strpbrk(t->from, ".1234567890")) 181 | printf("Transaction id %s, runtime %d secs (%s), %s@%s state: %s\n", 182 | t->id, 
t->running_time, status, t->user, t->from, t->status); 183 | else 184 | printf("Transaction id %s, runtime %d secs (%s), state: %s %s\n", 185 | t->id, t->running_time, status, t->from, t->user); 186 | if (t->rows_locked) { 187 | status = "OK"; 188 | if (t->rows_locked >= LOCK_WARNING) 189 | status = "LOCK_WARNING"; 190 | if (t->rows_locked >= LOCK_CRITICAL) 191 | status = "LOCK_CRITICAL"; 192 | printf("Rows locked %d (%s)\n", t->rows_locked, status); 193 | } 194 | printf("%s\n", t->query); 195 | if (lock_info) 196 | printf("%s\n", t->lock_info); 197 | } 198 | -------------------------------------------------------------------------------- /innodb_sampler/parser_56.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # innodb_parser.sh v 1.01 for: MySQL 5.6.13 4 | # parses saved output of SHOW ENGINE INNODB STATUS and looks at transactions, 5 | # pretty printing them and highlighting critical locking situations 6 | # 7 | # usage: pipe output of the saved sample file(s) into this script 8 | # (C) rpizzi@blackbirdit.com 9 | # 10 | 11 | print_transaction() { 12 | # echo $transaction >> debug.log 13 | rlidx=$(echo "$transaction" | fgrep -b -o "row lock(s)" | cut -d":" -f 1) 14 | if [ "$rlidx" != "" ] 15 | then 16 | rlidx=$(expr $rlidx - 10) 17 | rows_locked=$(echo ${transaction:$rlidx:10} | cut -d"," -f 2) 18 | else 19 | rows_locked=0 20 | fi 21 | section1=$(echo $transaction | cut -d"|" -f 1) 22 | section2=$(echo $transaction | cut -d"|" -f 2) 23 | section3=$(echo $transaction | cut -d"|" -f 3) 24 | section4=$(echo $transaction | cut -d"|" -f 4) 25 | section5=$(echo $transaction | cut -d"|" -f 5) 26 | id=$(echo $section1 | cut -d " " -f 2) 27 | case "$section1" in 28 | *"PREPARED"*) 29 | tm=$(echo "$section1" | cut -d " " -f 5,6) 30 | case "$section2" in 31 | "mysql tables in use"*) 32 | query="$section5" 33 | ;; 34 | *) 35 | query="$section4" 36 | ;; 37 | esac 38 | ;; 39 | *) 40 | tm=$(echo "$section1" | cut -d 
" " -f 4,5) 41 | case "$section2" in 42 | "mysql tables in use"*) 43 | query="$section5" 44 | ;; 45 | *) 46 | case "$section3" in 47 | *" init") 48 | query="$section4" 49 | ;; 50 | *" cleaning up") 51 | query="cleaning up" 52 | ;; 53 | "Trx read view"*) 54 | query="cleaning up" 55 | ;; 56 | *) 57 | query="" 58 | ;; 59 | esac 60 | ;; 61 | esac 62 | ;; 63 | esac 64 | alert="OK" 65 | [ $rows_locked -ge 1000 ] && alert="WARNING" 66 | [ $rows_locked -ge 10000 ] && alert="CRITICAL" 67 | echo "sample #$c, id $id time $tm, locked $rows_locked rows ($alert), $query" 68 | } 69 | 70 | IFS=" 71 | " 72 | transaction="" 73 | trsection=0 74 | tr_start="---TRANSACTION" 75 | tr_end="--------" 76 | c=1 77 | act=0 78 | timestamp="" 79 | havets=0 80 | not=0 81 | while read row 82 | do 83 | if [ $trsection -eq 0 -a $havets -eq 0 ] 84 | then 85 | case "$row" in 86 | "END OF INNODB MONITOR"*) 87 | ;; 88 | *"INNODB MONITOR"*) 89 | timestamp="${row:0:19}" 90 | havets=1 91 | ;; 92 | esac 93 | continue 94 | fi 95 | if [ $trsection -eq 1 -a "$row" = "$tr_end" ] 96 | then 97 | if [ -n "$transaction" ] 98 | then 99 | print_transaction 100 | act=$(expr $act + 1) 101 | fi 102 | c=$(expr $c + 1) 103 | trsection=0 104 | havets=0 105 | continue 106 | fi 107 | if [ "${row:0:14}" = "$tr_start" ] 108 | then 109 | not=0 110 | case "$row" in 111 | *"not started"*) not=1;; 112 | esac 113 | if [ $trsection -eq 0 ] 114 | then 115 | [ $act -gt 0 ] && echo "$act total active" 116 | echo "=========================================" 117 | echo "Sample #$c $timestamp" 118 | echo "=========================================" 119 | act=0 120 | trsection=1 121 | fi 122 | if [ -n "$transaction" ] 123 | then 124 | print_transaction 125 | act=$(expr $act + 1) 126 | transaction="" 127 | fi 128 | fi 129 | [ $trsection -eq 1 -a $not -eq 0 ] && transaction="$transaction$row|" 130 | done 131 | if [ -n "$transaction" ] 132 | then 133 | print_transaction 134 | act=$(expr $act + 1) 135 | [ $act -gt 0 ] && echo "$act total 
active" 136 | fi 137 | exit 0 138 | -------------------------------------------------------------------------------- /innodb_sampler/sampler.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # 4 | # takes a sample of SHOW ENGINE INNODB STATUS every 10 seconds and stores it in files 5 | # under $SAMPLEDIR for later use 6 | # (run it in background with nohup; user should have password in dot file) 7 | # get_reply routine only needed to drain the mysql output pipe otherwise script will block when it fills up 8 | # rpizzi@blackbirdit.com 9 | # 10 | SAMPLEDIR=$HOME/sampling/data/$(hostname) 11 | USER=photographer 12 | # 13 | get_reply() 14 | { 15 | while read -t 0.2 -u ${mysqlc[0]} row 16 | do 17 | echo "$row" >/dev/null 18 | done 19 | } 20 | 21 | coproc mysqlc { script -c "mysql -ANrs -u$USER 2>&1" /dev/null; } 22 | c=0 23 | echo "set session interactive_timeout=30;" >&${mysqlc[1]} 24 | echo "set session wait_timeout=30;" >&${mysqlc[1]} 25 | while true 26 | do 27 | month=$(date +%m) 28 | day=$(date +%d) 29 | hour=$(date +%H) 30 | folder=$SAMPLEDIR/$month/$day 31 | [ ! 
-d $folder ] && mkdir -p $folder 32 | echo "pager cat >> $folder/$hour.sample" >&${mysqlc[1]} 33 | echo "show engine innodb status;" >&${mysqlc[1]} 34 | get_reply 35 | sleep 10 36 | done 37 | -------------------------------------------------------------------------------- /innodb_watcher.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # rpizzi@blackbirdit.com 4 | # reports interesting INNODB errors from output of SHOW ENGINE INNODB 5 | # Tested with 5.1 and 5.5 6 | # 7 | # db.conf format: 8 | # nickname:hostname or IPaddr:path-to-mysql-sock 9 | # 10 | # script assumes that the user which is running under has ssh and sudo access to the DB servers 11 | # if not, tweaks are needed 12 | # 13 | DBLIST=db.conf 14 | WANTED="LATEST FOREIGN KEY ERROR|LATEST DETECTED DEADLOCK" 15 | # 16 | tmpf=/tmp/innomon.$$ 17 | trap 'rm -f $tmpf' 0 18 | savedir=/tmp/innodbmon_saves 19 | [ ! -d $savedir ] && mkdir $savedir 20 | IFS=" 21 | " 22 | something_found=0 23 | for db in $(grep -v "^#" $DBLIST) 24 | do 25 | nickname=$(echo $db | cut -d":" -f 1) 26 | host=$(echo $db | cut -d":" -f 2) 27 | socket=$(echo $db | cut -d":" -f 3) 28 | ssh -q $host "sudo su - -c \"echo 'show engine innodb status' | mysql -A -N -r -u root -S $socket\"" > $tmpf 2>/dev/null 29 | if [ ! 
-s $tmpf ] 30 | then 31 | echo 32 | echo "ERROR: unable to fetch data from $nickname ($host)" 33 | continue 34 | fi 35 | parsing_hdr=0 36 | fake=0 37 | wantit=0 38 | capture_ts=0 39 | for row in $(cat $tmpf | sed -e "s/^=====/-----/g") 40 | do 41 | hdr=$(echo $row | cut -c1-5) 42 | if [ $hdr = "---OL" ] 43 | then 44 | hdr="-----" 45 | fake=1 46 | fi 47 | if [ $hdr = "-----" ] 48 | then 49 | if [ $parsing_hdr -eq 0 ] 50 | then 51 | parsing_hdr=1 52 | wantit=0 53 | else 54 | parsing_hdr=0 55 | fake=0 56 | fi 57 | continue 58 | fi 59 | if [ $parsing_hdr -eq 1 ] 60 | then 61 | [ $fake -eq 1 ] && continue 62 | # echo "HEADER: $row ($nickname)" 63 | wantit=$(echo $row | egrep -c "$WANTED") 64 | if [ $wantit -eq 1 ] 65 | then 66 | curr_header="$row" 67 | capture_ts=1 68 | fi 69 | continue 70 | fi 71 | if [ $wantit -eq 1 ] 72 | then 73 | if [ $capture_ts -eq 1 ] 74 | then 75 | header_fn=$(echo $curr_header | tr "[:upper:]" "[:lower:]" | tr -s "[ ]" "[_]") 76 | timestamp=$(echo $row | cut -d" " -f 1,2) 77 | if [ -f $savedir/${nickname}_${header_fn} ] 78 | then 79 | last_timestamp=$(cat $savedir/${nickname}_${header_fn}) 80 | if [ "$last_timestamp" = "$timestamp" ] 81 | then 82 | #echo "ALREADY SEEN $curr_header" 83 | wantit=0 84 | continue 85 | fi 86 | fi 87 | echo 88 | echo "===============================" 89 | echo "DB: $nickname" 90 | echo "TYPE: $curr_header" 91 | echo "TIMESTAMP: $timestamp" 92 | echo "===============================" 93 | echo 94 | echo $timestamp > $savedir/${nickname}_${header_fn} 95 | capture_ts=0 96 | something_found=1 97 | continue 98 | fi 99 | echo "$row" 100 | fi 101 | done 102 | done 103 | if [ $something_found -eq 0 ] 104 | then 105 | echo 106 | echo "No new errors have been found." 
107 | fi 108 | exit 0 109 | -------------------------------------------------------------------------------- /ioping-0.8-PZ/Makefile: -------------------------------------------------------------------------------- 1 | CFLAGS+=-std=gnu99 -g -Wall -Wextra -pedantic -DPZ 2 | LIBS=-lm 3 | PREFIX=/usr/local 4 | BINDIR=$(PREFIX)/bin 5 | MAN1DIR=$(PREFIX)/share/man/man1 6 | 7 | SRCS=ioping.c 8 | OBJS=$(SRCS:.c=.o) 9 | BINS=ioping 10 | MANS=ioping.1 11 | MANS_F=$(MANS:.1=.txt) $(MANS:.1=.pdf) 12 | DOCS=README changelog 13 | SPEC=ioping.spec 14 | 15 | PACKAGE=ioping 16 | VERSION=$(shell cat version) 17 | DISTDIR=$(PACKAGE)-$(VERSION) 18 | DISTFILES=$(SRCS) $(MANS) $(DOCS) $(SPEC) Makefile 19 | PACKFILES=$(BINS) $(MANS) $(MANS_F) $(DOCS) 20 | 21 | STRIP=strip 22 | TARGET=$(shell ${CC} -dumpmachine | cut -d- -f 2) 23 | 24 | ifdef MINGW 25 | CC=i686-w64-mingw32-gcc 26 | STRIP=i686-w64-mingw32-strip 27 | TARGET=win32 28 | BINS:=$(BINS:=.exe) 29 | endif 30 | 31 | all: version $(BINS) 32 | 33 | version: $(DISTFILES) 34 | test ! 
-d .git || git describe --tags --dirty=+ | sed 's/^v//;s/-/./g' > $@ 35 | 36 | clean: 37 | $(RM) -f $(OBJS) $(BINS) $(MANS_F) 38 | 39 | install: $(BINS) $(MANS) 40 | mkdir -p $(DESTDIR)$(BINDIR) 41 | install -s -m 0755 $(BINS) $(DESTDIR)$(BINDIR) 42 | mkdir -p $(DESTDIR)$(MAN1DIR) 43 | install -m 644 $(MANS) $(DESTDIR)$(MAN1DIR) 44 | 45 | %.o: %.c version 46 | $(CC) $(CFLAGS) -DVERSION=\"${VERSION}\" -c -o $@ $< 47 | 48 | %.ps: %.1 49 | man -t ./$< > $@ 50 | 51 | %.pdf: %.ps 52 | ps2pdf $< $@ 53 | 54 | %.txt: %.1 55 | MANWIDTH=80 man ./$< | col -b > $@ 56 | 57 | $(BINS): $(OBJS) 58 | $(CC) -o $@ $^ $(CFLAGS) $(LDFLAGS) $(LIBS) 59 | 60 | dist: version $(DISTFILES) 61 | tar -cz --transform='s,^,$(DISTDIR)/,S' $^ -f $(DISTDIR).tar.gz 62 | 63 | binary-tgz: $(PACKFILES) 64 | ${STRIP} ${BINS} 65 | tar czf ${PACKAGE}-${VERSION}-${TARGET}.tgz $^ 66 | 67 | binary-zip: $(PACKFILES) 68 | ${STRIP} ${BINS} 69 | zip ${PACKAGE}-${VERSION}-${TARGET}.zip $^ 70 | 71 | .PHONY: all clean install dist 72 | -------------------------------------------------------------------------------- /ioping-0.8-PZ/README: -------------------------------------------------------------------------------- 1 | ioping 2 | ====== 3 | 4 | An tool to monitor I/O latency in real time. 5 | It shows disk latency in the same way as ping shows network latency. 
6 | 7 | Homepage: http://code.google.com/p/ioping/ 8 | 9 | Please send your patches, issues and questions to 10 | http://code.google.com/p/ioping/issues/ 11 | 12 | Authors: 13 | Konstantin Khlebnikov 14 | Kir Kolyshkin 15 | 16 | Licensed under GPLv3 (or later) 17 | -------------------------------------------------------------------------------- /ioping-0.8-PZ/changelog: -------------------------------------------------------------------------------- 1 | since v0.4 2 | - rate-test, new key -R 3 | - FreeBSD port 4 | - OSX port 5 | 6 | since v0.5 7 | - fixes in man page and internal help 8 | - fixes in OSX and FreeBSD ports 9 | - Debian/kFreeBSD port 10 | - Debian/HURD port 11 | 12 | since v0.6 13 | - Linux AIO support, new key -A 14 | - extended raw statistics, keys -p -P 15 | - batched mode, new key -B 16 | - write requests, key -W 17 | - automatic git-based versioning 18 | - Windows/MinGW port 19 | - DragonFlyBSD port 20 | 21 | since v0.7 22 | - OpenBSD port 23 | - switch to IEEE 1541-2002 units: MiB/s 24 | -------------------------------------------------------------------------------- /ioping-0.8-PZ/ioping: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RickPizzi/pztools/827473bf0e2d71dcfdb3f4b94b29a38500796137/ioping-0.8-PZ/ioping -------------------------------------------------------------------------------- /ioping-0.8-PZ/ioping.1: -------------------------------------------------------------------------------- 1 | .TH IOPING "1" "Dec 2013" "" "User Commands" 2 | .SH NAME 3 | ioping \- simple disk I/O latency monitoring tool 4 | .SH SYNOPSIS 5 | .SY ioping 6 | .OP \-LABCDWRq 7 | .OP \-c count 8 | .OP \-w deadline 9 | .OP \-p period 10 | .OP \-P period 11 | .OP \-i interval 12 | .OP \-s size 13 | .OP \-S wsize 14 | .OP \-o offset 15 | .IR directory | file | device 16 | .br 17 | .SY ioping 18 | .B -h 19 | | 20 | .B -v 21 | .br 22 | .SH DESCRIPTION 23 | This tool lets you monitor I/O latency in
real time. 24 | .SH OPTIONS 25 | .TP 26 | .BI \-c \ count 27 | Stop after \fIcount\fR requests. 28 | .TP 29 | .BI \-w \ deadline 30 | Stop after \fIdeadline\fR time passed. 31 | .TP 32 | .BI \-p \ period 33 | Print raw statistics for every \fIperiod\fR requests. 34 | .TP 35 | .BI \-P \ period 36 | Print raw statistics for every \fIperiod\fR in time. 37 | .TP 38 | .BI \-i \ interval 39 | Set time between requests to \fIinterval\fR (\fB1s\fR). 40 | .TP 41 | .BI \-s \ size 42 | Request size (\fB4k\fR). 43 | .TP 44 | .BI \-S \ size 45 | Working set size (\fB1m\fR for directory, full size for file or device). 46 | .TP 47 | .BI \-o \ offset 48 | Offset of working set in the file/device (0). 49 | .TP 50 | .B \-L 51 | Use sequential operations rather than random. This also sets request size 52 | to \fB256k\fR (as in \fB-s 256k\fR). 53 | .TP 54 | .B \-A 55 | Use asynchronous I/O (syscalls \fBio_submit\fR(2), \fBio_getevents\fR(2), etc). 56 | .TP 57 | .B \-C 58 | Use cached I/O (suppress cache invalidation via \fBposix_fadvise\fR(2)). 59 | .TP 60 | .B \-D 61 | Use direct I/O (see \fBO_DIRECT\fR in \fBopen\fR(2)). 62 | .TP 63 | .B \-W 64 | Use writes rather than reads. 65 | \fB*DANGEROUS*\fR It will shred your data if target is file or device, 66 | repeat key three times (\fB-WWW\fR) to do this. 67 | .TP 68 | .B \-R 69 | Disk seek rate test (same as \fB\-q \-i 0 \-w 3 \-S 64m\fR). 70 | .TP 71 | .B \-B 72 | Batch mode. Be quiet and print final statistics in raw format. 73 | .TP 74 | .B \-q 75 | Suppress periodical human-readable output. 76 | .TP 77 | .B \-h 78 | Display help message and exit. 79 | .TP 80 | .B \-v 81 | Display version and exit.
82 | .SS Argument suffixes 83 | For options that expect time argument (\fB\-i\fR, \fB\-P\fR and \fB\-w\fR), 84 | default is seconds, unless you specify one of the following suffixes 85 | (case-insensitive): 86 | .TP 87 | .BR us ,\ usec 88 | microseconds (a millionth of a second, 1 / 1 000 000) 89 | .TP 90 | .BR ms ,\ msec 91 | milliseconds (a thousandth of a second, 1 / 1 000) 92 | .TP 93 | .BR s ,\ sec 94 | seconds 95 | .TP 96 | .BR m ,\ min 97 | minutes 98 | .TP 99 | .BR h ,\ hour 100 | hours 101 | .PP 102 | For options that expect "size" argument (\fB\-s\fR, \fB\-S\fR and \fB\-o\fR), 103 | default is bytes, unless you specify one of the following suffixes 104 | (case-insensitive): 105 | .TP 106 | .B sector 107 | disk sectors (a sector is always 512). 108 | .TP 109 | .BR KiB ,\ k ,\ kb 110 | kilobytes (1 024 bytes) 111 | .TP 112 | .B page 113 | memory pages (a page is always 4KiB). 114 | .TP 115 | .BR MiB ,\ m ,\ mb 116 | megabytes (1 048 576 bytes) 117 | .TP 118 | .BR GiB ,\ g ,\ gb 119 | gigabytes (1 073 741 824 bytes) 120 | .TP 121 | .BR TiB ,\ t ,\ tb 122 | terabytes (1 099 511 627 776 bytes) 123 | .PP 124 | For options that expect "number" argument (\fB-p\fR and \fB-c\fR) you 125 | can optionally specify one of the following suffixes (case-insensitive): 126 | .TP 127 | .B k 128 | kilo (thousands, 1 000) 129 | .TP 130 | .B m 131 | mega (millions, 1 000 000) 132 | .TP 133 | .B g 134 | giga (billions, 1 000 000 000) 135 | .TP 136 | .B t 137 | tera (trillions, 1 000 000 000 000) 138 | .SH EXIT STATUS 139 | Returns \fB0\fR upon success. The following error codes are defined: 140 | .TP 141 | .B 1 142 | Invalid usage (error in arguments). 143 | .TP 144 | .B 2 145 | Error during preparation stage. 146 | .TP 147 | .B 3 148 | Error during runtime. 149 | .SH RAW STATISTICS 150 | .B ioping -p 100 -c 200 -i 0 -q . 
151 | .ad l 152 | .br 153 | \f(CW100 26694 3746 15344272 188 267 1923 228 154 | .br 155 | 100 24165 4138 16950134 190 242 2348 214 156 | .br 157 | (1) (2) (3) (4) (5) (6) (7) (8) 158 | .br 159 | 160 | .br 161 | (1) number of requests 162 | .br 163 | (2) serving time (usec) 164 | .br 165 | (3) requests per second (iops) 166 | .br 167 | (4) transfer speed (bytes/sec) 168 | .br 169 | (5) minimal request time (usec) 170 | .br 171 | (6) average request time (usec) 172 | .br 173 | (7) maximum request time (usec) 174 | .br 175 | (8) request time standard deviation (usec) 176 | .SH EXAMPLES 177 | .TP 178 | .B ioping . 179 | Show disk I/O latency using the default values and the current directory, 180 | until interrupted. 181 | .TP 182 | .B ioping -c 10 -s 1M /tmp 183 | Measure latency on \fB/tmp\fR using 10 requests of 1 megabyte each. 184 | .TP 185 | .B ioping -R /dev/sda 186 | Measure disk seek rate. 187 | .TP 188 | .B ioping -RL /dev/sda 189 | Measure disk sequential speed. 190 | .SH SEE ALSO 191 | .BR iostat (1), 192 | .BR dd (1), 193 | .BR fio (1), 194 | .BR dbench (1), 195 | .BR fsstress, 196 | .BR xfstests, 197 | .BR hdparm (8), 198 | .BR badblocks (8), 199 | .BR 200 | .SH HOMEPAGE 201 | .UR http://code.google.com/p/ioping/ 202 | .UE . 203 | .SH AUTHORS 204 | This program was written by Konstantin Khlebnikov 205 | .MT koct9i@gmail.com 206 | .ME . 207 | .br 208 | Man-page was written by Kir Kolyshkin 209 | .MT kir@openvz.org 210 | .ME . 211 | -------------------------------------------------------------------------------- /ioping-0.8-PZ/ioping.man: -------------------------------------------------------------------------------- 1 | IOPING(1) User Commands IOPING(1) 2 | 3 | 4 | 5 | NNAAMMEE 6 | ioping - simple disk I/O latency monitoring tool 7 | 8 | SSYYNNOOPPSSYYSS 9 | _d_i_r_e_c_t_o_r_y|_f_i_l_e|_d_e_v_i_c_e 10 | --hh | --vv 11 | 12 | DDEESSCCRRIIPPTTIIOONN 13 | This tool lets you monitor I/O latency in real time. 
14 | 15 | OOPPTTIIOONNSS 16 | --cc _c_o_u_n_t 17 | Stop after _c_o_u_n_t requests. 18 | 19 | --ww _d_e_a_d_l_i_n_e 20 | Stop after _d_e_a_d_l_i_n_e time passed. 21 | 22 | --pp _p_e_r_i_o_d 23 | Print raw statistics for every _p_e_r_i_o_d requests. 24 | 25 | --PP _p_e_r_i_o_d 26 | Print raw statistics for every _p_e_r_i_o_d in time. 27 | 28 | --ii _i_n_t_e_r_v_a_l 29 | Set time between requests to _i_n_t_e_r_v_a_l (11ss). 30 | 31 | --ss _s_i_z_e 32 | Request size (44kk). 33 | 34 | --SS _s_i_z_e 35 | Working set size (11mm for directory, full size for file or 36 | device). 37 | 38 | --oo _o_f_f_s_e_t 39 | Offset of working set in the file/device (0). 40 | 41 | --LL Use sequential operations rather than random. This also sets 42 | request size to 225566kk (as in --ss 225566kk). 43 | 44 | --AA Use asynchronous I/O (syscalls iioo__ssuubbmmiitt(2), iioo__ssuubbmmiitt(2), etc). 45 | 46 | --CC Use cached I/O (suppress cache invalidation via ppoossiixx__ffaadd-- 47 | vviissee(2)). 48 | 49 | --DD Use direct I/O (see OO__DDIIRREECCTT in ooppeenn(2)). 50 | 51 | --WW Use writes rather than reads. **DDAANNGGEERROOUUSS** It will shred your 52 | data if target is file or device, repeat key tree times (--WWWWWW) 53 | to do this. 54 | 55 | --RR Disk seek rate test (same as --qq --ii 00 --ww 33 --SS 6644mm). 56 | 57 | --BB Batch mode. Be quiet and print final statistics in raw format. 58 | 59 | --qq Suppress periodical human-readable output. 60 | 61 | --hh Display help message and exit. 62 | 63 | --vv Display version and exit. 
64 | 65 | AArrgguummeenntt ssuuffffiixxeess 66 | For options that expect time argument (--ii, --PP and --ww), default is sec- 67 | onds, unless you specify one of the following suffixes (case- 68 | insensitive): 69 | 70 | uuss, uusseecc 71 | microseconds (a millionth of a second, 1 / 1 000 000) 72 | 73 | mmss, mmsseecc 74 | milliseconds (a thousandth of a second, 1 / 1 000) 75 | 76 | ss, sseecc seconds 77 | 78 | mm, mmiinn minutes 79 | 80 | hh, hhoouurr 81 | hours 82 | 83 | For options that expect "size" argument (--ss, --SS and --oo), default is 84 | bytes, unless you specify one of the following suffixes (case-insensi- 85 | tive): 86 | 87 | sseeccttoorr disk sectors (a sector is always 512). 88 | 89 | KKiiBB, kk, kkbb 90 | kilobytes (1 024 bytes) 91 | 92 | ppaaggee memory pages (a page is always 4KiB). 93 | 94 | MMiiBB, mm, mmbb 95 | megabytes (1 048 576 bytes) 96 | 97 | GGiiBB, gg, ggbb 98 | gigabytes (1 073 741 824 bytes) 99 | 100 | TTiiBB, tt, ttbb 101 | terabytes (1 099 511 627 776 bytes) 102 | 103 | For options that expect "number" argument (--pp and --cc) you can option- 104 | ally specify one of the following suffixes (case-insensitive): 105 | 106 | kk kilo (thousands, 1 000) 107 | 108 | mm mega (millions, 1 000 000) 109 | 110 | gg giga (billions, 1 000 000 000) 111 | 112 | tt tera (trillions, 1 000 000 000 000) 113 | 114 | EEXXIITT SSTTAATTUUSS 115 | Returns 00 upon success. The following error codes are defined: 116 | 117 | 11 Invalid usage (error in arguments). 118 | 119 | 22 Error during preparation stage. 120 | 121 | 33 Error during runtime. 122 | 123 | RRAAWW SSTTAATTIISSTTIICCSS 124 | iiooppiinngg --pp 110000 --cc 220000 --ii 00 --qq .. 
125 | 100 26694 3746 15344272 188 267 1923 228 126 | 100 24165 4138 16950134 190 242 2348 214 127 | (1) (2) (3) (4) (5) (6) (7) (8) 128 | 129 | (1) number of requests 130 | (2) serving time (usec) 131 | (3) requests per second (iops) 132 | (4) transfer speed (bytes/sec) 133 | (5) minimal request time (usec) 134 | (6) average request time (usec) 135 | (7) maximum request time (usec) 136 | (8) request time standard deviation (usec) 137 | 138 | EEXXAAMMPPLLEESS 139 | iiooppiinngg .. 140 | Show disk I/O latency using the default values and the current 141 | directory, until interrupted. 142 | 143 | iiooppiinngg --cc 1100 --ss 11MM //ttmmpp 144 | Measure latency on //ttmmpp using 10 requests of 1 megabyte each. 145 | 146 | iiooppiinngg --RR //ddeevv//ssddaa 147 | Measure disk seek rate. 148 | 149 | iiooppiinngg --RRLL //ddeevv//ssddaa 150 | Measure disk sequential speed. 151 | 152 | SSEEEE AALLSSOO 153 | iioossttaatt(1), dddd(1), ffiioo(1), ddbbeenncchh(1), ffssssttrreessss,, xxffsstteessttss,, hhddppaarrmm(8), 154 | bbaaddbblloocckkss(8), 155 | 156 | HHOOMMEEPPAAGGEE 157 | AAUUTTHHOORRSS 158 | This program was written by Konstantin Khlebnikov 159 | Man-page was written by Kir Kolyshkin 160 | 161 | 162 | 163 | Dec 2013 IOPING(1) 164 | -------------------------------------------------------------------------------- /ioping-0.8-PZ/ioping.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RickPizzi/pztools/827473bf0e2d71dcfdb3f4b94b29a38500796137/ioping-0.8-PZ/ioping.o -------------------------------------------------------------------------------- /ioping-0.8-PZ/ioping.spec: -------------------------------------------------------------------------------- 1 | Name: ioping 2 | Version: %(cat version) 3 | Release: 1%{?dist} 4 | Summary: simple disk I/O latency monitoring tool 5 | 6 | Group: Applications/System 7 | License: GPLv3+ 8 | URL: http://code.google.com/p/ioping 9 | Source0: ioping-%{version}.tar.gz 
10 | BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) 11 | 12 | BuildRequires: gcc, make 13 | 14 | %description 15 | This tool lets you monitor I/O latency in real time, in a way 16 | similar to how ping(1) does for network latency. 17 | 18 | %prep 19 | %setup -q 20 | 21 | 22 | %build 23 | CFLAGS="$RPM_OPT_FLAGS" make %{?_smp_mflags} 24 | 25 | 26 | %install 27 | rm -rf $RPM_BUILD_ROOT 28 | make install PREFIX=%{_prefix} DESTDIR=$RPM_BUILD_ROOT 29 | 30 | 31 | %clean 32 | rm -rf $RPM_BUILD_ROOT 33 | 34 | 35 | %files 36 | %defattr(-,root,root,-) 37 | %attr(755,root,root) %{_bindir}/ioping 38 | %attr(644, root, root) %{_mandir}/man1/ioping.1.* 39 | %doc 40 | 41 | %changelog 42 | * Mon Aug 1 2011 Konstantin Khlebnikov - 0.6-1 43 | - many fixes 44 | - port to GNU/Hurd 45 | 46 | * Thu Jun 16 2011 Konstantin Khlebnikov - 0.5-1 47 | - rate tests 48 | - freebsd and macosx ports 49 | 50 | * Thu Jun 2 2011 Konstantin Khlebnikov - 0.4-1 51 | - fix linking 52 | 53 | * Thu Jun 2 2011 Kir Kolyshkin - 0.3-1 54 | - fix i386 build on x86_64 55 | 56 | * Mon May 30 2011 Kir Kolyshkin - 0.1-1 57 | - initial packaging 58 | -------------------------------------------------------------------------------- /ioping-0.8-PZ/version: -------------------------------------------------------------------------------- 1 | 0.8 2 | -------------------------------------------------------------------------------- /magic_change_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # magic change master: repoint a slave to a different galera master automatically 4 | # 5 | # script will look up the specified slave position and find out the change master command to repoint the slave for you 6 | # usage: magic_change_master.sh [ dead ]" 7 | # if dead parameter is specified, script assumes master dead and uses the slave relay log to find out galera Xid 8 | # 9 | if [ "$2" = "" ] 10 | then 11 | echo "Usage: $0 [ dead ]" 12 
| exit 1 13 | fi 14 | new_master=$(host $2 | fgrep address | cut -d " " -f 4) 15 | [ "$new_master" = "" ] && new_master=$2 16 | curpos=($(echo "show slave status\G" | ssh -tq $1 mysql -Ar | egrep " Master_Host| Master_Log_File| Exec_Master_Log_Pos| Relay_Log_File| Relay_Log_Pos" | cut -d ":" -f 2)) 17 | master=${curpos[0]} 18 | if [ "$master" = "$new_master" ] 19 | then 20 | echo "Cannot change master to the current master" 21 | exit 1 22 | fi 23 | binlog=${curpos[1]} 24 | relay=${curpos[2]} 25 | relay_pos=${curpos[3]} 26 | exec_pos=${curpos[4]} 27 | echo "On slave $1, current situation is: Master $master, File $binlog, Position $exec_pos" 28 | binlog_base=$(echo "select @@log_bin_basename" | ssh -tq $master mysql -ANr) 29 | relay_base=$(echo "select @@datadir" | ssh -tq $master mysql -ANr) 30 | if [ "$3" = "dead" ] 31 | then 32 | echo -n "looking up Xid in relay log for position $exec_pos... " 33 | xid=$(ssh -tnq $1 sudo mysqlbinlog $relay_base/$relay | fgrep "end_log_pos $exec_pos " | cut -d"=" -f 2 | tr -d " ") 34 | else 35 | echo -n "looking up Xid in $binlog for position $exec_pos... " 36 | xid=$(ssh -tnq $master sudo mysqlbinlog $(dirname $binlog_base)/$binlog | fgrep "end_log_pos $exec_pos " | cut -d"=" -f 2 | tr -d " ") 37 | fi 38 | echo $xid 39 | binlog_base=$(echo "select @@log_bin_basename" | ssh -tq $new_master mysql -ANr) 40 | echo -n "searching binlogs on $2 using Xid range search... " 41 | for b in $(ssh -tq $new_master ls -t $binlog_base*.[0-9]*) 42 | do 43 | new_pos_binlog=$(basename $b) 44 | echo -n $new_pos_binlog" " 45 | bxid=$(ssh -tnq $new_master sudo mysqlbinlog $b | fgrep -m 1 "Xid = " | cut -d "=" -f 2 | tr -d " ") 46 | if [ "$bxid" = "" ] 47 | then 48 | echo " FAILED! Check that passwordless sudo is enabled!" 49 | exit 1 50 | fi 51 | [ $bxid -lt $xid ] && break 52 | done 53 | echo 54 | echo -n "looking up position in $new_pos_binlog for Xid $xid... 
" 55 | new_pos=$(ssh -tnq $new_master sudo mysqlbinlog $b | fgrep "Xid = $xid" | tr -s " " | cut -d " " -f 7) 56 | echo $new_pos 57 | echo "All done! Connect to $1 and issue: CHANGE MASTER TO MASTER_HOST='$new_master', MASTER_PORT=3306, MASTER_LOG_FILE='$new_pos_binlog', MASTER_LOG_POS=$new_pos" 58 | exit 0 59 | -------------------------------------------------------------------------------- /migrate2innodb.c: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | migrate to InnoDB - v1.2 4 | converts MyISAM and Aria and whatever else you may have as engine, to InnoDB 5 | It replaces Engine=... in CREATE TABLE, and also converts any (var)char longer than 6 | the specified length to a (TINY)TEXT. This to avoid the row too large error. 7 | 8 | usage: 9 | 10 | gcc -O3 -o migrate2innodb migrate2innodb.c 11 | cat yourdump.sql | ./migrate2innodb maxcharlen logfile | mysql -A -u... schema 12 | 13 | the logfile will contain a list of the converted columns, if any 14 | 15 | Note: do NOT include the "mysql" schema in the dump, that should not be converted 16 | */ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | #define MAX_SKIPS 512 24 | #define MAX_LINE_SIZE 16777216 25 | #define TMPFILE "/tmp/migrate2innodb.XXXXXX" 26 | 27 | static char *xtract_cols(); 28 | static void read_create_table(); 29 | static void replace_log(); 30 | 31 | static struct SkipCol { 32 | char columns[512]; 33 | char keytype[32]; 34 | } skipcol[MAX_SKIPS]; 35 | 36 | static int skidx; 37 | 38 | main(argc, argv) 39 | int argc; 40 | char **argv; 41 | { 42 | static char buf[MAX_LINE_SIZE]; 43 | char *p1; 44 | int ct = 0; 45 | FILE *f; 46 | char t[256]; 47 | char tmpl[256]; 48 | char *tmpfn; 49 | 50 | if (argc != 3) { 51 | fprintf(stderr, "usage: %s \n", argv[0]); 52 | exit(1); 53 | } 54 | while(fgets(buf, MAX_LINE_SIZE -1, stdin)) { 55 | *(buf + strlen(buf) -1 ) = 0x00; 56 | if (!strncmp(buf, "CREATE TABLE ", 13)) { 57 | ct++; 58 | skidx=0; 59 
| strcpy(t, buf + 12); 60 | *(strchr(t, '(')) = 0x00; 61 | strcpy(tmpl, TMPFILE); 62 | tmpfn = mktemp(tmpl); 63 | f = fopen(tmpfn, "w"); 64 | } 65 | if (ct) { 66 | if (!strncmp(buf, " PRIMARY KEY ", 14)) { 67 | if ((p1 = xtract_cols(buf)) != NULL) { 68 | strcpy(skipcol[skidx].columns, p1); 69 | strcpy(skipcol[skidx++].keytype, "PRIMARY KEY"); 70 | } 71 | } 72 | if (!strncmp(buf, " UNIQUE KEY ", 13)) { 73 | if ((p1 = xtract_cols(buf)) != NULL) { 74 | strcpy(skipcol[skidx].columns, p1); 75 | strcpy(skipcol[skidx++].keytype, "UNIQUE KEY"); 76 | } 77 | } 78 | if (!strncmp(buf, " KEY ", 6)) { 79 | if ((p1 = xtract_cols(buf)) != NULL) { 80 | strcpy(skipcol[skidx].columns, p1); 81 | strcpy(skipcol[skidx++].keytype, "INDEX"); 82 | } 83 | } 84 | if (skidx == MAX_SKIPS) { 85 | fprintf(stderr, "out of memory - please increase MAX_SKIPS and recompile\n"); 86 | exit(1); 87 | } 88 | fprintf(f, "%s\n", buf); 89 | if (*(buf + strlen(buf) - 1) == ';') { 90 | ct--; 91 | fclose(f); 92 | read_create_table(t, atoi(argv[1]), argv[2], tmpfn); 93 | unlink(tmpfn); 94 | skidx=0; 95 | } 96 | continue; 97 | } 98 | printf(" %s\n", buf); 99 | } 100 | fflush(stdout); 101 | } 102 | 103 | static void read_create_table(t, maxlen, l, fn) 104 | char *t; 105 | int maxlen; 106 | char *l; 107 | char *fn; 108 | { 109 | static char buf[MAX_LINE_SIZE]; 110 | char *p0, *p1,*p2,*p3, *p4; 111 | FILE *f; 112 | int i, len; 113 | int skip=0; 114 | char r[256]; 115 | 116 | f = fopen(fn, "r"); 117 | while (fgets(buf, MAX_LINE_SIZE - 1, f)) { 118 | *(buf + strlen(buf) -1 ) = 0x00; 119 | if ((p0 = strstr(buf, " char(")) != NULL) { 120 | p2 = strchr(p0, ')'); 121 | *p2 = 0x00; 122 | len=atoi(p0+6); 123 | *p2=')'; 124 | if (len >= maxlen) { 125 | for (i = 0; i < skidx; i++) { 126 | if ((p1 = strchr(buf + 2, ' ')) != NULL) { 127 | *p1 = 0x00; 128 | if (strstr(skipcol[i].columns, buf + 2)) { 129 | sprintf(r, "%s, used in %s(%s)", buf + 2, skipcol[i].keytype, skipcol[i].columns); 130 | replace_log(l, t, "skipped", r); 
131 | skip++; 132 | } 133 | *p1 = 0x20; 134 | } 135 | } 136 | if (!skip) { 137 | if ((p3 = strchr(buf + 2, ' ')) != NULL) 138 | if ((p4 = strchr(++p3, ' ')) != NULL) 139 | *p4=0x00; 140 | replace_log(l, t, "replaced", buf + 2); 141 | if (p4) 142 | *p4 = 0x20; 143 | *p0 = 0x00; 144 | printf("%s%s%s\n", buf, len < 256 ? " tinytext" : " text", ++p2); 145 | continue; 146 | } 147 | else 148 | skip=0; 149 | } 150 | } 151 | if ((p0 = strstr(buf, " varchar(")) != NULL) { 152 | p2 = strchr(p0, ')'); 153 | *p2 = 0x00; 154 | len=atoi(p0+9); 155 | *p2=')'; 156 | if (len >= maxlen) { 157 | for (i = 0; i < skidx; i++) { 158 | if ((p1 = strchr(buf + 2, ' ')) != NULL) { 159 | *p1 = 0x00; 160 | if (strstr(skipcol[i].columns, buf + 2)) { 161 | sprintf(r, "%s, used in %s(%s)", buf + 2, skipcol[i].keytype, skipcol[i].columns); 162 | replace_log(l, t, "skipped", r); 163 | skip++; 164 | } 165 | *p1 = 0x20; 166 | } 167 | } 168 | if (!skip) { 169 | if ((p3 = strchr(buf + 2, ' ')) != NULL) 170 | if ((p4 = strchr(++p3, ' ')) != NULL) 171 | *p4=0x00; 172 | replace_log(l, t, "replaced", buf + 2); 173 | if (p4) 174 | *p4 = 0x20; 175 | *p0 = 0x00; 176 | printf("%s%s%s\n", buf, len < 256 ? 
" tinytext" : " text", ++p2); 177 | continue; 178 | } 179 | else 180 | skip=0; 181 | } 182 | } 183 | if (!strncmp(buf, ") ENGINE=", 9)) { 184 | if ((p1 = strchr(buf + 9, 0x20)) != NULL) 185 | printf(") ENGINE=InnoDB %s\n", p1); 186 | } 187 | else 188 | printf("%s\n", buf); 189 | skip=0; 190 | } 191 | fclose(f); 192 | } 193 | 194 | static void replace_log(l, t, w, d) 195 | char *l, *t, *w, *d; 196 | { 197 | FILE *f; 198 | 199 | if ((f = fopen(l, "a")) != NULL) { 200 | fprintf(f, "%s: %s %s\n", t, w, d); 201 | fclose(f); 202 | } 203 | } 204 | 205 | static char *xtract_cols(s) 206 | char *s; 207 | { 208 | char *p; 209 | static char buf[MAX_LINE_SIZE]; 210 | static char ldef[256]; 211 | 212 | strcpy(buf, s); 213 | if ((p = strrchr(buf, ')')) != NULL) 214 | *p = 0x00; 215 | else 216 | return(NULL); 217 | if ((p = strrchr(buf, '(')) != NULL) 218 | strcpy(ldef, ++p); 219 | else 220 | return(NULL); 221 | return(ldef); 222 | } 223 | 224 | -------------------------------------------------------------------------------- /move_grants.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # move grants for a specified user from one network to another 4 | # then optionally drop old grants 5 | # 6 | # (c) 2015 riccardo.pizzi@rumbo.com 7 | # 8 | echo -n "User to clone: " 9 | read user 10 | echo -n "From network: " 11 | read from_net 12 | echo -n "To network: " 13 | read to_net 14 | echo -n "Password: " 15 | stty -echo 16 | read password 17 | stty echo 18 | echo 19 | echo "show grants for '$user'@'$from_net'" | mysql -ANr -p$password | sed -e "s/'$user'@'$from_net'/'$user'@'$to_net'/g" -e "s/$/;/g" | mysql -f -A -p$password 20 | while true 21 | do 22 | echo -n "Drop user '$user'@'$from_net'? 
y/N " 23 | read yn 24 | case "${yn,,}" in 25 | 'n'|'') break;; 26 | 'y') echo "drop user '$user'@'$from_net'" | mysql -A -p$password 27 | break;; 28 | esac 29 | done 30 | exit 0 31 | -------------------------------------------------------------------------------- /mysql_case_insensitive_replace.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS replace_ci; 2 | SET NAMES UTF8; 3 | DELIMITER $$ 4 | 5 | CREATE FUNCTION `replace_ci` ( str TEXT CHARSET utf8, needle CHAR(255) CHARSET utf8, str_rep CHAR(255) CHARSET utf8) 6 | RETURNS TEXT CHARSET utf8 7 | DETERMINISTIC 8 | BEGIN 9 | DECLARE return_str TEXT CHARSET utf8 DEFAULT ''; 10 | DECLARE lower_str TEXT CHARSET utf8; 11 | DECLARE lower_needle TEXT CHARSET utf8; 12 | DECLARE pos INT DEFAULT 1; 13 | DECLARE old_pos INT DEFAULT 1; 14 | IF needle = '' THEN 15 | RETURN str; 16 | END IF; 17 | 18 | SELECT lower(str) INTO lower_str; 19 | SELECT lower(needle) INTO lower_needle; 20 | SELECT locate(lower_needle, lower_str, pos) INTO pos; 21 | WHILE pos > 0 22 | DO 23 | SELECT concat(return_str, substr(str, old_pos, pos-old_pos), str_rep) INTO return_str; 24 | SELECT pos + char_length(needle) INTO pos; 25 | SELECT pos INTO old_pos; 26 | SELECT locate(lower_needle, lower_str, pos) INTO pos; 27 | END WHILE; 28 | SELECT concat(return_str, substr(str, old_pos, char_length(str))) INTO return_str; 29 | RETURN return_str; 30 | END 31 | $$ 32 | 33 | DELIMITER ; 34 | 35 | -------------------------------------------------------------------------------- /nagios/mysql_check_max_connections.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Nagios check if max_connections limit is close or reached 4 | # riccardo.pizzi@rumbo.com 5 | # Note: MySQL user needs RELOAD privs from localhost 6 | # 7 | USER="" 8 | PASS="" 9 | THRESHOLD=20 10 | # 11 | limit=$(echo "show variables like 'max_connections'" | /usr/bin/mysql -ANr -u 
$USER -p$PASS 2>/dev/null | cut -f 2) 12 | max=$(echo "show global status like 'Max_used_connections'" | /usr/bin/mysql -ANr -u $USER -p$PASS 2>/dev/null | cut -f 2) 13 | wt=$(($limit*(100-$THRESHOLD)/100)) 14 | if [ $max -ge $limit ] 15 | then 16 | echo "CRITICAL: 100% of available connections in use ($max)" 17 | echo "FLUSH STATUS" | /usr/bin/mysql -ANr -u $USER -p$PASS 2>/dev/null 18 | exit 2 19 | fi 20 | if [ $max -ge $wt ] 21 | then 22 | echo "WARNING: more than $((100-$THRESHOLD))% of available connections in use ($max/$limit)" 23 | echo "FLUSH STATUS" | /usr/bin/mysql -ANr -u $USER -p$PASS 2>/dev/null 24 | exit 1 25 | fi 26 | echo "OK: max used connections $max ($(echo "scale=2;$max/$limit*100" | bc | cut -d"." -f 1)%)" 27 | exit 0 28 | -------------------------------------------------------------------------------- /nagios/mysql_check_partitions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Nagios check for presence of MySQL partition for a given day in the future 4 | # riccardo.pizzi@rumbo.com 5 | # $1: schema, $2: table, $3: how many days in advance to check 6 | # 7 | USER="" 8 | PASS="" 9 | # 10 | [ $# -ne 3 ] && exit 3 11 | case $(echo "select concat('select if(',replace(replace(upper(substring_index(partition_expression, '(',1)), 'TO_DAYS', 'FROM_DAYS'), 'UNIX_TIMESTAMP', 'FROM_UNIXTIME'), '(', partition_description, ') > date_add(curdate(), interval $3 day), 0, 2)') from information_schema.partitions where table_schema = '$1' and table_name = '$2' order by partition_description desc limit 1" | /usr/bin/mysql -ANr -u $USER -p$PASS | /usr/bin/mysql -ANr -u $USER -p$PASS) in 12 | 0) echo "OK: Partitions for next $3 days exist."; exit 0;; 13 | 2) echo "CRITICAL: Not enough partitions for next $3 days!"; exit 2;; 14 | *) echo "UNKNOWN"; exit 3;; 15 | esac 16 | -------------------------------------------------------------------------------- /osc_progress.sh:
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # shows real progress of an online schema change session 4 | # works when PK is a numeric autoinc column 5 | # uses current user to log to the DB; you can specify the user running the OSC as an argument, 6 | # if different than the user running this script (eg. if you want to run this on a slave) 7 | # 8 | # Version: 1.1 9 | # Author: rick.pizzi@mariadb.com 10 | # 11 | # 12 | # 13 | AVG_BUF=120 14 | # 15 | [ "$1" != "" ] && OSC_USER=$1 || OSC_USER=$USER 16 | # 17 | echo -n "Getting OSC information... " 18 | while true 19 | do 20 | row=$(echo "select * from information_schema.processlist where user = '"$OSC_USER"' and info like 'INSERT LOW%'" | mysql -AN -u $OSC_USER 2>/dev/null | sed -e "s/\`//g") 21 | [ "$row" != "" ] && break 22 | sleep 1 23 | done 24 | echo "done." 25 | table=$(echo $row | grep -oP '(?<=FROM\s)\w+\.\w+' | uniq) 26 | table_schema=$(echo $table | cut -d"." -f1) 27 | table_name=$(echo $table | cut -d"." 
-f2) 28 | pk=$(echo $row | grep -oP '(?<=WHERE\s\(\()\w+' | uniq) 29 | pksize=$(echo "select count(*) from information_schema.columns where table_schema = '"$table_schema"' and table_name = '"$table_name"' and column_key = 'PRI'" | mysql -AN -u $OSC_USER 2>/dev/null) 30 | coltype=$(echo "select data_type from information_schema.columns where table_schema = '"$table_schema"' and table_name = '"$table_name"' and column_name = '"$pk"'" | mysql -AN -u $OSC_USER 2>/dev/null) 31 | case "$coltype" in 32 | 'int'|'bigint') ;; 33 | *) echo "sorry, this script only works when PK is an integer"; exit 1;; 34 | esac 35 | pval=0 36 | declare -a speed 37 | while true 38 | do 39 | target=$(echo "select $pk from $table order by 1 desc limit 1" | mysql -AN -u $OSC_USER 2>/dev/null) 40 | if [ $pksize -eq 1 ] 41 | then 42 | curr=$(echo "select substring_index(substring_index(right(info, 100), '<= \'', -1), '\')', 1) from information_schema.processlist where user = '"$OSC_USER"' and info like 'INSERT LOW%'" | mysql -AN -u $OSC_USER 2>/dev/null) 43 | else 44 | curr=$(echo "select substring_index(substring_index(right(info, 200), '= \'', -2), '\')', 1) from information_schema.processlist where user = '"$OSC_USER"' and info like 'INSERT LOW%'" | mysql -AN -u $OSC_USER 2>/dev/null | cut -d"'" -f 1) 45 | fi 46 | if [ "$curr" = "" ] 47 | then 48 | sleep 2 49 | else 50 | percd=$(echo "scale=6; $curr / $target " | bc) 51 | perc=$(echo "scale=2; $percd * 100" | bc) 52 | cval=$(expr $(echo $percd | tr -d ".") + 0) 53 | if [ $pval -gt 0 ] 54 | then 55 | pmin=$((cval-pval)) 56 | if [ "${speed[$((AVG_BUF-1))]}" != "" ] 57 | then 58 | n=0 59 | avg=$(for i in $(seq 0 1 $((AVG_BUF-1))) 60 | do 61 | [ $i -eq 0 ] && echo -n "scale=4; (0" 62 | if [ "${speed[$i]}" != "" ] 63 | then 64 | echo -n "+${speed[$i]}" 65 | n=$((n+1)) 66 | fi 67 | [ $i -eq $((AVG_BUF-1)) ] && echo ")/$n" 68 | done | bc) 69 | fi 70 | if [ "$avg" != "" ] 71 | then 72 | tr=$(echo "(1000000-$cval)/($avg*2)" | bc) 73 | #echo "curr $pmin 
avg $avg tr $tr cval $cval" 74 | ed=$((tr/1440)) 75 | eh=$(((tr-ed*1440)/60)) 76 | em=$((tr-eh*60-ed*1440)) 77 | /usr/bin/printf "%s: %s/%s (%.2f%%) ETA: %dd%02dh%02dm\n" $table $curr $target $perc $ed $eh $em 78 | else 79 | /usr/bin/printf "%s: %s/%s (%.2f%%) ETA: estimating...\n" $table $curr $target $perc $ed $eh $em 80 | fi 81 | for i in $(seq 0 1 $((AVG_BUF-2))) 82 | do 83 | speed[$i]=${speed[$((i+1))]} 84 | done 85 | speed[$((AVG_BUF-1))]=$pmin 86 | else 87 | /usr/bin/printf "%s: %s/%s (%.2f%%) ETA: estimating...\n" $table $curr $target $perc $ed $eh $em 88 | fi 89 | pval=$cval 90 | sleep 30 91 | fi 92 | done 93 | exit 0 94 | -------------------------------------------------------------------------------- /parallel_import/Config: -------------------------------------------------------------------------------- 1 | files.new_files 2 | comments.new_del_comments 3 | snapshot.answers 4 | -------------------------------------------------------------------------------- /parallel_import/README: -------------------------------------------------------------------------------- 1 | Scripts to import a set of MySQL schemas from different servers into a single target MySQL server. 2 | The import happens in parallel. The scripts are optimized for speed, therefore the import is done 3 | using MyISAM engine (this means the target server should only be used for read workloads). 
4 | -------------------------------------------------------------------------------- /parallel_import/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # selective parallel import of tables into a target DB 4 | # tables are imported in MyISAM to speed up things, partitions are coalesced if present, foreign keys dropped 5 | # tables go in Config file, one per line, in format server.schema.tablename or if InnoDB import desired - slow - 6 | # you should use server.schema.tablename.I 7 | # 8 | # v2.0 14-Dec-2015 9 | # riccardo.pizzi@rumbo.com 10 | # 11 | export BASEDIR=/lm 12 | export TARGET=8.8.8.8 # target server 13 | export DUMPER=/usr/bin/mysqldump 14 | export SOURCE_USER= 15 | export SOURCE_PASS= 16 | # 17 | MAIL_RECIP="you@youremail.com" 18 | # 19 | tmpf=/tmp/dsload.$$ 20 | trap 'rm -f $tmpf' 0 21 | IFS=" 22 | " 23 | t_start=$(date +%s) 24 | for row in $(cat $BASEDIR/etc/Config) 25 | do 26 | source=$(echo $row | cut -d"." -f 1) 27 | schema=$(echo $row | cut -d"." -f 2) 28 | table=$(echo $row | cut -d"." -f 3) 29 | innodb=$(echo $row | cut -d"." 
-f 4) 30 | [ "$source" = "" -o "$schema" = "" -o "$table" = "" ] && continue 31 | $BASEDIR/bin/single.sh $schema $table $source $innodb 2>&1 & 32 | done > $tmpf 33 | wait 34 | t_end=$(date +%s) 35 | elapsed=$(expr $t_end - $t_start) 36 | echo "-------------------------------------------" >> $tmpf 37 | min=$((elapsed/60)) 38 | secs=$((elapsed-min*60)) 39 | echo "Total running time $min minutes $secs seconds" >> $tmpf 40 | cat $tmpf | tee $BASEDIR/log/last.log | mailx -s "Prod to QA import transcript" $MAIL_RECIP 41 | exit 0 42 | -------------------------------------------------------------------------------- /parallel_import/single.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # part of datascience load - should be called by load.sh 4 | # db user and password desumed from .my.cnf dot file 5 | # rpizzi@blackbirdit.com 6 | # 7 | # 10-Apr-2014 fixes for partition check 8 | # 10-Apr-1014 create schema on TARGET if not there 9 | # 14-Dec-2015 added innodb option 10 | # 11 | schema=$1 12 | table=$2 13 | source=$3 14 | [ "$4" = "I" ] && myisam="InnoDB" || myisam="MyISAM" 15 | rows=$(echo "SELECT table_rows FROM tables WHERE table_schema = '$schema' AND table_name ='${table}_NEW'" | mysql -N -r -h $TARGET information_schema 2>/dev/null) 16 | if [ "$rows" != "" ] 17 | then 18 | if [ $rows -gt 0 ] 19 | then 20 | echo "[ $schema.$table ] ERROR: found $schema.${table}_NEW with $rows rows in it, drop it to reload $table" 1>&2 21 | exit 1 22 | fi 23 | fi 24 | ts=$(date "+%m-%d-%Y %H:%M:%S ") 25 | echo "$ts [ $schema.$table ] importing "$schema"."$table" from $source" 26 | partitions=$(echo "SELECT COUNT(*) FROM partitions WHERE table_schema = '$schema' AND table_name = '$table'" | mysql -N -r -h $source -u$SOURCE_USER -p$SOURCE_PASS information_schema 2>/dev/null) 27 | if [ $partitions -gt 1 ] 28 | then 29 | filter="sed -e '/PARTITION/d' -e '/ENGINE/s/$/;/' -e 's/\`$table\`/\`${table}_NEW\`/g' -e 
's/InnoDB/$myisam/g'" 30 | else 31 | filter="sed -e 's/\`$table\`/\`${table}_NEW\`/g' -e 's/InnoDB/$myisam/g'" 32 | fi 33 | echo "CREATE DATABASE IF NOT EXISTS $schema" | mysql -h $TARGET 2>/dev/null 34 | # make this a loop to trap mysqldump random failures 35 | while true 36 | do 37 | $DUMPER --skip-lock-tables --single-transaction --skip-add-drop-table --no-create-info --max_allowed_packet=768M -h $source -u$SOURCE_USER -p$SOURCE_PASS $schema $table 2>/dev/null | sed -e "s/\`$table\`/\`${table}_NEW\`/g" | ( 38 | $DUMPER --skip-lock-tables --no-data --skip-add-drop-table -h $source -u$SOURCE_USER -p$SOURCE_PASS $schema $table 2>/dev/null | eval $filter 39 | echo "ALTER TABLE ${table}_NEW DISABLE KEYS;" 40 | echo "SELECT CONCAT('ALTER TABLE $schema'.${table}_NEW DROP FOREIGN KEY ',constraint_name,';') FROM information_schema.table_constraints WHERE constraint_type='FOREIGN KEY' AND table_schema='$schema';" | mysql -N -r -h $source -u$SOURCE_USER -p$SOURCE_PASS 2>/dev/null 41 | cat 42 | echo "ALTER TABLE ${table}_NEW ENABLE KEYS;" 43 | ) | mysql -A -h $TARGET $schema 2>/dev/null 44 | status=${PIPESTATUS[0]} 45 | if [ $status -eq 0 ] 46 | then 47 | break 48 | else 49 | ts=$(date "+%m-%d-%Y %H:%M:%S ") 50 | echo "$ts [ $schema.$table ] WARNING: exited with error code $status, retrying" 51 | echo "DROP TABLE ${table}_NEW" | mysql -h $TARGET $schema 2>/dev/null 52 | fi 53 | done 54 | new_rows=$(echo "SELECT table_rows FROM tables WHERE table_schema = '$schema' AND table_name ='${table}_NEW'" | mysql -N -r -h $TARGET information_schema 2>/dev/null) 55 | if [ $(echo "show tables" | mysql -A -N -r -h $TARGET $schema 2>/dev/null | grep -c "^"$table"$") -eq 1 ] 56 | then 57 | old_rows=$(echo "SELECT table_rows FROM tables WHERE table_schema = '$schema' AND table_name ='${table}'" | mysql -N -r -h $TARGET information_schema 2>/dev/null) 58 | new_table=0 59 | else 60 | old_rows=0 # table does not exist on target 61 | new_table=1 62 | fi 63 | ts=$(date "+%m-%d-%Y %H:%M:%S ") 64 
| if [ "$new_rows" = "" -o "$old_rows" = "" ] 65 | then 66 | echo "$ts [ $schema.$table ] ERROR: loading of $schema.$table failed, original ${table} left untouched" 1>&2 67 | echo "DROP table ${table}_NEW" | mysql -h $TARGET $schema 2>/dev/null # cleanup 68 | exit 1 69 | fi 70 | if [ $new_rows -lt $old_rows ] 71 | then 72 | echo "$ts [ $schema.$table ] WARNING: new table has less rows than old table, renaming it as ${table}_SAVED" 1>&2 73 | echo "DROP table IF EXISTS ${table}_SAVED" | mysql -h $TARGET $schema 2>/dev/null 74 | echo "RENAME TABLE $table TO ${table}_SAVED" | mysql -h $TARGET $schema 2>/dev/null 75 | echo "RENAME TABLE ${table}_NEW TO $table" | mysql -h $TARGET $schema 2>/dev/null 76 | else 77 | if [ $new_table -eq 1 ] 78 | then 79 | echo "RENAME TABLE ${table}_NEW TO $table" | mysql -h $TARGET $schema 2>/dev/null 80 | else 81 | echo "RENAME TABLE $table TO ${table}_OLD, ${table}_NEW TO $table" | mysql -h $TARGET $schema 2>/dev/null 82 | echo "DROP table ${table}_OLD" | mysql -h $TARGET $schema 2>/dev/null 83 | fi 84 | fi 85 | echo "$ts [ $schema.$table ] successfully loaded $new_rows rows into $schema.$table" 86 | exit 0 87 | -------------------------------------------------------------------------------- /pz-arrayperf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # pz-arrayperf.sh - pretty prints storage statistics, autodetecting LVM layout 4 | # 5 | # requires ioping, and is best used with "watch", as in "watch -n 1 ./pz-arrayperf.sh" 6 | # pizzi@leopardus.com 7 | # 8 | # 9 | REQSIZE=16k # can override this on command line 10 | # 11 | IFS=" 12 | " 13 | ovg="" 14 | devlist="" 15 | sg=0 16 | 17 | list_devs() 18 | { 19 | printf "%-16s %-24s %5s %5s %15s %9s %13s %13s %13s %13s %13s\n" "Volume Group" "Device" "Strip" "#req" "Time spent" "$REQSIZE IOPS" "xfer spd B/s" "min time (us)" "avg time (us)" "max time (us)" "stddev (us)" 20 | printf "%-16s %-24s %5s %5s %-15s %9s %11s %13s %13s %13s 
%13s\n" "----------------" "------------------------" "-----" "-----" "---------------" "-------" "-------------" "-------------" "-------------" "-------------" "-------------" 21 | sc=1 22 | for dev in $2 23 | do 24 | ( 25 | IFS=" " 26 | printf "%-16s %-24s %5s %5d %'15d %9d %'13d %'13d %'13d %'13d %'13d %.0s %.0s\n" $1 $dev "$sc/$3" $(ioping -D -B -q -i 0 -w 1 -S 64m -s $REQSIZE $dev) 27 | ) & 28 | sc=$((sc+1)) 29 | done 30 | wait 31 | } 32 | 33 | #(1) count of requests in statistics 34 | #(2) running time (usec) 35 | #(3) requests per second (iops) 36 | #(4) transfer speed (bytes/sec) 37 | #(5) minimal request time (usec) 38 | #(6) average request time (usec) 39 | #(7) maximum request time (usec) 40 | #(8) request time standard deviation (usec) 41 | #(9) total requests (including too slow and too fast) 42 | #(10) total running time (usec) 43 | 44 | [ "$1" != "" ] && REQSIZE=$1 45 | for d in $(lvs -o name,devices,stripes| fgrep -v "Device" | tr -s " " | tr " " "\t") 46 | do 47 | vg=$(echo $d | cut -f 2) 48 | ss=$(echo $d | cut -f 4) 49 | devlist=$(echo $d | cut -f 3 | sed -e "s/([^)]*)//g" | tr "," "\n") 50 | list_devs $vg "$devlist" $ss 51 | done 52 | exit 0 53 | 54 | -------------------------------------------------------------------------------- /pz-maria-multi-skip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # auto-skip statements on selected slave in a multi source replication env 4 | # args: 5 | # $1 => master connection to use for skipping 6 | # 7 | [ "$1" = "" ] && exit 1 8 | echo -n "Password: " 9 | stty -echo 10 | read password 11 | stty echo 12 | echo 13 | pc=$(mysqladmin --protocol=tcp -p$password ping | fgrep -c alive) 14 | [ $pc -ne 1 ] && exit 1 15 | while true 16 | do 17 | status=$(echo "show slave '$1' status\G" | mysql -Ar -p$password 2>/dev/null | egrep "Seconds_Behind_Master:|Slave_IO_Running:" | tr -d "[ ]" | tr -s "[\n]" "[:]") 18 | iothread=$(echo "$status" | cut -d ":" -f 
2) 19 | behind=$(echo "$status" | cut -d":" -f 4) 20 | if [ "$iothread" != "Yes" ] 21 | then 22 | sleep 0.05 23 | continue 24 | fi 25 | if [ "$behind" = "NULL" ] 26 | then 27 | echo -n "." 28 | echo "set session default_master_connection='$1'; stop slave; set global sql_slave_skip_counter = 1; start slave" | mysql -ANr -p$password 2>/dev/null 29 | else 30 | if [ $behind -eq 0 ] 31 | then 32 | echo "Slave '$1' is caught up" 33 | sleep 58 34 | else 35 | echo $behind 36 | fi 37 | sleep 2 38 | fi 39 | done 40 | -------------------------------------------------------------------------------- /pz-slave-monitor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # 4 | # pz-slave-monitor 1.01 5 | # monitor performances of SQL_thread and saves queries 6 | # longer than in a file in the output folder 7 | # 8 | # rick.pizzi@mariadb.com 9 | # 10 | # 11 | RELAYDIR=/var/lib/mysql 12 | FOLDER=./output 13 | MIN_THRESHOLD=25 # milliseconds 14 | # 15 | [ ! 
-d $FOLDER ] && mkdir $FOLDER 16 | if [ "$1" = "" ] 17 | then 18 | echo "usage: $0 " 19 | exit 1 20 | fi 21 | threshold=$1 22 | if [ $threshold -lt $MIN_THRESHOLD ] 23 | then 24 | echo "NOTE: Minimum query time is $MIN_THRESHOLD ms, adjusted accordingly" 25 | threshold=$MIN_THRESHOLD 26 | fi 27 | IFS=" 28 | " 29 | start_ts=$(date +%s%N) 30 | i=0 31 | rbmsg=0 32 | while true 33 | do 34 | for sr in $(echo "show slave status\G" | mysql -Ar | egrep "Relay_Log_File|Relay_Log_Pos|Slave_SQL_Running") 35 | do 36 | r=${sr// /} 37 | case "${r%%:*}" in 38 | 'Relay_Log_File') relay_file=${r#*:};; 39 | 'Relay_Log_Pos') relay_pos=${r#*:};; 40 | 'Slave_SQL_Running') running=${r#*:};; 41 | esac 42 | done 43 | if [ "$running" = "No" ] 44 | then 45 | if [ $rbmsg -eq 0 ] 46 | then 47 | echo -e "\rReplication broken, waiting" 48 | rbmsg=1 49 | fi 50 | sleep 5 51 | continue 52 | fi 53 | [ "$running" = "" ] && break 54 | if [ "$oldpos" != "$relay_file:$relay_pos" ] 55 | then 56 | if [ $i -gt 1 ] 57 | then 58 | rbmsg=0 59 | diff=$((($(date +%s%N)-start_ts)/1000000)) 60 | if [ $diff -ge $threshold ] 61 | then 62 | echo -e "\rExecuted $oldpos in $diff ms ($i loops avg. $(bc <<< "scale=1;$diff/$i") ms)" 63 | mysqlbinlog --base64-output=never -j ${oldpos#*:} $RELAYDIR/${oldpos%%:*} | egrep -v "^SET|^/\*!\*/;" | head -200 | fgrep -A 50 "# at ${oldpos#*:}" | grep -B 50 -m 1 "^COMMIT" > $FOLDER/${diff}_${oldpos%%:*}:${oldpos#*:} & 64 | fi 65 | fi 66 | oldpos="$relay_file:$relay_pos" 67 | i=0 68 | start_ts=$(date +%s%N) 69 | fi 70 | i=$((i+1)) 71 | echo -en "\r $i " 72 | done 73 | 74 | -------------------------------------------------------------------------------- /pzdd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # reads entire device in order to warm it up (useful for EC2 EBS volumes) 4 | # pretty prints progress as it goes. to be run in a screen session! 
5 | # 6 | # rpizzi@blackbirdit.com 7 | # 8 | tmpf=/tmp/pzdd.$$ 9 | if [ $# -ne 1 ] 10 | then 11 | echo "Usage: $0 " 12 | exit 1 13 | fi 14 | if [ ! -b $1 ] 15 | then 16 | echo "$1 non existent or invalid device" 17 | exit 1 18 | fi 19 | echo "Launching: dd if=$1 of=/dev/null bs=1024k" 20 | dd if=$1 of=/dev/null bs=1024k 2>$tmpf & 21 | pid=$! 22 | trap 'rm -f $tmpf; kill $pid' 0 23 | echo "dd process id $pid" 24 | sleep 3 25 | while true 26 | do 27 | kill -0 $pid 2>/dev/null || break 28 | kill -SIGUSR1 $pid 29 | sleep 1 30 | echo -n "$(date '+%Y-%m-%d %T ')" 31 | tail -1 $tmpf 32 | sleep 59 33 | done 34 | echo "Completed." 35 | exit 0 36 | -------------------------------------------------------------------------------- /remaster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # script to move from master to the passive master 4 | # WARNING: it assumes all slaves caught up!! 5 | # rpizzi@blackbirdit.com 6 | # 7 | # Config file: 8 | # master=1.2.3.4 9 | # comaster=1.2.3.5 10 | # slaves=1.2.1.1 1.2.1.2 .... 11 | # 12 | USER=root 13 | PASSWORD= 14 | # 15 | tmpf=/tmp/remaster.$$ 16 | trap 'rm -f $tmpf' 0 17 | master=$(grep ^master Config | cut -d"=" -f2) 18 | new_master=$(grep ^comaster Config | cut -d"=" -f2) 19 | slaves=$(grep ^slaves Config | cut -d"=" -f2) 20 | # 21 | echo -n "setting master $master read only... " 22 | echo "set global read_only=1" | mysql -u$USER -p$PASSWORD -h $master 23 | ro=$(echo "show variables like 'read_only'" | mysql -u$USER -p$PASSWORD -ANr -h $master | cut -f 2) 24 | if [ "$ro" = "ON" ] 25 | then 26 | echo OK 27 | else 28 | echo "FAILED, but continuing" 29 | fi 30 | echo -n "getting master status from new master $new_master... 
" 31 | status=$(echo "show master status" | mysql -u$USER -p$PASSWORD -ANr -h $new_master) 32 | master_log_file=$(echo "$status" | cut -f 1) 33 | master_log_pos=$(echo "$status" | cut -f 2) 34 | if [ "$master_log_file" = "" -o "$master_log_pos" = "" ] 35 | then 36 | echo "FAILED" 37 | exit 1 38 | else 39 | echo "OK ($master_log_file, position $master_log_pos)" 40 | fi 41 | echo -n "resetting slave status on new master... " 42 | echo "stop slave; reset slave all" | mysql -u$USER -p$PASSWORD -ANr -h $new_master 43 | sc=$(echo "show slave status" | mysql -u$USER -p$PASSWORD -ANr -h $new_master | wc -c) 44 | if [ $sc -eq 0 ] 45 | then 46 | echo "OK" 47 | else 48 | echo "FAILED" 49 | exit 1 50 | fi 51 | echo -n "setting new master $new_master read write... " 52 | echo "set global read_only=0" | mysql -u$USER -p$PASSWORD -h $new_master 53 | ro=$(echo "show variables like 'read_only'" | mysql -u$USER -p$PASSWORD -ANr -h $new_master | cut -f 2) 54 | if [ "$ro" = "OFF" ] 55 | then 56 | echo OK 57 | else 58 | echo "FAILED" 59 | exit 1 60 | fi 61 | for slave in $slaves 62 | do 63 | echo -n "changing master of slave $slave... 
" 64 | echo "stop slave; change master to master_host = '$new_master', master_log_file = '$master_log_file', master_log_pos = $master_log_pos; start slave" | mysql -u$USER -p$PASSWORD -h $slave 65 | echo "show slave status\G" | mysql -u$USER -p$PASSWORD -Ar -h $slave | egrep "Master_Host:|Slave_IO_Running:|Slave_SQL_Running:" | tr -d "[ ]" > $tmpf 66 | check=$(grep "Master_Host" $tmpf | cut -d":" -f 2) 67 | if [ "$check" != "$new_master" ] 68 | then 69 | echo "FAILED - fix manually" 70 | continue 71 | fi 72 | check=$(grep "Slave_IO_Running" $tmpf | cut -d":" -f 2) 73 | if [ "$check" != "Yes" ] 74 | then 75 | echo "FAILED - fix manually" 76 | continue 77 | fi 78 | check=$(grep "Slave_SQL_Running" $tmpf | cut -d":" -f 2) 79 | if [ "$check" != "Yes" ] 80 | then 81 | echo "FAILED - fix manually" 82 | continue 83 | fi 84 | echo "OK" 85 | done 86 | echo "Now entering slave replication check loop." 87 | echo "Interrupt with Ctrl-C when you're bored." 88 | while true 89 | do 90 | for slave in $slaves 91 | do 92 | echo -n $slave" ... " 93 | echo "show slave status\G" | mysql -u$USER -p$PASSWORD -Ar -h $slave | fgrep Seconds 94 | done 95 | sleep 5 96 | done 97 | -------------------------------------------------------------------------------- /rotate_slow_logs.sh: -------------------------------------------------------------------------------- 1 | # 2 | # gracefully rotates slow query log 3 | # rpizzi@blackbirdit.com 4 | # 5 | SLOWDIR=/var/log/mysql 6 | SLOWFILE=mysqld_slow.log 7 | RETENTION=7 8 | # 9 | USER=reload 10 | PASS="...." 
11 | HOST=localhost 12 | # 13 | c=$RETENTION 14 | mv $SLOWDIR/$SLOWFILE $SLOWDIR/$SLOWFILE.1 15 | echo "FLUSH LOGS" | mysql -u $USER -h $HOST -p$PASS 16 | while true 17 | do 18 | [ $c -eq 0 ] && break 19 | if [ $c -eq $RETENTION ] 20 | then 21 | [ -f $SLOWDIR/$SLOWFILE.${c}.gz ] && rm -f $SLOWDIR/$SLOWFILE.$c.gz 22 | else 23 | older=$(expr $c + 1) 24 | [ -f $SLOWDIR/$SLOWFILE.${c}.gz ] && mv $SLOWDIR/$SLOWFILE.${c}.gz $SLOWDIR/$SLOWFILE.${older}.gz 25 | fi 26 | c=$(expr $c - 1) 27 | done 28 | gzip $SLOWDIR/$SLOWFILE.1 29 | exit 0 30 | -------------------------------------------------------------------------------- /rtm/README: -------------------------------------------------------------------------------- 1 | 2 | RTM - a realtime transaction monitor for MariaDB 3 | 4 | A small script that will behave much InnoTop, but instead of showing you tons of uninteresting information 5 | it will focus on transactions, with a view at a glance of the workload in real-time. 6 | 7 | Can work interactively, or can be used in batch mode (--quiet) with --kill or --snap-if-waiting to monitor or 8 | control a problematic workload. 9 | 10 | As a bonus, a small C program that will help find transactions that took more than X seconds from a binlog file. 11 | 12 | usage: ./rtm [ options ... 
] 13 | 14 | Valid options: 15 | 16 | --host hostname for connection to target instance 17 | --port port for connection to target instance 18 | --user user for connection to target instance 19 | --password password for connection to target instance 20 | --ask-password password for connection is asked on tty 21 | --socket socket for connection to target instance 22 | --interval refresh interval (1-60 seconds) 23 | --all-transactions include just started transactions (lifetime < 1s) 24 | --processlist include info from processlist 25 | --kill kill transactions running since X seconds or more (30-1800) 26 | --locking-only filter transactions holding row locks 27 | --waiting-only filter transactions waiting to acquire a lock 28 | --snap-if-waiting-time take a snapshot if there are transactions waiting X seconds or more 29 | --snap-if-waiting-count take a snapshot if at least X transactions are waiting 30 | --snapshot-dir directory to use for snapshots (default /tmp) 31 | --use-file display info from given file (useful with snapshots) 32 | --name-resolve-on when reading from file, assume server has skip_name_resolve=OFF 33 | --logfile log activity about snapshots and kills 34 | --quiet suppresses output; useful with --kill 35 | --help this help 36 | 37 | 38 | -------------------------------------------------------------------------------- /rtm/binspector.c: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | binspector - filters a binlog file and shows transactions that took more than X seconds 4 | 5 | gcc -O3 -o binspector binspector.c 6 | 7 | (C) 2022 Rick Pizzi pizzi@leopardus.com 8 | 9 | 10 | */ 11 | 12 | #define _XOPEN_SOURCE 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | static char * pretty_t(); 19 | 20 | static char *pretty_t(t1, t2) 21 | time_t t1, t2; 22 | { 23 | static char buf[128]; 24 | char tbuf[64]; 25 | 26 | strftime(tbuf, sizeof(tbuf), "%T", gmtime(&t1)); 27 | sprintf(buf, "started %s ", 
tbuf); 28 | strftime(tbuf, sizeof(tbuf), "%T", gmtime(&t2)); 29 | strcat(buf, "ended "); 30 | strcat(buf, tbuf); 31 | return(buf); 32 | } 33 | 34 | main(argc, argv) 35 | int argc; 36 | char **argv; 37 | { 38 | char buf [1024]; 39 | struct tm tm; 40 | time_t t, log_t, start_t, min_t; 41 | int tr=0; 42 | long pos = 0L, start_p, n; 43 | 44 | if (argv[1] != NULL) 45 | min_t = atoi(argv[1]); 46 | else 47 | min_t = 1; 48 | while(fgets(buf, 1023, stdin)) { 49 | *(buf + strlen(buf) - 1) = 0x00; 50 | if (!strcmp(buf, "START TRANSACTION") || !strcmp(buf, "BEGIN")) { 51 | tr=1; 52 | start_t=0; 53 | start_p=pos; 54 | } 55 | if (!strcmp(buf, "COMMIT/*!*/;")) { 56 | tr=0; 57 | if (t - start_t >= min_t) { 58 | if (fgets(buf, 1023, stdin) != NULL) 59 | if (sscanf(buf, "# at %ld", &n)) 60 | pos=n; 61 | printf("%ld - %ld %s runtime %ds\n", start_p, pos, pretty_t(start_t, t), (int)(t - start_t)); 62 | continue; 63 | } 64 | } 65 | if (strptime(buf, "#%y%m%d%t%T%tserver", &tm) != NULL) { 66 | t=timegm(&tm); 67 | if (tr && !start_t) 68 | start_t=t; 69 | } 70 | if (sscanf(buf, "# at %ld", &n)) 71 | pos=n; 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /rtm/rtm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # RTM - Realtime Transactions Monitor for MariaDB 4 | # (c) 2023,2024,2025 Rick Pizzi, pizzi@leopardus.com 5 | # 6 | VERSION=1.5.5 7 | PROCLIST_MIN_TERM_SIZE=185 8 | HEADER_LINES=5 9 | LANG= 10 | 11 | prettify() 12 | { 13 | if [ $1 -lt 1000000 ] 14 | then 15 | pretty=$1 16 | return 17 | fi 18 | mi=$(($1/1000000)) 19 | r=$(($1-mi*1000000)) 20 | d=$((r/10000)) 21 | pretty="$mi.${d}M" 22 | } 23 | 24 | pretty_time() 25 | { 26 | t=$1 27 | h=$((t/3600)) 28 | m=$(((t-h*3600)/60)) 29 | s=$((t-h*3600-m*60)) 30 | } 31 | 32 | init_terminal() 33 | { 34 | term_bold=$(tput bold) 35 | term_rev=$(tput rev) 36 | term_reset=$(tput sgr0) 37 | } 38 | 39 | check_terminal_width() 40 | { 41 | [ 
$COLUMNS -lt $PROCLIST_MIN_TERM_SIZE ] && return 1 || return 0 42 | } 43 | 44 | pak() 45 | { 46 | echo $1 47 | echo -n "Hit any key to continue " 48 | read -n 1 junk 49 | } 50 | 51 | help_menu() 52 | { 53 | scr_menu_opt=( 54 | "A=all transactions ($all)" 55 | "I=change refresh interval (${refresh}s)" 56 | "K=kill transaction" 57 | "L=locking only ($lockingonly)" 58 | "O=older first ($olderfirst)" 59 | "P=processlist info ($processlist)" 60 | "S=take snapshot" 61 | "W=waiting only ($waitingonly)" 62 | "Q=quit monitor" 63 | ) 64 | echo -n "keys: " 65 | p=6 66 | for entry in "${scr_menu_opt[@]}" 67 | do 68 | if [ $((p+${#entry})) -gt $COLUMNS ] 69 | then 70 | echo 71 | tl=$((tl+1)) 72 | echo -n " " 73 | p=6 74 | fi 75 | echo -n "$entry " 76 | p=$((p+${#entry}+2)) 77 | done 78 | echo 79 | tl=$((tl+1)) 80 | } 81 | 82 | header() 83 | { 84 | printf "$term_bold%-63s $term_reset" "MariaDB Realtime Transactions Monitor v$VERSION" 85 | case $hdrmsg in 86 | 1) 87 | printf "$term_rev%-30s$term_reset " " *** SNAPSHOT TAKEN ***" 88 | ;; 89 | 2) 90 | printf "$term_rev%-30s$term_reset " "snapshot $snapt" 91 | ;; 92 | *) 93 | printf "%-30s " $hostname 94 | ;; 95 | esac 96 | printf "%20s" " " 97 | if [ "$arg_use_file" = "" ] 98 | then 99 | prettify $(cat $snapfile | grep ^History | sed -re "s/History list length ([0-9]*)/\1/") 100 | if [ $COLUMNS -gt 170 ] 101 | then 102 | printf "%26s" "History list length $pretty" 103 | printf "%$((COLUMNS-169))s" " " 104 | else 105 | printf "%$((COLUMNS-143))s" " " 106 | fi 107 | else 108 | printf "%$((COLUMNS-143))s" " " 109 | fi 110 | printf "%s\n" "$(date)" 111 | [ $arg_debug -eq 1 ] && echo "DEBUG: w=$COLUMNS h=$LINES min=$PROCLIST_MIN_TERM_SIZE" 112 | hdrmsg=0 113 | tl=$HEADER_LINES 114 | help_menu 115 | printf "%-8s %-17s %-8s %-6s %-6s %-10s %-16s %-16s %-20s" " state" " transaction ID" " time" "locks" "undos" "thread ID" "IP address" "user" "InnoDB state" 116 | if [ "$processlist" = "yes" ] 117 | then 118 | printf " query\n" 119 | else 120 
| printf "\n" 121 | fi 122 | printf "%8s %-17s %-8s %-6s %-6s %-10s %-16s %-16s %-20s" "--------" "-----------------" "--------" "------" "------" "----------" "----------------" "----------------" "--------------------" 123 | if [ "$processlist" = "yes" ] 124 | then 125 | printf " %s\n" "----------------------------------------------------------------------" 126 | else 127 | printf "\n" 128 | fi 129 | } 130 | 131 | ask() 132 | { 133 | while true 134 | do 135 | echo -n "$1 " 1>&2 136 | case "$2" in 137 | 'number') 138 | [ "$3" != "" ] && echo -n "($3-$4): " 1>&2 || echo -n ": " 1>&2 139 | read ui 140 | [ "$3" = "" -a "$ui" = "" ] && break 141 | [ ! -z "${ui##*[!0-9]*}" ] || continue 142 | if [ "$3" != "" ] 143 | then 144 | [ "$ui" -lt $3 -o $ui -gt $4 ] && continue 145 | fi 146 | echo $ui 147 | break 148 | ;; 149 | 'yesno') 150 | if [ "$3" != "" ] 151 | then 152 | case "$3" in 153 | 'yes') echo no;; 154 | 'no') echo yes;; 155 | esac 156 | break 157 | fi 158 | echo -n "(y/n): " 1>&2 159 | read -n 1 ui 160 | if [ "$ui" = "y" -o "$ui" = "n" ] 161 | then 162 | [ "$ui" = "y" ] && echo yes || echo no 163 | break 164 | fi 165 | echo 1>&2 166 | ;; 167 | esac 168 | done 169 | } 170 | 171 | kill_transaction() 172 | { 173 | [ "$1" = "" ] && return 174 | found=$(echo "select 'found' from information_schema.processlist where id = $1" | mysql -ANr $connstr 2>/dev/null) 175 | if [ $arg_quiet -eq 1 ] 176 | then 177 | if [ "$found" = "" ] 178 | then 179 | log INFO "tried to kill transaction $t_id, thread $t_thread_id, running since ${t_runtime}s but was not found" 180 | else 181 | echo "kill $1" | mysql -ANr $connstr 182 | log INFO "killed transaction $t_id, thread $t_thread_id, running since ${t_runtime}s" 183 | fi 184 | else 185 | if [ "$found" = "" ] 186 | then 187 | echo "thread ID $1 does not exist anymore" 188 | else 189 | echo "kill $1" | mysql -ANr $connstr 190 | echo "transaction killed" 191 | fi 192 | pak 193 | fi 194 | } 195 | 196 | get_nr() 197 | { 198 | [ 
"$arg_use_file" = "" ] && arg_nr=$(echo "select 1 - @@skip_name_resolve" | mysql -ANr $connstr 2>/dev/null) 199 | if [ $arg_nr -eq 0 ] 200 | then 201 | parser="s/^(MySQL|MariaDB) thread id ([0-9]*), OS thread handle ([0-9]*), query id ([0-9]*) ([A-Za-z0-9\._]*)(\s*)([A-Za-z0-9_\-]*)\s?(.*)$/\2+\5+\7+\8@/g" 202 | localhost_edit="s/no_match__g//" 203 | else 204 | parser="s/^(MySQL|MariaDB) thread id ([0-9]*), OS thread handle ([0-9]*), query id ([0-9]*) ([A-Za-z0-9\._-]*) ([A-Za-z0-9\._]*) ([A-Za-z0-9_\-]*)\s?(.*)$/\2+\6+\7+\8@/g" 205 | localhost_edit="s/localhost\s/localhost localhost /" 206 | fi 207 | } 208 | 209 | snapshot() 210 | { 211 | fn=$arg_snap_dir/rtm.snap.$(date '+%Y%m%d%H%M%S') 212 | tail -n +5 $snapfile > $fn 213 | [ $arg_quiet -eq 1 ] && log INFO "snap condition(s) met, snapshot taken: $fn" || hdrmsg=1 214 | } 215 | 216 | log() 217 | { 218 | [ "$arg_logfile" != "" ] && printf "%-20s %-10s %s\n" "$(date '+%Y-%m-%d %T')" $1 "$2" >> $arg_logfile || echo "$1: $2" 219 | } 220 | 221 | parse_arguments() 222 | { 223 | arg_debug=0 224 | arg_quiet=0 225 | arg_snap_time=0 226 | arg_snap_count=0 227 | arg_nr=0 228 | arg_snap_dir="/tmp" 229 | arg_ask_pass=0 230 | argl=$(getopt -o x -u -l"help,quiet,logfile:,interval:,kill:,snap-if-waiting-time:,snap-if-waiting-count:,snapshot-dir:,use-file:,locking-only,waiting-only,all-transactions,processlist,debug,name-resolve-on,host:,port:,user:,password:,ask-password,socket:" -- $@ 2>$tmpf) 231 | [ -s $tmpf ] && usage 232 | argr=($argl) 233 | i=0 234 | while true 235 | do 236 | case "${argr[$i]}" in 237 | '--quiet') 238 | arg_quiet=1 239 | ;; 240 | '--locking-only') 241 | lockingonly="yes" 242 | ;; 243 | '--waiting-only') 244 | waitingonly="yes" 245 | ;; 246 | '--help') 247 | usage 248 | ;; 249 | '--debug') 250 | arg_debug=1 251 | ;; 252 | '--name-resolve-on') 253 | arg_nr=1 254 | ;; 255 | '--use-file') 256 | check_for_arg 257 | arg_use_file=${argr[$((i+1))]} 258 | ;; 259 | '--interval') 260 | check_for_arg 261 | 
arg_interval=${argr[$((i+1))]} 262 | ;; 263 | '--all-transactions') 264 | all="yes" 265 | ;; 266 | '--processlist') 267 | check_terminal_width 268 | if [ $? -eq 1 ] 269 | then 270 | echo "--processlist requires a terminal at least $PROCLIST_MIN_TERM_SIZE chars wide" 1>&2 271 | exit 1 272 | fi 273 | processlist="yes" 274 | ;; 275 | '--kill') 276 | check_for_arg 277 | arg_kill=${argr[$((i+1))]} 278 | [ ! -z "${arg_kill##*[!0-9]*}" ] || usage 279 | ;; 280 | '--snap-if-waiting-time') 281 | check_for_arg 282 | arg_snap_time=${argr[$((i+1))]} 283 | [ ! -z "${arg_snap_time##*[!0-9]*}" ] || usage 284 | ;; 285 | '--snap-if-waiting-count') 286 | check_for_arg 287 | arg_snap_count=${argr[$((i+1))]} 288 | [ ! -z "${arg_snap_count##*[!0-9]*}" ] || usage 289 | ;; 290 | '--snapshot-dir') 291 | check_for_arg 292 | arg_snap_dir=${argr[$((i+1))]} 293 | ;; 294 | '--logfile') 295 | check_for_arg 296 | arg_logfile=${argr[$((i+1))]} 297 | ;; 298 | '--host') 299 | check_for_arg 300 | arg_host=${argr[$((i+1))]} 301 | ;; 302 | '--port') 303 | check_for_arg 304 | arg_port=${argr[$((i+1))]} 305 | [ ! -z "${arg_port##*[!0-9]*}" ] || usage 306 | ;; 307 | '--user') 308 | check_for_arg 309 | arg_user=${argr[$((i+1))]} 310 | ;; 311 | '--password') 312 | check_for_arg 313 | arg_password=${argr[$((i+1))]} 314 | ;; 315 | '--ask-password') 316 | arg_ask_pass=1 317 | ;; 318 | '--socket') 319 | check_for_arg 320 | arg_socket=${argr[$((i+1))]} 321 | ;; 322 | esac 323 | i=$((i+1)) 324 | [ $i -eq ${#argr[@]} ] && break 325 | done 326 | if [ "$arg_interval" != "" ] 327 | then 328 | [ ! -z "${arg_interval##*[!0-9]*}" ] || usage 329 | [ $arg_interval -lt 1 -o $arg_interval -gt 60 ] && usage 330 | refresh=$arg_interval 331 | fi 332 | if [ "$arg_kill" != "" ] 333 | then 334 | [ ! 
-z "${arg_kill##*[!0-9]*}" ] || usage 335 | #[ $arg_kill -lt 30 -o $arg_kill -gt 1800 ] && usage 336 | fi 337 | if [ $arg_quiet -eq 1 ] 338 | then 339 | log NOTICE "quiet mode - realtime transaction info suppressed" 340 | fi 341 | if [ $arg_ask_pass -eq 1 ] 342 | then 343 | stty -echo 344 | echo -n "Enter password: " 345 | read arg_password 346 | stty echo 347 | echo 348 | if [ "$arg_password" = "" ] 349 | then 350 | echo "password is required." 351 | exit 1 352 | fi 353 | fi 354 | if [ "$arg_host" != "" ] 355 | then 356 | if [ "$arg_user" = "" ] 357 | then 358 | echo "--host requires --user and --password" 359 | exit 1 360 | fi 361 | if [ "$arg_password" = "" -a $arg_ask_pass -eq 0 ] 362 | then 363 | echo "--host requires --user and --password" 364 | exit 1 365 | fi 366 | fi 367 | if [ "$arg_user" != "" ] 368 | then 369 | if [ "$arg_host" = "" ] 370 | then 371 | echo "--user requires --host" 372 | exit 1 373 | fi 374 | if [ "$arg_password" = "" -a $arg_ask_pass -eq 0 ] 375 | then 376 | echo "--user requires --password" 377 | exit 1 378 | fi 379 | fi 380 | if [ "$arg_password" != "" ] 381 | then 382 | if [ "$arg_host" = "" ] 383 | then 384 | echo "--password requires --host" 385 | exit 1 386 | fi 387 | if [ "$arg_user" = "" ] 388 | then 389 | echo "--password requires --user" 390 | exit 1 391 | fi 392 | fi 393 | if [ "$arg_host" != "" ] 394 | then 395 | connstr="-h $arg_host -u $arg_user -p$arg_password" 396 | if [ "$arg_socket" != "" ] 397 | then 398 | connstr="$connstr -S $arg_socket" 399 | fi 400 | if [ "$arg_port" != "" ] 401 | then 402 | connstr="$connstr -P $arg_port" 403 | fi 404 | fi 405 | if [ "$arg_snap_dir" != "" ] 406 | then 407 | if [ ! 
-d $arg_snap_dir ] 408 | then 409 | echo "$arg_snap_dir does not exist, please create it" 410 | exit 1 411 | fi 412 | fi 413 | if [ $arg_nr -eq 1 -a "$arg_use_file" = "" ] 414 | then 415 | echo "--name-resolve-on is only meaningful when --use-file is specified" 416 | exit 1 417 | fi 418 | } 419 | 420 | check_for_arg() 421 | { 422 | [ ${argr[$((i+1))]:0:2} != "--" ] && return 423 | reqmsg="getopt: option '${argr[$i]}' requires an argument" 424 | usage 425 | } 426 | 427 | usage() 428 | { 429 | printf "\n${term_bold}MariaDB Realtime Transactions Monitor v$VERSION\n\n$term_reset" 430 | [ -s $tmpf ] && cat $tmpf 431 | [ "$reqmsg" != "" ] && echo -e $reqmsg 432 | printf "usage: $0 [ options ... ]\n\n" 433 | printf "Valid options:\n\n" 434 | printf -- "--host\t\t\thostname for connection to target instance\n" 435 | printf -- "--port\t\t\tport for connection to target instance\n" 436 | printf -- "--user\t\t\tuser for connection to target instance\n" 437 | printf -- "--password\t\tpassword for connection to target instance\n" 438 | printf -- "--ask-password\t\tpassword for connection to target will be asked on terminal\n" 439 | printf -- "--socket\t\tsocket for connection to target instance\n" 440 | printf -- "--interval\t\trefresh interval (1-60 seconds)\n" 441 | printf -- "--all-transactions\tinclude just started transactions (lifetime < 1s)\n" 442 | printf -- "--processlist\t\tinclude info from processlist\n" 443 | printf -- "--kill\t\t\tkill transactions running since X seconds or more (30-1800)\n" 444 | printf -- "--locking-only\t\tfilter transactions holding row locks\n" 445 | printf -- "--waiting-only\t\tfilter transactions waiting to acquire a lock\n" 446 | printf -- "--snap-if-waiting-time\ttake a snapshot if there are transactions waiting X seconds or more\n" 447 | printf -- "--snap-if-waiting-count\ttake a snapshot if at least X transactions are waiting\n" 448 | printf -- "--snapshot-dir\t\tdirectory to use for snapshots (default /tmp)\n" 449 | printf -- 
"--use-file\t\tdisplay info from given file (useful with snapshots)\n" 450 | printf -- "--name-resolve-on\twhen reading from file, assume server has skip_name_resolve=OFF\n" 451 | printf -- "--logfile\t\tlog activity about snapshots and kills\n" 452 | printf -- "--quiet\t\t\tsuppresses output; useful with --kill\n" 453 | printf -- "--help\t\t\tthis help\n" 454 | printf "\n" 455 | exit 1 456 | } 457 | 458 | init() 459 | { 460 | [ $arg_quiet -eq 1 ] && return 461 | hostname=$(hostname) 462 | [ $arg_debug -eq 0 ] && clear 463 | header 464 | } 465 | 466 | access_check() 467 | { 468 | [ "$arg_use_file" != "" ] && return 469 | mysql $connstr >/dev/null 2>$tmpf < /dev/null 470 | grep -i ^error $tmpf && exit 1 471 | rm -f $tmpf 472 | } 473 | 474 | screen_update() 475 | { 476 | header > $scrh_tmpf 477 | [ $arg_debug -eq 0 ] && clear 478 | fgrep -avh __SNAP $scrh_tmpf $scrb_tmpf 479 | } 480 | 481 | refresh=3 482 | hdrmsg=0 483 | tl=0 484 | all=no 485 | processlist=no 486 | olderfirst=yes 487 | lockingonly=no 488 | waitingonly=no 489 | tmpf=/tmp/rtw.$$ 490 | scrh_tmpf=/tmp/rtw_sh.$$ 491 | scrb_tmpf=/tmp/rtw_sb.$$ 492 | snapfile=/tmp/rtw.sn.$$ 493 | trap 'rm -f $tmpf $scrh_tmpf $scrb_tmpf $snapfile' 0 494 | 495 | init_terminal 496 | parse_arguments $* 497 | access_check 498 | get_nr 499 | init 500 | while true 501 | do 502 | if [ "$arg_use_file" != "" ] 503 | then 504 | cat $arg_use_file | tee $scrb_tmpf | grep -zoPa "(?s)(?<=LIST OF TRANSACTIONS FOR EACH SESSION\:)(.*)(?=FILE I/O)" 2>/tmp/grepout | grep -av "^mysql tables in use" | egrep -aA2 "^---T(.*)ACTIVE" | egrep -a "ACTIVE|row lock|thread id" > $tmpf 505 | snapt=$(egrep -a "^(.*)0x(.*)INNODB MONITOR OUTPUT$" $scrb_tmpf | sed -re "s/^(.*) 0x(.*)$/\1/") 506 | hdrmsg=2 507 | else 508 | wc=0 509 | echo "show engine innodb status\G" | mysql -ANr $connstr | tee $snapfile | grep -zoPa "(?s)(?<=LIST OF TRANSACTIONS FOR EACH SESSION\:)(.*)(?=FILE I/O)" | grep -av "^mysql tables in use" | egrep -aA2 "^---T(.*)ACTIVE" | egrep -a 
"ACTIVE|row lock|thread id" > $tmpf 510 | fi 511 | if [ -s $tmpf ] 512 | then 513 | [ "$olderfirst" = "yes" ] && sortorder="-r" || sortorder="" 514 | sed -re "s/^---(.*)\s(.*), (ACTIVE|ACTIVE \(PREPARED\)) ([0-9]*) sec(.*)/\2+\4+/g" -e 's/^([RL0-9])(.*) lock (.*), (.*), ([0-9]*) row lock(.*)$/\1+\5+\6+/g' -e "$localhost_edit" -e "$parser" < $tmpf | tr -d "\n" | tr "@" "\n" | sort -nk2 -t'+' $sortorder | while IFS="+" read t_id t_runtime t_ts t_locks t_undostr t_thread_id t_ipaddr t_user t_extra t_extra2 515 | do 516 | #[[ $t_runtime != ?(-)+([0-9]) ]] && cp $tmpf /tmp/rtm.debug 517 | ext_d=0 518 | case "$t_ipaddr" in 519 | 'thread') 520 | t_thread_id=$(echo "$t_extra" | sed -re "s/^([0-9]*)(.*)$/\1/") 521 | t_user="system user" 522 | t_ipaddr="" 523 | t_extra=$(echo "$t_extra" | sed -re "s/^(.*), query id (.*) (.*)$/\3/") 524 | ;; 525 | 'Waiting'|'closing'|'After log apply event'|'Delete_rows_log_event'|'Update_rows_log_event'|'Write_rows_log_event') 526 | t_extra="$t_ipaddr $t_user $t_extra $t_extra2" 527 | t_user="SQL_thread" 528 | t_ipaddr="" 529 | ext_d=1 530 | ;; 531 | 'event_scheduler') 532 | t_user=$t_ipaddr 533 | t_ipaddr="" 534 | t_extra="" 535 | ;; 536 | *) t_extra="$t_extra $t_extra2" 537 | ;; 538 | esac 539 | if [ "$t_user" = "" ] 540 | then 541 | t_extra="$t_ipaddr" 542 | t_ipaddr="" 543 | fi 544 | if [ $tl -eq $LINES -a "$arg_use_file" = "" ] 545 | then 546 | echo -n "$term_rev some transactions suppressed due to screen size $term_reset " 547 | break 548 | fi 549 | [ $t_runtime -eq 0 -a "$all" = "no" ] && continue 550 | [ $t_locks -eq 0 -a "$lockingonly" = "yes" ] && continue 551 | case "$t_ts" in 552 | 'R') t_ts="ROLLBACK" 553 | t_ts_bold=$term_bold 554 | ;; 555 | 'L') t_ts="WAITING" 556 | wc=$((wc+1)) 557 | [ $arg_snap_time -gt 0 -a $t_runtime -ge $arg_snap_time ] && echo "__SNAP" 558 | [ $arg_snap_count -gt 0 -a $wc -ge $arg_snap_count ] && echo "__SNAP" 559 | t_ts_bold=$term_bold 560 | ;; 561 | *) t_ts="RUNNING" 562 | t_ts_bold="" 563 | ;; 564 | 
esac 565 | [ "$t_ts" != "WAITING" -a "$waitingonly" = "yes" ] && continue 566 | if [ "$arg_kill" != "" ] 567 | then 568 | if [ $t_runtime -gt $arg_kill ] 569 | then 570 | kill_transaction $t_thread_id 571 | continue 572 | fi 573 | fi 574 | pretty_time $t_runtime 575 | prettify $t_locks 576 | p_locks=$pretty 577 | t_undos=$(echo "$t_undostr" | sed -nre "s/^(.*)entries ([0-9]*)$/\2/p") 578 | p_undos="" 579 | if [ "$t_undos" != "" ] 580 | then 581 | prettify $t_undos 582 | p_undos=$pretty 583 | fi 584 | [ "$t_extra" = "" ] && t_extra=" " 585 | if [ $arg_quiet -eq 0 ] 586 | then 587 | if [ "$processlist" = "yes" ] 588 | then 589 | read p_info <<<$(echo "select replace(replace(left(info, 68 + $((COLUMNS-PROCLIST_MIN_TERM_SIZE))), '\n', ' '), '\r', '') from information_schema.processlist where id = $t_thread_id" | mysql -ANr $connstr) 590 | p_info=$(echo "$p_info" | tr "\t" " " | tr "\n" " " | tr -s " ") 591 | printf "$t_ts_bold%-8s %17s %2d:%02d:%02d %6s %6s %10d %-16s %-16.16s %-20.20s %s$term_reset\n" "$t_ts" $t_id $h $m $s $p_locks "$p_undos" $t_thread_id "$t_ipaddr" "$t_user" "$t_extra" "$p_info" 592 | else 593 | printf "$t_ts_bold%-8s %17s %2d:%02d:%02d %6s %6s %10d %-16s %-16.16s %-20.20s$term_reset\n" "$t_ts" $t_id $h $m $s $p_locks "$p_undos" $t_thread_id "$t_ipaddr" "$t_user" "$t_extra" 594 | fi 595 | tl=$((tl+1)) 596 | fi 597 | done | tee $scrb_tmpf | fgrep -aq __SNAP && snapshot 598 | else 599 | cat /dev/null > $scrb_tmpf 600 | fi 601 | if [ $arg_quiet -eq 1 ] 602 | then 603 | sleep $refresh 604 | continue 605 | fi 606 | screen_update 607 | [ "$arg_use_file" != "" ] && break 608 | read -s -t $refresh -n 1 ui 609 | case "${ui,,}" in 610 | 'a') all=$(ask "all transactions" yesno $all);; 611 | 'i') refresh=$(ask "refresh interval" number 1 60);; 612 | 'k') kill_transaction $(ask "thread ID to kill" number);; 613 | 'l') lockingonly=$(ask "only locking transactions" yesno $lockingonly);; 614 | 'o') olderfirst=$(ask "older transactions first" yesno $olderfirst);; 
615 | 'p') check_terminal_width && processlist=$(ask "processlist info" yesno $processlist) || pak "Insufficient terminal width";; 616 | 'q') echo; echo "Bye";; 617 | 's') snapshot;; 618 | 'w') waitingonly=$(ask "only waiting transactions" yesno $waitingonly);; 619 | esac 620 | case "${ui,,}" in 621 | 'a'|'l'|'o'|'p'|'s'|'w') 622 | screen_update 623 | sleep 1.5 624 | ;; 625 | esac 626 | [ "$ui" = "q" ] && break 627 | done 628 | -------------------------------------------------------------------------------- /show_grants.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # show all grants for a given DB, without having to install percona toolkit 4 | # 5 | echo -n Password: 6 | stty -echo 7 | read p 8 | stty echo 9 | echo "SELECT CONCAT('show grants for ',User,'@\'',Host,'\';') from mysql.user" | mysql --skip-column-names -uroot -p$p | mysql -t -uroot -p$p 10 | -------------------------------------------------------------------------------- /simple_warmup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # script is using SELECT COUNT(*) to read top 10 tables on the server 3 | # will not work with MyISAM 4 | # (c) Vlad Fedorkov 2012, exclusive for PalominoDB 5 | # (c) converted to bash from PHP by rpizzi@palominodb.com 6 | # 7 | [ $# -ne 3 ] && echo "usage: $0 host user password" && exit 1 8 | mysql_host=$1 9 | mysql_user=$2 10 | mysql_pass=$3 11 | time for table in $(echo "SELECT concat(concat(table_schema,'.',table_name), '|', concat(round((data_length+index_length)/(1024*1024*1024),2),'G')) FROM information_schema.TABLES ORDER BY data_length+index_length DESC LIMIT 10" | mysql -N -r -u $mysql_user -h $mysql_host -p$mysql_pass) 12 | do 13 | name=$(echo $table | cut -d '|' -f 1) 14 | size=$(echo $table | cut -d '|' -f 2) 15 | echo "Warming up table $name, Size: $size" 16 | echo "SELECT COUNT(*) FROM $name" | mysql -N -r -u $mysql_user -h $mysql_host 
-p$mysql_pass 17 | done 18 | exit 0 19 | -------------------------------------------------------------------------------- /table_growth/README: -------------------------------------------------------------------------------- 1 | sample.sh gathers info from various servers and stores consolidated info in local DB (invoked once a day) 2 | other scripts read the local DB and extract growth statistics 3 | 4 | Create an empty DB and the following table: 5 | 6 | mysql> show create table growth\G 7 | *************************** 1. row *************************** 8 | Table: growth 9 | Create Table: CREATE TABLE `growth` ( 10 | `id` int(11) NOT NULL AUTO_INCREMENT, 11 | `sample_date` date NOT NULL, 12 | `server` char(64) NOT NULL, 13 | `schema_name` char(32) NOT NULL, 14 | `table_name` char(64) NOT NULL, 15 | `table_size` decimal(11,2) DEFAULT NULL, 16 | PRIMARY KEY (`id`), 17 | KEY `server` (`server`,`schema_name`,`table_name`), 18 | KEY `sample_date` (`sample_date`) 19 | ) ENGINE=InnoDB DEFAULT CHARSET=latin1 20 | 1 row in set (0.00 sec) 21 | 22 | -------------------------------------------------------------------------------- /table_growth/pz-schema-growth.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # create list of all schemas and their monthly growth 4 | # rpizzi@blackbirdit.com 5 | echo 6 | echo "database growth in the last 30 days, in GB" 7 | echo 8 | ( 9 | echo "select db_schema, db_growth from (" 10 | c=0 11 | for schema in $(echo "select distinct schema_name from growth" | mysql -ANr tablegrowth) 12 | do 13 | [ $c -gt 0 ] && echo "union " 14 | echo "select '$schema' as db_schema, sum(table_size) as db_growth from ((select sum(table_size) as table_size from growth where schema_name = '$schema' and sample_date >= date_sub(curdate(), interval 1 month) group by sample_date order by sample_date desc limit 1) union (select -sum(table_size) as table_size from growth where schema_name = '$schema' and 
sample_date >= date_sub(curdate(), interval 1 month) group by sample_date order by sample_date limit 1))d " 15 | c=$(expr $c + 1) 16 | done 17 | echo ") d order by db_growth desc" 18 | ) | mysql -At tablegrowth 19 | exit 0 20 | -------------------------------------------------------------------------------- /table_growth/pz-table-growth.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # statistics on table growth 4 | # riccardo.pizzi@lastminute.com 5 | # 6 | # 7 | USER= 8 | PASS= 9 | # 10 | case "$1" in 11 | 'size') type=1;; 12 | 'percent') type=2;; 13 | 'delta') type=3;; 14 | *) echo "usage: $0 [ size | percent | delta ]"; exit 1;; 15 | esac 16 | d7=$(echo "select id from growth where sample_date = date_sub(curdate(), interval 1 week) limit 1" | mysql -u$USER -p$PASS -ANr tablegrowth | wc -l) 17 | d14=$(echo "select id from growth where sample_date = date_sub(curdate(), interval 2 week) limit 1" | mysql -u$USER -p$PASS -ANr tablegrowth | wc -l) 18 | d30=$(echo "select id from growth where sample_date = date_sub(curdate(), interval 1 month) limit 1" | mysql -u$USER -p$PASS -ANr tablegrowth | wc -l) 19 | d60=$(echo "select id from growth where sample_date = date_sub(curdate(), interval 2 month) limit 1" | mysql -u$USER -p$PASS -ANr tablegrowth | wc -l) 20 | echo 21 | case $type in 22 | 1) echo "Top 20 tables by disk usage in GB";; 23 | 2) echo "Top 20 growing tables by percentage";; 24 | 3) echo "Top 20 growing tables by GB";; 25 | esac 26 | echo 27 | ( 28 | echo "select " 29 | echo " t0.server, t0.schema_name, t0.table_name, " 30 | if [ $d60 -eq 1 ] 31 | then 32 | case $type in 33 | 1) echo -n " t60.table_size";; 34 | 2) echo -n " concat(truncate(100 - (t60.table_size * 100 / t0.table_size),1), '%')";; 35 | 3) echo -n " t0.table_size - t60.table_size";; 36 | esac 37 | else 38 | echo -n " 'N/A'" 39 | fi 40 | echo " as 2_months, " 41 | if [ $d30 -eq 1 ] 42 | then 43 | case $type in 44 | 1) echo -n " 
t30.table_size";; 45 | 2) echo -n " concat(truncate(100 - (t30.table_size * 100 / t0.table_size),1), '%')";; 46 | 3) echo -n " t0.table_size - t30.table_size";; 47 | esac 48 | else 49 | echo -n " 'N/A'" 50 | fi 51 | echo " as 1_month, " 52 | if [ $d14 -eq 1 ] 53 | then 54 | case $type in 55 | 1) echo -n " t14.table_size";; 56 | 2) echo -n " concat(truncate(100 - (t14.table_size * 100 / t0.table_size),1), '%')";; 57 | 3) echo -n " t0.table_size - t14.table_size";; 58 | esac 59 | else 60 | echo -n " 'N/A'" 61 | fi 62 | echo " as 2_weeks, " 63 | if [ $d7 -eq 1 ] 64 | then 65 | case $type in 66 | 1) echo -n " t7.table_size" ;; 67 | 2) echo -n " concat(truncate(100 - (t7.table_size * 100 / t0.table_size),1), '%')";; 68 | 3) echo -n " t0.table_size - t7.table_size";; 69 | esac 70 | else 71 | echo -n " 'N/A'" 72 | fi 73 | echo " as 1_week, " 74 | case $type in 75 | 1) echo " t1.table_size as 1_day,";; 76 | 2) echo " concat(truncate(100 - (t1.table_size * 100 / t0.table_size), 1), '%') as 1_day";; 77 | 3) echo " t0.table_size - t1.table_size as 1_day";; 78 | esac 79 | case $type in 80 | 1) echo " t0.table_size as today ";; 81 | esac 82 | echo "from growth t0 " 83 | echo " left join growth t1 on t1.server = t0.server and t1.schema_name = t0.schema_name and t1.table_name = t0.table_name and t1.sample_date = date_sub(curdate(), interval 1 day)" 84 | [ $d7 -eq 1 ] && echo " left join growth t7 on t7.server = t0.server and t7.schema_name = t0.schema_name and t7.table_name = t0.table_name and t7.sample_date = date_sub(curdate(), interval 1 week)" 85 | [ $d14 -eq 1 ] && echo " left join growth t14 on t14.server = t0.server and t14.schema_name = t0.schema_name and t14.table_name = t0.table_name and t14.sample_date = date_sub(curdate(), interval 2 week)" 86 | [ $d30 -eq 1 ] && echo " left join growth t30 on t30.server = t0.server and t30.schema_name = t0.schema_name and t30.table_name = t0.table_name and t30.sample_date = date_sub(curdate(), interval 1 month)" 87 | [ $d60 -eq 1 ] 
&& echo " left join growth t60 on t60.server = t0.server and t60.schema_name = t0.schema_name and t60.table_name = t0.table_name and t60.sample_date = date_sub(curdate(), interval 2 month)" 88 | echo "where t0.sample_date = curdate()" 89 | case $type in 90 | 2) echo "order by truncate(100 - (t1.table_size * 100 / t0.table_size), 1) desc limit 20;";; 91 | *) echo "order by 1_day desc limit 20;";; 92 | esac 93 | ) | mysql -u$USER -p$PASS -At tablegrowth 94 | -------------------------------------------------------------------------------- /table_growth/run_and_mail.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # 4 | RECIPIENTS="rpizzi@blackbirdit.com" 5 | ( 6 | echo 'From: Health Checks ' 7 | echo "To: $RECIPIENTS" 8 | echo "Subject: table growth statistics" 9 | echo 'MIME-Version: 1.0' 10 | echo 'Content-Type: text/html' 11 | echo 'Content-Disposition: inline' 12 | echo '' 13 | echo '' 14 | echo '
'
15 | 	/root/Table_Growth/pz-schema-growth.sh
16 | 	/root/Table_Growth/pz-table-growth.sh size
17 | 	/root/Table_Growth/pz-table-growth.sh delta
18 | 	/root/Table_Growth/pz-table-growth.sh percent
19 | 	echo '
' 20 | echo '' 21 | echo '' 22 | ) | /usr/sbin/sendmail -t 23 | exit 0 24 | -------------------------------------------------------------------------------- /table_growth/sample.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # populates table growth DB from selected slaves 4 | # rick.pizzi@mariadb.com 5 | # 6 | SKIP_SCHEMAS="information_schema performance_schema mysql" 7 | DB_SCHEMA=growth 8 | SKIP_TABLES="$DB_SCHEMA.growth" 9 | DB_USER=tablegrowth 10 | DB_PASS=yourpasshere 11 | DATADIR=/var/lib/mysql 12 | # 13 | c=0 14 | for ss in $SKIP_SCHEMAS 15 | do 16 | [ $c -gt 0 ] && sskip="$sskip|" 17 | sskip="$sskip^$ss/" 18 | c=$((c+1)) 19 | done 20 | c=0 21 | for ss in $(echo $SKIP_TABLES | tr "[.]" "[/]") 22 | do 23 | [ $c -gt 0 ] && tskip="$tskip|" 24 | tskip="$tskip^$ss" 25 | c=$((c+1)) 26 | done 27 | cd $DATADIR 28 | IFS=" 29 | " 30 | for ts in $(find . -type f -name \*ibd | cut -d "/" -f 2,3 | egrep -v "$sskip" | egrep -v "$tskip" | cut -d "." 
-f 1) 31 | do 32 | echo "insert into growth values (null, curdate(), @@hostname, $(echo $ts| sed -re "s/(.*)\/(.*)/'\1', '\2'/g"), $(($(stat -c %s ./$ts.ibd)/1024/1024)));" 33 | done | mysql -A $DB_SCHEMA 34 | exit 0 35 | -------------------------------------------------------------------------------- /truelag.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # reports about replication lag highlighting both I/O thread and SQL thread lag 4 | # rpizzi@blackbirdit.com 5 | # 6 | # usage: run as root after setting the LUSER variable to the mysql user to use locally to connect 7 | # root access needed to read master.info file and get information from there 8 | # note: reported lag is not 100% accurate due to data being sourced in different places 9 | # 10 | LUSER=rpizzi 11 | echo -n "Password: " 12 | stty -echo 13 | read lpass 14 | stty echo 15 | echo 16 | tmpf=/tmp/truelag$$ 17 | trap 'rm -f $tmpf' 0 18 | datadir=$(fgrep datadir /etc/my.cnf | tr -d "[ ]" | cut -d"=" -f 2) 19 | c=1 20 | for row in $(cat $datadir/master.info | head -6 | tail -3) 21 | do 22 | case $c in 23 | 1) master=$row;; 24 | 2) user=$row;; 25 | 3) pass=$row;; 26 | esac 27 | c=$(expr $c + 1) 28 | done 29 | while true 30 | do 31 | ( echo "show slave status\G" | mysql -Ar -u$LUSER -p$lpass > $tmpf 2>/dev/null) & 32 | #master_status=$(echo "show master status" | mysql -ANr -h $master -u $user -p$pass) 33 | master_status=$(echo "show master status" | mysql -ANr -h $master -u $LUSER -p$lpass 2>/dev/null) 34 | master_file=$(echo "$master_status" | cut -f 1) 35 | master_pos=$(echo "$master_status" | cut -f 2) 36 | wait 37 | for row in $(cat $tmpf | tr -d "[ ]") 38 | do 39 | var=$(echo $row | cut -d":" -f 1) 40 | val=$(echo $row | cut -d":" -f 2) 41 | case $var in 42 | 'Master_Log_File') io_file=$val;; 43 | 'Read_Master_Log_Pos') io_pos=$val;; 44 | 'Relay_Master_Log_File') sql_file=$val;; 45 | 'Exec_Master_Log_Pos') sql_pos=$val;; 46 | esac 47 | 
done 48 | sql_filenum=$(echo $sql_file | cut -d"." -f 2) 49 | io_filenum=$(echo $io_file | cut -d"." -f 2) 50 | master_filenum=$(echo $master_file | cut -d"." -f 2) 51 | sql_poslag=$(expr $io_pos - $sql_pos) 52 | sql_fnolag=$(expr $io_filenum - $sql_filenum) 53 | io_poslag=$(expr $master_pos - $io_pos) 54 | io_fnolag=$(expr $master_filenum - $io_filenum) 55 | [ $io_fnolag -gt 0 ] && io_poslag="and some" 56 | [ $sql_fnolag -gt 0 ] && sql_poslag="and some" 57 | date 58 | printf "Master: %s %12d\n" $master_file $master_pos 59 | printf "I/O thread: %s %12d (%d binlogs %s bytes behind)\n" $io_file $io_pos $io_fnolag "$io_poslag" 60 | printf "SQL thread: %s %12d (%d binlogs %s bytes behind)\n" $sql_file $sql_pos $sql_fnolag "$sql_poslag" 61 | echo 62 | [ $io_fnolag -gt 0 ] && echo -n  63 | sleep 5 64 | done 65 | exit 0 66 | -------------------------------------------------------------------------------- /tungsten_osc_babysitter.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # looks for a running pt-online-schema-change process and throttles it so that 4 | # no node of cluster has more than MAX_LATENCY replication latency 5 | # rpizzi@blackbirdit.com 6 | # 7 | MAX_LATENCY=600 8 | # 9 | pid=$(ps -eAo pid,args | fgrep pt-online-schema-change | grep -v grep | tr -s "[ ]" | sed -e "s/^ //g" | cut -d" " -f 1) 10 | if [ "$pid" = "" ] 11 | then 12 | echo "nothing to babysit" 13 | exit 1 14 | fi 15 | running=1 16 | while true 17 | do 18 | kill -0 $pid 2>/dev/null || break 19 | latency=$(echo ls | su - tungsten -c cctrl | fgrep -i latency | tr -d "[)|]" | cut -d"=" -f 2 | cut -d"." -f 1 | sort -nrk 1 | head -1) 20 | if [ $latency -gt $MAX_LATENCY ] 21 | then 22 | if [ $running -eq 1 ] 23 | then 24 | kill -STOP $pid 25 | [ $? -ne 0 ] && break 26 | running=0 27 | fi 28 | echo "Paused: latency $latency > $MAX_LATENCY" 29 | else 30 | if [ $running -eq 0 ] 31 | then 32 | kill -CONT $pid 33 | [ $? 
-ne 0 ] && break 34 | running=1 35 | echo "Resumed: latency $latency < $MAX_LATENCY" 36 | else 37 | echo "Running: latency $latency" 38 | fi 39 | fi 40 | sleep 15 41 | done 42 | echo "Done" 43 | exit 0 44 | -------------------------------------------------------------------------------- /zrm_print.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # pretty prints ZRM backup status for all backup sets 4 | # rpizzi@blackbirdit.com 5 | # 6 | TZ=$(date +%Z) 7 | IFS=" 8 | " 9 | for set in $(mysql-zrm-reporter backup-performance-info --fields backup-set --noheader 2> /dev/null | sort | uniq | sort) 10 | do 11 | echo "Backup set: $set" 12 | echo "-------------------------------------------------------------------" 13 | for row in $(mysql-zrm-reporter backup-performance-info --fields backup-date,backup-level,backup-size,backup-status --noheader --where backup-set=$set 2>/dev/null | fgrep -v $TZ | fgrep -v REPORT) 14 | do 15 | type=$(echo $row | cut -c63-63) 16 | run=$(echo $row | cut -c78-79) 17 | echo $row 18 | [ $type -eq 0 -a $run != "--" ] && break 19 | done 20 | echo 21 | done 22 | --------------------------------------------------------------------------------