├── AmqpGraphite.pm
├── AmqpJson.pm
├── AmqpJsonUdp.pm
├── CPUSummary.pm
├── CloudWatchMetrics.pm
├── HTTPCheck.pm
├── InnoDBParser.pm
├── JMX.pm
├── MySQL.pm
├── README
├── RabbitMQ.pm
├── Riak.pm
└── types.db.sample
/AmqpGraphite.pm:
--------------------------------------------------------------------------------
1 | package Collectd::Plugins::AmqpGraphite;
2 |
3 | use strict;
4 | use warnings;
5 | use Collectd qw( :all );
6 | use threads::shared;
7 | use Net::RabbitMQ;
8 |
9 |
10 | =head1 NAME
11 |
12 | Collectd::Plugins::AmqpGraphite - Send collectd metrics to AMQP in graphite format, based on Collectd::Plugins::Graphite by Joe Miller
13 |
14 | =head1 VERSION
15 |
16 | Version 1
17 |
18 | =cut
19 |
20 | our $VERSION = '1';
21 |
22 |
23 | =head1 SYNOPSIS
24 |
25 | This is a collectd plugin for sending collectd metrics to AMQP in graphite format.
26 |
27 | In your collectd config:
28 |
29 |
 30 |     <LoadPlugin perl>
 31 |       Globals true
 32 |     </LoadPlugin>
 33 |
 34 |     <Plugin perl>
 35 |       BaseName "Collectd::Plugins"
 36 |       LoadPlugin "AmqpGraphite"
 37 |
 38 |       <Plugin AmqpGraphite>
 39 |         Buffer "256000"
 40 |         Prefix "datacenter"
 41 |         Host "amqp.host"
 42 |         Port "2003"
 43 |         User "amqpuser"
 44 |         Password "amqppass"
 45 |         Exchange "exchangename"
 46 |         VHost "/virtualhost"
 47 |       </Plugin>
 48 |     </Plugin>
49 | =head1 AUTHOR
50 |
51 | Mark Steele, C<< >>, original author of graphite plugin Joe Miller
52 |
53 | =cut
54 |
55 | my $buff :shared;
56 | my $buffer_size = 8192;
57 | my $prefix;
58 | my $host = 'localhost';
59 | my $port = 5672;
60 | my $user;
61 | my $password;
62 | my $exchange;
63 | my $vhost;
64 |
# Config callback: copy recognized <Plugin AmqpGraphite> keys into the
# package-scoped settings. Keys are matched case-insensitively; unknown
# keys are silently ignored, as before.
sub amqp_graphite_config {
    my ($ci) = @_;

    my %target_for = (
        buffer   => \$buffer_size,
        prefix   => \$prefix,
        host     => \$host,
        port     => \$port,
        user     => \$user,
        password => \$password,
        exchange => \$exchange,
        vhost    => \$vhost,
    );

    for my $item (@{ $ci->{'children'} }) {
        my $key = lc $item->{'key'};
        my $val = $item->{'values'}->[0];
        ${ $target_for{$key} } = $val if exists $target_for{$key};
    }

    return 1;
}
92 |
# Write callback: format each data source of the value list as a graphite
# line ("path value timestamp\n") and append it to the shared buffer.
# Triggers a publish once the buffer reaches the configured size.
sub amqp_graphite_write {
    my ($type, $ds, $vl) = @_;

    # Dots in the hostname would create extra graphite path components,
    # so flatten them to underscores.
    my $host = $vl->{'host'};
    $host =~ s/\./_/g;
    my $plugin_str = $vl->{'plugin'};
    my $type_str = $vl->{'type'};
    if ( defined $vl->{'plugin_instance'} ) {
        $plugin_str .= "-" . $vl->{'plugin_instance'};
    }
    if ( defined $vl->{'type_instance'} ) {
        $type_str .= "-" . $vl->{'type_instance'};
    }

    my $bufflen;
    {
        # $buff is shared across collectd writer threads: hold the lock only
        # while appending, and capture the length for the check below so we
        # don't read $buff unlocked afterwards.
        lock($buff);
        for (my $i = 0; $i < scalar (@$ds); ++$i) {
            # Path layout used here: plugin.type.dsname.prefix.host
            my $graphite_path = sprintf "%s.%s.%s.%s.%s",
                $plugin_str,
                $type_str,
                $ds->[$i]->{'name'},
                $prefix,
                $host;

            # convert any spaces that may have snuck in
            $graphite_path =~ s/\s+/_/g;
            $buff .= sprintf "%s %s %d\n",
                $graphite_path,
                $vl->{'values'}->[$i],
                $vl->{'time'};
        }
        $bufflen = length($buff);
    }
    # send_to_amqp() takes the lock itself, so it must be called outside
    # the block above.
    if ( $bufflen >= $buffer_size ) {
        send_to_amqp();
    }
    return 1;
}
132 |
sub send_to_amqp {
    # Best effort to send: connect/publish errors are logged and the data
    # is dropped. The buffer is cleared even on failure so it cannot grow
    # without bound while the broker is down.
    lock($buff);
    return 0 if !length($buff);
    my $mq = Net::RabbitMQ->new();
    eval { $mq->connect($host , { port => $port, user => $user, password => $password, vhost => $vhost }); };
    if ($@ eq '') {
        eval {
            $mq->channel_open(1);
            # Empty routing key: the exchange configuration decides routing.
            $mq->publish(1, '', $buff, { exchange => $exchange });
            $mq->disconnect();
        };
        if ($@ ne '') {
            plugin_log(LOG_ERR, "AmqpGraphite.pm: error publishing to amqp, losing data");
        }
    } else {
        plugin_log(LOG_ERR, "AmqpGraphite.pm: failed to connect to amqp, losing data");
    }
    $buff = '';
    return 1;
}
154 |
sub amqp_graphite_flush {
    # Flush callback: push out whatever is buffered, regardless of size.
    send_to_amqp();
    return 1;
}
159 |
160 | plugin_register (TYPE_CONFIG, "AmqpGraphite", "amqp_graphite_config");
161 | plugin_register (TYPE_WRITE, "AmqpGraphite", "amqp_graphite_write");
162 | plugin_register (TYPE_FLUSH, "AmqpGraphite", "amqp_graphite_flush");
163 |
164 | 1; # End of Collectd::Plugins::AmqpGraphite
165 |
--------------------------------------------------------------------------------
/AmqpJson.pm:
--------------------------------------------------------------------------------
1 | package Collectd::Plugins::AmqpJson;
2 |
3 | use strict;
4 | use warnings;
5 | use Collectd qw( :all );
6 | use threads::shared;
7 | use Net::RabbitMQ;
8 | use JSON;
9 | use Compress::Zlib;
10 |
11 | =head1 NAME
12 |
13 | Collectd::Plugins::AmqpJson - Send collectd metrics to AMQP in json format, based on Collectd::Plugins::Graphite by Joe Miller
14 |
15 | =head1 VERSION
16 |
17 | Version 1
18 |
19 | =cut
20 |
21 | our $VERSION = '1';
22 |
23 |
24 | =head1 SYNOPSIS
25 |
26 | This is a collectd plugin for sending collectd metrics to AMQP in json format.
27 |
28 | In your collectd config:
29 |
30 |
31 | Globals true
32 |
33 |
34 |
35 | BaseName "Collectd::Plugins"
36 | LoadPlugin "AmqpJson"
37 |
38 |
39 | Buffer "65507"
40 | Prefix "datacenter"
41 | Host "amqp.host"
42 | Port "2003"
43 | User "amqpuser"
44 | Password "amqppass"
45 | Exchange "exchangename"
46 | VHost "/virtualhost"
47 | Compression "On"
48 |
49 |
50 |
51 | =head1 AUTHOR
52 |
53 | Mark Steele, C<< >>, original author of graphite plugin Joe Miller
54 |
55 | =cut
56 |
57 | my $buff :shared;
58 | my $buffer_size = 8192;
59 | my $prefix;
60 | my $host = 'localhost';
61 | my $port = 5672;
62 | my $user;
63 | my $password;
64 | my $exchange;
65 | my $vhost;
66 | my $compress;
67 | my $event_type = 'CollectdMetric';
68 |
# Config callback: copy recognized <Plugin AmqpJson> keys into the
# package-scoped settings. 'Compression' is a flag (only "On" enables it);
# all other recognized keys are plain value assignments. Unknown keys are
# silently ignored, as before.
sub amqp_json_config {
    my ($ci) = @_;

    my %target_for = (
        buffer    => \$buffer_size,
        prefix    => \$prefix,
        host      => \$host,
        port      => \$port,
        user      => \$user,
        password  => \$password,
        exchange  => \$exchange,
        vhost     => \$vhost,
        eventtype => \$event_type,
    );

    for my $item (@{ $ci->{'children'} }) {
        my $key = lc $item->{'key'};
        my $val = $item->{'values'}->[0];
        if ($key eq 'compression') {
            $compress = 1 if lc($val) eq 'on';
        }
        elsif (exists $target_for{$key}) {
            ${ $target_for{$key} } = $val;
        }
    }

    return 1;
}
100 |
101 |
# Write callback: serialize each data source of the value list as one JSON
# object per line and append to the shared buffer; publish once the buffer
# reaches the configured size.
sub amqp_json_write {
    my ($type, $ds, $vl) = @_;
    # Underscores keep the hostname a single token downstream.
    my $host = $vl->{'host'};
    $host =~ s/\./_/g;
    # Fields shared by every data source in this value list.
    my $hashtemplate = {};
    $hashtemplate->{'plugin'} = $vl->{'plugin'};
    $hashtemplate->{'type'} = $vl->{'type'};
    if ( defined $vl->{'plugin_instance'} ) {
        $hashtemplate->{'plugin_instance'} = $vl->{'plugin_instance'};
    }
    if ( defined $vl->{'type_instance'} ) {
        $hashtemplate->{'type_instance'} = $vl->{'type_instance'};
    }

    my $bufflen;
    {
        lock($buff);
        for (my $i = 0; $i < scalar (@$ds); ++$i) {
            # NOTE: $hashref aliases $hashtemplate (no copy is made). That is
            # safe here because every per-iteration key is overwritten before
            # encode_json is called each time through the loop.
            my $hashref = $hashtemplate;
            $hashref->{'name'} = $ds->[$i]->{'name'};
            $hashref->{'value'} = $vl->{'values'}->[$i];
            $hashref->{'time'} = $vl->{'time'};
            $hashref->{'datacenter'} = $prefix;
            $hashref->{'host'} = $host;
            $hashref->{'event_type'} = $event_type;
            $buff .= encode_json($hashref) . "\n";
        }
        $bufflen = length($buff);
    }
    # send_to_amqp() re-acquires the lock, so call it outside the block.
    if ( $bufflen >= $buffer_size ) {
        send_to_amqp();
    }
    return 1;
}
136 |
137 |
sub send_to_amqp {
    # Best-effort publish of the buffered JSON lines as a single message
    # (optionally zlib-compressed). The buffer is cleared even on failure
    # so it cannot grow unboundedly while the broker is unreachable.
    lock($buff);
    return 0 if !length($buff);
    my $mq = Net::RabbitMQ->new();
    eval { $mq->connect($host , { port => $port, user => $user, password => $password, vhost => $vhost }); };
    if ($@ eq '') {
        eval {
            $mq->channel_open(1);
            # Re-declaring an exchange with identical attributes is a no-op
            # on the broker, so this is safe on every publish.
            $mq->exchange_declare(1, $exchange, { 'exchange_type' => 'topic', 'durable' => 1, 'auto_delete' => 0 });
            $mq->publish(1, '', $compress ? compress($buff) : $buff, { exchange => $exchange });
            $mq->disconnect();
        };
        if ($@ ne '') {
            plugin_log(LOG_ERR, "AmqpJson.pm: error publishing to amqp, losing data: " . $@);
        }
    } else {
        plugin_log(LOG_ERR, "AmqpJson.pm: failed to connect to amqp, losing data");
    }
    $buff = '';
    return 1;
}
159 |
sub amqp_json_flush {
    # Flush callback: push out whatever is buffered, regardless of size.
    send_to_amqp();
    return 1;
}
164 |
165 | plugin_register (TYPE_CONFIG, "AmqpJson", "amqp_json_config");
166 | plugin_register (TYPE_WRITE, "AmqpJson", "amqp_json_write");
167 | plugin_register (TYPE_FLUSH, "AmqpJson", "amqp_json_flush");
168 |
169 | 1; # End of Collectd::Plugins::AmqpJson
170 |
--------------------------------------------------------------------------------
/AmqpJsonUdp.pm:
--------------------------------------------------------------------------------
# BUG FIX: package was misdeclared as Collectd::Plugins::AmqpJsonUdpUdp
# (doubled "Udp"), inconsistent with the file name, the plugin_register
# names below, and the trailing "End of Collectd::Plugins::AmqpJsonUdp"
# marker; collectd's LoadPlugin "AmqpJsonUdp" expects this package.
package Collectd::Plugins::AmqpJsonUdp;

use strict;
use warnings;
use Collectd qw( :all );
use threads::shared;
use IO::Socket;
use JSON;
use Compress::Zlib;
10 |
11 | =head1 NAME
12 |
13 | Collectd::Plugins::AmqpJsonUdp - Send collectd metrics to AMQP in json format to a UDP endpoint, based on Collectd::Plugins::Graphite by Joe Miller
14 |
15 | =head1 VERSION
16 |
17 | Version 1
18 |
19 | =cut
20 |
21 | our $VERSION = '1';
22 |
23 |
24 | =head1 SYNOPSIS
25 |
26 | This is a collectd plugin for sending collectd metrics to AMQP in json format using the UDP exchange for RabbitMQ. This is fire and forget!
27 |
28 | In your collectd config:
29 |
30 |
31 | Globals true
32 |
33 |
34 |
35 | BaseName "Collectd::Plugins"
36 | LoadPlugin "AmqpJsonUdp"
37 |
38 |
39 | Buffer "65507"
40 | Prefix "datacenter"
41 | Host "amqp.host"
42 | Port "2003"
43 | Compression "On"
44 |
45 |
46 |
47 | =head1 AUTHOR
48 |
49 | Mark Steele, C<< >>, original author of graphite plugin Joe Miller
50 |
51 | =cut
52 |
53 | my $buff :shared;
54 | # Informal testing has shown that with compression on, a 32k buffer can compress down to about 2k
55 | # My recommendation is to have the buffer size at 1432 bytes if not using compression, and
56 | # set it to 16k if compression is on (which compresses down to roughly 1k).
57 | # This is to accomodate the min MTU to avoid fragmentation on ethernet networks.
58 | #
59 | # On gigabit networks with jumbo frames on between hosts exchanging data, this value can be bumped
60 | # up quite a bit.
61 | #
62 | # Fragmentation in UDP is a recipe for lost data, so it's best avoided. Plus, these recommendations will
63 | # yield highest possible throughput by minimizing header overhead.
64 | #
65 | # Having compression on is highly advisable.
66 | #
67 | #
68 |
69 | my $buffer_size = 16384;
70 | my $prefix;
71 | my $host = 'localhost';
72 | my $port = 5672;
73 | my $compress;
74 | my $event_type = 'CollectdMetric';
75 |
76 | ## TODO: Consider optimizing compression a pre-built zlib dictionary
77 |
# Config callback: copy recognized <Plugin AmqpJsonUdp> keys into the
# package-scoped settings. 'Compression' is a flag (only "On" enables it).
# Unknown keys are silently ignored, as before.
sub amqp_json_config {
    my ($ci) = @_;

    my %target_for = (
        buffer    => \$buffer_size,
        prefix    => \$prefix,
        host      => \$host,
        port      => \$port,
        eventtype => \$event_type,
    );

    for my $item (@{ $ci->{'children'} }) {
        my $key = lc $item->{'key'};
        my $val = $item->{'values'}->[0];
        if ($key eq 'compression') {
            $compress = 1 if lc($val) eq 'on';
        }
        elsif (exists $target_for{$key}) {
            ${ $target_for{$key} } = $val;
        }
    }
    return 1;
}
100 |
# Write callback: serialize each data source of the value list as one JSON
# object per line and append to the shared buffer; fire a UDP send once the
# buffer reaches the configured size.
sub amqp_json_write {
    my ($type, $ds, $vl) = @_;
    # Underscores keep the hostname a single token downstream.
    my $host = $vl->{'host'};
    $host =~ s/\./_/g;
    # Fields shared by every data source in this value list.
    my $hashtemplate = {};
    $hashtemplate->{'plugin'} = $vl->{'plugin'};
    $hashtemplate->{'type'} = $vl->{'type'};
    if ( defined $vl->{'plugin_instance'} ) {
        $hashtemplate->{'plugin_instance'} = $vl->{'plugin_instance'};
    }
    if ( defined $vl->{'type_instance'} ) {
        $hashtemplate->{'type_instance'} = $vl->{'type_instance'};
    }

    my $bufflen;
    {
        lock($buff);
        for (my $i = 0; $i < scalar (@$ds); ++$i) {
            # NOTE: $hashref aliases $hashtemplate (no copy is made). That is
            # safe here because every per-iteration key is overwritten before
            # encode_json is called each time through the loop.
            my $hashref = $hashtemplate;
            $hashref->{'name'} = $ds->[$i]->{'name'};
            $hashref->{'value'} = $vl->{'values'}->[$i];
            $hashref->{'time'} = $vl->{'time'};
            $hashref->{'datacenter'} = $prefix;
            $hashref->{'host'} = $host;
            $hashref->{'event_type'} = $event_type;
            $buff .= encode_json($hashref) . "\n";
        }
        $bufflen = length($buff);
    }
    # send_to_amqp() re-acquires the lock, so call it outside the block.
    if ( $bufflen >= $buffer_size ) {
        send_to_amqp();
    }
    return 1;
}
135 |
sub send_to_amqp {
    # Best-effort, fire-and-forget UDP send of the buffered JSON lines
    # (optionally zlib-compressed). The buffer is always cleared, even on
    # failure, so a dead endpoint cannot grow memory unboundedly.
    lock($buff);
    return 0 if !length($buff);
    if ($compress) {
        plugin_log(LOG_DEBUG,"Uncompressed: " . length($buff));
        $buff = compress($buff, Z_BEST_COMPRESSION);
        plugin_log(LOG_DEBUG,"Compressed: " . length($buff));
    }
    my $sock = IO::Socket::INET->new(Proto => 'udp', PeerPort => $port, PeerAddr => $host);
    if ($sock) {
        $sock->send($buff) or plugin_log(LOG_ERR, "AmqpJsonUdp.pm: Unable to send data");
    } else {
        # BUG FIX: the old code logged the connect failure but then still
        # called send() on an undef handle, crashing the writer thread.
        plugin_log(LOG_ERR, "AmqpJsonUdp.pm: Unable to connect to udp socket $host:$port");
    }
    $buff = '';
    return 1;
}
150 |
sub amqp_json_flush {
    # Flush callback: push out whatever is buffered, regardless of size.
    send_to_amqp();
    return 1;
}
155 |
156 | plugin_register (TYPE_CONFIG, "AmqpJsonUdp", "amqp_json_config");
157 | plugin_register (TYPE_WRITE, "AmqpJsonUdp", "amqp_json_write");
158 | plugin_register (TYPE_FLUSH, "AmqpJsonUdp", "amqp_json_flush");
159 |
160 | 1; # End of Collectd::Plugins::AmqpJsonUdp
161 |
--------------------------------------------------------------------------------
/CPUSummary.pm:
--------------------------------------------------------------------------------
package Collectd::Plugin::CPUSummary;

# FIX: strictures were missing entirely.
use strict;
use warnings;

use Collectd qw(:all);
use Sys::CPU;

#
# Add the following line to /usr/share/collectd/types.db:
# cpusummary user:COUNTER:U:U,nice:COUNTER:U:U,system:COUNTER:U:U,idle:COUNTER:U:U,iowait:COUNTER:U:U,irq:COUNTER:U:U,softirq:COUNTER:U:U,cpucount:GAUGE:U:U
#

plugin_register (TYPE_READ, 'cpusummary', 'my_read');

# CPU count is constant for the process lifetime; used to average the
# aggregate counters to per-CPU values in my_read().
my $cpus = Sys::CPU::cpu_count();
14 |
# Read callback: take the aggregate "cpu" line (first line of /proc/stat),
# average the first seven counters (user, nice, system, idle, iowait, irq,
# softirq) over the CPU count, and dispatch them plus the CPU count itself
# under the custom 'cpusummary' type.
sub my_read
{
    # BUG FIX: the readline was broken ("my $line = ;"); use a checked,
    # three-arg open with a lexical handle instead of a bareword.
    open(my $stat_fh, '<', '/proc/stat')
        or do {
            plugin_log(LOG_ERR, "CPUSummary: cannot open /proc/stat: $!");
            return 0;
        };
    my $line = <$stat_fh>;
    close($stat_fh);
    return 0 unless defined $line;
    chomp($line);

    my $vl = {};
    $vl->{'plugin'} = 'cpusummary';
    $vl->{'type'} = 'cpusummary';
    # Fields [1..7] follow the "cpu" label; truncate each per-CPU average
    # to an integer, then append the CPU count as the final (gauge) value.
    $vl->{'values'} = [ (map { sprintf("%d", $_ / $cpus) } (split(/\s+/, $line))[1..7]), $cpus ];
    plugin_dispatch_values($vl);
    return 1;
}
28 |
--------------------------------------------------------------------------------
/CloudWatchMetrics.pm:
--------------------------------------------------------------------------------
1 | package Collectd::Plugins::CloudWatchMetrics;
2 |
3 | use strict;
4 | use warnings;
5 | use Collectd qw( :all );
6 | use Paws;
7 | use Paws::CloudWatch::MetricDatum;
8 | use Paws::CloudWatch::Dimension;
9 |
10 | =head1 NAME
11 |
12 | Collectd::Plugins::CloudWatchMetrics - Sends metrics to CloudWatch
13 |
14 | =head1 VERSION
15 |
16 | Version 1
17 |
18 | =cut
19 |
20 | our $VERSION = '1';
21 |
22 |
23 | =head1 SYNOPSIS
24 |
25 | This is a collectd plugin for sending collectd metrics to CloudWatch
26 |
27 | In your collectd config:
28 |
29 | LoadPlugin perl
30 |
31 | IncludeDir "/usr/lib64/collectd/perl"
32 | BaseName "Collectd::Plugins"
33 | LoadPlugin "CloudWatchMetrics"
34 |
35 | Region "us-east-1"
36 | AccessKey "AKIAI3243242IB776DVASDFASDF"
37 | SecretKey "87GuZX3242/VwaFme234RszZg/2UfuuKASDFASDFASDFASDF"
38 |
39 |
40 |
41 | =head1 AUTHOR
42 |
43 | Mark Steele, C<< >>
44 |
45 | =cut
46 |
47 | my $region = 'us-east-1';
48 |
# Config callback: read Region plus AWS credentials from the collectd
# config block. Credentials are handed to Paws via the environment.
sub cw_config {
    my ($ci) = @_;
    for my $item (@{ $ci->{'children'} }) {
        my $key = lc $item->{'key'};
        my $val = $item->{'values'}->[0];
        if    ($key eq 'region')    { $region = $val }
        elsif ($key eq 'accesskey') { $ENV{'AWS_ACCESS_KEY'} = $val }
        elsif ($key eq 'secretkey') { $ENV{'AWS_SECRET_KEY'} = $val }
    }
    return 1;
}
64 |
# Write callback: build one MetricDatum per data source, tagged with
# host/plugin/type (and optional instance) dimensions, and push the batch
# to CloudWatch under the 'Collectd' namespace.
sub cw_write {
    my ($type, $ds, $vl) = @_;
    # FIX: the old code constructed an unused CloudWatch client here on
    # every call; the client is now built only when there is data to send.
    # Also replaced indirect-object "new Class(...)" syntax with Class->new.
    my @dimensions = (
        Paws::CloudWatch::Dimension->new(Name => 'host',   Value => $vl->{'host'}),
        Paws::CloudWatch::Dimension->new(Name => 'plugin', Value => $vl->{'plugin'}),
        Paws::CloudWatch::Dimension->new(Name => 'type',   Value => $vl->{'type'}),
    );
    if ( defined $vl->{'plugin_instance'} ) {
        push(@dimensions, Paws::CloudWatch::Dimension->new(Name => 'plugin_instance', Value => $vl->{'plugin_instance'}));
    }
    if ( defined $vl->{'type_instance'} ) {
        push(@dimensions, Paws::CloudWatch::Dimension->new(Name => 'type_instance', Value => $vl->{'type_instance'}));
    }
    my @items;
    for my $i (0 .. $#{$ds}) {
        push(@items,
            Paws::CloudWatch::MetricDatum->new(
                'MetricName' => $ds->[$i]->{'name'},
                'Unit'       => 'Count',
                'Value'      => $vl->{'values'}->[$i],
                'Dimensions' => \@dimensions));
    }
    if (@items) {
        my $cw = Paws->service('CloudWatch', region => $region);
        my $res = $cw->PutMetricData(MetricData => \@items, Namespace => 'Collectd');
        #plugin_log(LOG_ERR, "CW: Sent data: $res");
    }
    return 1;
}
93 |
94 | plugin_register (TYPE_CONFIG, "CloudWatchMetrics", "cw_config");
95 | plugin_register (TYPE_WRITE, "CloudWatchMetrics", "cw_write");
96 |
97 | 1; # End of Collectd::Plugins::CloudWatchMetrics
98 |
--------------------------------------------------------------------------------
/HTTPCheck.pm:
--------------------------------------------------------------------------------
package Collectd::Plugin::HTTPCheck;

# FIX: strictures were missing entirely.
use strict;
use warnings;

use Collectd qw(:all);
use LWP::UserAgent;
use HTTP::Request::Common qw(GET);
use JSON;

# Configuration, populated by httpcheck_config().
my $url;                            # URL to probe (required)
my $regex;                          # pattern for plain-text checks
my $instance = 'default';           # type_instance on dispatched values
my $check_type = 'plain';           # 'plain' (regex) or 'json'
my $check_field = 'default';        # JSON field to inspect
my $check_expected = 'default';     # expected value of that field

plugin_register (TYPE_READ, 'HTTPCheck', 'my_read');
plugin_register (TYPE_CONFIG, "HTTPCheck", "httpcheck_config");
16 |
# Config callback: copy recognized keys into the package-scoped settings.
# Unknown keys are silently ignored, as before.
sub httpcheck_config {
    plugin_log(LOG_ERR, "HTTPCheck: reading config values");
    my ($ci) = @_;

    my %target_for = (
        url           => \$url,
        instance      => \$instance,
        regex         => \$regex,
        checktype     => \$check_type,
        checkfield    => \$check_field,
        checkexpected => \$check_expected,
    );

    for my $item (@{ $ci->{'children'} }) {
        my $key = lc $item->{'key'};
        my $val = $item->{'values'}->[0];
        ${ $target_for{$key} } = $val if exists $target_for{$key};
    }

    plugin_log(LOG_ERR, "HTTPCheck: done reading configuration");
    return 1;
}
40 |
# Read callback: probe $url and dispatch 1 (check passed) or 0 (failed)
# as a gauge with type_instance $instance.
sub my_read
{
    my $vl = {};
    $vl->{'plugin'} = 'http';
    $vl->{'type'} = 'gauge';
    $vl->{'type_instance'} = $instance;
    $vl->{'values'} = [ 0 ];
    my $res;
    eval {
        my $ua = LWP::UserAgent->new;
        $ua->timeout(5);
        my $req = GET $url;
        $res = $ua->simple_request($req);
    };
    if ($@) {
        plugin_log(LOG_ERR, "HTTPCheck: caught exception");
    }

    # BUG FIX: $res may be undef if the request died above; the old code
    # then crashed calling ->code on undef instead of reporting failure.
    if (!defined $res || $res->code ne '200') {
        plugin_log(LOG_ERR, "HTTPCheck: non 200");
        plugin_dispatch_values($vl);
        return 1;
    }

    my $contents = $res->content();
    # BUG FIX: the configured default for CheckType is 'plain', but the old
    # code only compared against 'default', so the regex branch never ran
    # under the default configuration. Accept both spellings.
    if ($check_type eq 'plain' || $check_type eq 'default') {
        if (defined $regex && $contents =~ /$regex/) {
            $vl->{'values'} = [ 1 ];
        } else {
            plugin_log(LOG_ERR, "HTTPCheck: Regex non-match: " . $contents);
        }
    } elsif ($check_type eq 'json') {
        eval {
            my $data = decode_json($contents);
            if ($data->{$check_field} eq $check_expected) {
                $vl->{'values'} = [ 1 ];
            } else {
                plugin_log(LOG_ERR, "HTTPCheck: field not match: " . $data->{$check_field});
            }
        };
        if ($@) {
            plugin_log(LOG_ERR, "HTTPCheck: caught exception");
        }
    }
    plugin_dispatch_values($vl);
    return 1;
}
91 |
92 |
--------------------------------------------------------------------------------
/InnoDBParser.pm:
--------------------------------------------------------------------------------
1 | use strict;
2 | use warnings FATAL => 'all';
3 |
4 | package InnoDBParser;
5 |
6 | # This program is copyright (c) 2006 Baron Schwartz, baron at xaprb dot com.
7 | # Feedback and improvements are gratefully received.
8 | #
9 | # THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
10 | # WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
11 | # MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
12 | #
13 | # This program is free software; you can redistribute it and/or modify it under
14 | # the terms of the GNU General Public License as published by the Free Software
15 | # Foundation, version 2; OR the Perl Artistic License. On UNIX and similar
16 | # systems, you can issue `man perlgpl' or `man perlartistic' to read these
17 |
18 | # You should have received a copy of the GNU General Public License along with
19 | # this program; if not, write to the Free Software Foundation, Inc., 59 Temple
20 | # Place, Suite 330, Boston, MA 02111-1307 USA
21 |
22 | use Data::Dumper;
23 | $Data::Dumper::Sortkeys = 1;
24 | use English qw(-no_match_vars);
25 | use List::Util qw(max);
26 | use POSIX qw(strftime);
27 |
28 | # Some common patterns
29 | my $d = qr/(\d+)/; # Digit
30 | my $f = qr/(\d+\.\d+)/; # Float
31 | my $t = qr/((?:\d+ \d+)|(?:[A-Fa-f0-9]+))/; # Transaction ID
32 | my $i = qr/((?:\d{1,3}\.){3}\d+)/; # IP address
33 | my $n = qr/([^`\s]+)/; # MySQL object name
34 | my $w = qr/(\w+)/; # Words
35 | my $fl = qr/([\w\.\/]+) line $d/; # Filename and line number
36 | my $h = qr/((?:0x)?[0-9a-f]*)/; # Hex
37 | my $s = qr/(\d{6} .?\d:\d\d:\d\d)/; # InnoDB timestamp
38 |
39 | # If you update this variable, also update the SYNOPSIS in the pod.
40 | my %innodb_section_headers = (
41 | "TRANSACTIONS" => "tx",
42 | "BUFFER POOL AND MEMORY" => "bp",
43 | "SEMAPHORES" => "sm",
44 | "LOG" => "lg",
45 | "ROW OPERATIONS" => "ro",
46 | "INSERT BUFFER AND ADAPTIVE HASH INDEX" => "ib",
47 | "FILE I/O" => "io",
48 | "LATEST DETECTED DEADLOCK" => "dl",
49 | "LATEST FOREIGN KEY ERROR" => "fk",
50 | "BACKGROUND THREAD" => "bt",
51 | );
52 |
53 | my %parser_for = (
54 | tx => \&parse_tx_section,
55 | bp => \&parse_bp_section,
56 | sm => \&parse_sm_section,
57 | lg => \&parse_lg_section,
58 | ro => \&parse_ro_section,
59 | ib => \&parse_ib_section,
60 | io => \&parse_io_section,
61 | dl => \&parse_dl_section,
62 | fk => \&parse_fk_section,
63 | );
64 |
65 | my %fk_parser_for = (
66 | Transaction => \&parse_fk_transaction_error,
67 | Error => \&parse_fk_bad_constraint_error,
68 | Cannot => \&parse_fk_cant_drop_parent_error,
69 | );
70 |
71 | # A thread's proc_info can be at least 98 different things I've found in the
72 | # source. Fortunately, most of them begin with a gerunded verb. These are
73 | # the ones that don't.
74 | my %is_proc_info = (
75 | 'After create' => 1,
76 | 'Execution of init_command' => 1,
77 | 'FULLTEXT initialization' => 1,
78 | 'Reopen tables' => 1,
79 | 'Repair done' => 1,
80 | 'Repair with keycache' => 1,
81 | 'System lock' => 1,
82 | 'Table lock' => 1,
83 | 'Thread initialized' => 1,
84 | 'User lock' => 1,
85 | 'copy to tmp table' => 1,
86 | 'discard_or_import_tablespace' => 1,
87 | 'end' => 1,
88 | 'got handler lock' => 1,
89 | 'got old table' => 1,
90 | 'init' => 1,
91 | 'key cache' => 1,
92 | 'locks' => 1,
93 | 'malloc' => 1,
94 | 'query end' => 1,
95 | 'rename result table' => 1,
96 | 'rename' => 1,
97 | 'setup' => 1,
98 | 'statistics' => 1,
99 | 'status' => 1,
100 | 'table cache' => 1,
101 | 'update' => 1,
102 | );
103 |
# Constructor: the parser is stateless; everything is passed per call.
sub new {
    my ($class) = @_;
    return bless {}, $class;
}
107 |
108 | # Parse the status and return it.
109 | # See srv_printf_innodb_monitor in innobase/srv/srv0srv.c
110 | # Pass in the text to parse, whether to be in debugging mode, which sections
111 | # to parse (hashref; if empty, parse all), and whether to parse full info from
112 | # locks and such (probably shouldn't unless you need to).
# Parse SHOW INNODB STATUS text and return a hashref of parsed data.
# Arguments: $fulltext (the status text), $debug, $sections (hashref of
# section abbreviations to parse; empty/undef means all; an explicit false
# value excludes a section), $full (parse detailed lock info), and the
# MySQL version string (5.6 changed the timestamp format).
sub parse_status_text {
    my ( $self, $fulltext, $debug, $sections, $full, $mysqlversion) = @_;

    die "I can't parse undef" unless defined $fulltext;
    $fulltext =~ s/[\r\n]+/\n/g;

    $sections ||= {};
    die '$sections must be a hashref' unless ref($sections) eq 'HASH';

    my %innodb_data = (
        got_all   => 0,   # Whether I was able to get the whole thing
        ts        => '',  # Timestamp the server put on it
        last_secs => 0,   # Num seconds the averages are over
        sections  => {},  # Parsed values from each section
    );

    if ( $debug ) {
        $innodb_data{'fulltext'} = $fulltext;
    }

    # Get the most basic info about the status: beginning and end, and whether
    # I got the whole thing (if there has been a big deadlock and there are
    # too many locks to print, the output might be truncated)
    my $time_text;
    if ( $mysqlversion =~ /^5\.6/ ) {
        ( $time_text ) = $fulltext =~ m/^([0-9-]* [0-9:]*) [0-9a-f]* INNODB MONITOR OUTPUT/m;
        $innodb_data{'ts'} = [ parse_innodb_timestamp_56( $time_text ) ];
    } else {
        ( $time_text ) = $fulltext =~ m/^$s INNODB MONITOR OUTPUT$/m;
        $innodb_data{'ts'} = [ parse_innodb_timestamp( $time_text ) ];
    }
    $innodb_data{'timestring'} = ts_to_string($innodb_data{'ts'});
    ( $innodb_data{'last_secs'} ) = $fulltext
        =~ m/Per second averages calculated from the last $d seconds/;

    ( my $got_all ) = $fulltext =~ m/END OF INNODB MONITOR OUTPUT/;
    $innodb_data{'got_all'} = $got_all || 0;

    # Split it into sections.  Each section begins with
    # -----
    # LABEL
    # -----
    my %innodb_sections;
    my @matches = $fulltext
        =~ m#\n(---+)\n([A-Z /]+)\n\1\n(.*?)(?=\n(---+)\n[A-Z /]+\n\4\n|$)#gs;
    while ( my ( $start, $name, $text, $end ) = splice(@matches, 0, 4) ) {
        $innodb_sections{$name} = [ $text, $end ? 1 : 0 ];
    }
    # The Row Operations section is a special case, because instead of ending
    # with the beginning of another section, it ends with the end of the file.
    # So this section is complete if the entire file is complete.
    $innodb_sections{'ROW OPERATIONS'}->[1] ||= $innodb_data{'got_all'};

    # Just for sanity's sake, make sure I understand what to do with each
    # section.
    eval {
        foreach my $section ( keys %innodb_sections ) {
            my $header = $innodb_section_headers{$section};
            if ( !$header && $debug ) {
                warn "Unknown section $section in $fulltext\n";
            }

            # The last section in the file is a special case, because instead of
            # ending with the beginning of another section, it ends with the end of
            # the file.  So this section is complete if the entire file is
            # complete.  In different versions of InnoDB, various sections are
            # last.
            if ( $innodb_sections{$section}->[0] =~ s/\n---+\nEND OF INNODB.+\n=+$// ) {
                $innodb_sections{$section}->[1] ||= $innodb_data{'got_all'};
            }

            if ( $header && $section ) {
                $innodb_data{'sections'}->{ $header }
                    ->{'fulltext'} = $innodb_sections{$section}->[0];
                $innodb_data{'sections'}->{ $header }
                    ->{'complete'} = $innodb_sections{$section}->[1];
            }
            else {
                _debug( $debug, "header = " . ($header || 'undef') . ", section = " . ($section || 'undef')) if $debug;
            }
        }
    };
    if ( $EVAL_ERROR ) {
        _debug( $debug, $EVAL_ERROR);
    }

    # ################################################################
    # Parse the detailed data out of the sections.
    # ################################################################
    eval {
        foreach my $section ( keys %parser_for ) {
            # BUG FIX: this used to read defined($sections->{$section} &&
            # $sections->{$section}), and defined(0 && ...) is TRUE, so a
            # section explicitly disabled with a 0 value was still parsed.
            # The defined() check must apply to the hash value alone.
            if ( defined $innodb_data{'sections'}->{$section}
                    && (!%$sections
                        || (defined($sections->{$section}) && $sections->{$section})) ) {
                $parser_for{$section}->(
                    $innodb_data{'sections'}->{$section},
                    $innodb_data{'sections'}->{$section}->{'complete'},
                    $debug,
                    $full,
                    $mysqlversion)
                    or delete $innodb_data{'sections'}->{$section};
            }
            else {
                delete $innodb_data{'sections'}->{$section};
            }
        }
    };
    if ( $EVAL_ERROR ) {
        _debug( $debug, $EVAL_ERROR);
    }

    return \%innodb_data;
}
225 |
# Parses the status text and returns it flattened out as a single hash.
# Keys are prefixed 'IB_<section abbreviation>_'; sections that were not
# parsed (or were excluded) simply contribute undef values.
sub get_status_hash {
    my ( $self, $fulltext, $debug, $sections, $full, $mysqlversion ) = @_;

    # Parse the status text...
    my $innodb_status
        = $self->parse_status_text($fulltext, $debug, $sections, $full, $mysqlversion );

    # Flatten the hierarchical structure into a single list by grabbing desired
    # sections from it.
    return
        (map { 'IB_' . $_ => $innodb_status->{$_} } qw(timestring last_secs got_all)),
        (map { 'IB_bp_' . $_ => $innodb_status->{'sections'}->{'bp'}->{$_} }
            qw( writes_pending buf_pool_hit_rate total_mem_alloc buf_pool_reads
                awe_mem_alloc pages_modified writes_pending_lru page_creates_sec
                reads_pending pages_total buf_pool_hits writes_pending_single_page
                page_writes_sec pages_read pages_written page_reads_sec
                writes_pending_flush_list buf_pool_size add_pool_alloc
                dict_mem_alloc pages_created buf_free complete )),
        (map { 'IB_tx_' . $_ => $innodb_status->{'sections'}->{'tx'}->{$_} }
            qw( num_lock_structs history_list_len purge_done_for transactions
                purge_undo_for is_truncated trx_id_counter complete )),
        (map { 'IB_ib_' . $_ => $innodb_status->{'sections'}->{'ib'}->{$_} }
            qw( hash_table_size hash_searches_s non_hash_searches_s
                bufs_in_node_heap used_cells size free_list_len seg_size inserts
                merged_recs merges complete )),
        (map { 'IB_lg_' . $_ => $innodb_status->{'sections'}->{'lg'}->{$_} }
            qw( log_ios_done pending_chkp_writes last_chkp log_ios_s
                log_flushed_to log_seq_no pending_log_writes complete )),
        (map { 'IB_sm_' . $_ => $innodb_status->{'sections'}->{'sm'}->{$_} }
            qw( wait_array_size rw_shared_spins rw_excl_os_waits mutex_os_waits
                mutex_spin_rounds mutex_spin_waits rw_excl_spins rw_shared_os_waits
                waits signal_count reservation_count complete )),
        (map { 'IB_ro_' . $_ => $innodb_status->{'sections'}->{'ro'}->{$_} }
            qw( queries_in_queue n_reserved_extents main_thread_state
                main_thread_proc_no main_thread_id read_sec del_sec upd_sec ins_sec
                read_views_open num_rows_upd num_rows_ins num_rows_read
                queries_inside num_rows_del complete )),
        (map { 'IB_fk_' . $_ => $innodb_status->{'sections'}->{'fk'}->{$_} }
            qw( trigger parent_table child_index parent_index attempted_op
                child_db timestring fk_name records col_name reason txn parent_db
                type child_table parent_col complete )),
        (map { 'IB_io_' . $_ => $innodb_status->{'sections'}->{'io'}->{$_} }
            qw( pending_buffer_pool_flushes pending_pwrites pending_preads
                pending_normal_aio_reads fsyncs_s os_file_writes pending_sync_ios
                reads_s flush_type avg_bytes_s pending_ibuf_aio_reads writes_s
                threads os_file_reads pending_aio_writes pending_log_ios os_fsyncs
                pending_log_flushes complete )),
        (map { 'IB_dl_' . $_ => $innodb_status->{'sections'}->{'dl'}->{$_} }
            qw( timestring rolled_back txns complete ));

}
278 |
# Render a [year, month, day, hour, min, sec] array ref as
# "YYYY-MM-DD hh:mm:ss" (each field zero-padded to at least two digits).
sub ts_to_string {
    my ($parts) = @_;
    return sprintf '%02d-%02d-%02d %02d:%02d:%02d', @{$parts};
}
# Parse a MySQL 5.6-style InnoDB timestamp ("2013-01-15 08:12:13", four-digit
# year) into a ( Y, M, D, h, m, s ) list.  Dies if the text does not match.
sub parse_innodb_timestamp_56 {
   my ( $text ) = @_;
   my @parts = $text =~ m/^(\d\d\d\d)-(\d\d)-(\d\d) +(\d+):(\d+):(\d+)$/;
   die("Can't get timestamp from $text\n") unless @parts;
   return @parts;
}
# Parse an old-style InnoDB timestamp ("130115  8:12:13", two-digit year)
# into a ( Y, M, D, h, m, s ) list, promoting the year to four digits.
# Dies if the text does not match.
sub parse_innodb_timestamp {
   my ( $text ) = @_;
   my ( $year, $mon, $day, $hour, $min, $sec )
      = $text =~ m/^(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)$/;
   die("Can't get timestamp from $text\n") unless $year;
   return ( $year + 2000, $mon, $day, $hour, $min, $sec );
}
298 |
# Parse the LATEST FOREIGN KEY ERROR section.  Extracts the timestamp and the
# error type from $section->{'fulltext'}, then dispatches to the appropriate
# per-type parser via %fk_parser_for (declared earlier in the file).
# Returns 0 if there is no fulltext, 1 otherwise.
# NOTE(review): $s is a pattern fragment declared earlier in the file --
# presumably a capturing timestamp pattern; confirm against the file header.
sub parse_fk_section {
   my ( $section, $complete, $debug, $full, $mysqlversion ) = @_;
   my $fulltext = $section->{'fulltext'};

   return 0 unless $fulltext;

   my ( $ts, $type );
   if ( $mysqlversion =~ /^5.6/ ) {
      # 5.6 prints a four-digit-year timestamp followed by a hex thread id.
      ( $ts, $type ) = $fulltext =~ m/^([0-9-]* [0-9:]*)\s[0-9a-f]*\s+(\w+)/m;
      $section->{'ts'} = [ parse_innodb_timestamp_56( $ts ) ];
   } else {
      ( $ts, $type ) = $fulltext =~ m/^$s\s+(\w+)/m;
      $section->{'ts'} = [ parse_innodb_timestamp( $ts ) ];
   }

   $section->{'timestring'} = ts_to_string($section->{'ts'});
   $section->{'type'} = $type;

   # Decide which type of FK error happened, and dispatch to the right parser.
   if ( $type && $fk_parser_for{$type} ) {
      $fk_parser_for{$type}->( $section, $complete, $debug, $fulltext, $full );
   }

   # The raw section text is only kept when debugging.
   delete $section->{'fulltext'} unless $debug;

   return 1;
}
326 |
# Parse the FK error reported when dropping/truncating a parent table fails
# because a child table still references it.  Fills in attempted_op,
# parent/child db+table, and a one-line 'reason' on $section.
# NOTE(review): $w is a pattern fragment declared earlier in the file --
# presumably a capturing word pattern; confirm against the file header.
sub parse_fk_cant_drop_parent_error {
   my ( $section, $complete, $debug, $fulltext, $full ) = @_;

   # Parse the parent/child table info out
   @{$section}{ qw(attempted_op parent_db parent_table) } = $fulltext
      =~ m{Cannot $w table `(.*)/(.*)`}m;
   @{$section}{ qw(child_db child_table) } = $fulltext
      =~ m{because it is referenced by `(.*)/(.*)`}m;

   # The reason is the rest of the message, starting at "Cannot ..." or,
   # failing that, "Trying to add ...".
   ( $section->{'reason'} ) = $fulltext =~ m/(Cannot .*)/s;
   if ( !defined $section->{reason} ) {
      ( $section->{'reason'} ) = $fulltext =~ m/(Trying to add .*)/s;
   }
   # Collapse the multi-line "InnoDB: "-prefixed message into one line.
   $section->{'reason'} =~ s/\n(?:InnoDB: )?/ /gm
      if $section->{'reason'};

   # Certain data may not be present.  Make them '' if not present.
   map { $section->{$_} ||= "" }
      qw(child_index fk_name col_name parent_col);
}
347 |
348 | # See dict/dict0dict.c, function dict_foreign_error_report
349 | # I don't care much about these. There are lots of different messages, and
350 | # they come from someone trying to create a foreign key, or similar
351 | # statements. They aren't indicative of some transaction trying to insert,
352 | # delete or update data. Sometimes it is possible to parse out a lot of
353 | # information about the tables and indexes involved, but often the message
354 | # contains the DDL string the user entered, which is way too much for this
355 | # module to try to handle.
# Parse the FK error produced by a bad constraint in DDL (see
# dict/dict0dict.c, dict_foreign_error_report).  Best-effort extraction of
# the constraint name, columns and parent/child tables; much of the message
# may be user-entered DDL that cannot be parsed reliably.
# NOTE(review): $n is a pattern fragment declared earlier in the file --
# presumably a capturing identifier pattern; confirm against the file header.
sub parse_fk_bad_constraint_error {
   my ( $section, $complete, $debug, $fulltext, $full ) = @_;

   # Parse the parent/child table and index info out
   @{$section}{ qw(child_db child_table) } = $fulltext
      =~ m{Error in foreign key constraint of table (.*)/(.*):$}m;
   $section->{'attempted_op'} = 'DDL';

   # FK name, parent info... if possible.
   @{$section}{ qw(fk_name col_name parent_db parent_table parent_col) }
      = $fulltext
      =~ m/CONSTRAINT `?$n`? FOREIGN KEY \(`?$n`?\) REFERENCES (?:`?$n`?\.)?`?$n`? \(`?$n`?\)/;

   if ( !defined($section->{'fk_name'}) ) {
      # Try to parse SQL a user might have typed in a CREATE statement or such
      @{$section}{ qw(col_name parent_db parent_table parent_col) }
         = $fulltext
         =~ m/FOREIGN\s+KEY\s*\(`?$n`?\)\s+REFERENCES\s+(?:`?$n`?\.)?`?$n`?\s*\(`?$n`?\)/i;
   }
   # An unqualified parent table lives in the same database as the child.
   $section->{'parent_db'} ||= $section->{'child_db'};

   # Name of the child index (index in the same table where the FK is, see
   # definition of dict_foreign_struct in include/dict0mem.h, where it is
   # called foreign_index, as opposed to referenced_index which is in the
   # parent table.  This may not be possible to find.
   @{$section}{ qw(child_index) } = $fulltext
      =~ m/^The index in the foreign key in table is $n$/m;

   # The reason is whatever sits between the header colon and "Constraint:".
   @{$section}{ qw(reason) } = $fulltext =~ m/:\s*([^:]+)(?= Constraint:|$)/ms;
   $section->{'reason'} =~ s/\s+/ /g
      if $section->{'reason'};

   # Certain data may not be present.  Make them '' if not present.
   map { $section->{$_} ||= "" }
      qw(child_index fk_name col_name parent_table parent_col);
}
392 |
393 | # see source file row/row0ins.c
# Parse the FK error caused by a transaction's insert/update/delete (see
# row/row0ins.c).  Extracts the offending transaction, the parent/child
# tables and indexes, the record(s) involved, and the triggering tuple,
# and stores them on $section.
# NOTE(review): $n is a pattern fragment declared earlier in the file --
# presumably a capturing identifier pattern; confirm against the file header.
sub parse_fk_transaction_error {
   my ( $section, $complete, $debug, $fulltext, $full ) = @_;

   # Parse the txn info out
   my ( $txn ) = $fulltext
      =~ m/Transaction:\n(TRANSACTION.*)\nForeign key constraint fails/s;
   if ( $txn ) {
      $section->{'txn'} = parse_tx_text( $txn, $complete, $debug, $full );
   }

   # Parse the parent/child table and index info out.  There are two types: an
   # update or a delete of a parent record leaves a child orphaned
   # (row_ins_foreign_report_err), and an insert or update of a child record has
   # no matching parent record (row_ins_foreign_report_add_err).

   @{$section}{ qw(reason child_db child_table) }
      = $fulltext =~ m{^(Foreign key constraint fails for table `(.*?)`?[/.]`?(.*)`:)$}m;

   @{$section}{ qw(fk_name col_name parent_db parent_table parent_col) }
      = $fulltext
      =~ m/CONSTRAINT `$n` FOREIGN KEY \(`$n`\) REFERENCES (?:`$n`\.)?`$n` \(`$n`\)/;
   $section->{'parent_db'} ||= $section->{'child_db'};

   # Special case, which I don't know how to trigger, but see
   # innobase/row/row0ins.c row_ins_check_foreign_constraint
   if ( $fulltext =~ m/ibd file does not currently exist!/ ) {
      # BUGFIX: the tuple is optional, so it must sit inside a non-capturing
      # optional group.  The previous pattern read "tuple:\n(.*))?" with an
      # unbalanced ')', which is a fatal "Unmatched )" regex error whenever
      # this branch ran.
      my ( $attempted_op, $index, $records )
         = $fulltext =~ m/^Trying to (add to index) `$n`(?: tuple:\n(.*))?/sm;
      $section->{'child_index'} = $index;
      $section->{'attempted_op'} = $attempted_op || '';
      if ( $records && $full ) {
         ( $section->{'records'} )
            = parse_innodb_record_dump( $records, $complete, $debug );
      }
      # BUGFIX: this slice was bound to the pattern with =~ instead of being
      # assigned the captures, so parent_db/parent_table were never set.
      @{$section}{qw(parent_db parent_table)}
         = $fulltext =~ m/^But the parent table `$n`\.`$n`$/m;
   }
   else {
      my ( $attempted_op, $which, $index )
         = $fulltext =~ m/^Trying to ([\w ]*) in (child|parent) table, in index `$n` tuple:$/m;
      if ( $which ) {
         $section->{$which . '_index'} = $index;
         $section->{'attempted_op'} = $attempted_op || '';

         # Parse out the related records in the other table.
         my ( $search_index, $records );
         if ( $which eq 'child' ) {
            ( $search_index, $records ) = $fulltext
               =~ m/^But in parent table [^,]*, in index `$n`,\nthe closest match we can find is record:\n(.*)/ms;
            $section->{'parent_index'} = $search_index;
         }
         else {
            ( $search_index, $records ) = $fulltext
               =~ m/^But in child table [^,]*, in index `$n`, (?:the record is not available|there is a record:\n(.*))?/ms;
            $section->{'child_index'} = $search_index;
         }
         if ( $records && $full ) {
            $section->{'records'}
               = parse_innodb_record_dump( $records, $complete, $debug );
         }
         else {
            $section->{'records'} = '';
         }
      }
   }

   # Parse out the tuple trying to be updated, deleted or inserted.
   my ( $trigger ) = $fulltext =~ m/^(DATA TUPLE: \d+ fields;\n.*)$/m;
   if ( $trigger ) {
      $section->{'trigger'} = parse_innodb_record_dump( $trigger, $complete, $debug );
   }

   # Certain data may not be present.  Make them '' if not present.
   map { $section->{$_} ||= "" }
      qw(child_index fk_name col_name parent_table parent_col);
}
470 |
471 | # There are new-style and old-style record formats. See rem/rem0rec.c
472 | # TODO: write some tests for this
# Parse a dumped InnoDB record (new-style compact, two old-style variants, or
# a DATA TUPLE) into a hash ref with heap_no/type/num_fields/byte_offset/
# info_bits plus a 'fields' array of parsed fields.  Returns undef for an
# empty dump.  There are new-style and old-style record formats; see
# rem/rem0rec.c.
# NOTE(review): $d is a pattern fragment declared earlier in the file --
# presumably a capturing digit pattern; confirm against the file header.
sub parse_innodb_record_dump {
   my ( $dump, $complete, $debug ) = @_;
   return undef unless $dump;

   my $result = {};

   if ( $dump =~ m/PHYSICAL RECORD/ ) {
      my $style = $dump =~ m/compact format/ ? 'new' : 'old';
      $result->{'style'} = $style;

      # This is a new-style record.
      if ( $style eq 'new' ) {
         @{$result}{qw( heap_no type num_fields info_bits )}
            = $dump
            =~ m/^(?:Record lock, heap no $d )?([A-Z ]+): n_fields $d; compact format; info bits $d$/m;
      }

      # OK, it's old-style.  Unfortunately there are variations here too.
      elsif ( $dump =~ m/-byte offs / ) {
         # Older-old style.
         @{$result}{qw( heap_no type num_fields byte_offset info_bits )}
            = $dump
            =~ m/^(?:Record lock, heap no $d )?([A-Z ]+): n_fields $d; $d-byte offs [A-Z]+; info bits $d$/m;
         if ( $dump !~ m/-byte offs TRUE/ ) {
            $result->{'byte_offset'} = 0;
         }
      }
      else {
         # Newer-old style.
         @{$result}{qw( heap_no type num_fields byte_offset info_bits )}
            = $dump
            =~ m/^(?:Record lock, heap no $d )?([A-Z ]+): n_fields $d; $d-byte offsets; info bits $d$/m;
      }

   }
   else {
      # Not a physical record at all: a data tuple being inserted/searched.
      $result->{'style'} = 'tuple';
      @{$result}{qw( type num_fields )}
         = $dump =~ m/^(DATA TUPLE): $d fields;$/m;
   }

   # Fill in default values for things that couldn't be parsed.
   map { $result->{$_} ||= 0 }
      qw(heap_no num_fields byte_offset info_bits);
   map { $result->{$_} ||= '' }
      qw(style type );

   # Split the "N: ..." field dumps apart and parse each one.
   my @fields = $dump =~ m/ (\d+:.*?;?);(?=$| \d+:)/gm;
   $result->{'fields'} = [ map { parse_field($_, $complete, $debug ) } @fields ];

   return $result;
}
525 |
526 | # New/old-style applies here. See rem/rem0rec.c
527 | # $text should not include the leading space or the second trailing semicolon.
# Parse a single field from a record dump into a hash ref with id, len, hex,
# asc and trunc keys.  New/old-style applies here; see rem/rem0rec.c.
# $text should not include the leading space or the second trailing semicolon.
# Dies if the field cannot be parsed at all.
# NOTE(review): $d and $h are pattern fragments declared earlier in the file
# -- presumably capturing digit and hex patterns; confirm against the header.
sub parse_field {
   my ( $text, $complete, $debug ) = @_;

   # Sample fields:
   # '4: SQL NULL, size 4 '
   # '1: len 6; hex 000000005601; asc     V ;'
   # '6: SQL NULL'
   # '5: len 30; hex 687474703a2f2f7777772e737765657477617465722e636f6d2f73746f72; asc http://www.sweetwater.com/stor;...(truncated)'
   my ( $id, $nullsize, $len, $hex, $asc, $truncated );
   # Try the three formats in order: sized NULL, bare NULL, then a real value.
   ( $id, $nullsize ) = $text =~ m/^$d: SQL NULL, size $d $/;
   if ( !defined($id) ) {
      ( $id ) = $text =~ m/^$d: SQL NULL$/;
   }
   if ( !defined($id) ) {
      ( $id, $len, $hex, $asc, $truncated )
         = $text =~ m/^$d: len $d; hex $h; asc (.*);(\.\.\.\(truncated\))?$/;
   }

   die "Could not parse this field: '$text'" unless defined $id;
   return {
      id    => $id,
      len   => defined($len) ? $len : defined($nullsize) ? $nullsize : 0,
      'hex' => defined($hex) ? $hex : '',
      asc   => defined($asc) ? $asc : '',
      trunc => $truncated ? 1 : 0,
   };

}
556 |
# Parse the LATEST DETECTED DEADLOCK section.  Splits the "*** (N) ..."
# sub-sections into per-transaction entries under $dl->{'txns'}, records
# which transaction was rolled back, and timestamps the deadlock.
# Returns 1 on success, 0/empty when there is nothing to parse.
# NOTE(review): $s and $d are pattern fragments declared earlier in the file
# -- presumably capturing timestamp and digit patterns; confirm in header.
sub parse_dl_section {
   my ( $dl, $complete, $debug, $full ) = @_;
   return unless $dl;
   my $fulltext = $dl->{'fulltext'};
   return 0 unless $fulltext;

   my ( $ts ) = $fulltext =~ m/^$s$/m;
   return 0 unless $ts;

   $dl->{'ts'} = [ parse_innodb_timestamp( $ts ) ];
   $dl->{'timestring'} = ts_to_string($dl->{'ts'});
   $dl->{'txns'} = {};

   # Pair up each "*** ..." header with the body that follows it.
   my @sections
      = $fulltext
      =~ m{
            ^\*{3}\s([^\n]*)  # *** (1) WAITING FOR THIS...
            (.*?)             # Followed by anything, non-greedy
            (?=(?:^\*{3})|\z) # Followed by another three stars or EOF
         }gmsx;


   # Loop through each section.  There are no assumptions about how many
   # there are, who holds and wants what locks, and who gets rolled back.
   while ( my ($header, $body) = splice(@sections, 0, 2) ) {
      my ( $txn_id, $what ) = $header =~ m/^\($d\) (.*):$/;
      next unless $txn_id;
      $dl->{'txns'}->{$txn_id} ||= {};
      my $txn = $dl->{'txns'}->{$txn_id};

      if ( $what eq 'TRANSACTION' ) {
         $txn->{'tx'} = parse_tx_text( $body, $complete, $debug, $full );
      }
      else {
         push @{$txn->{'locks'}}, parse_innodb_record_locks( $body, $complete, $debug, $full );
      }
   }

   @{ $dl }{ qw(rolled_back) }
      = $fulltext =~ m/^\*\*\* WE ROLL BACK TRANSACTION \($d\)$/m;

   # Make sure certain values aren't undef
   map { $dl->{$_} ||= '' } qw(rolled_back);

   delete $dl->{'fulltext'} unless $debug;
   return 1;
}
604 |
# Parse every "RECORD LOCKS"/"TABLE LOCK" line in $text into a list of hash
# refs describing each lock (type, space/page, index, db.table, trx id, mode,
# plus gap/insert-intention/waiting flags).
# NOTE(review): $d, $n and $t are pattern fragments declared earlier in the
# file -- presumably capturing digit, identifier and trx-id patterns.
sub parse_innodb_record_locks {
   my ( $text, $complete, $debug, $full ) = @_;
   my @result;

   foreach my $lock ( $text =~ m/(^(?:RECORD|TABLE) LOCKS?.*$)/gm ) {
      my $hash = {};
      # The space/page/index part only appears for RECORD locks; the table
      # may be quoted as `db/table` or `db`.`table` depending on version.
      @{$hash}{ qw(lock_type space_id page_no n_bits index db table txn_id lock_mode) }
         = $lock
         =~ m{^(RECORD|TABLE) LOCKS? (?:space id $d page no $d n bits $d index `?$n`? of )?table `$n(?:/|`\.`)$n` trx id $t lock.mode (\S+)}m;
      ( $hash->{'special'} )
         = $lock =~ m/^(?:RECORD|TABLE) .*? locks (rec but not gap|gap before rec)/m;
      $hash->{'insert_intention'}
         = $lock =~ m/^(?:RECORD|TABLE) .*? insert intention/m ? 1 : 0;
      $hash->{'waiting'}
         = $lock =~ m/^(?:RECORD|TABLE) .*? waiting/m ? 1 : 0;

      # Some things may not be in the text, so make sure they are not
      # undef.
      map { $hash->{$_} ||= 0 } qw(n_bits page_no space_id);
      map { $hash->{$_} ||= "" } qw(index special);
      push @result, $hash;
   }

   return @result;
}
630 |
# Parse the text of a single transaction (one "---TRANSACTION ..." chunk)
# into a hash ref of everything InnoDB reports about it: ids, status, times,
# the MySQL thread/user/host line, lock statistics, read view, query text
# and (optionally) its locks.  Missing numeric values default to 0 and
# missing strings to ''.  The regex cascade below is order-sensitive: later
# matches depend on what earlier ones did or did not capture, so do not
# reorder these statements.
# NOTE(review): $t, $d, $i and $w are pattern fragments declared earlier in
# the file -- presumably capturing trx-id, digit, IP and word patterns;
# %is_proc_info is likewise declared earlier.  Confirm against file header.
sub parse_tx_text {
   my ( $txn, $complete, $debug, $full ) = @_;

   my ( $txn_id, $txn_status )
      = $txn
      =~ m/^(?:---)?TRANSACTION $t, ([^\n0-9,]*[^\s\d])/m;
   $txn_status =~ s/,$// if $txn_status;
   my ( $active_secs)
      = $txn
      =~ m/^[^\n]*\b$d sec\b/m;
   my ( $proc_no )
      = $txn
      =~ m/process no $d/m;
   my ( $os_thread_id )
      = $txn
      =~ m/OS thread id $d/m;
   my ( $thread_status, $thread_decl_inside )
      = $txn
      =~ m/(?:OS thread id \d+|\d sec)(?: ([^,]+?))?(?:, thread declared inside InnoDB $d)?$/m;

   # Parsing the line that begins 'MySQL thread id' is complicated.  The only
   # thing always in the line is the thread and query id.  See function
   # innobase_mysql_print_thd in InnoDB source file sql/ha_innodb.cc.
   my ( $thread_line ) = $txn =~ m/^(MySQL thread id .*)$/m;
   my ( $mysql_thread_id, $query_id, $hostname, $ip, $user, $query_status );

   if ( $thread_line ) {
      # These parts can always be gotten.
      ( $mysql_thread_id, $query_id ) = $thread_line =~ m/^MySQL thread id $d, .*?query id $d/m;

      # If it's a master/slave thread, "Has (read|sent) all" may be the thread's
      # proc_info.  In these cases, there won't be any host/ip/user info
      ( $query_status ) = $thread_line =~ m/(Has (?:read|sent) all .*$)/m;
      if ( defined($query_status) ) {
         $user = 'system user';
      }

      # It may be the case that the query id is the last thing in the line.
      elsif ( $thread_line =~ m/query id \d+ / ) {
         # The IP address is the only non-word thing left, so it's the most
         # useful marker for where I have to start guessing.
         ( $hostname, $ip ) = $thread_line =~ m/query id \d+(?: ([A-Za-z]\S+))? $i/m;
         if ( defined $ip ) {
            ( $user, $query_status ) = $thread_line =~ m/$ip $w(?: (.*))?$/;
         }
         else { # OK, there wasn't an IP address.
            # There might not be ANYTHING except the query status.
            ( $query_status ) = $thread_line =~ m/query id \d+ (.*)$/;
            if ( $query_status !~ m/^\w+ing/ && !exists($is_proc_info{$query_status}) ) {
               # The remaining tokens are, in order: hostname, user, query_status.
               # It's basically impossible to know which is which.
               ( $hostname, $user, $query_status ) = $thread_line
                  =~ m/query id \d+(?: ([A-Za-z]\S+))?(?: $w(?: (.*))?)?$/m;
               if ( ($hostname || '') eq 'Slave' ) {
                  $hostname = '';
                  $user = 'system user';
                  $query_status = "Slave has $query_status";
               }
            }
            else {
               $user = 'system user';
            }
         }
      }
   }

   # Lock statistics; every piece after "lock struct(s)" is optional.
   my ( $lock_wait_status, $lock_structs, $heap_size, $row_locks, $undo_log_entries )
      = $txn
      =~ m/^(?:(\D*) )?$d lock struct\(s\), heap size $d(?:, $d row lock\(s\))?(?:, undo log entries $d)?$/m;
   my ( $lock_wait_time )
      = $txn
      =~ m/^------- TRX HAS BEEN WAITING $d SEC/m;

   my $locks;
   # If the transaction has locks, grab the locks.
   if ( $txn =~ m/^TABLE LOCK|RECORD LOCKS/ ) {
      $locks = [parse_innodb_record_locks($txn, $complete, $debug, $full)];
   }

   my ( $tables_in_use, $tables_locked )
      = $txn
      =~ m/^mysql tables in use $d, locked $d$/m;
   my ( $txn_doesnt_see_ge, $txn_sees_lt )
      = $txn
      =~ m/^Trx read view will not see trx with id >= $t, sees < $t$/m;
   my $has_read_view = defined($txn_doesnt_see_ge);
   # Only a certain number of bytes of the query text are included here, at least
   # under some circumstances.  Some versions include 300, some 600.
   my ( $query_text )
      = $txn
      =~ m{
            ^MySQL\sthread\sid\s[^\n]+\n  # This comes before the query text
            (.*?)                         # The query text
            (?=                           # Followed by any of...
               ^Trx\sread\sview
               |^-------\sTRX\sHAS\sBEEN\sWAITING
               |^TABLE\sLOCK
               |^RECORD\sLOCKS\sspace\sid
               |^(?:---)?TRANSACTION
               |^\*\*\*\s\(\d\)
               |\Z
            )
         }xms;
   if ( $query_text ) {
      $query_text =~ s/\s+$//;
   }
   else {
      $query_text = '';
   }

   my %stuff = (
      active_secs        => $active_secs,
      has_read_view      => $has_read_view,
      heap_size          => $heap_size,
      hostname           => $hostname,
      ip                 => $ip,
      lock_structs       => $lock_structs,
      lock_wait_status   => $lock_wait_status,
      lock_wait_time     => $lock_wait_time,
      mysql_thread_id    => $mysql_thread_id,
      os_thread_id       => $os_thread_id,
      proc_no            => $proc_no,
      query_id           => $query_id,
      query_status       => $query_status,
      query_text         => $query_text,
      row_locks          => $row_locks,
      tables_in_use      => $tables_in_use,
      tables_locked      => $tables_locked,
      thread_decl_inside => $thread_decl_inside,
      thread_status      => $thread_status,
      txn_doesnt_see_ge  => $txn_doesnt_see_ge,
      txn_id             => $txn_id,
      txn_sees_lt        => $txn_sees_lt,
      txn_status         => $txn_status,
      undo_log_entries   => $undo_log_entries,
      user               => $user,
   );
   $stuff{'fulltext'} = $txn if $debug;
   $stuff{'locks'} = $locks if $locks;

   # Some things may not be in the txn text, so make sure they are not
   # undef.
   map { $stuff{$_} ||= 0 } qw(active_secs heap_size lock_structs
         tables_in_use undo_log_entries tables_locked has_read_view
         thread_decl_inside lock_wait_time proc_no row_locks);
   map { $stuff{$_} ||= "" } qw(thread_status txn_doesnt_see_ge
         txn_sees_lt query_status ip query_text lock_wait_status user);
   $stuff{'hostname'} ||= $stuff{'ip'};

   return \%stuff;
}
782 |
# Parse the TRANSACTIONS section: splits out every "---TRANSACTION ..." chunk
# and runs parse_tx_text on each, then grabs the section-level counters
# (trx id counter, purge position, history list length, lock struct count,
# truncation flag).  Returns 1 on success, empty/0 when there is no text.
# NOTE(review): $t and $d are pattern fragments declared earlier in the file.
sub parse_tx_section {
   my ( $section, $complete, $debug, $full ) = @_;
   return unless $section && $section->{'fulltext'};
   my $fulltext = $section->{'fulltext'};
   $section->{'transactions'} = [];

   # Handle the individual transactions
   my @transactions = $fulltext =~ m/(---TRANSACTION [0-9A-Fa-f].*?)(?=\n---TRANSACTION|$)/gs;
   foreach my $txn ( @transactions ) {
      my $stuff = parse_tx_text( $txn, $complete, $debug, $full );
      delete $stuff->{'fulltext'} unless $debug;
      push @{$section->{'transactions'}}, $stuff;
   }

   # Handle the general info
   @{$section}{ 'trx_id_counter' }
      = $fulltext =~ m/^Trx id counter $t$/m;
   @{$section}{ 'purge_done_for', 'purge_undo_for' }
      = $fulltext =~ m/^Purge done for trx's n:o < $t undo n:o < $t$/m;
   @{$section}{ 'history_list_len' } # This isn't present in some 4.x versions
      = $fulltext =~ m/^History list length $d$/m;
   @{$section}{ 'num_lock_structs' }
      = $fulltext =~ m/^Total number of lock structs in row lock hash table $d$/m;
   @{$section}{ 'is_truncated' }
      = $fulltext =~ m/^\.\.\. truncated\.\.\.$/m ? 1 : 0;

   # Fill in things that might not be present
   foreach ( qw(history_list_len) ) {
      $section->{$_} ||= 0;
   }

   delete $section->{'fulltext'} unless $debug;
   return 1;
}
817 |
818 | # I've read the source for this section.
819 | sub parse_ro_section {
820 | my ( $section, $complete, $debug, $full ) = @_;
821 | return unless $section && $section->{'fulltext'};
822 | my $fulltext = $section->{'fulltext'};
823 |
824 | # Grab the info
825 | @{$section}{ 'queries_inside', 'queries_in_queue' }
826 | = $fulltext =~ m/^$d queries inside InnoDB, $d queries in queue$/m;
827 | ( $section->{ 'read_views_open' } )
828 | = $fulltext =~ m/^$d read views open inside InnoDB$/m;
829 | ( $section->{ 'n_reserved_extents' } )
830 | = $fulltext =~ m/^$d tablespace extents now reserved for B-tree/m;
831 | @{$section}{ 'main_thread_proc_no', 'main_thread_id', 'main_thread_state' }
832 | = $fulltext =~ m/^Main thread (?:process no. $d, )?id $d, state: (.*)$/m;
833 | @{$section}{ 'num_rows_ins', 'num_rows_upd', 'num_rows_del', 'num_rows_read' }
834 | = $fulltext =~ m/^Number of rows inserted $d, updated $d, deleted $d, read $d$/m;
835 | @{$section}{ 'ins_sec', 'upd_sec', 'del_sec', 'read_sec' }
836 | = $fulltext =~ m#^$f inserts/s, $f updates/s, $f deletes/s, $f reads/s$#m;
837 | $section->{'main_thread_proc_no'} ||= 0;
838 |
839 | map { $section->{$_} ||= 0 } qw(read_views_open n_reserved_extents);
840 | delete $section->{'fulltext'} unless $debug;
841 | return 1;
842 | }
843 |
# Parse the LOG section: log sequence number, flushed-to and checkpoint
# positions, pending log/checkpoint writes, and log I/O counters.
# Returns 1 on success, empty when there is nothing to parse.
# NOTE(review): $d and $f are pattern fragments declared earlier in the file
# -- presumably capturing digit and float patterns; confirm in header.
sub parse_lg_section {
   my ( $section, $complete, $debug, $full ) = @_;
   # BUGFIX/consistency: also require fulltext, like the other
   # parse_*_section routines; previously a defined-but-empty section caused
   # pattern matches against undef below.
   return unless $section && $section->{'fulltext'};
   my $fulltext = $section->{'fulltext'};

   # Grab the info.  The LSN values may be "low high" pairs on old versions,
   # so everything after the first digit is captured as-is.
   ( $section->{ 'log_seq_no' } )
      = $fulltext =~ m/Log sequence number \s*(\d.*)$/m;
   ( $section->{ 'log_flushed_to' } )
      = $fulltext =~ m/Log flushed up to \s*(\d.*)$/m;
   ( $section->{ 'last_chkp' } )
      = $fulltext =~ m/Last checkpoint at \s*(\d.*)$/m;
   @{$section}{ 'pending_log_writes', 'pending_chkp_writes' }
      = $fulltext =~ m/$d pending log writes, $d pending chkp writes/;
   @{$section}{ 'log_ios_done', 'log_ios_s' }
      = $fulltext =~ m#$d log i/o's done, $f log i/o's/second#;

   delete $section->{'fulltext'} unless $debug;
   return 1;
}
864 |
# Parse the INSERT BUFFER AND ADAPTIVE HASH INDEX section: ibuf size/free
# list/seg size, merge counters, and adaptive hash statistics.
# Returns 1 on success.
# NOTE(review): $d and $f are pattern fragments declared earlier in the file.
sub parse_ib_section {
   my ( $section, $complete, $debug, $full ) = @_;
   return unless $section && $section->{'fulltext'};
   my $fulltext = $section->{'fulltext'};

   # Some servers will output ibuf information for tablespace 0, as though there
   # might be many tablespaces with insert buffers.  (In practice I believe
   # the source code shows there will only ever be one).  I have to parse both
   # cases here, but I assume there will only be one.
   @{$section}{ 'size', 'free_list_len', 'seg_size' }
      = $fulltext =~ m/^Ibuf(?: for space 0)?: size $d, free list len $d, seg size $d/m;
   @{$section}{ 'inserts', 'merged_recs', 'merges' }
      = $fulltext =~ m/^$d inserts, $d merged recs, $d merges$/m;
   if ( ! defined $section->{inserts} ) {
      # Newer versions print a "merged operations:" block instead.
      @{$section}{ 'inserts' }
         = $fulltext =~ m/merged operations:\n insert $d,/s;
      # This isn't really true, but it's not really important either.  We already
      # aren't supporting the 'delete' operations.
      @{$section}{ 'merged_recs', 'merges' } = (0, 0);
   }

   # "used cells" is not printed by every version, hence optional.
   @{$section}{ 'hash_table_size', 'used_cells', 'bufs_in_node_heap' }
      = $fulltext =~ m/^Hash table size $d(?:, used cells $d)?, node heap has $d buffer\(s\)$/m;
   @{$section}{ 'hash_searches_s', 'non_hash_searches_s' }
      = $fulltext =~ m{^$f hash searches/s, $f non-hash searches/s$}m;

   delete $section->{'fulltext'} unless $debug;
   return 1;
}
894 |
# Parse one "--Thread ... has waited at ..." entry from the SEMAPHORES
# section into a hash ref describing the waiting thread and the lock it is
# waiting on.  Mutex waits and RW-latch waits print different text, hence
# the two code paths.  Returns a hash ref.
# NOTE(review): $d, $f, $h and $fl are pattern fragments declared earlier in
# the file -- presumably capturing digit, float, hex and file:line patterns.
sub parse_wait_array {
   my ( $text, $complete, $debug, $full ) = @_;
   my %result;

   @result{ qw(thread waited_at_filename waited_at_line waited_secs) }
      = $text =~ m/^--Thread $d has waited at $fl for $f seconds/m;

   # Depending on whether it's a SYNC_MUTEX,RW_LOCK_EX,RW_LOCK_SHARED,
   # there will be different text output
   if ( $text =~ m/^Mutex at/m ) {
      $result{'request_type'} = 'M';
      @result{ qw( lock_mem_addr lock_cfile_name lock_cline lock_var) }
         = $text =~ m/^Mutex at $h created file $fl, lock var $d$/m;
      @result{ qw( waiters_flag )}
         = $text =~ m/^waiters flag $d$/m;
   }
   else {
      # RW-latch: request_type is 'S' or 'X' from the "-lock" prefix.
      @result{ qw( request_type lock_mem_addr lock_cfile_name lock_cline) }
         = $text =~ m/^(.)-lock on RW-latch at $h created in file $fl$/m;
      @result{ qw( writer_thread writer_lock_mode ) }
         = $text =~ m/^a writer \(thread id $d\) has reserved it in mode (.*)$/m;
      @result{ qw( num_readers waiters_flag )}
         = $text =~ m/^number of readers $d, waiters flag $d$/m;
      @result{ qw(last_s_file_name last_s_line ) }
         = $text =~ m/Last time read locked in file $fl$/m;
      @result{ qw(last_x_file_name last_x_line ) }
         = $text =~ m/Last time write locked in file $fl$/m;
   }

   $result{'cell_waiting'} = $text =~ m/^wait has ended$/m ? 0 : 1;
   $result{'cell_event_set'} = $text =~ m/^wait is ending$/m ? 1 : 0;

   # Because there are two code paths, some things won't get set.
   map { $result{$_} ||= '' }
      qw(last_s_file_name last_x_file_name writer_lock_mode);
   map { $result{$_} ||= 0 }
      qw(num_readers lock_var last_s_line last_x_line writer_thread);

   return \%result;
}
935 |
# Parse the SEMAPHORES section: OS wait array reservation/signal counts,
# mutex and RW-lock spin/wait statistics, and each entry in the wait array
# (via parse_wait_array).  Returns 1 on success, 0 when there is no text.
# NOTE(review): $d is a pattern fragment declared earlier in the file.
sub parse_sm_section {
   my ( $section, $complete, $debug, $full ) = @_;
   return 0 unless $section && $section->{'fulltext'};
   my $fulltext = $section->{'fulltext'};

   # Grab the info
   @{$section}{ 'reservation_count', 'signal_count' }
      = $fulltext =~ m/^OS WAIT ARRAY INFO: reservation count $d, signal count $d$/m;
   @{$section}{ 'mutex_spin_waits', 'mutex_spin_rounds', 'mutex_os_waits' }
      = $fulltext =~ m/^Mutex spin waits $d, rounds $d, OS waits $d$/m;
   @{$section}{ 'rw_shared_spins', 'rw_shared_os_waits', 'rw_excl_spins', 'rw_excl_os_waits' }
      = $fulltext =~ m/^RW-shared spins $d, OS waits $d; RW-excl spins $d, OS waits $d$/m;
   if ( ! defined $section->{rw_shared_spins} ) {
      # Newer versions print the shared/excl stats on two separate lines,
      # with a "rounds" value we do not keep.
      @{$section}{ 'rw_shared_spins', 'rw_shared_os_waits'}
         = $fulltext =~ m/^RW-shared spins $d, rounds \d+, OS waits $d$/m;
      @{$section}{ 'rw_excl_spins', 'rw_excl_os_waits' }
         = $fulltext =~ m/^RW-excl spins $d, rounds \d+, OS waits $d$/m;
   }

   # Look for info on waits.
   my @waits = $fulltext =~ m/^(--Thread.*?)^(?=Mutex spin|--Thread)/gms;
   $section->{'waits'} = [ map { parse_wait_array($_, $complete, $debug) } @waits ];
   $section->{'wait_array_size'} = scalar(@waits);

   delete $section->{'fulltext'} unless $debug;
   return 1;
}
963 |
964 | # I've read the source for this section.
# Parse the BUFFER POOL AND MEMORY section: memory allocations, pool size,
# free/total/modified pages, page I/O counters and rates, hit rate, and
# pending read/write counts.  Returns 1 on success.
# NOTE(review): $d and $f are pattern fragments declared earlier in the
# file, and List::Util is presumably loaded earlier too -- confirm.
sub parse_bp_section {
   my ( $section, $complete, $debug, $full ) = @_;
   return unless $section && $section->{'fulltext'};
   my $fulltext = $section->{'fulltext'};

   # Grab the info
   @{$section}{ 'total_mem_alloc', 'add_pool_alloc' }
      = $fulltext =~ m/^Total memory allocated $d; in additional pool allocated $d$/m;
   @{$section}{'dict_mem_alloc'} = $fulltext =~ m/Dictionary memory allocated $d/;
   @{$section}{'awe_mem_alloc'} = $fulltext =~ m/$d MB of AWE memory/;
   @{$section}{'buf_pool_size'} = $fulltext =~ m/^Buffer pool size\s*$d$/m;
   @{$section}{'buf_free'} = $fulltext =~ m/^Free buffers\s*$d$/m;
   @{$section}{'pages_total'} = $fulltext =~ m/^Database pages\s*$d$/m;
   @{$section}{'pages_modified'} = $fulltext =~ m/^Modified db pages\s*$d$/m;
   @{$section}{'pages_read', 'pages_created', 'pages_written'}
      = $fulltext =~ m/^Pages read $d, created $d, written $d$/m;
   @{$section}{'page_reads_sec', 'page_creates_sec', 'page_writes_sec'}
      = $fulltext =~ m{^$f reads/s, $f creates/s, $f writes/s$}m;
   @{$section}{'buf_pool_hits', 'buf_pool_reads'}
      = $fulltext =~ m{Buffer pool hit rate $d / $d}m;
   # Quiet servers print a placeholder instead of a hit rate.
   if ($fulltext =~ m/^No buffer pool page gets since the last printout$/m) {
      @{$section}{'buf_pool_hits', 'buf_pool_reads'} = (0, 0);
      @{$section}{'buf_pool_hit_rate'} = '--';
   }
   else {
      @{$section}{'buf_pool_hit_rate'}
         = $fulltext =~ m{Buffer pool hit rate (\d+ / \d+)}m;
   }
   @{$section}{'reads_pending'} = $fulltext =~ m/^Pending reads $d/m;
   @{$section}{'writes_pending_lru', 'writes_pending_flush_list', 'writes_pending_single_page' }
      = $fulltext =~ m/^Pending writes: LRU $d, flush list $d, single page $d$/m;

   # Default the addends to 0 before summing, so the sum is always defined.
   map { $section->{$_} ||= 0 }
      qw(writes_pending_lru writes_pending_flush_list writes_pending_single_page
         awe_mem_alloc dict_mem_alloc);
   @{$section}{'writes_pending'} = List::Util::sum(
      @{$section}{ qw(writes_pending_lru writes_pending_flush_list writes_pending_single_page) });

   delete $section->{'fulltext'} unless $debug;
   return 1;
}
1006 |
1007 | # I've read the source for this.
# Parse the FILE I/O section: per-thread I/O state, pending aio/log/sync
# operations, pending flushes, OS file read/write/fsync counters and rates,
# and pending preads/pwrites.  Returns 1 on success.
# NOTE(review): $d, $f and $w are pattern fragments declared earlier in the
# file -- presumably capturing digit, float and word patterns.
sub parse_io_section {
   my ( $section, $complete, $debug, $full ) = @_;
   return unless $section && $section->{'fulltext'};
   my $fulltext = $section->{'fulltext'};
   $section->{'threads'} = {};

   # Grab the I/O thread info
   my @threads = $fulltext =~ m<^(I/O thread \d+ .*)$>gm;
   foreach my $thread (@threads) {
      my ( $tid, $state, $purpose, $event_set )
         = $thread =~ m{I/O thread $d state: (.+?) \((.*)\)(?: ev set)?$}m;
      if ( defined $tid ) {
         $section->{'threads'}->{$tid} = {
            thread    => $tid,
            state     => $state,
            purpose   => $purpose,
            event_set => $event_set ? 1 : 0,
         };
      }
   }

   # Grab the reads/writes/flushes info
   @{$section}{ 'pending_normal_aio_reads', 'pending_aio_writes' }
      = $fulltext =~ m/^Pending normal aio reads: $d(?: [^\]]*\])?, aio writes: $d/m;
   @{$section}{ 'pending_ibuf_aio_reads', 'pending_log_ios', 'pending_sync_ios' }
      = $fulltext =~ m{^ ibuf aio reads: $d, log i/o's: $d, sync i/o's: $d$}m;
   @{$section}{ 'flush_type', 'pending_log_flushes', 'pending_buffer_pool_flushes' }
      = $fulltext =~ m/^Pending flushes \($w\) log: $d; buffer pool: $d$/m;
   @{$section}{ 'os_file_reads', 'os_file_writes', 'os_fsyncs' }
      = $fulltext =~ m/^$d OS file reads, $d OS file writes, $d OS fsyncs$/m;
   @{$section}{ 'reads_s', 'avg_bytes_s', 'writes_s', 'fsyncs_s' }
      = $fulltext =~ m{^$f reads/s, $d avg bytes/read, $f writes/s, $f fsyncs/s$}m;
   @{$section}{ 'pending_preads', 'pending_pwrites' }
      = $fulltext =~ m/$d pending preads, $d pending pwrites$/m;
   # The preads/pwrites line is not printed on all platforms.
   @{$section}{ 'pending_preads', 'pending_pwrites' } = (0, 0)
      unless defined($section->{'pending_preads'});

   delete $section->{'fulltext'} unless $debug;
   return 1;
}
1048 |
# Report a parse problem: fatal (die) when $debug is set, a warning
# otherwise.  Returns 1 when it returns at all.
sub _debug {
   my ( $is_fatal, $message ) = @_;
   die $message if $is_fatal;
   warn $message;
   return 1;
}
1059 |
1060 | 1;
1061 |
1062 | # end_of_package
1063 | # ############################################################################
1064 | # Perldoc section. I put this last as per the Dog book.
1065 | # ############################################################################
1066 | =pod
1067 |
1068 | =head1 NAME
1069 |
1070 | InnoDBParser - Parse InnoDB monitor text.
1071 |
1072 | =head1 DESCRIPTION
1073 |
1074 | InnoDBParser tries to parse the output of the InnoDB monitor. One way to get
1075 | this output is to connect to a MySQL server and issue the command SHOW ENGINE
1076 | INNODB STATUS (omit 'ENGINE' on earlier versions of MySQL). The goal is to
1077 | turn text into data that something else (e.g. innotop) can use.
1078 |
1079 | The output comes from all over, but the place to start in the source is
1080 | innobase/srv/srv0srv.c.
1081 |
1082 | =head1 SYNOPSIS
1083 |
1084 | use InnoDBParser;
1085 | use DBI;
1086 |
1087 | # Get the status text.
1088 | my $dbh = DBI->connect(
1089 | "DBI::mysql:test;host=localhost",
1090 | 'user',
1091 | 'password'
1092 | );
1093 | my $query = 'SHOW /*!5 ENGINE */ INNODB STATUS';
1094 | my $text = $dbh->selectcol_arrayref($query)->[0];
1095 |
1096 | # 1 or 0
1097 | my $debug = 1;
1098 |
1099 | # Choose sections of the monitor text you want. Possible values:
1100 | # TRANSACTIONS => tx
1101 | # BUFFER POOL AND MEMORY => bp
1102 | # SEMAPHORES => sm
1103 | # LOG => lg
1104 | # ROW OPERATIONS => ro
1105 | # INSERT BUFFER AND ADAPTIVE HASH INDEX => ib
1106 | # FILE I/O => io
1107 | # LATEST DETECTED DEADLOCK => dl
1108 | # LATEST FOREIGN KEY ERROR => fk
1109 |
1110 | my $required_sections = {
1111 | tx => 1,
1112 | };
1113 |
1114 | # Parse the status text.
1115 | my $parser = InnoDBParser->new;
1116 | $innodb_status = $parser->parse_status_text(
1117 | $text,
1118 | $debug,
1119 | # Omit the following parameter to get all sections.
1120 | $required_sections,
1121 | );
1122 |
1123 | =head1 COPYRIGHT, LICENSE AND WARRANTY
1124 |
1125 | This package is copyright (c) 2006 Baron Schwartz, baron at xaprb dot com.
1126 | Feedback and improvements are gratefully received.
1127 |
1128 | THIS PACKAGE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
1129 | WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
1131 |
1132 | This program is free software; you can redistribute it and/or modify it under
1133 | the terms of the GNU General Public License as published by the Free Software
1134 | Foundation, version 2; OR the Perl Artistic License. On UNIX and similar
1135 | systems, you can issue `man perlgpl' or `man perlartistic' to read these
1136 | licenses.
1137 |
1138 | You should have received a copy of the GNU General Public License along with
1139 | this program; if not, write to the Free Software Foundation, Inc., 59 Temple
1140 | Place, Suite 330, Boston, MA 02111-1307 USA
1141 |
1142 | =head1 AUTHOR
1143 |
1144 | Baron Schwartz, baron at xaprb dot com.
1145 |
1146 | =head1 BUGS
1147 |
1148 | None known, but I bet there are some. The InnoDB monitor text wasn't really
1149 | designed to be parsable.
1150 |
1151 | =head1 SEE ALSO
1152 |
1153 | innotop - a program that can format the parsed status information for humans
1154 | to read and enjoy.
1155 |
1156 | =cut
1157 |
--------------------------------------------------------------------------------
/JMX.pm:
--------------------------------------------------------------------------------
1 | package Collectd::Plugin::JMX;
2 | use Collectd qw(:all);
3 | #WARNING WIP DOES NOT WORK YET
4 | =head1 NAME
5 |
6 | Collectd::Plugins::JMX - Monitor JMX stats from within perl from within Collectd with an embedded java JVM. Fun!
7 |
8 | =head1 VERSION
9 |
10 | Version 1
11 |
12 | =cut
13 |
14 | our $VERSION = '1';
15 |
16 |
17 | =head1 SYNOPSIS
18 |
19 | This is a collectd plugin for monitoring JMX stats. In progress!
20 |
21 | In your collectd config:
22 |
23 |
24 | Globals true
25 |
26 |
27 |
28 | BaseName "Collectd::Plugins"
29 | LoadPlugin "JMX"
30 |
31 |
32 | ... TBD ...
33 |
34 |
35 |
36 | =head1 AUTHOR
37 |
38 | Mark Steele, C<< >>, heavily inspired/code borrowed from tcollector
39 |
40 | =cut
41 |
42 | ## Shameless rip from here: https://github.com/tsuna/tcollector/blob/master/collectors/0/hadoop_datanode_jmx.py
43 | ## and https://github.com/tsuna/tcollector/blob/master/stumbleupon/monitoring/jmx.java
44 |
45 |
46 | use Inline Java => <<'END_OF_JAVA_CODE', AUTOSTUDY => 1, CLASSPATH=> '/usr/java/jdk1.7.0_45/lib/tools.jar',PACKAGE => 'main';
47 | //
48 | //
49 | // This file is part of OpenTSDB.
50 | // Copyright (C) 2010 The tcollector Authors.
51 | //
52 | // This program is free software: you can redistribute it and/or modify it
53 | // under the terms of the GNU Lesser General Public License as published by
54 | // the Free Software Foundation, either version 3 of the License, or (at your
55 | // option) any later version. This program is distributed in the hope that it
56 | // will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
57 | // of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
58 | // General Public License for more details. You should have received a copy
59 | // of the GNU Lesser General Public License along with this program. If not,
60 | // see .
61 |
62 | /** Quick CLI tool to get JMX MBean attributes. */
63 |
64 | import java.io.File;
65 | import java.io.IOException;
66 | import java.util.ArrayList;
67 | import java.util.Collection;
68 | import java.util.Collections;
69 | import java.util.Comparator;
70 | import java.util.HashMap;
71 | import java.util.Iterator;
72 | import java.util.Map;
73 | import java.util.Set;
74 | import java.util.TreeMap;
75 | import java.util.regex.Pattern;
76 | import java.util.regex.PatternSyntaxException;
77 |
78 | import javax.management.MBeanAttributeInfo;
79 | import javax.management.MBeanInfo;
80 | import javax.management.MBeanServerConnection;
81 | import javax.management.ObjectName;
82 | import javax.management.openmbean.TabularData;
83 | import javax.management.remote.JMXConnector;
84 | import javax.management.remote.JMXConnectorFactory;
85 | import javax.management.remote.JMXServiceURL;
86 |
87 | // Composite Data
88 | import javax.management.openmbean.CompositeDataSupport;
89 | import javax.management.openmbean.CompositeType;
90 | import javax.management.openmbean.OpenType;
91 |
92 | // Sun specific
93 | import com.sun.tools.attach.AgentInitializationException;
94 | import com.sun.tools.attach.AgentLoadException;
95 | import com.sun.tools.attach.AttachNotSupportedException;
96 | import com.sun.tools.attach.VirtualMachine;
97 | import com.sun.tools.attach.VirtualMachineDescriptor;
98 |
99 | // Sun private
100 | import sun.management.ConnectorAddressLink;
101 | import sun.jvmstat.monitor.HostIdentifier;
102 | import sun.jvmstat.monitor.MonitoredHost;
103 | import sun.jvmstat.monitor.MonitoredVm;
104 | import sun.jvmstat.monitor.MonitoredVmUtil;
105 | import sun.jvmstat.monitor.VmIdentifier;
106 |
107 |
108 | final class jmx {
109 |
110 | private static final String LOCAL_CONNECTOR_ADDRESS =
111 | "com.sun.management.jmxremote.localConnectorAddress";
112 |
  /**
   * Prints the command-line help text on stdout.
   * NOTE(review): the argument placeholders in this help text appear to have
   * been stripped by the listing (e.g. "jmx Lists all MBeans for this JVM");
   * the wording is preserved as-is — confirm against the upstream source.
   */
  private static void usage() {
    System.out.println("Usage:\n"
      + " jmx -l Lists all reachable VMs.\n"
      + " jmx Lists all MBeans for this JVM (PID or regexp).\n"
      + " jmx Prints all the attributes of this MBean.\n"
      + " jmx Prints the matching attributes of this MBean.\n"
      + "\n"
      + "You can pass multiple pairs to match multiple different\n"
      + "attributes for different MBeans. For example:\n"
      + " jmx --long JConsole Class Count Thread Total Garbage Collection\n"
      + " LoadedClassCount 2808 java.lang:type=ClassLoading\n"
      + " UnloadedClassCount 0 java.lang:type=ClassLoading\n"
      + " TotalLoadedClassCount 2808 java.lang:type=ClassLoading\n"
      + " CollectionCount 0 java.lang:type=GarbageCollector,name=ConcurrentMarkSweep\n"
      + " CollectionTime 0 java.lang:type=GarbageCollector,name=ConcurrentMarkSweep\n"
      + " CollectionCount 1 java.lang:type=GarbageCollector,name=ParNew\n"
      + " CollectionTime 19 java.lang:type=GarbageCollector,name=ParNew\n"
      + " TotalStartedThreadCount 43 java.lang:type=Threading\n"
      + "The command above searched for a JVM with `JConsole' in its name, and then searched\n"
      + "for MBeans with `Class' in the name and `Count' in the attribute (first 3 matches\n"
      + "in this output), MBeans with `Thread' in the name and `Total' in the attribute (last\n"
      + "line in the output) and MBeans matching `Garbage' with a `Collection' attribute.\n"
      + "\n"
      + "Other flags you can pass:\n"
      + " --long Print a longer but more explicit output for each value.\n"
      + " --timestamp Print a timestamp at the beginning of each line.\n"
      + " --watch N Reprint the output every N seconds.\n"
      + "\n"
      + "Return value:\n"
      + " 0: Everything OK.\n"
      + " 1: Invalid usage or unexpected error.\n"
      + " 2: No JVM matched.\n"
      + " 3: No MBean matched.\n"
      + " 4: No attribute matched for the MBean(s) selected.");
  }
148 |
  /**
   * Prints {@code errmsg} on stderr and terminates the process with exit
   * status {@code rv}.  The trailing AssertionError is unreachable (System.exit
   * never returns); it lets callers follow a fatal() call with {@code return}
   * without the compiler complaining about missing values.
   */
  private static void fatal(final int rv, final String errmsg) {
    System.err.println(errmsg);
    System.exit(rv);
    throw new AssertionError("You should never see this, really.");
  }
154 |
  /**
   * CLI entry point: parses the flags (--watch/--long/--timestamp), resolves
   * the target JVM, then either lists MBeans or dumps the selected attributes,
   * optionally repeating every `watch` seconds.  Exit statuses are documented
   * in usage().
   *
   * NOTE(review): generic type parameters (e.g. HashMap&lt;Integer, JVM&gt;,
   * TreeMap&lt;ObjectName, Pattern&gt;, Map.Entry&lt;ObjectName, Pattern&gt;) appear to have
   * been stripped from this listing; as written, the raw entry.getKey()/
   * entry.getValue() assignments below will not compile — confirm against the
   * upstream source.
   */
  public static void main(final String[] args) throws Exception {
    if (args.length == 0 || "-h".equals(args[0]) || "--help".equals(args[0])) {
      usage();
      // No arguments at all is an error (status 1); explicit -h/--help is not.
      System.exit(args.length == 0 ? 1 : 0);
      return;
    }

    // Consume leading option flags; stop at the first non-flag argument.
    int current_arg = 0;
    int watch = 0;
    boolean long_output = false;
    boolean print_timestamps = false;
    while (current_arg < args.length) {
      if ("--watch".equals(args[current_arg])) {
        current_arg++;
        try {
          watch = Integer.parseInt(args[current_arg]);
        } catch (NumberFormatException e) {
          fatal(1, "Invalid value for --watch: " + e.getMessage());
          return;
        }
        if (watch < 1) {
          fatal(1, "Invalid value for --watch: " + watch);
        }
        current_arg++;
      } else if ("--long".equals(args[current_arg])) {
        long_output = true;
        current_arg++;
      } else if ("--timestamp".equals(args[current_arg])) {
        print_timestamps = true;
        current_arg++;
      } else {
        break;
      }
    }

    if (current_arg == args.length) {
      usage();
      fatal(1, "error: Missing argument (-l or JVM specification).");
      return;
    }

    HashMap vms = getJVMs();
    if ("-l".equals(args[current_arg])) {
      printVmList(vms.values());
      return;
    }

    final JVM jvm = selectJVM(args[current_arg++], vms);
    vms = null;  // allow the VM map to be garbage collected
    final JMXConnector connection = JMXConnectorFactory.connect(jvm.jmxUrl());
    try {
      final MBeanServerConnection mbsc = connection.getMBeanServerConnection();
      // JVM selector with no mbean/attribute patterns: just list the MBeans.
      if (args.length == current_arg) {
        for (final ObjectName mbean : listMBeans(mbsc)) {
          System.out.println(mbean);
        }
        return;
      }

      final TreeMap objects = selectMBeans(args, current_arg, mbsc);
      if (objects.isEmpty()) {
        fatal(3, "No MBean matched your query in " + jvm.name());
        return;
      }
      // do/while so that watch == 0 still yields exactly one pass.
      do {
        boolean found = false;
        for (final Map.Entry entry : objects.entrySet()) {
          final ObjectName object = entry.getKey();
          final MBeanInfo mbean = mbsc.getMBeanInfo(object);
          final Pattern wanted = entry.getValue();
          for (final MBeanAttributeInfo attr : mbean.getAttributes()) {
            // A null pattern means "dump every attribute of this MBean".
            if (wanted == null || wanted.matcher(attr.getName()).find()) {
              dumpMBean(long_output, print_timestamps, mbsc, object, attr);
              found = true;
            }
          }
        }
        if (!found) {
          fatal(4, "No attribute of " + objects.keySet()
                + " matched your query in " + jvm.name());
          return;
        }
        System.out.flush();
        Thread.sleep(watch * 1000);
      } while (watch > 0);
    } finally {
      connection.close();
    }
  }
244 |
245 | private static TreeMap selectMBeans(final String[] args,
246 | final int current_arg,
247 | final MBeanServerConnection mbsc) throws IOException {
248 | final TreeMap mbeans = new TreeMap();
249 | for (int i = current_arg; i < args.length; i += 2) {
250 | final Pattern object_re = compile_re(args[i]);
251 | final Pattern attr_re = i + 1 < args.length ? compile_re(args[i + 1]) : null;
252 | for (final ObjectName o : listMBeans(mbsc)) {
253 | if (object_re.matcher(o.toString()).find()) {
254 | mbeans.put(o, attr_re);
255 | }
256 | }
257 | }
258 | return mbeans;
259 | }
260 |
  /**
   * Reads one attribute from the MBean server and prints it via
   * dumpMBeanValue().  TabularData values are flattened to "name.0", "name.1",
   * ... over the table's key set; CompositeData values expand to "name.item"
   * per composite item.  Attributes that fail to read are silently skipped.
   */
  private static void dumpMBean(final boolean long_output,
                                final boolean print_timestamps,
                                final MBeanServerConnection mbsc,
                                final ObjectName object,
                                final MBeanAttributeInfo attr) throws Exception {
    final String name = attr.getName();
    Object value = null;
    try {
      value = mbsc.getAttribute(object, name);
    } catch (Exception e) {
      // Above may raise errors for some attributes like
      // CollectionUsage
      return;
    }
    if (value instanceof TabularData) {
      // NOTE(review): this dumps the table's *keys* (keySet elements), not the
      // row values — presumably intentional upstream; confirm.
      final TabularData tab = (TabularData) value;
      int i = 0;
      for (final Object o : tab.keySet()) {
        dumpMBeanValue(long_output, print_timestamps, object, name + "." + i, o);
        i++;
      }
    } else if (value instanceof CompositeDataSupport){
      CompositeDataSupport cds = (CompositeDataSupport) value;
      CompositeType ct = cds.getCompositeType();
      for (final String item: ct.keySet()){
        dumpMBeanValue(long_output, print_timestamps, object, name + "." + item, cds.get(item));
      }
    } else {
      dumpMBeanValue(long_output, print_timestamps, object, name, value);
    }
  }
292 |
293 | private static void dumpMBeanValue(final boolean long_output,
294 | final boolean print_timestamps,
295 | final ObjectName object,
296 | final String name,
297 | final Object value) {
298 | // Ignore non numeric values
299 | if ((value instanceof String)||
300 | (value instanceof String[])||
301 | (value instanceof Boolean)) {
302 | return;
303 | }
304 | final StringBuilder buf = new StringBuilder();
305 | final long timestamp = System.currentTimeMillis() / 1000;
306 | if (print_timestamps) {
307 | buf.append(timestamp).append('\t');
308 | }
309 | if (value instanceof Object[]) {
310 | for (final Object o : (Object[]) value) {
311 | buf.append(o).append('\t');
312 | }
313 | if (buf.length() > 0) {
314 | buf.setLength(buf.length() - 1);
315 | }
316 | } else {
317 | buf.append(name).append('\t').append(value);
318 | }
319 | if (long_output) {
320 | buf.append('\t').append(object);
321 | }
322 | buf.append('\n');
323 | System.out.print(buf);
324 | }
325 |
326 | private static ArrayList listMBeans(final MBeanServerConnection mbsc) throws IOException {
327 | ArrayList mbeans = new ArrayList(mbsc.queryNames(null, null));
328 | Collections.sort(mbeans, new Comparator() {
329 | public int compare(final ObjectName a, final ObjectName b) {
330 | return a.toString().compareTo(b.toString());
331 | }
332 | });
333 | return mbeans;
334 | }
335 |
  /**
   * Compiles {@code re} into a Pattern; on an invalid regexp the process
   * exits with status 1 (the throw after fatal() is unreachable).
   */
  private static Pattern compile_re(final String re) {
    try {
      return Pattern.compile(re);
    } catch (PatternSyntaxException e) {
      fatal(1, "Invalid regexp: " + re + ", " + e.getMessage());
      throw new AssertionError("Should never be here");
    }
  }
344 |
  // Marker system property set in our own JVM so regexp matching can exclude it.
  private static final String MAGIC_STRING = "this.is.jmx.magic";

  /**
   * Resolves a user-supplied selector — a numeric PID first, otherwise a
   * regexp on the JVM command line — to exactly one JVM from {@code vms}.
   * Exits with status 2 when zero or more than one JVM matches.
   *
   * NOTE(review): generic type parameters (HashMap&lt;Integer, JVM&gt;,
   * ArrayList&lt;JVM&gt;, Iterator&lt;JVM&gt;) appear stripped from this listing; the raw
   * vms.values()/it.next() assignments below will not compile as written.
   */
  private static JVM selectJVM(final String selector,
                               final HashMap vms) {
    String error = null;
    try {
      // First interpretation: the selector is a PID.
      final int pid = Integer.parseInt(selector);
      if (pid < 2) {
        throw new IllegalArgumentException("Invalid PID: " + pid);
      }
      final JVM jvm = vms.get(pid);
      if (jvm != null) {
        return jvm;
      }
      error = "Couldn't find a JVM with PID " + pid;
    } catch (NumberFormatException e) {
      /* Ignore. */
    }
    if (error == null) {
      try {
        // Second interpretation: the selector is a regexp on the JVM name.
        final Pattern p = compile_re(selector);
        final ArrayList matches = new ArrayList(2);
        for (final JVM jvm : vms.values()) {
          if (p.matcher(jvm.name()).find()) {
            matches.add(jvm);
          }
        }
        // Exclude ourselves from the matches.
        System.setProperty(MAGIC_STRING,
            "LOL Java processes can't get their own PID");
        final String me = jmx.class.getName();
        final Iterator it = matches.iterator();
        while (it.hasNext()) {
          final JVM jvm = it.next();
          final String name = jvm.name();
          // Ignore other long running jmx clients too.
          if (name.contains("--watch") && name.contains(me)) {
            it.remove();
            continue;
          }
          // Attach to each candidate and drop the one carrying our marker
          // property — that one is us.
          final VirtualMachine vm = VirtualMachine.attach(String.valueOf(jvm.pid()));
          try {
            if (vm.getSystemProperties().containsKey(MAGIC_STRING)) {
              it.remove();
              continue;
            }
          } finally {
            vm.detach();
          }
        }
        System.clearProperty(MAGIC_STRING);
        if (matches.size() == 0) {
          error = "No JVM matched your regexp " + selector;
        } else if (matches.size() > 1) {
          printVmList(matches);
          error = matches.size() + " JVMs matched your regexp " + selector
                + ", it's too ambiguous, please refine it.";
        } else {
          return matches.get(0);
        }
      } catch (PatternSyntaxException e) {
        error = "Invalid pattern: " + selector + ", " + e.getMessage();
      } catch (Exception e) {
        e.printStackTrace();
        error = "Unexpected Exception: " + e.getMessage();
      }
    }
    fatal(2, error);
    return null;
  }
415 |
416 | private static void printVmList(final Collection vms) {
417 | final ArrayList sorted_vms = new ArrayList(vms);
418 | Collections.sort(sorted_vms, new Comparator() {
419 | public int compare(final JVM a, final JVM b) {
420 | return a.pid() - b.pid();
421 | }
422 | });
423 | for (final JVM jvm : sorted_vms) {
424 | System.out.println(jvm.pid() + "\t" + jvm.name());
425 | }
426 | }
427 |
  /** Value object describing one discovered JVM: PID, display name and JMX connector address. */
  private static final class JVM {
    final int pid;
    final String name;
    String address;  // JMX connector address; null until the management agent is started

    public JVM(final int pid, final String name, final String address) {
      if (name.isEmpty()) {
        throw new IllegalArgumentException("empty name");
      }
      this.pid = pid;
      this.name = name;
      this.address = address;
    }

    public int pid() {
      return pid;
    }

    public String name() {
      return name;
    }

    /** Returns the JMX service URL, starting the target's management agent on demand. */
    public JMXServiceURL jmxUrl() {
      if (address == null) {
        ensureManagementAgentStarted();
      }
      try {
        return new JMXServiceURL(address);
      } catch (Exception e) {
        throw new RuntimeException("Error", e);
      }
    }

    /**
     * Attaches to the target VM and loads the JDK's management-agent.jar so it
     * exposes a local JMX connector address; no-op when already started.
     * All failures are rethrown as RuntimeException.
     */
    public void ensureManagementAgentStarted() {
      if (address != null) { // already started
        return;
      }
      VirtualMachine vm;
      try {
        vm = VirtualMachine.attach(String.valueOf(pid));
      } catch (AttachNotSupportedException e) {
        throw new RuntimeException("Failed to attach to " + this, e);
      } catch (IOException e) {
        throw new RuntimeException("Failed to attach to " + this, e);
      }
      try {
        // java.sun.com/javase/6/docs/technotes/guides/management/agent.html#gdhkz
        // + code mostly stolen from JConsole's code.
        final String home = vm.getSystemProperties().getProperty("java.home");

        // Normally in ${java.home}/jre/lib/management-agent.jar but might
        // be in ${java.home}/lib in build environments.

        String agent = home + File.separator + "jre" + File.separator
          + "lib" + File.separator + "management-agent.jar";
        File f = new File(agent);
        if (!f.exists()) {
          agent = home + File.separator + "lib" + File.separator
            + "management-agent.jar";
          f = new File(agent);
          if (!f.exists()) {
            throw new RuntimeException("Management agent not found");
          }
        }

        agent = f.getCanonicalPath();
        try {
          vm.loadAgent(agent, "com.sun.management.jmxremote");
        } catch (AgentLoadException e) {
          throw new RuntimeException("Failed to load the agent into " + this, e);
        } catch (AgentInitializationException e) {
          throw new RuntimeException("Failed to initialize the agent into " + this, e);
        }
        address = (String) vm.getAgentProperties().get(LOCAL_CONNECTOR_ADDRESS);
      } catch (IOException e) {
        throw new RuntimeException("Error while loading agent into " + this, e);
      } finally {
        // Always detach, even when agent loading failed above.
        try {
          vm.detach();
        } catch (IOException e) {
          throw new RuntimeException("Failed to detach from " + vm + " = " + this, e);
        }
      }
      if (address == null) {
        throw new RuntimeException("Couldn't start the management agent.");
      }
    }

    public String toString() {
      return "JVM(" + pid + ", \"" + name + "\", "
            + (address == null ? null : '"' + address + '"') + ')';
    }
  }
521 |
522 | /**
523 | * Returns a map from PID to JVM.
524 | */
525 | private static HashMap getJVMs() throws Exception {
526 | final HashMap vms = new HashMap();
527 | getMonitoredVMs(vms);
528 | getAttachableVMs(vms);
529 | return vms;
530 | }
531 |
  /**
   * Populates {@code out} with the JVMs discoverable through the jvmstat
   * (sun.jvmstat.monitor) API on the local host; failures for individual VMs
   * are logged to stderr and skipped.
   *
   * NOTE(review): the raw {@code Set} iterated as {@code Integer} suggests the
   * generic parameters (Set&lt;Integer&gt;, HashMap&lt;Integer, JVM&gt;) were stripped
   * from this listing — confirm against the upstream source.
   */
  private static void getMonitoredVMs(final HashMap out) throws Exception {
    final MonitoredHost host =
      MonitoredHost.getMonitoredHost(new HostIdentifier((String) null));
    @SuppressWarnings("unchecked")
    final Set vms = host.activeVms();
    for (final Integer pid : vms) {
      try {
        final VmIdentifier vmid = new VmIdentifier(pid.toString());
        final MonitoredVm vm = host.getMonitoredVm(vmid);
        out.put(pid, new JVM(pid, MonitoredVmUtil.commandLine(vm),
                             ConnectorAddressLink.importFrom(pid)));
        vm.detach();
      } catch (Exception x) {
        // Best effort: one unreadable VM should not abort the whole scan.
        System.err.println("Ignoring exception:");
        x.printStackTrace();
      }
    }
  }
550 |
  /**
   * Populates {@code out} with attachable JVMs found via the Attach API
   * (VirtualMachine.list()), skipping PIDs already present in {@code out}
   * and VMs that refuse or fail attachment.  For these VMs only the PID is
   * available, so it doubles as the display name.
   */
  private static void getAttachableVMs(final HashMap out) {
    for (final VirtualMachineDescriptor vmd : VirtualMachine.list()) {
      int pid;
      try {
        pid = Integer.parseInt(vmd.id());
      } catch (NumberFormatException e) {
        System.err.println("Ignoring invalid vmd.id(): " + vmd.id()
                           + ' ' + e.getMessage());
        continue;
      }
      if (out.containsKey(pid)) {
        continue;
      }
      try {
        final VirtualMachine vm = VirtualMachine.attach(vmd);
        out.put(pid, new JVM(pid, String.valueOf(pid),
                             (String) vm.getAgentProperties().get(LOCAL_CONNECTOR_ADDRESS)));
        vm.detach();
      } catch (AttachNotSupportedException e) {
        System.err.println("VM not attachable: " + vmd.id()
                           + ' ' + e.getMessage());
      } catch (IOException e) {
        System.err.println("Could not attach: " + vmd.id()
                           + ' ' + e.getMessage());
      }
    }
  }
578 |
579 | }
580 | END_OF_JAVA_CODE
581 |
# Example argument vector for the embedded jmx tool: select the hadoop DataNode
# JVM and dump Threading/OperatingSystem/GarbageCollector attribute groups.
# Currently only used by the commented-out smoke-test calls below.
my @options = ('--long','--timestamp','DataNode','hadoop','','Threading','Count|Time$','OperatingSystem','OpenFile','GarbageCollector','Collection');
#print jmx->main(\@options);
#print jmx->main(${('-l')});
# Placeholder config value for the (not yet implemented) plugin callbacks below.
my $blah;
586 |
587 | #plugin_register (TYPE_READ, 'JMX', 'jmx_read');
588 | #plugin_register (TYPE_CONFIG, "JMX", "jmx_config");
589 | #
590 | #sub jmx_config {
591 | # my ($ci) = @_;
592 | # foreach my $item (@{$ci->{'children'}}) {
593 | # my $key = lc($item->{'key'});
594 | # my $val = $item->{'values'}->[0];
595 | # if ($key eq 'blah' ) {
596 | # $blah = $val;
597 | # }
598 | # }
599 | # return 1;
600 | #}
601 | #
602 | #sub jmx_read
603 | #{
604 | #
605 | # my $vl = {};
606 | # $vl->{'plugin'} = 'rabbitmq';
607 | # $vl->{'type'} = 'rabbitmq';
608 | #
609 | # foreach my $result (@{$ref}) {
610 | # $vl->{'plugin_instance'} = $result->{'vhost'};
611 | # $vl->{'type_instance'} = $result->{'name'};
612 | # $vl->{'plugin_instance'} =~ s#[/-]#_#g;
613 | # $vl->{'type_instance'} =~ s#[/-]#_#g;
614 | # $vl->{'values'} = [
615 | # $result->{'messages'} ? $result->{'messages'} : 0,
616 | # ];
617 | # plugin_log(LOG_ERR, "RabbitMQ: dispatching stats for " . $result->{'vhost'} . '/' . $result->{'name'});
618 | # plugin_dispatch_values($vl);
619 | # }
620 | # plugin_log(LOG_ERR, "RabbitMQ: done processing results");
621 | # return 1;
622 | #}
623 | #
624 | #1;
625 |
--------------------------------------------------------------------------------
/MySQL.pm:
--------------------------------------------------------------------------------
1 | package Collectd::Plugin::MySQL;
2 | use Collectd qw(:all);
3 | use DBD::mysql;
4 | require InnoDBParser;
5 |
6 | =head1 NAME
7 |
8 | Collectd::Plugins::MySQL - Monitor a mysql server
9 |
10 | =head1 VERSION
11 |
12 | Version 1
13 |
14 | =cut
15 |
16 | our $VERSION = '1';
17 |
18 |
19 | =head1 SYNOPSIS
20 |
21 | This is a collectd plugin for monitoring a mysql server
22 |
23 | In your collectd config:
24 |
25 |
26 | Globals true
27 |
28 |
29 |
30 | BaseName "Collectd::Plugins"
31 | LoadPlugin "MySQL"
32 |
33 |
34 |
35 | Host "localhost"
36 | Port "3306"
37 | User "root"
38 | Pass "mypass"
39 |
40 |
41 | Socket "/var/mysql/mysql.sock"
42 | User "root"
43 | Pass "mypass"
44 |
45 |
46 |
47 |
48 | =head1 AUTHOR
49 |
50 | Mark Steele, C<< >>
51 |
52 | =cut
53 |
# Configured database connections.
# NOTE(review): presumably populated by the plugin's config callback, which is
# outside this chunk — confirm.
my $databases = {};

# Metric-key lookup; empty at load time.
# NOTE(review): appears to be filled from the key lists below — confirm.
my %keys = ();
# SHOW GLOBAL STATUS variable names (lowercased) that this plugin dispatches,
# covering core MySQL counters plus Percona/XtraDB, Galera (wsrep_*) and
# thread-pool extensions.
my @status_keys = qw(
aborted_clients aborted_connects binlog_cache_disk_use binlog_cache_use binlog_commits binlog_group_commits binlog_stmt_cache_disk_use binlog_stmt_cache_use bytes_received bytes_sent
com_admin_commands com_alter_db com_alter_db_upgrade com_alter_event com_alter_function com_alter_procedure com_alter_server com_alter_table com_alter_tablespace com_analyze com_assign_to_keycache
com_begin com_binlog com_call_procedure com_change_db com_change_master com_check com_checksum com_commit com_create_db com_create_event com_create_function com_create_index com_create_procedure
com_create_server com_create_table com_create_trigger com_create_udf com_create_user com_create_view com_dealloc_sql com_delete com_delete_multi com_do com_drop_db com_drop_event com_drop_function
com_drop_index com_drop_procedure com_drop_server com_drop_table com_drop_trigger com_drop_user com_drop_view com_empty_query com_execute_sql com_flush com_grant com_ha_close com_ha_open com_ha_read
com_help com_insert com_insert_select com_install_plugin com_kill com_load com_lock_tables com_optimize com_preload_keys com_prepare_sql com_purge com_purge_before_date com_release_savepoint
com_rename_table com_rename_user com_repair com_replace com_replace_select com_reset com_resignal com_revoke com_revoke_all com_rollback com_rollback_to_savepoint com_savepoint com_select com_set_option
com_show_authors com_show_binlog_events com_show_binlogs com_show_charsets com_show_client_statistics com_show_collations com_show_contributors com_show_create_db com_show_create_event
com_show_create_func com_show_create_proc com_show_create_table com_show_create_trigger com_show_databases com_show_engine_logs com_show_engine_mutex com_show_engine_status com_show_errors
com_show_events com_show_fields com_show_function_status com_show_grants com_show_index_statistics com_show_keys com_show_master_status com_show_open_tables com_show_plugins com_show_privileges
com_show_procedure_status com_show_processlist com_show_profile com_show_profiles com_show_relaylog_events com_show_slave_hosts com_show_slave_status com_show_slave_status_nolock com_show_status
com_show_storage_engines com_show_table_statistics com_show_table_status com_show_tables com_show_temporary_tables com_show_thread_statistics com_show_triggers com_show_user_statistics
com_show_variables com_show_warnings com_signal com_slave_start com_slave_stop com_stmt_close com_stmt_execute com_stmt_fetch com_stmt_prepare com_stmt_reprepare com_stmt_reset com_stmt_send_long_data
com_truncate com_uninstall_plugin com_unlock_tables com_update com_update_multi com_xa_commit com_xa_end com_xa_prepare com_xa_recover com_xa_rollback com_xa_start compression connections
created_tmp_disk_tables created_tmp_files created_tmp_tables delayed_errors delayed_insert_threads delayed_writes flashcache_enabled flush_commands handler_commit handler_delete handler_discover
handler_prepare handler_read_first handler_read_key handler_read_last handler_read_next handler_read_prev handler_read_rnd handler_read_rnd_next handler_rollback handler_savepoint
handler_savepoint_rollback handler_update handler_write innodb_adaptive_hash_cells innodb_adaptive_hash_hash_searches innodb_adaptive_hash_heap_buffers innodb_adaptive_hash_non_hash_searches
innodb_background_log_sync innodb_buffer_pool_pages_lru_flushed innodb_buffer_pool_pages_data innodb_buffer_pool_pages_dirty innodb_buffer_pool_pages_flushed innodb_buffer_pool_pages_free
innodb_buffer_pool_pages_made_not_young innodb_buffer_pool_pages_made_young innodb_buffer_pool_pages_misc innodb_buffer_pool_pages_old innodb_buffer_pool_pages_total innodb_buffer_pool_read_ahead
innodb_buffer_pool_read_ahead_evicted innodb_buffer_pool_read_ahead_rnd innodb_buffer_pool_read_requests innodb_buffer_pool_reads innodb_buffer_pool_wait_free innodb_buffer_pool_write_requests
innodb_checkpoint_age innodb_checkpoint_max_age innodb_checkpoint_target_age innodb_current_row_locks innodb_data_fsyncs innodb_data_pending_fsyncs innodb_data_pending_reads innodb_data_pending_writes
innodb_data_read innodb_data_reads innodb_data_writes innodb_data_written innodb_dblwr_pages_written innodb_dblwr_writes innodb_deadlocks innodb_dict_tables innodb_have_atomic_builtins
innodb_history_list_length innodb_ibuf_discarded_delete_marks innodb_ibuf_discarded_deletes innodb_ibuf_discarded_inserts innodb_ibuf_free_list innodb_ibuf_merged_delete_marks innodb_ibuf_merged_deletes
innodb_ibuf_merged_inserts innodb_ibuf_merges innodb_ibuf_segment_size innodb_ibuf_size innodb_log_waits innodb_log_write_requests innodb_log_writes innodb_lsn_current innodb_lsn_flushed
innodb_lsn_last_checkpoint innodb_master_thread_10_second_loops innodb_master_thread_1_second_loops innodb_master_thread_background_loops innodb_master_thread_main_flush_loops
innodb_master_thread_sleeps innodb_max_trx_id innodb_mem_adaptive_hash innodb_mem_dictionary innodb_mem_total innodb_mutex_os_waits innodb_mutex_spin_rounds innodb_mutex_spin_waits
innodb_oldest_view_low_limit_trx_id innodb_os_log_fsyncs innodb_os_log_pending_fsyncs innodb_os_log_pending_writes innodb_os_log_written innodb_page_size innodb_pages_created innodb_pages_read
innodb_pages_written innodb_purge_trx_id innodb_purge_undo_no innodb_row_lock_current_waits innodb_row_lock_time innodb_row_lock_time_avg innodb_row_lock_time_max innodb_row_lock_waits
innodb_rows_deleted innodb_rows_inserted innodb_rows_read innodb_rows_updated innodb_s_lock_os_waits innodb_s_lock_spin_rounds innodb_s_lock_spin_waits innodb_truncated_status_writes
innodb_x_lock_os_waits innodb_x_lock_spin_rounds innodb_x_lock_spin_waits key_blocks_not_flushed key_blocks_unused key_blocks_used key_read_requests key_reads key_write_requests key_writes
last_query_cost max_used_connections not_flushed_delayed_rows open_files open_streams open_table_definitions open_tables opened_files opened_table_definitions opened_tables
performance_schema_cond_classes_lost performance_schema_cond_instances_lost performance_schema_file_classes_lost performance_schema_file_handles_lost performance_schema_file_instances_lost
performance_schema_locker_lost performance_schema_mutex_classes_lost performance_schema_mutex_instances_lost performance_schema_rwlock_classes_lost performance_schema_rwlock_instances_lost
performance_schema_table_handles_lost performance_schema_table_instances_lost performance_schema_thread_classes_lost performance_schema_thread_instances_lost prepared_stmt_count qcache_free_blocks
qcache_free_memory qcache_hits qcache_inserts qcache_lowmem_prunes qcache_not_cached qcache_queries_in_cache qcache_total_blocks queries questions rpl_status select_full_join select_full_range_join
select_range select_range_check select_scan slave_heartbeat_period slave_open_temp_tables slave_received_heartbeats slave_retried_transactions slave_running slow_launch_threads slow_queries
sort_merge_passes sort_range sort_rows sort_scan ssl_accept_renegotiates ssl_accepts ssl_callback_cache_hits ssl_cipher ssl_cipher_list ssl_client_connects ssl_connect_renegotiates ssl_ctx_verify_depth
ssl_ctx_verify_mode ssl_default_timeout ssl_finished_accepts ssl_finished_connects ssl_session_cache_hits ssl_session_cache_misses ssl_session_cache_mode ssl_session_cache_overflows
ssl_session_cache_size ssl_session_cache_timeouts ssl_sessions_reused ssl_used_session_cache_entries ssl_verify_depth ssl_verify_mode ssl_version table_locks_immediate table_locks_waited
tc_log_max_pages_used tc_log_page_size tc_log_page_waits threads_cached threads_connected threads_created threads_running uptime uptime_since_flush_status
wsrep_apply_oooe wsrep_apply_oool wsrep_causal_reads wsrep_commit_oooe wsrep_commit_oool wsrep_flow_control_recv wsrep_flow_control_sent wsrep_local_bf_aborts
wsrep_local_commits wsrep_local_replays wsrep_received wsrep_received_bytes wsrep_replicated wsrep_replicated_bytes
wsrep_apply_window wsrep_cert_deps_distance wsrep_local_recv_queue wsrep_local_recv_queue_avg wsrep_local_send_queue wsrep_local_send_queue_avg wsrep_commit_window
wsrep_local_cert_failures wsrep_cert_index_size wsrep_local_state wsrep_flow_control_paused wsrep_cluster_size wsrep_last_committed
innodb_buffer_pool_bytes_data threadpool_idle_threads wsrep_thread_count innodb_descriptors_memory innodb_read_views_memory innodb_buffer_pool_bytes_dirty
threadpool_threads
);
105 |
106 |
# Metric-name whitelists for each family of server state.  Each list is
# folded into %keys below as a hash slice so that membership checks in
# my_read_each_db() are O(1) 'exists' lookups.

# Columns of interest from 'SHOW SLAVE STATUS'.
my @slave_keys = qw(exec_master_log_pos read_master_log_pos seconds_behind_master slave_io_running slave_sql_running);

# InnoDB status items per section, as produced by InnoDBParser:
# bp = buffer pool.
my @innodb_bp_keys = qw(
add_pool_alloc awe_mem_alloc buf_free buf_pool_hits buf_pool_reads buf_pool_size dict_mem_alloc page_creates_sec page_reads_sec page_writes_sec pages_created pages_modified
pages_read pages_total pages_written reads_pending total_mem_alloc writes_pending writes_pending_flush_list writes_pending_lru writes_pending_single_page
);

# ib = insert buffer / adaptive hash index.
my @innodb_ib_keys = qw(
bufs_in_node_heap free_list_len hash_searches_s hash_table_size inserts merged_recs merges non_hash_searches_s seg_size size used_cells
);

# io = file I/O.
my @innodb_io_keys = qw(
avg_bytes_s flush_type fsyncs_s os_file_reads os_file_writes os_fsyncs pending_aio_writes pending_buffer_pool_flushes pending_ibuf_aio_reads pending_log_flushes pending_log_ios
pending_normal_aio_reads pending_preads pending_pwrites pending_sync_ios reads_s writes_s
);

# lg = log.
my @innodb_lg_keys = qw(last_chkp log_flushed_to log_ios_done log_ios_s log_seq_no pending_chkp_writes pending_log_writes);

# ro = row operations.
my @innodb_ro_keys = qw(del_sec ins_sec n_reserved_extents num_rows_del num_rows_ins num_rows_read num_rows_upd queries_in_queue queries_inside read_sec read_views_open upd_sec);

# sm = semaphores.
my @innodb_sm_keys = qw(mutex_os_waits mutex_spin_rounds mutex_spin_waits reservation_count rw_excl_os_waits rw_excl_spins rw_shared_os_waits rw_shared_spins signal_count wait_array_size);

# Canonical process-list state names (after the normalization done in
# my_read_each_db()).  BUGFIX: the truncated entry 'user_' is restored
# to 'user_sleep', matching the state list documented in the README.
my @pstate_keys = qw(
after_create analyzing checking_permissions checking_table cleaning_up closing_tables converting_heap_to_myisam copy_to_tmp_table copying_to_tmp_table_on_disk creating_index creating_sort_index
copying_to_group_table creating_table creating_tmp_table deleting_from_main_table deleting_from_reference_table discard_or_import_tablespace end executing execution_of_init_command freeing_items
flushing_tables fulltext_initialization init killed locked logging_slow_query null manage_keys opening_table optimizing preparing purging_old_relay_logs query_end reading_from_net removing_duplicates
removing_tmp_table rename rename_result_table reopen_tables repair_by_sorting repair_done repair_with_keycache rolling_back
saving_state searching_rows_for_update sending_data setup sleep sorting_for_group sorting_for_order sorting_index sorting_result statistics system_lock
updating updating_main_table updating_reference_tables user_lock user_sleep
waiting_for_table waiting_on_cond writing_to_net wsrep wsrep_commit wsrep_write_row
other
);

# Build the lookup sets: only the key names matter, values stay undef.
@{$keys{'status'}}{@status_keys} = undef;
@{$keys{'slave'}}{@slave_keys} = undef;
@{$keys{'innodb'}{'bp'}}{@innodb_bp_keys} = undef;
@{$keys{'innodb'}{'ib'}}{@innodb_ib_keys} = undef;
@{$keys{'innodb'}{'io'}}{@innodb_io_keys} = undef;
@{$keys{'innodb'}{'lg'}}{@innodb_lg_keys} = undef;
@{$keys{'innodb'}{'ro'}}{@innodb_ro_keys} = undef;
@{$keys{'innodb'}{'sm'}}{@innodb_sm_keys} = undef;
@{$keys{'pstate'}}{@pstate_keys} = undef;

# Register the read and config callbacks with collectd.
plugin_register (TYPE_READ, 'MySQL', 'my_read');
plugin_register (TYPE_CONFIG, 'MySQL', 'mysql_config');
152 |
# Collectd TYPE_CONFIG callback.  Each child block of the plugin
# configuration names one database instance; Host/Port/Socket/User/Pass
# directives inside it override the defaults below.  The parsed settings
# are stored in the file-scoped $databases hashref, keyed by instance name.
sub mysql_config {
    my ($ci) = @_;
    for my $database (@{ $ci->{'children'} }) {
        my $db_name  = $database->{'values'}->[0];
        # Per-instance defaults; only the keys listed here are accepted.
        my %settings = (
            host   => 'localhost',
            port   => 3306,
            socket => '',
            user   => 'root',
            pass   => '',
        );
        for my $item (@{ $database->{'children'} }) {
            my $key = lc $item->{'key'};
            $settings{$key} = $item->{'values'}->[0] if exists $settings{$key};
        }
        $databases->{$db_name}->{$_} = $settings{$_} for keys %settings;
    }
    return 1;
}
187 |
# Support function.  Fetches one server variable via
# 'SHOW GLOBAL VARIABLES LIKE ...' and returns its value (the second
# column of the row).  The variable name is quotemeta'd into the pattern.
sub read_mysql_variable {
    my ($dbh, $varname) = @_;
    my $sql = qq{SHOW /*!40003 GLOBAL*/ VARIABLES LIKE "\Q$varname\E"};
    my @row = $dbh->selectrow_array($sql);
    return $row[1];
}
194 |
# Support function.  Returns the server PID recorded on the first line
# of $pid_file (trailing newline stripped).  Dies if the file cannot be
# opened or closed.
sub read_mysql_pid_from_file {
    my ($pid_file) = @_;
    open my $fh, '<', $pid_file or die qq{Cannot open '$pid_file' for reading: $!};
    my $pid = <$fh>;
    close $fh or die qq{Cannot close '$pid_file' after reading: $!};
    chomp $pid;
    return $pid;
}
204 |
# Support function.  Derives the path of InnoDB's status dump file,
# ${datadir}/innodb_status.${pid}, from the server's own configuration
# (datadir and pid_file variables plus the PID file contents).
sub innodb_status_filename {
    my ($dbh) = @_;
    my $datadir  = read_mysql_variable($dbh, 'datadir');
    my $pid_file = read_mysql_variable($dbh, 'pid_file');
    my $pid      = read_mysql_pid_from_file($pid_file);
    return "$datadir/innodb_status.$pid";
}
215 |
# Support function.  Reads InnoDB status from either the dump file
# (${mysql::datadir}/innodb_status.${mysql::pid}) or
# 'SHOW ENGINE INNODB STATUS'.  The file is preferred when readable,
# to avoid the 64KB truncation of the SQL output.
sub read_innodb_status_from_file_or_sql {
    my $dbh = shift;
    my $innodb_status_filename = innodb_status_filename $dbh;
    my $innodb_status_fulltext;
    if ( -r $innodb_status_filename ) {
        open my $fh, '<', $innodb_status_filename
            or die qq{Cannot open innodb status file '$innodb_status_filename' for reading: $!};
        $innodb_status_fulltext = do { local $/ = undef; <$fh> };    # slurp whole file
        close $fh
            # BUGFIX: corrected 'CAnnot' typo in the error message.
            or die qq{Cannot close innodb status file '$innodb_status_filename' after reading: $!};
    } else {
        # Fall back to SQL; the status text is in the 'Status' column.
        $innodb_status_fulltext = ${ $dbh->selectrow_hashref( q{SHOW /*!50000 ENGINE*/ INNODB STATUS} ) }{Status};
    }

    return $innodb_status_fulltext;
}
238 |
# Collectd TYPE_READ callback: run one collection pass over every
# database instance registered by mysql_config().  Always returns true.
sub my_read {
    for my $name (keys %{$databases}) {
        my_read_each_db($name, $databases->{$name});
    }
    return 1;
}
245 |
# Collectd TYPE_READ worker: gather and dispatch every metric family
# (global status, slave status, InnoDB status, process-list states) for
# one configured database instance.
#
# $db_name  - instance label, embedded in each plugin_instance
# $database - hashref of connection settings built by mysql_config()
#
# Returns 1 after dispatching, or 0 if the connection attempt failed.
sub my_read_each_db {
    my ($db_name, $database) = @_;
    my $host   = $database->{'host'};
    my $port   = $database->{'port'};
    my $socket = $database->{'socket'};
    my $user   = $database->{'user'};
    my $pass   = $database->{'pass'};

    # A configured socket path wins over TCP host/port.
    my $dbh;
    if ($socket eq "") {
        $dbh = DBI->connect("DBI:mysql:database=mysql;host=$host;port=$port", $user, $pass) || return 0;
    } else {
        $dbh = DBI->connect("DBI:mysql:database=mysql;mysql_socket=$socket", $user, $pass) || return 0;
    }

    # Normalize all status/slave keys to lower case so they line up with
    # the lower-case whitelists in %keys.
    my $status = $dbh->selectall_hashref("SHOW /*!50002 GLOBAL */ STATUS", 'Variable_name');
    $status = { map { lc($_) => $status->{$_} } keys %{$status} };
    my $slave = $dbh->selectrow_hashref("SHOW SLAVE STATUS");
    $slave = {} unless defined $slave;    # not a replica: no row returned
    $slave = { map { lc($_) => $slave->{$_} } keys %{$slave} };

    my ($mysqlver) = $dbh->selectrow_array('SELECT VERSION();');
    my $parser = InnoDBParser->new;
    my $innodb_status = $parser->parse_status_text(read_innodb_status_from_file_or_sql($dbh), 0, undef, undef, $mysqlver);
    my $plist = $dbh->selectall_arrayref("SHOW PROCESSLIST", { Slice => {} });
    $dbh->disconnect();

    # Fold free-form process-list states into the canonical names listed
    # in @pstate_keys, counting occurrences of each.
    my %states;
    foreach my $item (@{$plist}) {
        if ($item->{'State'}) {
            my $pstate = lc($item->{'State'});
            # BUGFIX: 'taiting' was a typo for 'waiting', so lock-wait
            # states were never folded into 'locked'.
            $pstate =~ s/^(?:table lock|waiting.*lock)$/locked/;
            $pstate =~ s/^opening tables/opening table/;
            $pstate =~ s/^waiting for tables/waiting_for_table/;
            $pstate =~ s/^Sleeping.*/sleep/i;
            $pstate =~ s/^update.*/updating/i;
            $pstate =~ s/^write_rows_log_event__write_row.*/wsrep_write_row/i;
            $pstate =~ s/^committed_.*/wsrep_commit/i;
            $pstate =~ s/^wsrep_aborter_idle.*/wsrep/i;
            $pstate =~ s/^(.+?);.*/$1/;        # keep only the part before any ';'
            $pstate =~ s/[^a-zA-Z0-9_]/_/g;    # canonicalize to identifier chars

            # BUGFIX: the whitelist is stored under $keys{'pstate'} (see
            # the slice assignments at file scope); the old lookup under
            # 'plist' never matched, so every state was warned about and
            # dropped from the counts.
            if (exists $keys{'pstate'}{$pstate}) {
                $states{$pstate}++;
            } else {
                plugin_log(LOG_WARNING, "MySQL: Unknown pstate: '$pstate'");
            }
        } else {
            $states{'null'}++;
        }
    }

    # Dispatch 'SHOW GLOBAL STATUS' values.  Numeric values pass through;
    # yes/on/enabled-style flags become 1; everything else becomes 0.
    for (keys %{$keys{'status'}}) {
        my $vl = {};
        $vl->{'plugin'} = 'mysql';
        $vl->{'type'} = 'gauge';
        $vl->{'plugin_instance'} = "status-$db_name";
        $vl->{'type_instance'} = $_;
        my $value = defined($status->{$_}) ? $status->{$_}->{'Value'} : undef;
        if (defined($value) && $value =~ /^\d+(?:\.\d+)?$/) {
            $vl->{'values'} = [ $value + 0 ];
        } elsif (defined($value) && $value =~ /(?:yes|on|enabled)/i) {
            $vl->{'values'} = [ 1 ];
        } else {
            $vl->{'values'} = [ 0 ];
        }
        plugin_dispatch_values($vl);
    }

    # Dispatch 'SHOW SLAVE STATUS' values with the same numeric/flag rules.
    for (keys %{$keys{'slave'}}) {
        my $vl = {};
        $vl->{'plugin'} = 'mysql';
        $vl->{'type'} = 'gauge';
        $vl->{'plugin_instance'} = "slave-$db_name";
        $vl->{'type_instance'} = $_;
        my $value = $slave->{$_};
        if (defined($value) && $value =~ /^\d+(?:\.\d+)?$/) {
            $vl->{'values'} = [ $value + 0 ];
        } elsif (defined($value) && $value =~ /(?:yes|on|enabled)/i) {
            # BUGFIX: dispatch a numeric 1 for flag values; the old code
            # dispatched the raw string (e.g. 'Yes'), which is not a valid
            # gauge and was inconsistent with the status loop above.
            $vl->{'values'} = [ 1 ];
        } else {
            $vl->{'values'} = [ 0 ];
        }
        plugin_dispatch_values($vl);
    }

    # Synthetic metric: 1 when the SQL thread is processing the same
    # binlog the IO thread is reading from the master.
    my $vl = {};
    $vl->{'plugin'} = 'mysql';
    $vl->{'type'} = 'gauge';
    $vl->{'plugin_instance'} = "slave-$db_name";
    $vl->{'type_instance'} = 'binlog_synched_to_master';
    # BUGFIX: the slave hash keys were lower-cased above, so the old
    # mixed-case lookups ('Master_Log_File') were always undef and the
    # comparison could never succeed.
    if (defined($slave->{'master_log_file'})
        && defined($slave->{'relay_master_log_file'})
        && $slave->{'master_log_file'} eq $slave->{'relay_master_log_file'}) {
        $vl->{'values'} = [ 1 ];    # slave processing same binlog as master
    } else {
        $vl->{'values'} = [ 0 ];
    }
    plugin_dispatch_values($vl);

    # Dispatch parsed InnoDB status values, one metric per section/item.
    foreach my $section (keys %{$keys{'innodb'}}) {
        my $vl = {};
        $vl->{'plugin'} = 'mysql';
        $vl->{'type'} = 'gauge';
        $vl->{'plugin_instance'} = "innodb-$db_name";
        foreach my $item (keys %{$keys{'innodb'}{$section}}) {
            $vl->{'type_instance'} = $section . '_' . $item;
            my $value = $innodb_status->{'sections'}->{$section}->{$item};
            if (defined($value) && $value =~ /^\d+(?:\.\d+)?$/) {
                $vl->{'values'} = [ $value + 0 ];
            } elsif (defined($value) && $value =~ /(?:yes|on|enabled)/i) {
                $vl->{'values'} = [ 1 ];
            } else {
                $vl->{'values'} = [ 0 ];
            }
            plugin_dispatch_values($vl);
        }
    }

    # Dispatch the per-state process counts tallied above; states that
    # did not occur in this pass are reported as 0.
    foreach my $item (keys %{$keys{'pstate'}}) {
        my $vl = {};
        $vl->{'plugin'} = 'mysql';
        $vl->{'type'} = 'gauge';
        $vl->{'plugin_instance'} = "process-$db_name";
        $vl->{'type_instance'} = $item;
        $vl->{'values'} = [ defined($states{$item}) ? $states{$item} + 0 : 0 ];
        plugin_dispatch_values($vl);
    }

    return 1;
}
393 |
394 |
395 | 1;
396 |
397 |
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
1 | CloudWatchMetrics: Writes metrics to CloudWatch.
2 |
3 | Beware of metric costs: at $0.50 per metric, you want to make sure you're using match filters to only send what you really want to pay for.
4 |
5 |
6 | --
7 |
8 | AmqpGraphite collectd perl plugin: Writes to AMQP with metrics in graphite format.
9 |
10 | --
11 |
12 | AmqpJson collectd perl plugin: Writes to AMQP with metrics in json format.
13 |
14 | --
15 |
16 | AmqpJsonUdp collectd perl plugin: Writes to AMQP with metrics in json format. Uses the UDP-exchange to bypass all the AMQP overhead and makes sending metrics fire-and-forget.
17 |
18 | --
19 |
20 | CPUSummary perl plugin: A CPU summary plugin that takes into account the number of CPUs to give a percentage utilization for each metric.
21 |
22 | --
23 |
24 | HTTPCheck perl plugin: An http check that can either use a simple regular expression match or can parse a json document and compare a key to an expected value.
25 |
26 | ---
27 |
28 | RabbitMQ
29 |
30 | A module to monitor RabbitMQ AMQP broker message queues and message rates. The following metrics are pulled on a per-vhost/queue basis
31 | - messages
32 | - messages_rate
33 | - messages_unacknowledged
34 | - messages_unacknowledged_rate
35 | - messages_ready
36 | - message_ready_rate
37 | - memory
38 | - consumers
39 | - publish
40 | - publish_rate
41 | - deliver_no_ack
42 | - deliver_no_ack_rate
43 | - deliver_get
44 | - deliver_get_rate
45 |
46 | Refer to the RabbitMQ documentation for details on the meanings of these metrics.
47 |
48 | You'll need to add a new type to the collectd types db (/usr/share/collectd/types.db). The types.db.sample contains the relevant information to add. A quick way to do this is to do
49 | cat types.db.sample >>/usr/share/collectd/types.db
50 |
51 | ---
52 |
53 | Riak
54 |
55 | This plugin gathers statistics from the /stats REST endpoint. See the Riak documentation for the meanings of these variables. (http://docs.basho.com/riak/latest/cookbooks/Statistics-and-Monitoring/)
56 |
57 |
58 | --
59 |
60 | MySQL
61 |
62 |
63 | In your collectd config:
64 |
65 |
66 | Globals true
67 |
68 |
69 |
70 | BaseName "Collectd::Plugins"
71 | LoadPlugin "MySQL"
72 |
73 |
74 |
75 | Host "localhost"
76 | Port "3306"
77 | User "root"
78 | Pass "mypass"
79 |
80 |
81 | Socket "/var/mysql/mysql.sock"
82 | User "root"
83 | Pass "mypass"
84 |
85 |
86 |
87 |
88 | This plugin gathers about 500 metrics from the MySQL server. Specifically the following variables are gathered:
89 |
90 | Server status:
91 |
92 | aborted_clients aborted_connects binlog_cache_disk_use binlog_cache_use binlog_commits binlog_group_commits binlog_stmt_cache_disk_use binlog_stmt_cache_use bytes_received bytes_sent
93 | com_admin_commands com_alter_db com_alter_db_upgrade com_alter_event com_alter_function com_alter_procedure com_alter_server com_alter_table com_alter_tablespace com_analyze com_assign_to_keycache
94 | com_begin com_binlog com_call_procedure com_change_db com_change_master com_check com_checksum com_commit com_create_db com_create_event com_create_function com_create_index com_create_procedure
95 | com_create_server com_create_table com_create_trigger com_create_udf com_create_user com_create_view com_dealloc_sql com_delete com_delete_multi com_do com_drop_db com_drop_event com_drop_function
96 | com_drop_index com_drop_procedure com_drop_server com_drop_table com_drop_trigger com_drop_user com_drop_view com_empty_query com_execute_sql com_flush com_grant com_ha_close com_ha_open com_ha_read
97 | com_help com_insert com_insert_select com_install_plugin com_kill com_load com_lock_tables com_optimize com_preload_keys com_prepare_sql com_purge com_purge_before_date com_release_savepoint
98 | com_rename_table com_rename_user com_repair com_replace com_replace_select com_reset com_resignal com_revoke com_revoke_all com_rollback com_rollback_to_savepoint com_savepoint com_select com_set_option
99 | com_show_authors com_show_binlog_events com_show_binlogs com_show_charsets com_show_client_statistics com_show_collations com_show_contributors com_show_create_db com_show_create_event
100 | com_show_create_func com_show_create_proc com_show_create_table com_show_create_trigger com_show_databases com_show_engine_logs com_show_engine_mutex com_show_engine_status com_show_errors
101 | com_show_events com_show_fields com_show_function_status com_show_grants com_show_index_statistics com_show_keys com_show_master_status com_show_open_tables com_show_plugins com_show_privileges
102 | com_show_procedure_status com_show_processlist com_show_profile com_show_profiles com_show_relaylog_events com_show_slave_hosts com_show_slave_status com_show_slave_status_nolock com_show_status
103 | com_show_storage_engines com_show_table_statistics com_show_table_status com_show_tables com_show_temporary_tables com_show_thread_statistics com_show_triggers com_show_user_statistics
104 | com_show_variables com_show_warnings com_signal com_slave_start com_slave_stop com_stmt_close com_stmt_execute com_stmt_fetch com_stmt_prepare com_stmt_reprepare com_stmt_reset com_stmt_send_long_data
105 | com_truncate com_uninstall_plugin com_unlock_tables com_update com_update_multi com_xa_commit com_xa_end com_xa_prepare com_xa_recover com_xa_rollback com_xa_start compression connections
106 | created_tmp_disk_tables created_tmp_files created_tmp_tables delayed_errors delayed_insert_threads delayed_writes flashcache_enabled flush_commands handler_commit handler_delete handler_discover
107 | handler_prepare handler_read_first handler_read_key handler_read_last handler_read_next handler_read_prev handler_read_rnd handler_read_rnd_next handler_rollback handler_savepoint
108 | handler_savepoint_rollback handler_update handler_write innodb_adaptive_hash_cells innodb_adaptive_hash_hash_searches innodb_adaptive_hash_heap_buffers innodb_adaptive_hash_non_hash_searches
109 | innodb_background_log_sync innodb_buffer_pool_pages_lru_flushed innodb_buffer_pool_pages_data innodb_buffer_pool_pages_dirty innodb_buffer_pool_pages_flushed innodb_buffer_pool_pages_free
110 | innodb_buffer_pool_pages_made_not_young innodb_buffer_pool_pages_made_young innodb_buffer_pool_pages_misc innodb_buffer_pool_pages_old innodb_buffer_pool_pages_total innodb_buffer_pool_read_ahead
111 | innodb_buffer_pool_read_ahead_evicted innodb_buffer_pool_read_ahead_rnd innodb_buffer_pool_read_requests innodb_buffer_pool_reads innodb_buffer_pool_wait_free innodb_buffer_pool_write_requests
112 | innodb_checkpoint_age innodb_checkpoint_max_age innodb_checkpoint_target_age innodb_current_row_locks innodb_data_fsyncs innodb_data_pending_fsyncs innodb_data_pending_reads innodb_data_pending_writes
113 | innodb_data_read innodb_data_reads innodb_data_writes innodb_data_written innodb_dblwr_pages_written innodb_dblwr_writes innodb_deadlocks innodb_dict_tables innodb_have_atomic_builtins
114 | innodb_history_list_length innodb_ibuf_discarded_delete_marks innodb_ibuf_discarded_deletes innodb_ibuf_discarded_inserts innodb_ibuf_free_list innodb_ibuf_merged_delete_marks innodb_ibuf_merged_deletes
115 | innodb_ibuf_merged_inserts innodb_ibuf_merges innodb_ibuf_segment_size innodb_ibuf_size innodb_log_waits innodb_log_write_requests innodb_log_writes innodb_lsn_current innodb_lsn_flushed
116 | innodb_lsn_last_checkpoint innodb_master_thread_10_second_loops innodb_master_thread_1_second_loops innodb_master_thread_background_loops innodb_master_thread_main_flush_loops
117 | innodb_master_thread_sleeps innodb_max_trx_id innodb_mem_adaptive_hash innodb_mem_dictionary innodb_mem_total innodb_mutex_os_waits innodb_mutex_spin_rounds innodb_mutex_spin_waits
118 | innodb_oldest_view_low_limit_trx_id innodb_os_log_fsyncs innodb_os_log_pending_fsyncs innodb_os_log_pending_writes innodb_os_log_written innodb_page_size innodb_pages_created innodb_pages_read
119 | innodb_pages_written innodb_purge_trx_id innodb_purge_undo_no innodb_row_lock_current_waits innodb_row_lock_time innodb_row_lock_time_avg innodb_row_lock_time_max innodb_row_lock_waits
120 | innodb_rows_deleted innodb_rows_inserted innodb_rows_read innodb_rows_updated innodb_s_lock_os_waits innodb_s_lock_spin_rounds innodb_s_lock_spin_waits innodb_truncated_status_writes
121 | innodb_x_lock_os_waits innodb_x_lock_spin_rounds innodb_x_lock_spin_waits key_blocks_not_flushed key_blocks_unused key_blocks_used key_read_requests key_reads key_write_requests key_writes
122 | last_query_cost max_used_connections not_flushed_delayed_rows open_files open_streams open_table_definitions open_tables opened_files opened_table_definitions opened_tables
123 | performance_schema_cond_classes_lost performance_schema_cond_instances_lost performance_schema_file_classes_lost performance_schema_file_handles_lost performance_schema_file_instances_lost
124 | performance_schema_locker_lost performance_schema_mutex_classes_lost performance_schema_mutex_instances_lost performance_schema_rwlock_classes_lost performance_schema_rwlock_instances_lost
125 | performance_schema_table_handles_lost performance_schema_table_instances_lost performance_schema_thread_classes_lost performance_schema_thread_instances_lost prepared_stmt_count qcache_free_blocks
126 | qcache_free_memory qcache_hits qcache_inserts qcache_lowmem_prunes qcache_not_cached qcache_queries_in_cache qcache_total_blocks queries questions rpl_status select_full_join select_full_range_join
127 | select_range select_range_check select_scan slave_heartbeat_period slave_open_temp_tables slave_received_heartbeats slave_retried_transactions slave_running slow_launch_threads slow_queries
128 | sort_merge_passes sort_range sort_rows sort_scan ssl_accept_renegotiates ssl_accepts ssl_callback_cache_hits ssl_cipher ssl_cipher_list ssl_client_connects ssl_connect_renegotiates ssl_ctx_verify_depth
129 | ssl_ctx_verify_mode ssl_default_timeout ssl_finished_accepts ssl_finished_connects ssl_session_cache_hits ssl_session_cache_misses ssl_session_cache_mode ssl_session_cache_overflows
130 | ssl_session_cache_size ssl_session_cache_timeouts ssl_sessions_reused ssl_used_session_cache_entries ssl_verify_depth ssl_verify_mode ssl_version table_locks_immediate table_locks_waited
131 | tc_log_max_pages_used tc_log_page_size tc_log_page_waits threads_cached threads_connected threads_created threads_running uptime uptime_since_flush_status
132 | wsrep_*
133 |
134 | Slave status:
135 |
136 | exec_master_log_pos read_master_log_pos seconds_behind_master slave_io_running slave_sql_running
137 |
138 |
139 | InnoDB status:
140 |
141 | add_pool_alloc awe_mem_alloc buf_free buf_pool_hit_rate buf_pool_hits buf_pool_reads buf_pool_size dict_mem_alloc page_creates_sec page_reads_sec page_writes_sec pages_created pages_modified
142 | pages_read pages_total pages_written reads_pending total_mem_alloc writes_pending writes_pending_flush_list writes_pending_lru writes_pending_single_page
143 | bufs_in_node_heap free_list_len hash_searches_s hash_table_size inserts merged_recs merges non_hash_searches_s seg_size size used_cells
144 | avg_bytes_s flush_type fsyncs_s os_file_reads os_file_writes os_fsyncs pending_aio_writes pending_buffer_pool_flushes pending_ibuf_aio_reads pending_log_flushes pending_log_ios
145 | pending_normal_aio_reads pending_preads pending_pwrites pending_sync_ios reads_s writes_s
146 | last_chkp log_flushed_to log_ios_done log_ios_s log_seq_no pending_chkp_writes pending_log_writes
147 | del_sec ins_sec n_reserved_extents num_rows_del num_rows_ins num_rows_read num_rows_upd queries_in_queue queries_inside read_sec read_views_open upd_sec
148 | mutex_os_waits mutex_spin_rounds mutex_spin_waits reservation_count rw_excl_os_waits rw_excl_spins rw_shared_os_waits rw_shared_spins signal_count wait_array_size
149 |
150 | Process states:
151 |
152 | after_create analyzing checking_permissions checking_table cleaning_up closing_tables converting_heap_to_myisam copy_to_tmp_table copying_to_tmp_table_on_disk creating_index creating_sort_index
153 | copying_to_group_table creating_table creating_tmp_table deleting_from_main_table deleting_from_reference_table discard_or_import_tablespace end executing execution_of_init_command freeing_items
154 | flushing_tables fulltext_initialization init killed locked logging_slow_query null manage_keys opening_table optimizing preparing purging_old_relay_logs query_end reading_from_net removing_duplicates
155 | removing_tmp_table rename rename_result_table reopen_tables repair_by_sorting repair_done repair_with_keycache rolling_back saving_state searching_rows_for_update sending_data setup sorting_for_group
156 | sorting_for_order sorting_index sorting_result statistics system_lock updating updating_main_table updating_reference_tables user_lock user_sleep waiting_for_table waiting_on_cond writing_to_net
157 |
158 | Reading InnoDB data from the status file is supported and will kick in automatically if the server
159 | supports it. See this URL for turning it on:
160 | http://dev.mysql.com/doc/refman/5.1/en/innodb-parameters.html#option_mysqld_innodb-status-file
161 |
162 | DiskStats
163 |
--------------------------------------------------------------------------------
/RabbitMQ.pm:
--------------------------------------------------------------------------------
1 | package Collectd::Plugin::RabbitMQ;
2 | use Collectd qw(:all);
3 | use LWP::UserAgent;
4 | use HTTP::Request::Common qw(GET);
5 | use JSON;
6 |
7 | =head1 NAME
8 |
9 | Collectd::Plugins::RabbitMQ - Monitor RabbitMQ queues and message rates
10 |
11 | =head1 VERSION
12 |
13 | Version 1
14 |
15 | =cut
16 |
17 | our $VERSION = '1';
18 |
19 |
20 | =head1 SYNOPSIS
21 |
22 | This is a collectd plugin for monitoring message rates and queues on a RabbitMQ broker. It uses the RabbitMQ management plugin and depends on following perl modules: LWP::UserAgent, HTTP::Request::Common, and JSON.
23 |
24 | In your collectd config:
25 |
26 |
27 | Globals true
28 |
29 |
30 |
31 | BaseName "Collectd::Plugins"
32 | LoadPlugin "RabbitMQ"
33 |
34 |
35 | Username "user"
36 | Password "pass"
37 | Realm "RabbitMQ Management"
38 | Host "localhost"
39 | Port "55672"
40 |
41 |
42 |
43 | The 'Realm' value is annoyingly dependent on the version of RabbitMQ you're running. It corresponds to the authentication realm that LWP::UserAgent will send credentials to (based on HTTP headers sent by the broker). For RabbitMQ 2.8.1, the value is "RabbitMQ Management", for 2.5.1 it's "Management: Web UI". If neither of these work, sniff the HTTP traffic to find the basic authentication realm.
44 |
45 | =head1 AUTHOR
46 |
47 | Mark Steele, C<< >>
48 |
49 | =cut
50 |
# Connection defaults; each is overridden by rabbit_config() from
# collectd.conf.  NOTE(review): 55672 is the management-API port used by
# RabbitMQ before 3.0 (newer releases default to 15672) -- confirm
# against the broker version in use.
my $username = 'user';
my $password = 'pass';
my $host = 'localhost';
my $port = 55672;
my $realm = '';

# Register the read and config callbacks with collectd.
plugin_register (TYPE_READ, 'RabbitMQ', 'my_read');
plugin_register (TYPE_CONFIG, "RabbitMQ", "rabbit_config");
59 |
# Collectd TYPE_CONFIG callback.  Copies the recognized directives
# (Host/Username/Password/Port/Realm) from the plugin configuration into
# the file-scoped connection settings; unknown directives are ignored.
sub rabbit_config {
    plugin_log(LOG_ERR, "RabbitMQ: reading configuration");
    my ($ci) = @_;
    # Map each accepted directive to the scalar it configures.
    my %slot_for = (
        host     => \$host,
        username => \$username,
        password => \$password,
        port     => \$port,
        realm    => \$realm,
    );
    for my $item (@{ $ci->{'children'} }) {
        my $slot = $slot_for{ lc $item->{'key'} };
        ${$slot} = $item->{'values'}->[0] if $slot;
    }
    plugin_log(LOG_ERR, "RabbitMQ: reading configuration done");
    return 1;
}
81 |
# Collectd TYPE_READ callback: fetch per-queue statistics from the
# RabbitMQ management API (/api/queues) and dispatch them as values of
# the custom 'rabbitmq' type (see types.db.sample).
#
# Always returns 1 so collectd keeps the callback registered even after
# a transient HTTP or decode failure.
sub my_read
{
    plugin_log(LOG_ERR, "RabbitMQ: starting http request");
    # BUGFIX: $res was an undeclared package global (this file does not
    # enable strict), so a failed request could leave a stale response
    # from the previous read interval in scope.  Declare it lexically.
    my $res;
    eval {
        my $ua = LWP::UserAgent->new;
        $ua->timeout(5);
        $ua->credentials("$host:$port", $realm, $username, $password);
        my $req = GET "http://$host:$port/api/queues";
        $res = $ua->request($req);
    };
    if ($@ || !defined $res) {
        plugin_log(LOG_ERR, "RabbitMQ: exception fetching document by http");
        return 1;
    }

    plugin_log(LOG_ERR, "RabbitMQ: finished http request");
    if ($res->code ne '200') {
        plugin_log(LOG_ERR, "RabbitMQ: non-200 response");
        return 1;
    }
    plugin_log(LOG_ERR, "RabbitMQ: got 200 response");

    my $contents = $res->content();
    my $ref;
    eval {
        $ref = decode_json($contents);
    };
    if ($@) {
        plugin_log(LOG_ERR, "RabbitMQ: exception decoding response");
        return 1;
    }
    plugin_log(LOG_ERR, "RabbitMQ: decoded response");

    my $vl = {};
    $vl->{'plugin'} = 'rabbitmq';
    $vl->{'type'} = 'rabbitmq';

    # One dispatch per queue; '/' and '-' are not allowed in collectd
    # instance names, so they are mapped to '_'.  Missing counters
    # default to 0.
    foreach my $result (@{$ref}) {
        $vl->{'plugin_instance'} = $result->{'vhost'};
        $vl->{'type_instance'} = $result->{'name'};
        $vl->{'plugin_instance'} =~ s#[/-]#_#g;
        $vl->{'type_instance'} =~ s#[/-]#_#g;
        $vl->{'values'} = [
            $result->{'messages'} || 0,
            $result->{'messages_details'}->{'rate'} || 0,
            $result->{'messages_unacknowledged'} || 0,
            $result->{'messages_unacknowledged_details'}->{'rate'} || 0,
            $result->{'messages_ready'} || 0,
            # NOTE(review): the management API spells the sibling keys
            # 'messages_*_details'; 'message_ready_details' (singular) may
            # never exist and would always report 0 -- confirm against the
            # broker's /api/queues output before changing it.
            $result->{'message_ready_details'}->{'rate'} || 0,
            $result->{'memory'} || 0,
            $result->{'consumers'} || 0,
            $result->{'message_stats'}->{'publish'} || 0,
            $result->{'message_stats'}->{'publish_details'}->{'rate'} || 0,
            $result->{'message_stats'}->{'deliver_no_ack'} || 0,
            $result->{'message_stats'}->{'deliver_no_ack_details'}->{'rate'} || 0,
            $result->{'message_stats'}->{'deliver_get'} || 0,
            $result->{'message_stats'}->{'deliver_get_details'}->{'rate'} || 0,
        ];
        plugin_log(LOG_ERR, "RabbitMQ: dispatching stats for " . $result->{'vhost'} . '/' . $result->{'name'});
        plugin_dispatch_values($vl);
    }
    plugin_log(LOG_ERR, "RabbitMQ: done processing results");
    return 1;
}
146 |
--------------------------------------------------------------------------------
/Riak.pm:
--------------------------------------------------------------------------------
# NOTE: package must live under Collectd::Plugins:: so that the documented
# config (BaseName "Collectd::Plugins" + LoadPlugin "Riak") resolves it;
# the singular "Collectd::Plugin::Riak" would never be found by collectd.
package Collectd::Plugins::Riak;
use warnings;
# TODO(review): enable 'use strict' after auditing for undeclared package
# globals (e.g. $res in my_read()).
use Collectd qw(:all);
use LWP::UserAgent;
use HTTP::Request::Common qw(GET);
use JSON;
6 |
7 | =head1 NAME
8 |
9 | Collectd::Plugins::Riak - Monitor a Riak node
10 |
11 | =head1 VERSION
12 |
13 | Version 1
14 |
15 | =cut
16 |
17 | our $VERSION = '1';
18 |
19 |
20 | =head1 SYNOPSIS
21 |
22 | This is a collectd plugin for monitoring a Riak node. It depends on following perl modules: LWP::UserAgent, HTTP::Request::Common, and JSON.
23 |
24 | In your collectd config:
25 |
  <LoadPlugin perl>
    Globals true
  </LoadPlugin>

  <Plugin perl>
    BaseName "Collectd::Plugins"
    LoadPlugin "Riak"

    <Plugin riak>
      Host "localhost"
      Port "8098"
    </Plugin>
  </Plugin>
39 |
40 | =head1 AUTHOR
41 |
Mark Steele
43 |
44 | =cut
45 |
# Connection defaults; overridden by riak_config() from the collectd
# <Plugin riak> configuration block.
my $host = 'localhost';
my $port = 8098;

# Whitelist of statistic keys to extract from the node's /stats JSON
# document. Any key absent from the response is dispatched as 0 (see
# my_read below).
my @fields = qw(converge_delay_last converge_delay_max converge_delay_mean converge_delay_min coord_redirs_total cpu_avg1 cpu_avg15 cpu_avg5 cpu_nprocs dropped_vnode_requests_total executing_mappers gossip_received handoff_timeouts ignored_gossip_total mem_allocated
memory_atom memory_atom_used memory_binary memory_code memory_ets memory_processes memory_processes_used memory_system memory_total mem_total node_get_fsm_objsize_100 node_get_fsm_objsize_95 node_get_fsm_objsize_99 node_get_fsm_objsize_mean
node_get_fsm_objsize_median node_get_fsm_in_rate node_get_fsm_out_rate node_get_fsm_rejected_60s node_get_fsm_rejected node_get_fsm_rejected_total node_get_fsm_siblings_100 node_get_fsm_siblings_95 node_get_fsm_siblings_99 node_get_fsm_siblings_mean node_get_fsm_siblings_median node_get_fsm_time_100 node_get_fsm_time_95 node_get_fsm_time_99
node_get_fsm_time_mean node_get_fsm_time_median node_gets node_gets_total node_put_fsm_time_100 node_put_fsm_time_95 node_put_fsm_time_99 node_put_fsm_time_mean node_put_fsm_time_median node_puts node_puts_total pbc_active pbc_connects
pbc_connects_total postcommit_fail precommit_fail read_repairs read_repairs_total rebalance_delay_last rebalance_delay_max rebalance_delay_mean rebalance_delay_min rejected_handoffs riak_kv_vnodeq_max riak_kv_vnodeq_mean riak_kv_vnodeq_median
riak_kv_vnodeq_min riak_kv_vnodeq_total riak_kv_vnodes_running riak_pipe_vnodeq_max riak_pipe_vnodeq_mean riak_pipe_vnodeq_median riak_pipe_vnodeq_min riak_pipe_vnodeq_total riak_pipe_vnodes_running ring_creation_size ring_num_partitions
rings_reconciled rings_reconciled_total sys_global_heaps_size sys_process_count sys_thread_pool_size vnode_gets vnode_gets_total vnode_index_deletes vnode_index_deletes_postings vnode_index_deletes_postings_total vnode_index_deletes_total
vnode_index_reads vnode_index_reads_total vnode_index_writes vnode_index_writes_postings vnode_index_writes_postings_total vnode_index_writes_total vnode_puts vnode_puts_total);

# Register the read and configuration callbacks with collectd under the
# plugin name 'Riak'.
plugin_register (TYPE_READ, 'Riak', 'my_read');
plugin_register (TYPE_CONFIG, "Riak", "riak_config");
60 |
# Configuration callback: walk the <Plugin riak> block's children and pick
# up the Host and Port settings (keys matched case-insensitively).
# Always returns 1 so collectd treats configuration as successful.
sub riak_config {
    my ($config) = @_;
    for my $child (@{ $config->{'children'} }) {
        my $name  = lc $child->{'key'};
        my $value = $child->{'values'}[0];
        $host = $value if $name eq 'host';
        $port = $value if $name eq 'port';
    }
    return 1;
}
74 |
# Read callback: fetch http://$host:$port/stats from the Riak HTTP API,
# decode the JSON body, and dispatch one value per field in @fields.
#
# Always returns 1 (collectd read callbacks must return true; failures are
# logged and swallowed so collectd does not disable the plugin).
sub my_read
{
  my $res;   # FIX: was an undeclared package global assigned inside eval
  eval {
    my $ua = LWP::UserAgent->new;
    $ua->timeout(5);
    my $req = GET "http://$host:$port/stats";
    $res = $ua->request($req);
  };
  if ($@) {
    plugin_log(LOG_ERR, "Riak: exception fetching document by http");
    return 1;
  }

  # Guard against an undefined response object before calling ->code.
  if (!defined $res || $res->code ne '200') {
    plugin_log(LOG_ERR, "Riak: non-200 response");
    return 1;
  }
  my $contents = $res->content();
  my $ref;
  eval {
    $ref = decode_json($contents);
  };
  if ($@) {
    plugin_log(LOG_ERR, "Riak: exception decoding response");
    return 1;
  }
  # FIX: informational message was logged at LOG_ERR.
  plugin_log(LOG_INFO, "Riak: decoded response");

  foreach my $field (@fields) {
    my $vl = {};
    $vl->{'plugin'} = 'riak';
    # NOTE(review): every stat is dispatched as 'counter', although many
    # (memory_*, cpu_avg*, *_mean, ...) look like gauges; kept as-is for
    # compatibility with existing types.db entries -- confirm before changing.
    $vl->{'type'} = 'counter';
    $vl->{'type_instance'} = $field;
    # Missing/false fields are reported as 0 rather than skipped.
    $vl->{'values'} = [ $ref->{$field} ? $ref->{$field} : 0 ];
    plugin_dispatch_values($vl);
  }
  return 1;
}

1;   # FIX: a Perl module must return a true value when require'd
--------------------------------------------------------------------------------
/types.db.sample:
--------------------------------------------------------------------------------
1 | cpusummary user:COUNTER:U:U, nice:COUNTER:U:U, system:COUNTER:U:U, idle:COUNTER:U:U, iowait:COUNTER:U:U, irq:COUNTER:U:U, softirq:COUNTER:U:U, cpucount:GAUGE:U:U
rabbitmq messages:GAUGE:0:U, messages_rate:GAUGE:0:U, messages_unacknowledged:GAUGE:0:U, messages_unacknowledged_rate:GAUGE:0:U, messages_ready:GAUGE:0:U, message_ready_rate:GAUGE:0:U, memory:GAUGE:0:U, consumers:GAUGE:0:U, publish:GAUGE:0:U, publish_rate:GAUGE:0:U, deliver_no_ack:GAUGE:0:U, deliver_no_ack_rate:GAUGE:0:U, deliver_get:GAUGE:0:U, deliver_get_rate:GAUGE:0:U
3 |
--------------------------------------------------------------------------------