├── .gitignore ├── .rspec ├── BENCHLOG ├── Gemfile ├── LICENSE ├── README.md ├── Rakefile ├── bench ├── pace_http.rb ├── pace_newrelic.rb ├── pace_simple.rb ├── pace_throttled.rb └── resque_http.rb ├── config.ru ├── examples ├── defer.rb ├── echo.rb ├── http.rb ├── pause.rb ├── redis_test.rb └── sleep.rb ├── lib ├── pace.rb └── pace │ ├── airbrake.rb │ ├── event.rb │ ├── mock.rb │ ├── newrelic.rb │ ├── queue.rb │ ├── version.rb │ └── worker.rb ├── pace.gemspec └── spec ├── pace ├── airbrake_spec.rb ├── event_spec.rb ├── mock_spec.rb ├── queue_spec.rb └── worker_spec.rb ├── pace_spec.rb └── spec_helper.rb /.gitignore: -------------------------------------------------------------------------------- 1 | *.gem 2 | .bundle 3 | Gemfile.lock 4 | pkg/* 5 | bench/*.log 6 | config/newrelic.yml 7 | log/newrelic_* 8 | .rvmrc 9 | -------------------------------------------------------------------------------- /.rspec: -------------------------------------------------------------------------------- 1 | --color -------------------------------------------------------------------------------- /BENCHLOG: -------------------------------------------------------------------------------- 1 | 2011-08-10T18:14:34.935848 50,000 jobs in 18.937803s (2640 jobs/s) 2 | 2011-08-11T15:35:01.304138 50,000 jobs in 15.123245s (3306 jobs/s) (ff15ccc) 3 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby; -*- 2 | source :rubygems 3 | gemspec 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | (The MIT-License) 2 | 3 | Copyright (c) 2011 GroupMe 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining 6 | a copy of this software and associated documentation files (the 7 | "Software"), to deal in the Software without restriction, including 8 | without limitation the rights to use, copy, modify, merge, publish, 9 | distribute, sublicense, and/or sell copies of the Software, and to 10 | permit persons to whom the Software is furnished to do so, subject to 11 | the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be 14 | included in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Pace - A Resque Reactor # 2 | 3 | Pace provides a high-throughput way to process Resque jobs inside an 4 | EventMachine reactor. 5 | 6 | When combined with EM::HttpRequest you can send __thousands of 7 | requests per second__ to a remote web service. 
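For illustration, a worker that fans jobs out over HTTP might look like the sketch below. It assumes the separate em-http-request gem (not a dependency of this project) and a service listening at http://localhost:9000/. The bundled examples/http.rb shows the same pattern with EventMachine's built-in EM::Protocols::HttpClient.

    require "pace"
    require "em-http-request"

    worker = Pace::Worker.new("normal")
    worker.start do |job|
      # The request returns immediately, so the reactor keeps pulling jobs
      # while responses are in flight.
      http = EM::HttpRequest.new("http://localhost:9000/").get(:query => job["args"].first || {})
      http.callback { Pace.logger.info("OK #{job.inspect}") }
      http.errback  { Pace.logger.warn("Failed #{job.inspect}") }
    end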
8 | 
9 | Tested under:
10 | 
11 | * REE 1.8.7
12 | * MRI 1.9.2 (best memory performance)
13 | 
14 | ## Examples ##
15 | 
16 | To have fun with the examples, fire one up and then start
17 | enqueuing Resque jobs:
18 | 
19 |     $ rake examples:http
20 | 
21 |     $ irb
22 |     > require "rubygems"
23 |     > require "resque"
24 |     > class MyJob; def self.queue; "normal"; end; end
25 |     > Resque.enqueue(MyJob)
26 |     > 10.times { |n| Resque.enqueue(MyJob, :n => n) }
27 | 
28 | 
29 | In a separate process, start up a worker:
30 | 
31 |     require 'pace'
32 | 
33 |     worker = Pace::Worker.new("normal")
34 |     worker.start do |job|
35 |       klass = job["class"]
36 |       options = job["args"].first
37 | 
38 |       # do work with options
39 |     end
40 | 
41 | ## Redis
42 | 
43 | Pace connects to Redis with a URI that's looked up in the following order:
44 | 
45 | * The Pace.redis_url accessor
46 | * The REDIS_URL environment variable
47 | * The default, 127.0.0.1:6379/0
48 | 
49 | ## Throttling
50 | 
51 | It's very easy to overwhelm a remote service with Pace. You can specify
52 | the maximum number of jobs to consume per second:
53 | 
54 |     Pace::Worker.new("normal", :jobs_per_second => 100)
55 | 
56 | ## Pause/Resume
57 | 
58 | If you need to pause a worker (for example, during a remote service failure):
59 | 
60 |     worker.pause
61 | 
62 | And when ready:
63 | 
64 |     worker.resume
65 | 
66 | You can also pause for a set period of time. The worker will resume
67 | automatically.
68 | 
69 |     worker.pause(0.5) # 500ms
70 | 
71 | ## Errors
72 | 
73 | Pace attempts to keep the reactor going at all costs with explicit rescues
74 | and EM's catch-all error handler. A hook is provided for errors so that
75 | action can be taken:
76 | 
77 |     worker.add_hook(:error) do |json, error|
78 |       message = error.message
79 | 
80 |       # The job JSON can be nil if the error is raised in a callback.
81 |       message << json if json
82 | 
83 |       Pace.logger.warn(message)
84 |     end
85 | 
86 | Hooks can also be attached at the class level, which affects all workers.
87 | 88 | Pace::Worker.add_hook(:error, handler) 89 | 90 | Finally, an Airbrake hook is provided that will notify Airbrake on all 91 | exceptions: 92 | 93 | require "pace/airbrake" 94 | 95 | Pace::Worker.add_hook(:error, Pace::Airbrake.hook) 96 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | require 'bundler' 2 | require "resque/tasks" 3 | require "rspec/core/rake_task" 4 | 5 | Bundler.setup 6 | Bundler::GemHelper.install_tasks 7 | 8 | $: << File.dirname(__FILE__) 9 | 10 | namespace :examples do 11 | desc "Simply echo jobs to the console" 12 | task :echo do 13 | require "examples/echo" 14 | end 15 | 16 | desc "Serial processing when the block doesn't defer" 17 | task :sleep do 18 | require "examples/sleep" 19 | end 20 | 21 | desc "Concurrent processing using EM.defer" 22 | task :defer do 23 | require "examples/defer" 24 | end 25 | 26 | desc "Concurrent processing by blocking on an HTTP connection" 27 | task :http do 28 | require "examples/http" 29 | end 30 | end 31 | 32 | namespace :bench do 33 | desc "Bench Pace just running through its loop" 34 | task :pace_simple do 35 | require "bench/pace_simple" 36 | end 37 | 38 | desc "Bench throttled worker" 39 | task :pace_throttled do 40 | require "bench/pace_throttled" 41 | end 42 | 43 | desc "Bench Pace with New Relic monitoring" 44 | task :pace_newrelic do 45 | require "bench/pace_newrelic" 46 | end 47 | 48 | desc "Bench HTTP calls through Pace" 49 | task :pace_http do 50 | ENV["PACE_QUEUE"] = "normal" 51 | require "bench/pace_http" 52 | end 53 | 54 | desc "Bench HTTP calls through Resque" 55 | task :resque_http do 56 | ENV["COUNT"] = "10" 57 | ENV["QUEUE"] = "normal" 58 | # ENV["VERBOSE"] = "1" 59 | 60 | Rake::Task["resque:workers"].invoke 61 | end 62 | 63 | desc "Inject jobs for benchmarking" 64 | task :jobs do 65 | require "resque" 66 | require "bench/resque_http" 67 | 68 | count = (ENV["COUNT"] || 100).to_i 69 | count.times do |n| 70 | Resque.enqueue(ResqueHttp, :n => n) 71 | end 72 | end 73 | end 74 | 75 | # For benchmark purposes 76 | namespace :resque do 77 | task :setup do 78 | require "bench/resque_http" 79 | end 80 | end 81 | 82 | desc "Run specs" 83 | RSpec::Core::RakeTask.new(:spec) do |t| 84 | t.pattern = "spec/**/*_spec.rb" 85 | end 86 | 87 | task :default => :spec 88 | -------------------------------------------------------------------------------- /bench/pace_http.rb: -------------------------------------------------------------------------------- 1 | # Benchmark Pace making HTTP calls 2 | # 3 | # Performed by running: 4 | # 5 | # $ rake bench:pace 6 | # $ COUNT=1000 rake bench:jobs 7 | # 8 | # This was setup to hit a dumb node.js server that simply responds with 200 OK. 9 | # 10 | # For 1000 jobs: 11 | # 1.783s (avg. 5 runs, 1.78ms/job) 12 | # 13 | # For 50,000 jobs: 14 | # 68.708s (avg. 
5 runs, 1.37ms/job), memory topped out at a steady 21.3MB 15 | 16 | require "pace" 17 | 18 | Pace.logger = Logger.new(File.join(File.dirname(__FILE__), "pace_http.log")) 19 | Pace.logger.info("Starting #{'%0.6f' % Time.now}") 20 | 21 | Pace::Worker.new.start do |job| 22 | start_time = Time.now 23 | args = job["args"][0].map { |k,v| "#{k}=#{v}" } 24 | args = args.join("&") 25 | 26 | http = EM::Protocols::HttpClient.request( 27 | :host => "localhost", 28 | :port => 9000, 29 | :request => "/?#{args}" 30 | ) 31 | http.callback do |r| 32 | Pace.logger.info("http://localhost:9000/?#{args}") 33 | end 34 | end 35 | -------------------------------------------------------------------------------- /bench/pace_newrelic.rb: -------------------------------------------------------------------------------- 1 | # Benchmark Pace with New Relic 2 | # 3 | # To make this work, you should drop in config/newrelic.yml 4 | # and start this up with the environment specified: 5 | # $ RAILS_ENV=staging rake bench:pace_newrelic 6 | # 7 | # On my system, this appears to add a 0.5ms overhead to job 8 | # processing. YMMV. 9 | 10 | require "pace" 11 | require "pace/newrelic" 12 | require "resque" 13 | 14 | class Work 15 | def self.queue; "pace"; end 16 | end 17 | 18 | Pace.logger.info "Starting benchmark with New Relic..." 19 | 20 | 50_000.times { |n| Resque.enqueue(Work, :n => n) } 21 | Pace.logger.info "Finished adding 50,000 jobs" 22 | 23 | start_time, end_time = Time.now, nil 24 | 25 | worker = Pace::Worker.new(Work.queue) 26 | worker.start do |job| 27 | n = job["args"][0]["n"] 28 | 29 | if n == 49_999 30 | end_time = Time.now 31 | worker.shutdown 32 | end 33 | end 34 | 35 | Pace.logger.info "Finished in #{end_time - start_time}s" 36 | -------------------------------------------------------------------------------- /bench/pace_simple.rb: -------------------------------------------------------------------------------- 1 | # Benchmark Pace just running through its loop 2 | # 3 | # It's highly recommended that you run this simple benchmark before and after 4 | # making any changes to the run loop itself. Keep Pace fast. 5 | 6 | require "pace" 7 | require "resque" 8 | 9 | class Work 10 | def self.queue; "pace"; end 11 | end 12 | 13 | Pace.logger.info "Starting benchmark..." 14 | 15 | 50_000.times { |n| Resque.enqueue(Work, :n => n) } 16 | Pace.logger.info "Finished adding 50,000 jobs" 17 | 18 | start_time, end_time = Time.now, nil 19 | 20 | EM.run do 21 | Pace::Worker.new(Work.queue).start do |job| 22 | n = job["args"][0]["n"] 23 | 24 | if n == 49_999 25 | end_time = Time.now 26 | EM.stop 27 | end 28 | end 29 | end 30 | 31 | Pace.logger.info "Finished in #{end_time - start_time}s" 32 | -------------------------------------------------------------------------------- /bench/pace_throttled.rb: -------------------------------------------------------------------------------- 1 | # Benchmark Pace just running through its loop 2 | # 3 | # It's highly recommended that you run this simple benchmark before and after 4 | # making any changes to the run loop itself. Keep Pace fast. 5 | 6 | $LOAD_PATH << "." << "lib" 7 | 8 | require "pace" 9 | require "resque" 10 | 11 | class Work 12 | def self.queue; "pace"; end 13 | end 14 | 15 | Pace.logger.info "Starting benchmark..." 
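# Clear out any jobs left over from a previous run so they don't skew the numbers.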
16 | 17 | Resque.redis.del("queue:pace") 18 | 19 | max_jobs = 5000 20 | max_jobs.times { |n| Resque.enqueue(Work, :n => n) } 21 | Pace.logger.info "Finished adding #{max_jobs} jobs" 22 | 23 | start_time, end_time = Time.now, nil 24 | 25 | EM.run do 26 | jobs = 0 27 | interval = 1.0 28 | EM.add_periodic_timer(interval) { 29 | Pace.logger.info("jobs per second: #{jobs / interval}") 30 | jobs = 0 31 | } 32 | 33 | Pace::Worker.new(Work.queue, :jobs_per_second => 100).start do |job| 34 | n = job["args"][0]["n"] 35 | jobs += 1 if n 36 | 37 | end_time = Time.now 38 | if n >= (max_jobs - 1) 39 | EM.stop 40 | end 41 | end 42 | end 43 | 44 | Pace.logger.info "Finished in #{end_time - start_time}s" 45 | -------------------------------------------------------------------------------- /bench/resque_http.rb: -------------------------------------------------------------------------------- 1 | # Benchmark Resque 2 | # 3 | # This was performed by running: 4 | # 5 | # $ rake bench:resque 6 | # $ COUNT=1000 rake bench:jobs 7 | # 8 | # By default, it spins up ten workers locally, logging to bench/resque.log. A dummy 9 | # node.js server was propped up to simply respond with 200 OK. 10 | # 11 | # For 1000 jobs: 12 | # 18.33s (avg. 5 runs, 18.33ms/job) 13 | # 14 | # For 50,000 jobs: 15 | # 870.81s (just 1 run, but I got stuff to do, 17.42ms/job), 16 | # memory sitting at ~15MB per worker process (10 total) 17 | 18 | require "net/http" 19 | require "logger" 20 | 21 | class ResqueHttp 22 | def self.queue 23 | "normal" 24 | end 25 | 26 | def self.perform(args) 27 | start_time = Time.now 28 | args = args.map { |k,v| "#{k}=#{v}" } 29 | args = args.join("&") 30 | 31 | Net::HTTP.start("localhost", 9000) do |http| 32 | http.get("/?#{args}") 33 | end 34 | 35 | logger.info "http://localhost:9000/?#{args} #{"(%0.6fs)" % (Time.now - start_time)}" 36 | end 37 | 38 | def self.logger 39 | @logger ||= Logger.new(File.join(File.dirname(__FILE__), "resque_http.log")) 40 | end 41 | end 42 | -------------------------------------------------------------------------------- /config.ru: -------------------------------------------------------------------------------- 1 | $: << "lib" << "lib/pace" 2 | 3 | require 'rubygems' 4 | require 'resque' 5 | require 'resque/server' 6 | require 'pace' 7 | require 'pace/server' 8 | 9 | run Resque::Server -------------------------------------------------------------------------------- /examples/defer.rb: -------------------------------------------------------------------------------- 1 | # To gain concurrent processing on jobs that don't block on 2 | # sockets, we can use EM.defer to run in background threads. 
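# EM.defer hands the operation proc to EventMachine's internal thread pool and
# invokes the callback proc back on the reactor thread with the operation's
# return value, so the reactor itself is never blocked.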
3 | 4 | require "pace" 5 | 6 | worker = Pace::Worker.new(ENV["PACE_QUEUE"] || "normal") 7 | worker.start do |job| 8 | start_time = Time.now 9 | 10 | operation = proc { 11 | rand(10).times { sleep 0.1 } 12 | } 13 | callback = proc { |result| 14 | Pace.logger.info(job.inspect) 15 | } 16 | 17 | EM.defer operation, callback 18 | end 19 | -------------------------------------------------------------------------------- /examples/echo.rb: -------------------------------------------------------------------------------- 1 | require "pace" 2 | 3 | worker = Pace::Worker.new(ENV["PACE_QUEUE"] || "normal") 4 | worker.start do |job| 5 | Pace.logger.info(job.inspect) 6 | end 7 | -------------------------------------------------------------------------------- /examples/http.rb: -------------------------------------------------------------------------------- 1 | # Pace (and EventMachine) works best when your job can block 2 | # on a socket and proceed to process jobs (almost) concurrently. 3 | # 4 | # A good explanation can be found here: 5 | # http://www.igvita.com/2008/05/27/ruby-eventmachine-the-speed-demon/ 6 | 7 | require "pace" 8 | 9 | worker = Pace::Worker.new(ENV["PACE_QUEUE"] || "normal") 10 | worker.start do |job| 11 | start_time = Time.now 12 | 13 | http = EM::Protocols::HttpClient.request( 14 | :host => "localhost", 15 | :port => 9000, 16 | :request => "/" 17 | ) 18 | http.callback do |r| 19 | worker.log(job.inspect, start_time) 20 | end 21 | end 22 | -------------------------------------------------------------------------------- /examples/pause.rb: -------------------------------------------------------------------------------- 1 | require "pace" 2 | require "resque" 3 | 4 | class Work 5 | def self.queue; "pace"; end 6 | end 7 | 8 | Pace.logger.info "Starting benchmark..." 9 | 10 | 100.times { |n| Resque.enqueue(Work, :n => n) } 11 | Pace.logger.info "Finished adding 100 jobs" 12 | 13 | start_time, end_time = Time.now, nil 14 | count = 0 15 | 16 | worker = Pace::Worker.new(Work.queue) 17 | worker.start do |job| 18 | count += 1 19 | 20 | case n = job["args"][0]["n"] 21 | when 10 22 | worker.pause(5) 23 | when 99 24 | end_time = Time.now 25 | worker.shutdown 26 | end 27 | end 28 | 29 | Pace.logger.info "Finished in #{end_time - start_time}s" 30 | Pace.logger.info "Completed #{count} jobs" 31 | -------------------------------------------------------------------------------- /examples/redis_test.rb: -------------------------------------------------------------------------------- 1 | require "pace" 2 | 3 | worker = Pace::Worker.new(ENV["PACE_QUEUE"] || "normal") 4 | worker.start do |job| 5 | if job["args"][0]["disconnect"] 6 | worker.instance_eval { @redis.close_connection } 7 | end 8 | 9 | Pace.logger.info(job.inspect) 10 | end 11 | -------------------------------------------------------------------------------- /examples/sleep.rb: -------------------------------------------------------------------------------- 1 | # Our work in this example does not defer nor block on a socket, 2 | # so jobs will be processed serially, finishing one before starting 3 | # the next. 4 | # 5 | # This should be avoided. 
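# If the work can't block on a socket, hand it off to a background thread with
# EM.defer instead (see examples/defer.rb).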
6 | 7 | require "pace" 8 | 9 | worker = Pace::Worker.new(ENV["PACE_QUEUE"] || "normal") 10 | worker.start do |job| 11 | start_time = Time.now 12 | rand(10).times { sleep 0.1 } 13 | worker.log(job.inspect, start_time) 14 | end 15 | -------------------------------------------------------------------------------- /lib/pace.rb: -------------------------------------------------------------------------------- 1 | PACE_HEARTBEAT = 10.0 # seconds 2 | 3 | require "eventmachine" 4 | require "em-hiredis" 5 | require "json" 6 | require "uri" 7 | require "uuid" 8 | require "logger" 9 | require "pace/event" 10 | require "pace/worker" 11 | require "pace/queue" 12 | 13 | $uuid = UUID.new 14 | 15 | module Pace 16 | class << self 17 | # Set Pace.namespace if you're using Redis::Namespace. 18 | attr_accessor :namespace 19 | attr_accessor :redis_url 20 | 21 | def redis_connect 22 | EM::Hiredis.logger = logger 23 | EM::Hiredis.connect(redis_url) 24 | end 25 | 26 | def logger 27 | @logger ||= begin 28 | logger = Logger.new(STDOUT) 29 | logger.level = Logger::INFO 30 | logger.formatter = Proc.new { |severity, datetime, progname, msg| 31 | "#{String === msg ? msg : msg.inspect}\n" 32 | } 33 | logger 34 | end 35 | end 36 | 37 | def logger=(new_logger) 38 | @logger = new_logger 39 | end 40 | end 41 | end 42 | -------------------------------------------------------------------------------- /lib/pace/airbrake.rb: -------------------------------------------------------------------------------- 1 | # Notify Airbrake if an exception occurs 2 | # 3 | # This provides a proc that can be used as an error hook. 4 | # Before using this, you need to configure Airbrake. 5 | # 6 | # require "pace" 7 | # require "pace/airbrake" 8 | # 9 | # Airbrake.configure do |config| 10 | # config.api_key = 'API-KEY' 11 | # end 12 | # 13 | # worker = Pace::Worker.new(QUEUE) 14 | # worker.add_hook(:error, Pace::Airbrake.hook) 15 | # 16 | require "airbrake" 17 | 18 | module Pace 19 | module Airbrake 20 | def self.hook 21 | Proc.new do |json, error| 22 | EM.defer do 23 | notification = { 24 | :error_class => error.class.name, 25 | :error_message => "#{error.class.name}: #{error.message}", 26 | :backtrace => error.backtrace, 27 | :parameters => {}, 28 | :environment_name => (ENV["RACK_ENV"] || ENV["RAILS_ENV"]) 29 | } 30 | notification[:parameters][:json] = json if json 31 | 32 | ::Airbrake.notify(notification) 33 | end 34 | end 35 | end 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /lib/pace/event.rb: -------------------------------------------------------------------------------- 1 | module Pace 2 | class Event 3 | include EM::Deferrable 4 | 5 | attr_accessor :type 6 | 7 | def initialize(hooks, *args, &block) 8 | @hooks = hooks.map { |hook| Hook.new(self, hook, *args) } 9 | callback(&block) 10 | end 11 | 12 | def run 13 | @hooks.each(&:run) 14 | end 15 | 16 | def hook_finished! 17 | if @hooks.all?(&:finished?) 18 | succeed 19 | end 20 | end 21 | end 22 | 23 | class Hook 24 | def initialize(event, hook, *args) 25 | @event = event 26 | @hook = hook 27 | @args = args 28 | @finished = false 29 | end 30 | 31 | def run 32 | if @hook.arity > @args.size 33 | @hook.call(*[@args, self].flatten.compact) 34 | else 35 | @hook.call(*@args) 36 | finished! 37 | end 38 | end 39 | 40 | def finished! 41 | @finished = true 42 | @event.hook_finished! 43 | end 44 | 45 | def finished? 
46 | @finished 47 | end 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /lib/pace/mock.rb: -------------------------------------------------------------------------------- 1 | # Ease testing by mocking the event loop 2 | # 3 | # Instead of having to detect and stop the event loop yourself, this helper 4 | # simply returns all jobs in the queue and shuts down the loop. 5 | # 6 | # require "pace/mock" 7 | # 8 | # # Fire it up 9 | # Pace::Mock.enable 10 | # 11 | # # Add some jobs 12 | # Resque.enqueue(Work, ...) 13 | # Resque.enqueue(Work, ...) 14 | # 15 | # # Create a worker with a block that doesn't need to stop the loop 16 | # worker = Pace::Worker.new(:queue => "queue") 17 | # worker.start do |job| 18 | # puts job.inspect 19 | # end 20 | # 21 | # # Turn it off when you're done 22 | # Pace::Mock.disable 23 | # 24 | module Pace 25 | module Mock 26 | def self.enable 27 | Pace.logger.info "Enabling Pace mock" 28 | 29 | Pace::Worker.class_eval do 30 | if private_instance_methods.include?("fetch_next_job_with_mock") || private_instance_methods.include?(:fetch_next_job_with_mock) 31 | alias :fetch_next_job :fetch_next_job_with_mock 32 | else 33 | private 34 | 35 | def fetch_next_job_with_mock 36 | @redis.lrange(queue, 0, -1) do |jobs| 37 | jobs.each do |json| 38 | begin 39 | perform JSON.parse(json) 40 | rescue Exception => e 41 | log_exception("Job failed: #{json}", e) 42 | run_hook(:error, json, e) 43 | end 44 | end 45 | @redis.del(queue) { EM.stop } 46 | end 47 | end 48 | 49 | alias :fetch_next_job_without_mock :fetch_next_job 50 | alias :fetch_next_job :fetch_next_job_with_mock 51 | end 52 | end 53 | end 54 | 55 | def self.disable 56 | Pace.logger.info "Disabling Pace mock" 57 | 58 | Pace::Worker.class_eval do 59 | if private_instance_methods.include?("fetch_next_job_without_mock") || private_instance_methods.include?(:fetch_next_job_without_mock) 60 | alias :fetch_next_job :fetch_next_job_without_mock 61 | end 62 | end 63 | end 64 | end 65 | end 66 | -------------------------------------------------------------------------------- /lib/pace/newrelic.rb: -------------------------------------------------------------------------------- 1 | # Instrument your workers with New Relic 2 | # 3 | # The agent will look for a config file in the usual pace -- config/newrelic.yml. 4 | # The environment will be set to RAILS_ENV or RUBY_ENV by default. 5 | 6 | begin 7 | require "newrelic_rpm" 8 | rescue LoadError 9 | raise "Can't find 'newrelic_rpm' gem. Please add it to your Gemfile or install it." 
10 | end 11 | 12 | Pace::Worker.class_eval do 13 | include NewRelic::Agent::Instrumentation::ControllerInstrumentation 14 | 15 | def perform_with_trace(job) 16 | perform_action_with_newrelic_trace( 17 | :name => "perform", 18 | :class_name => job["class"], 19 | :category => "OtherTransaction/Pace", 20 | :params => job 21 | ) do 22 | perform_without_trace(job) 23 | end 24 | end 25 | 26 | alias :perform_without_trace :perform 27 | alias :perform :perform_with_trace 28 | end 29 | 30 | Pace::Worker.add_hook(:start) do 31 | NewRelic::Agent.manual_start(:dispatcher => :pace, :log => Pace.logger) 32 | end 33 | 34 | Pace::Worker.add_hook(:shutdown) do 35 | NewRelic::Agent.shutdown 36 | end 37 | -------------------------------------------------------------------------------- /lib/pace/queue.rb: -------------------------------------------------------------------------------- 1 | module Pace 2 | class Queue 3 | attr_reader :redis 4 | 5 | class << self 6 | def expand_name(queue) 7 | parts = [queue] 8 | parts.unshift("resque:queue") unless queue.index(":") 9 | parts.unshift(Pace.namespace) unless Pace.namespace.nil? 10 | parts.join(":") 11 | end 12 | end 13 | 14 | def initialize(redis_url) 15 | @redis = EM::Hiredis.connect(redis_url) 16 | end 17 | 18 | def enqueue(queue, klass, *args, &block) 19 | job = {:class => klass.to_s, :args => args}.to_json 20 | redis.rpush(name_for(queue), job, &block) 21 | end 22 | 23 | private 24 | 25 | def name_for(queue) 26 | self.class.expand_name(queue) 27 | end 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /lib/pace/version.rb: -------------------------------------------------------------------------------- 1 | module Pace 2 | VERSION = "1.1.0" 3 | end 4 | -------------------------------------------------------------------------------- /lib/pace/worker.rb: -------------------------------------------------------------------------------- 1 | module Pace 2 | class Worker 3 | attr_reader :queue 4 | 5 | class << self 6 | def add_hook(event, callback = nil, &block) 7 | global_hooks[event] << (callback || block) 8 | end 9 | 10 | def global_hooks 11 | @global_hooks ||= Hash.new { |h,k| h[k] = [] } 12 | end 13 | 14 | def clear_hooks 15 | @global_hooks = nil 16 | end 17 | end 18 | 19 | def initialize(queue = nil, options = {}) 20 | queue ||= ENV["PACE_QUEUE"] 21 | 22 | if queue.nil? || queue.empty? 23 | raise ArgumentError.new("Queue unspecified -- pass a queue name or set PACE_QUEUE") 24 | end 25 | 26 | if options[:jobs_per_second] 27 | @throttle_interval = 1.0 28 | @throttle_limit = @throttle_credits = options[:jobs_per_second] * @throttle_interval 29 | Pace.logger.info "Throttling to #{@throttle_limit} jobs per second" 30 | end 31 | 32 | @queue = Pace::Queue.expand_name(queue) 33 | @hooks = Hash.new { |h, k| h[k] = [] } 34 | 35 | @paused = false 36 | @resuming = false 37 | 38 | run_hook(:initialize, @queue) 39 | end 40 | 41 | def start(&block) 42 | Pace.logger.info "Starting up" 43 | 44 | @block = block 45 | 46 | register_signal_handlers 47 | register_error_handler 48 | 49 | EM.run do 50 | EM.epoll # Change to kqueue for BSD kernels 51 | 52 | # Install throttle refresh 53 | if throttled? 
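# Simple credit scheme: every @throttle_interval seconds the pool is refilled
# to @throttle_limit, and the worker resumes if it paused itself after running
# out of credits (see #fetch_next_job below).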
54 | EM::add_periodic_timer(@throttle_interval) do 55 | resume if (@throttle_credits < 1) && @paused 56 | @throttle_credits = @throttle_limit 57 | end 58 | end 59 | 60 | @redis = Pace.redis_connect 61 | @redis.callback do 62 | Pace.logger.info "Connected to Redis, starting fetch loop" 63 | EM.next_tick { fetch_next_job } 64 | end 65 | 66 | @redis.on(:reconnected) do 67 | Pace.logger.info "Reconnected to Redis, restarting fetch loop" 68 | EM.next_tick { fetch_next_job } 69 | end 70 | 71 | run_hook(:start) 72 | end 73 | end 74 | 75 | def pause(duration = nil) 76 | return false if @paused 77 | 78 | Pace.logger.info "Paused at #{Time.now.to_f}" 79 | @paused = true 80 | 81 | EM.add_timer(duration) { resume } if duration 82 | end 83 | 84 | def resume 85 | if @paused && !@resuming 86 | @resuming = true 87 | 88 | EM.next_tick do 89 | Pace.logger.info "Resumed at #{Time.now.to_f}" 90 | @resuming = false 91 | @paused = false 92 | fetch_next_job 93 | end 94 | else 95 | false 96 | end 97 | end 98 | 99 | def shutdown 100 | Pace.logger.info "Shutting down" 101 | run_hook(:shutdown) { EM.stop } 102 | 103 | # Parachute... 104 | EM.add_timer(10) { raise("Dying by exception") } 105 | end 106 | 107 | def add_hook(event, callback = nil, &block) 108 | @hooks[event] << (callback || block) 109 | end 110 | 111 | def throttled? 112 | @throttle_limit 113 | end 114 | 115 | private 116 | 117 | def fetch_next_job 118 | return if @paused 119 | 120 | if throttled? 121 | if @throttle_credits < 1 122 | pause 123 | return 124 | else 125 | @throttle_credits -= 1 126 | end 127 | end 128 | 129 | @redis.blpop(queue, 0) do |queue, json| 130 | EM.next_tick { fetch_next_job } unless @paused 131 | 132 | if json 133 | begin 134 | perform JSON.parse(json) 135 | rescue Exception => e 136 | log_exception("Job failed: #{json}", e) 137 | run_hook(:error, json, e) 138 | end 139 | end 140 | end 141 | end 142 | 143 | def perform(job) 144 | @block.call(job) 145 | run_hook(:processed, job) 146 | end 147 | 148 | def register_signal_handlers 149 | trap('TERM') { shutdown } 150 | trap('QUIT') { shutdown } 151 | trap('INT') { shutdown } 152 | end 153 | 154 | def register_error_handler 155 | EM.error_handler do |error| 156 | run_hook(:error, nil, error) 157 | end 158 | end 159 | 160 | def log_exception(message, exception) 161 | entry = "#{message}\n" 162 | entry << "#{exception.class}: #{exception.message}\n" 163 | entry << exception.backtrace.join("\n") 164 | Pace.logger.error(entry) 165 | end 166 | 167 | def run_hook(type, *args, &block) 168 | begin 169 | hooks = Pace::Worker.global_hooks[type] + @hooks[type] 170 | 171 | if hooks.empty? 172 | block.call if block_given? 
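# (No hooks registered, so the completion block runs immediately. Otherwise a
# Pace::Event coordinates the hooks and fires the block only after every hook
# has finished -- see lib/pace/event.rb.)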
173 | else 174 | event = Pace::Event.new(hooks, *args, &block) 175 | event.run 176 | end 177 | rescue Exception => e 178 | log_exception("Hook failed for #{type}: #{args.inspect}", e) 179 | end 180 | end 181 | end 182 | end 183 | -------------------------------------------------------------------------------- /pace.gemspec: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby; encoding: utf-8 -*- 2 | $:.push File.expand_path("../lib", __FILE__) 3 | require "pace/version" 4 | 5 | Gem::Specification.new do |s| 6 | s.name = "pace" 7 | s.version = Pace::VERSION 8 | s.platform = Gem::Platform::RUBY 9 | s.authors = ["Dave Yeu", "Brandon Keene"] 10 | s.email = ["daveyeu@gmail.com", "bkeene@gmail.com"] 11 | s.homepage = "" 12 | s.summary = %q{Resque-compatible job processing in an event loop} 13 | 14 | s.rubyforge_project = "pace" 15 | 16 | s.files = `git ls-files`.split("\n") 17 | s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") 18 | s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) } 19 | s.require_paths = ["lib"] 20 | 21 | s.add_dependency "eventmachine", ">= 0.12.10" 22 | s.add_dependency "em-hiredis", "~> 0.1.0" 23 | s.add_dependency "uuid" 24 | s.add_dependency "systemu" # macaddr 1.2.0 breaks this 25 | 26 | s.add_development_dependency "rake" 27 | s.add_development_dependency "resque", "~> 1.17.1" 28 | s.add_development_dependency "rspec", "~> 2.6.0" 29 | s.add_development_dependency "i18n" 30 | s.add_development_dependency "airbrake", "~> 3.0.0" 31 | end 32 | -------------------------------------------------------------------------------- /spec/pace/airbrake_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "pace/airbrake" 3 | 4 | describe "Pace with Airbrake" do 5 | it "notifies Airbrake if a job fails" do 6 | error_params = nil 7 | 8 | ::Airbrake.should_receive(:notify).and_return do |params| 9 | error_params = params 10 | EM.stop 11 | end 12 | 13 | Resque.enqueue(Work) 14 | 15 | worker = Pace::Worker.new(Work.queue) 16 | worker.add_hook(:error, Pace::Airbrake.hook) 17 | worker.start do |job| 18 | raise "FAIL" 19 | end 20 | 21 | error_params[:error_class].should == "RuntimeError" 22 | error_params[:error_message].should == "RuntimeError: FAIL" 23 | error_params[:parameters][:json].should_not be_blank 24 | end 25 | 26 | it "notifies Airbrake for errors raised in callbacks" do 27 | error_params = nil 28 | 29 | ::Airbrake.should_receive(:notify).and_return do |params| 30 | error_params = params 31 | EM.stop 32 | end 33 | 34 | Resque.enqueue(Work) 35 | 36 | EM.run do 37 | redis = Pace.redis_connect 38 | 39 | worker = Pace::Worker.new(Work.queue) 40 | worker.add_hook(:error, Pace::Airbrake.hook) 41 | worker.start do |job| 42 | redis.ping do 43 | raise "FAIL" 44 | end 45 | end 46 | end 47 | 48 | error_params[:error_class].should == "RuntimeError" 49 | error_params[:error_message].should == "RuntimeError: FAIL" 50 | error_params[:parameters][:json].should be_blank 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /spec/pace/event_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | 3 | describe Pace::Event do 4 | describe "#run" do 5 | it "simply invokes a set of hooks" do 6 | hook_1_run = false 7 | hook_2_run = false 8 | 9 | hook_1 = Proc.new { hook_1_run = true } 10 | hook_2 = Proc.new { hook_2_run = 
true } 11 | 12 | event = Pace::Event.new([hook_1, hook_2]) 13 | event.run 14 | 15 | hook_1_run.should be_true 16 | hook_2_run.should be_true 17 | end 18 | 19 | it "supplies args to the hooks" do 20 | hook_run = false 21 | hook = Proc.new do |a, b| 22 | if a == 1 && b == 2 23 | hook_run = true 24 | end 25 | end 26 | 27 | event = Pace::Event.new([hook], 1, 2) 28 | event.run 29 | 30 | hook_run.should be_true 31 | end 32 | 33 | context "when a callback is supplied" do 34 | it "only fires when all hooks call #finished! on their Hook argument" do 35 | hook_1_run = false 36 | hook_2_run = false 37 | callback_run = false 38 | 39 | hook_1 = Proc.new { |hook| hook_1_run = true; hook.finished! } 40 | hook_2 = Proc.new { |hook| hook_2_run = true; hook.finished! } 41 | 42 | event = Pace::Event.new([hook_1, hook_2]) { callback_run = true } 43 | event.run 44 | 45 | hook_1_run.should be_true 46 | hook_2_run.should be_true 47 | callback_run.should be_true 48 | end 49 | 50 | it "does not fire if a hook fails to call #finished!" do 51 | hook_run = false 52 | callback_run = false 53 | 54 | hook = Proc.new { |hook| hook_run = true } 55 | 56 | event = Pace::Event.new([hook]) { callback_run = true } 57 | event.run 58 | 59 | hook_run.should be_true 60 | callback_run.should be_false 61 | end 62 | 63 | it "fires the callback anyway if none of the hooks accepts the Hook argument" do 64 | hook_1_run = false 65 | hook_2_run = false 66 | callback_run = false 67 | 68 | hook_1 = Proc.new { hook_1_run = true } 69 | hook_2 = Proc.new { hook_2_run = true } 70 | 71 | event = Pace::Event.new([hook_1, hook_2]) { callback_run = true } 72 | event.run 73 | 74 | hook_1_run.should be_true 75 | hook_2_run.should be_true 76 | callback_run.should be_true 77 | end 78 | 79 | it "works with mixed-type hooks" do 80 | hook_1_run = false 81 | hook_2_run = false 82 | callback_run = false 83 | 84 | hook_1 = Proc.new { |hook| hook_1_run = true; hook.finished! 
} 85 | hook_2 = Proc.new { hook_2_run = true } 86 | 87 | event = Pace::Event.new([hook_1, hook_2]) { callback_run = true } 88 | event.run 89 | 90 | hook_1_run.should be_true 91 | hook_2_run.should be_true 92 | callback_run.should be_true 93 | end 94 | end 95 | end 96 | end 97 | -------------------------------------------------------------------------------- /spec/pace/mock_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "pace/mock" 3 | 4 | describe Pace::Mock do 5 | after do 6 | Pace::Mock.disable 7 | end 8 | 9 | describe ".enable" do 10 | it "sets up the mock, which simply passes down Resque jobs and closes the event loop" do 11 | Pace::Mock.enable 12 | Resque.enqueue(Work, :n => 1) 13 | Resque.enqueue(Work, :n => 2) 14 | 15 | results = [] 16 | worker = Pace::Worker.new(Work.queue) 17 | worker.start { |job| results << job } 18 | results.should == [ 19 | {"class" => "Work", "args" => [{"n" => 1}]}, 20 | {"class" => "Work", "args" => [{"n" => 2}]} 21 | ] 22 | 23 | # Clears out the queue 24 | more_results = [] 25 | worker.start { |job| more_results << job } 26 | more_results.should be_empty 27 | end 28 | 29 | it "works after disabling" do 30 | Pace::Mock.enable 31 | Pace::Mock.disable 32 | Pace::Mock.enable 33 | 34 | Resque.enqueue(Work, :n => 2) 35 | 36 | results = [] 37 | worker = Pace::Worker.new(Work.queue) 38 | worker.start do |job| 39 | results << job 40 | end 41 | results.should == [{"class" => "Work", "args" => [{"n" => 2}]}] 42 | end 43 | end 44 | 45 | describe ".disable" do 46 | it "tears down the mock and re-institutes the event loop" do 47 | Pace::Mock.enable 48 | Pace::Mock.disable 49 | Resque.enqueue(Work, :n => 1) 50 | Resque.enqueue(Work, :n => 2) 51 | 52 | results = [] 53 | worker = Pace::Worker.new(Work.queue) 54 | worker.start do |job| 55 | results << job 56 | EM.stop 57 | end 58 | 59 | results.should have(1).items 60 | end 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /spec/pace/queue_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | 3 | describe Pace::Queue do 4 | class CallbackJob 5 | def self.queue 6 | "callback" 7 | end 8 | end 9 | 10 | describe ".new" do 11 | it "takes a redis URL and returns an instance of Pace::Queue" do 12 | redis_url = "redis://localhost:6379/0" 13 | EM::Hiredis.should_receive(:connect).with(redis_url) 14 | Pace::Queue.new(redis_url) 15 | end 16 | end 17 | 18 | describe "#enqueue" do 19 | it "adds a new Resque-compatible job to the specified queue" do 20 | EM.run { 21 | queue = Pace::Queue.new("redis://localhost:6379/0") 22 | queue.enqueue(CallbackJob.queue, CallbackJob, "some" => "data") { EM.stop } 23 | } 24 | 25 | Resque.pop(CallbackJob.queue).should == { 26 | "class" => "CallbackJob", 27 | "args" => [{"some" => "data"}] 28 | } 29 | end 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /spec/pace/worker_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | 3 | describe Pace::Worker do 4 | let(:worker) { Pace::Worker.new(Work.queue) } 5 | let(:results) { [] } 6 | 7 | describe "#initialize" do 8 | context "when the given name has no colons" do 9 | it "prepends the Resque default queue 'namespace'" do 10 | worker = Pace::Worker.new("normal") 11 | worker.queue.should == "resque:queue:normal" 12 | end 13 | end 14 | 15 | 
context "when the given name has colons" do 16 | it "does not prepend anything (like an absolute path)" do 17 | worker = Pace::Worker.new("my:special:queue") 18 | worker.queue.should == "my:special:queue" 19 | end 20 | end 21 | 22 | context "when a global namespace is attached to Pace" do 23 | before { Pace.namespace = "test" } 24 | after { Pace.namespace = nil } 25 | 26 | it "prepends the namespace in either case" do 27 | worker = Pace::Worker.new("normal") 28 | worker.queue.should == "test:resque:queue:normal" 29 | 30 | worker = Pace::Worker.new("special:queue") 31 | worker.queue.should == "test:special:queue" 32 | end 33 | end 34 | 35 | context "when the queue argument is nil" do 36 | before { @original_pace_queue = ENV["PACE_QUEUE"] } 37 | after { ENV["PACE_QUEUE"] = @original_pace_queue } 38 | 39 | it "falls back to the PACE_QUEUE environment variable" do 40 | ENV["PACE_QUEUE"] = "high" 41 | worker = Pace::Worker.new 42 | worker.queue.should == "resque:queue:high" 43 | 44 | ENV["PACE_QUEUE"] = "my:special:queue" 45 | worker = Pace::Worker.new 46 | worker.queue.should == "my:special:queue" 47 | end 48 | 49 | it "throws an exception if PACE_QUEUE is nil" do 50 | ENV["PACE_QUEUE"] = nil 51 | expect { Pace::Worker.new }.to raise_error(ArgumentError) 52 | end 53 | end 54 | end 55 | 56 | describe "#start" do 57 | it "yields a serialized Resque jobs" do 58 | Resque.enqueue(Work, :foo => 1, :bar => 2) 59 | 60 | worker.start do |job| 61 | job["class"].should == "Work" 62 | job["args"].should == [{"foo" => 1, "bar" => 2}] 63 | worker.shutdown 64 | end 65 | end 66 | 67 | it "continues to pop jobs until stopped" do 68 | 1.upto(5) { |n| Resque.enqueue(Work, :n => n) } 69 | 70 | worker.start do |job| 71 | n = job["args"][0]["n"] 72 | results << n 73 | worker.shutdown if n == 5 74 | end 75 | 76 | results.should == [1, 2, 3, 4, 5] 77 | end 78 | 79 | it "works if run inside an existing reactor" do 80 | Resque.enqueue(Work) 81 | 82 | EM.run do 83 | worker.start do |job| 84 | results << job 85 | worker.shutdown 86 | end 87 | end 88 | 89 | results.should == [{"class" => "Work", "args" => []}] 90 | end 91 | 92 | it "can process multiple queues with multiple instances of workers" do 93 | 5.times { |n| Resque.enqueue(Work, :n => n) } 94 | 5.times { |n| Resque.enqueue(Play, :n => n) } 95 | 96 | results = { 97 | "Work" => 0, 98 | "Play" => 0, 99 | "Total" => 0 100 | } 101 | 102 | EM.run do 103 | worker_1 = Pace::Worker.new(Work.queue) 104 | worker_2 = Pace::Worker.new(Play.queue) 105 | 106 | block = Proc.new do |job| 107 | results[job["class"]] += 1 108 | results["Total"] += 1 109 | worker.shutdown if results["Total"] == 10 110 | end 111 | 112 | worker_1.start(&block) 113 | worker_2.start(&block) 114 | end 115 | 116 | results["Work"].should == 5 117 | results["Play"].should == 5 118 | results["Total"].should == 10 119 | end 120 | 121 | context "errors" do 122 | before do 123 | 1.upto(3) { |n| Resque.enqueue(Work, :n => n) } 124 | end 125 | 126 | it "rescues any errors in the passed block" do 127 | worker.start do |job| 128 | n = job["args"].first["n"] 129 | 130 | raise "FAIL" if n == 1 131 | results << n 132 | worker.shutdown if n == 3 133 | end 134 | 135 | results.should == [2, 3] 136 | end 137 | 138 | it "tolerates errors in callbacks" do 139 | EM.run do 140 | redis = Pace.redis_connect 141 | 142 | worker.start do |job| 143 | n = job["args"][0]["n"] 144 | 145 | redis.ping do 146 | raise "FAIL" if n == 1 147 | results << n 148 | worker.shutdown if n == 3 149 | end 150 | end 151 | end 152 | 153 | 
results.should == [2, 3] 154 | end 155 | end 156 | 157 | context "Redis connection errors" do 158 | before do 159 | 1.upto(3) { |n| Resque.enqueue(Work, :n => n) } 160 | 161 | # Previously we just called #close_connection on the connection object 162 | # but em-hiredis no longer automatically reconnects if you close the 163 | # connection. This classy hack reaches into the depths of the connection 164 | # and calls a method to simulate a connection drop which should trigger 165 | # a reconnect. 166 | def worker.simulate_connection_drop 167 | connection = @redis.instance_variable_get(:@connection) 168 | connection.close_connection_after_writing 169 | end 170 | end 171 | 172 | it "continues to fetch jobs if the Redis connection drops inside the job callback" do 173 | worker.start do |job| 174 | n = job["args"][0]["n"] 175 | results << n 176 | 177 | case n 178 | when 1 179 | worker.simulate_connection_drop 180 | when 3 181 | worker.shutdown 182 | end 183 | end 184 | 185 | results.should == [1, 2, 3] 186 | end 187 | 188 | it "continues to fetch jobs if the Redis connection drops when waiting for blpop to return" do 189 | worker.add_hook(:start) do 190 | EM.add_timer(0.1) do 191 | worker.simulate_connection_drop 192 | end 193 | end 194 | 195 | worker.start do |job| 196 | n = job["args"][0]["n"] 197 | results << n 198 | sleep 0.1 199 | worker.shutdown if n == 3 200 | end 201 | 202 | results.should == [1, 2, 3] 203 | end 204 | end 205 | end 206 | 207 | describe "event hooks" do 208 | before do 209 | Resque.enqueue(Work, :n => 1) 210 | Resque.enqueue(Work, :n => 2) 211 | end 212 | 213 | it "can be defined for start, error, and shutdown" do 214 | called_hooks = [] 215 | 216 | worker.add_hook(:start) do 217 | called_hooks.should be_empty 218 | called_hooks << :start 219 | end 220 | 221 | worker.add_hook(:error) do |json, error| 222 | called_hooks.should == [:start] 223 | called_hooks << :error 224 | error.message.should == "FAIL" 225 | end 226 | 227 | worker.add_hook(:shutdown) do 228 | called_hooks.should == [:start, :error] 229 | called_hooks << :shutdown 230 | end 231 | 232 | worker.start do |job| 233 | n = job["args"].first["n"] 234 | 235 | if n == 1 236 | raise "FAIL" 237 | else 238 | worker.shutdown 239 | end 240 | end 241 | 242 | called_hooks.should == [:start, :error, :shutdown] 243 | end 244 | 245 | it "can be defined globally on Pace::Worker" do 246 | called_hooks = [] 247 | 248 | Pace::Worker.add_hook(:start) do 249 | called_hooks.should be_empty 250 | called_hooks << :start 251 | end 252 | 253 | Pace::Worker.add_hook(:error) do |json, error| 254 | called_hooks.should == [:start] 255 | called_hooks << :error 256 | error.message.should == "FAIL" 257 | end 258 | 259 | Pace::Worker.add_hook(:shutdown) do 260 | called_hooks.should == [:start, :error] 261 | called_hooks << :shutdown 262 | end 263 | 264 | worker.start do |job| 265 | n = job["args"].first["n"] 266 | 267 | if n == 1 268 | raise "FAIL" 269 | else 270 | worker.shutdown 271 | end 272 | end 273 | 274 | called_hooks.should == [:start, :error, :shutdown] 275 | end 276 | 277 | it "triggers error hooks for exceptions inside callbacks" do 278 | errors = [] 279 | 280 | EM.run do 281 | redis = Pace.redis_connect 282 | 283 | worker.add_hook(:error) do |json, error| 284 | errors << error 285 | end 286 | 287 | worker.start do |job| 288 | n = job["args"][0]["n"] 289 | 290 | redis.ping do 291 | raise "FAIL" if n == 1 292 | worker.shutdown 293 | end 294 | end 295 | end 296 | 297 | errors.size.should == 1 298 | errors[0].should 
be_an_instance_of(RuntimeError) 299 | errors[0].message.should == "FAIL" 300 | end 301 | end 302 | 303 | describe "#shutdown" do 304 | it "stops the event loop and calls shutdown hooks" do 305 | 1.upto(3) { |n| Resque.enqueue(Work, :n => n) } 306 | 307 | worker.start do |job| 308 | worker.shutdown 309 | results << job["args"].first["n"] 310 | end 311 | 312 | # Never runs the second job 313 | results.size.should == 1 314 | end 315 | end 316 | 317 | describe "signal handling" do 318 | before do 319 | 1.upto(3) { |n| Resque.enqueue(Work, :n => n) } 320 | end 321 | 322 | ["QUIT", "TERM", "INT"].each do |signal| 323 | it "handles SIG#{signal}" do 324 | worker.start do |job| 325 | n = job["args"].first["n"] 326 | Process.kill(signal, $$) if n == 1 327 | results << n 328 | end 329 | 330 | # trap seems to interrupt the event loop randomly, so it does not appear 331 | # possible to determine exactly how many jobs will be processed 332 | results.should_not be_empty 333 | end 334 | end 335 | end 336 | 337 | describe "pausing and resuming" do 338 | it "pauses the reactor and resumes it" do 339 | 1.upto(3) { |n| Resque.enqueue(Work, :n => n) } 340 | 341 | worker.start do |job| 342 | n = job["args"].first["n"] 343 | if n == 1 344 | worker.pause 345 | EM.add_timer(0.1) { worker.resume } # wait a little 346 | elsif n >= 3 347 | worker.shutdown 348 | end 349 | results << Time.now.to_f 350 | end 351 | 352 | # Check if we actually paused 353 | (results[1] - results[0]).should > 0.1 354 | (results[2] - results[0]).should > 0.1 355 | end 356 | 357 | it "pauses for specified time period" do 358 | 1.upto(3) { |n| Resque.enqueue(Work, :n => n) } 359 | 360 | worker.start do |job| 361 | n = job["args"].first["n"] 362 | if n == 1 363 | worker.pause(0.1) # sleep for 100ms 364 | elsif n >= 3 365 | worker.shutdown 366 | end 367 | results << Time.now.to_f 368 | end 369 | 370 | # Check if we actually paused 371 | (results[1] - results[0]).should > 0.1 372 | (results[2] - results[0]).should > 0.1 373 | end 374 | 375 | it "does not pause if already paused" do 376 | Resque.enqueue(Work) 377 | 378 | worker.start do |job| 379 | worker.pause(0.1) 380 | worker.pause(0.1).should be_false 381 | worker.shutdown 382 | end 383 | end 384 | 385 | it "does not start multiple fetch loops if resume is called multiple times when paused" do 386 | Resque.enqueue(Work) 387 | 388 | worker.start do |job| 389 | worker.pause 390 | 391 | EM.should_receive(:next_tick).once 392 | worker.resume 393 | worker.resume 394 | worker.shutdown 395 | end 396 | end 397 | end 398 | end 399 | -------------------------------------------------------------------------------- /spec/pace_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | 3 | describe Pace do 4 | describe ".redis_connect" do 5 | let(:connection) { double(EM::Connection) } 6 | 7 | it "returns a Redis connection" do 8 | EM::Hiredis.should_receive(:connect).with(nil).and_return(connection) 9 | Pace.redis_connect.should == connection 10 | end 11 | 12 | it "uses Pace.redis_url if set" do 13 | Pace.redis_url = "redis://user:secret@some.host.local:9999/1" 14 | EM::Hiredis.should_receive(:connect).with(Pace.redis_url).and_return(connection) 15 | Pace.redis_connect.should == connection 16 | Pace.redis_url = nil 17 | end 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | require 
"rubygems" 2 | require "bundler/setup" 3 | Bundler.require :default, :development 4 | 5 | class Work 6 | def self.queue 7 | "work" 8 | end 9 | end 10 | 11 | class Play 12 | def self.queue 13 | "play" 14 | end 15 | end 16 | 17 | RSpec.configure do |config| 18 | config.before(:each) do 19 | # We explicitly want to test the Info shutdown hook. 20 | Pace::Worker.clear_hooks 21 | 22 | Pace.logger = Logger.new("/dev/null") 23 | Resque.dequeue(Work) 24 | Resque.dequeue(Play) 25 | 26 | ENV["PACE_REDIS"] = nil 27 | end 28 | end 29 | --------------------------------------------------------------------------------