Key "<%= key %>" is a <%= resque.redis.type key %>
3 |
size: <%= redis_get_size(key) %>
4 |
5 |
6 |
7 | <%= redis_get_value_as_array(key) %>
8 |
9 |
10 |
11 | <% end %>
12 |
--------------------------------------------------------------------------------
/lib/resque/errors.rb:
--------------------------------------------------------------------------------
1 | module Resque
2 | # Raised whenever we need a queue but none is provided.
3 | class NoQueueError < RuntimeError; end
4 |
5 | # Raised when trying to create a job without a class
6 | class NoClassError < RuntimeError; end
7 |
8 | # Raised when a worker was killed while processing a job.
9 | class DirtyExit < RuntimeError; end
10 | end
11 |
--------------------------------------------------------------------------------
/lib/resque/server/views/next_more.erb:
--------------------------------------------------------------------------------
1 | <%if start - 20 >= 0 || start + 20 <= size%>
2 |
3 | <% if start - 20 >= 0 %>
4 | « less
5 | <% end %>
6 | <% if start + 20 <= size %>
7 | more »
8 | <% end %>
9 |
10 | <%end%>
--------------------------------------------------------------------------------
/examples/demo/job.rb:
--------------------------------------------------------------------------------
1 | require 'resque'
2 |
3 | module Demo
4 | module Job
5 | @queue = :default
6 |
7 | def self.perform(params)
8 | sleep 1
9 | puts "Processed a job!"
10 | end
11 | end
12 |
13 | module FailingJob
14 | @queue = :failing
15 |
16 | def self.perform(params)
17 | sleep 1
18 | raise 'not processable!'
19 | puts "Processed a job!"
20 | end
21 | end
22 | end
23 |
--------------------------------------------------------------------------------
/config.ru:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 | require 'logger'
3 |
4 | $LOAD_PATH.unshift ::File.expand_path(::File.dirname(__FILE__) + '/lib')
5 | require 'resque/server'
6 |
7 | # Set the RESQUECONFIG env variable if you have a `resque.rb` or similar
8 | # config file you want loaded on boot.
9 | if ENV['RESQUECONFIG'] && ::File.exists?(::File.expand_path(ENV['RESQUECONFIG']))
10 | load ::File.expand_path(ENV['RESQUECONFIG'])
11 | end
12 |
13 | use Rack::ShowExceptions
14 | run Resque::Server.new
15 |
--------------------------------------------------------------------------------
/lib/resque/server/test_helper.rb:
--------------------------------------------------------------------------------
1 | require 'rack/test'
2 | require 'resque/server'
3 |
4 | module Resque
5 | module TestHelper
6 | class Test::Unit::TestCase
7 | include Rack::Test::Methods
8 | def app
9 | Resque::Server.new
10 | end
11 |
12 | def self.should_respond_with_success
13 | test "should respond with success" do
14 | assert last_response.ok?, last_response.errors
15 | end
16 | end
17 | end
18 | end
19 | end
20 |
--------------------------------------------------------------------------------
/examples/demo/config.ru:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 | require 'logger'
3 | $LOAD_PATH.unshift File.dirname(__FILE__) + '/../../lib'
4 | require 'app'
5 | require 'resque/server'
6 |
7 | use Rack::ShowExceptions
8 |
9 | # Set the AUTH env variable to your basic auth password to protect Resque.
10 | AUTH_PASSWORD = ENV['AUTH']
11 | if AUTH_PASSWORD
12 | Resque::Server.use Rack::Auth::Basic do |username, password|
13 | password == AUTH_PASSWORD
14 | end
15 | end
16 |
17 | run Rack::URLMap.new \
18 | "/" => Demo::App.new,
19 | "/resque" => Resque::Server.new
20 |
--------------------------------------------------------------------------------
/lib/resque/server/views/key_sets.erb:
--------------------------------------------------------------------------------
1 | <% if key = params[:key] %>
2 |
3 |
Key "<%= key %>" is a <%= resque.redis.type key %>
9 |
10 | <% for row in redis_get_value_as_array(key, start) %>
11 |
12 |
13 | <%= row %>
14 |
15 |
16 | <% end %>
17 |
18 |
19 | <%= partial :next_more, :start => start, :size => size %>
20 | <% end %>
21 |
--------------------------------------------------------------------------------
/examples/monit/resque.monit:
--------------------------------------------------------------------------------
1 | check process resque_worker_QUEUE
2 | with pidfile /data/APP_NAME/current/tmp/pids/resque_worker_QUEUE.pid
3 | start program = "/bin/sh -c 'cd /data/APP_NAME/current; RAILS_ENV=production QUEUE=queue_name VERBOSE=1 nohup rake environment resque:work > log/resque_worker_QUEUE.log 2>&1 & echo $! > tmp/pids/resque_worker_QUEUE.pid'" as uid deploy and gid deploy
4 | stop program = "/bin/sh -c 'cd /data/APP_NAME/current && kill -s QUIT `cat tmp/pids/resque_worker_QUEUE.pid` && rm -f tmp/pids/resque_worker_QUEUE.pid; exit 0;'"
5 | if totalmem is greater than 300 MB for 10 cycles then restart # eating up memory?
6 | group resque_workers
7 |
--------------------------------------------------------------------------------
/.kick:
--------------------------------------------------------------------------------
1 | # take control of the growl notifications
2 | module GrowlHacks
3 | def growl(type, subject, body, *args, &block)
4 | case type
5 | when Kicker::GROWL_NOTIFICATIONS[:succeeded]
6 | puts subject = "Success"
7 | body = body.split("\n").last
8 | when Kicker::GROWL_NOTIFICATIONS[:failed]
9 | subject = "Failure"
10 | puts body
11 | body = body.split("\n").last
12 | else
13 | return nil
14 | end
15 | super(type, subject, body, *args, &block)
16 | end
17 | end
18 |
19 | Kicker.send :extend, GrowlHacks
20 |
21 | # no logging
22 | Kicker::Utils.module_eval do
23 | def log(message)
24 | nil
25 | end
26 | end
--------------------------------------------------------------------------------
/bin/resque-web:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 |
3 | $LOAD_PATH.unshift File.expand_path(File.dirname(__FILE__) + '/../lib')
4 | begin
5 | require 'vegas'
6 | rescue LoadError
7 | require 'rubygems'
8 | require 'vegas'
9 | end
10 | require 'resque/server'
11 |
12 |
13 | Vegas::Runner.new(Resque::Server, 'resque-web', {
14 | :before_run => lambda {|v|
15 | path = (ENV['RESQUECONFIG'] || v.args.first)
16 | load path.to_s.strip if path
17 | }
18 | }) do |runner, opts, app|
19 | opts.on('-N NAMESPACE', "--namespace NAMESPACE", "set the Redis namespace") {|namespace|
20 | runner.logger.info "Using Redis namespace '#{namespace}'"
21 | Resque.redis.namespace = namespace
22 | }
23 | end
24 |
--------------------------------------------------------------------------------
/examples/god/stale.god:
--------------------------------------------------------------------------------
1 | # This will ride alongside god and kill any rogue stale worker
2 | # processes. Their sacrifice is for the greater good.
3 |
4 | WORKER_TIMEOUT = 60 * 10 # 10 minutes
5 |
6 | Thread.new do
7 | loop do
8 | begin
9 | `ps -e -o pid,command | grep [r]esque`.split("\n").each do |line|
10 | parts = line.split(' ')
11 | next if parts[-2] != "at"
12 | started = parts[-1].to_i
13 | elapsed = Time.now - Time.at(started)
14 |
15 | if elapsed >= WORKER_TIMEOUT
16 | ::Process.kill('USR1', parts[0].to_i)
17 | end
18 | end
19 | rescue
20 | # don't die because of stupid exceptions
21 | nil
22 | end
23 |
24 | sleep 30
25 | end
26 | end
27 |
--------------------------------------------------------------------------------
/test/resque-web_test.rb:
--------------------------------------------------------------------------------
1 | require File.dirname(__FILE__) + '/test_helper'
2 | require 'resque/server/test_helper'
3 |
4 | # Root path test
5 | context "on GET to /" do
6 | setup { get "/" }
7 |
8 | test "redirect to overview" do
9 | follow_redirect!
10 | end
11 | end
12 |
13 | # Global overview
14 | context "on GET to /overview" do
15 | setup { get "/overview" }
16 |
17 | test "should at least display 'queues'" do
18 | assert last_response.body.include?('Queues')
19 | end
20 | end
21 |
22 | # Working jobs
23 | context "on GET to /working" do
24 | setup { get "/working" }
25 |
26 | should_respond_with_success
27 | end
28 |
29 | # Failed
30 | context "on GET to /failed" do
31 | setup { get "/failed" }
32 |
33 | should_respond_with_success
34 | end
35 |
--------------------------------------------------------------------------------
/examples/simple.rb:
--------------------------------------------------------------------------------
1 | # This is a simple Resque job.
2 | class Archive
3 | @queue = :file_serve
4 |
5 | def self.perform(repo_id, branch = 'master')
6 | repo = Repository.find(repo_id)
7 | repo.create_archive(branch)
8 | end
9 | end
10 |
11 | # This is in our app code
12 | class Repository < Model
13 | # ... stuff ...
14 |
15 | def async_create_archive(branch)
16 | Resque.enqueue(Archive, self.id, branch)
17 | end
18 |
19 | # ... more stuff ...
20 | end
21 |
22 | # Calling this code:
23 | repo = Repository.find(22)
24 | repo.async_create_archive('homebrew')
25 |
26 | # Will return immediately and create a Resque job which is later
27 | # processed.
28 |
29 | # Essentially, this code is run by the worker when processing:
30 | Archive.perform(22, 'homebrew')
31 |
--------------------------------------------------------------------------------
/examples/async_helper.rb:
--------------------------------------------------------------------------------
1 | # If you want to just call a method on an object in the background,
2 | # we can easily add that functionality to Resque.
3 | #
4 | # This is similar to DelayedJob's `send_later`.
5 | #
6 | # Keep in mind that, unlike DelayedJob, only simple Ruby objects
7 | # can be persisted.
8 | #
9 | # If it can be represented in JSON, it can be stored in a job.
10 |
11 | # Here's our ActiveRecord class
12 | class Repository < ActiveRecord::Base
13 | # This will be called by a worker when a job needs to be processed
14 | def self.perform(id, method, *args)
15 | find(id).send(method, *args)
16 | end
17 |
18 | # We can pass this any Repository instance method that we want to
19 | # run later.
20 | def async(method, *args)
21 | Resque.enqueue(Repository, id, method, *args)
22 | end
23 | end
24 |
25 | # Now we can call any method and have it execute later:
26 |
27 | @repo.async(:update_disk_usage)
28 |
29 | # or
30 |
31 | @repo.async(:update_network_source_id, 34)
32 |
--------------------------------------------------------------------------------
/lib/resque/server/public/reset.css:
--------------------------------------------------------------------------------
1 | html, body, div, span, applet, object, iframe,
2 | h1, h2, h3, h4, h5, h6, p, blockquote, pre,
3 | a, abbr, acronym, address, big, cite, code,
4 | del, dfn, em, font, img, ins, kbd, q, s, samp,
5 | small, strike, strong, sub, sup, tt, var,
6 | dl, dt, dd, ul, li,
7 | form, label, legend,
8 | table, caption, tbody, tfoot, thead, tr, th, td {
9 | margin: 0;
10 | padding: 0;
11 | border: 0;
12 | outline: 0;
13 | font-weight: inherit;
14 | font-style: normal;
15 | font-size: 100%;
16 | font-family: inherit;
17 | }
18 |
19 | :focus {
20 | outline: 0;
21 | }
22 |
23 | body {
24 | line-height: 1;
25 | }
26 |
27 | ul {
28 | list-style: none;
29 | }
30 |
31 | table {
32 | border-collapse: collapse;
33 | border-spacing: 0;
34 | }
35 |
36 | caption, th, td {
37 | text-align: left;
38 | font-weight: normal;
39 | }
40 |
41 | blockquote:before, blockquote:after,
42 | q:before, q:after {
43 | content: "";
44 | }
45 |
46 | blockquote, q {
47 | quotes: "" "";
48 | }
--------------------------------------------------------------------------------
/lib/resque/tasks.rb:
--------------------------------------------------------------------------------
1 | # require 'resque/tasks'
2 | # will give you the resque tasks
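#
# A sketch of a Rakefile that pulls them in; a worker can then be started
# with e.g. `QUEUE=critical,high rake resque:work`:
#
#   require 'resque/tasks'
#
#   task 'resque:setup' do
#     # load your app/environment here so jobs can find their classes
#   end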
3 |
4 | namespace :resque do
5 | task :setup
6 |
7 | desc "Start a Resque worker"
8 | task :work => :setup do
9 | require 'resque'
10 |
11 | worker = nil
12 | queues = (ENV['QUEUES'] || ENV['QUEUE']).to_s.split(',')
13 |
14 | begin
15 | worker = Resque::Worker.new(*queues)
16 | worker.verbose = ENV['LOGGING'] || ENV['VERBOSE']
17 | worker.very_verbose = ENV['VVERBOSE']
18 | rescue Resque::NoQueueError
19 | abort "set QUEUE env var, e.g. $ QUEUE=critical,high rake resque:work"
20 | end
21 |
22 | worker.log "Starting worker #{worker}"
23 |
24 | worker.work(ENV['INTERVAL'] || 5) # interval, will block
25 | end
26 |
27 | desc "Start multiple Resque workers. Should only be used in dev mode."
28 | task :workers do
29 | threads = []
30 |
31 | ENV['COUNT'].to_i.times do
32 | threads << Thread.new do
33 | system "rake resque:work"
34 | end
35 | end
36 |
37 | threads.each { |thread| thread.join }
38 | end
39 | end
40 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2009 Chris Wanstrath
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining
4 | a copy of this software and associated documentation files (the
5 | "Software"), to deal in the Software without restriction, including
6 | without limitation the rights to use, copy, modify, merge, publish,
7 | distribute, sublicense, and/or sell copies of the Software, and to
8 | permit persons to whom the Software is furnished to do so, subject to
9 | the following conditions:
10 |
11 | The above copyright notice and this permission notice shall be
12 | included in all copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/examples/demo/app.rb:
--------------------------------------------------------------------------------
1 | require 'sinatra/base'
2 | require 'resque'
3 | require 'job'
4 |
5 | module Demo
6 | class App < Sinatra::Base
7 | get '/' do
8 | info = Resque.info
9 | out = "Resque Demo"
10 | out << "
"
11 | out << "There are #{info[:pending]} pending and "
12 | out << "#{info[:processed]} processed jobs across #{info[:queues]} queues."
13 | out << "
"
14 | out << ''
18 |
19 | out << "'
23 |
24 | out << ""
25 | out
26 | end
27 |
28 | post '/' do
29 | Resque.enqueue(Job, params)
30 | redirect "/"
31 | end
32 |
33 | post '/failing' do
34 | Resque.enqueue(FailingJob, params)
35 | redirect "/"
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
1 | load 'tasks/redis.rake'
2 |
3 | $LOAD_PATH.unshift 'lib'
4 | require 'resque/tasks'
5 |
6 | def command?(command)
7 | system("type #{command} > /dev/null 2>&1")
8 | end
9 |
10 |
11 | #
12 | # Tests
13 | #
14 |
15 | task :default => :test
16 |
17 | desc "Run tests"
18 | task :test do
19 | # Don't use the rake/testtask because it loads a new
20 | # Ruby interpreter - we want to run tests with the current
21 | # `rake` so our library manager still works
22 | Dir['test/*_test.rb'].each do |f|
23 | require f
24 | end
25 | end
26 |
27 | desc "Activate kicker - gem install kicker"
28 | task :kick do
29 | exec "kicker -e rake lib test"
30 | end
31 |
32 |
33 | #
34 | # Install
35 | #
36 |
37 | task :install => [ 'redis:install', 'dtach:install' ]
38 |
39 |
40 | #
41 | # Documentation
42 | #
43 |
44 | begin
45 | require 'sdoc_helpers'
46 | rescue LoadError
47 | end
48 |
49 | desc "Push a new version to Gemcutter"
50 | task :publish do
51 | require 'resque/version'
52 |
53 | sh "gem build resque.gemspec"
54 | sh "gem push resque-#{Resque::Version}.gem"
55 | sh "git tag v#{Resque::Version}"
56 | sh "git push origin v#{Resque::Version}"
57 | sh "git push origin master"
58 | sh "git clean -fd"
59 | exec "rake pages"
60 | end
61 |
--------------------------------------------------------------------------------
/lib/resque/failure/multiple.rb:
--------------------------------------------------------------------------------
1 | module Resque
2 | module Failure
3 | # A Failure backend that wraps multiple other backends. Failures are
4 | # saved to every backend, while queries are delegated to the first one.
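#
# Usage sketch (Hoptoad ships in this repo; MyLocalBackend is a stand-in
# for whatever primary Failure::Base subclass you use):
#
#   Resque::Failure::Multiple.classes = [MyLocalBackend, Resque::Failure::Hoptoad]
#   Resque::Failure.backend = Resque::Failure::Multiple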
5 | class Multiple < Base
6 |
7 | class << self
8 | attr_accessor :classes
9 | end
10 |
11 | def self.configure
12 | yield self
13 | Resque::Failure.backend = self
14 | end
15 |
16 | def initialize(*args)
17 | super
18 | @backends = self.class.classes.map {|klass| klass.new(*args)}
19 | end
20 |
21 | def save
22 | @backends.each(&:save)
23 | end
24 |
25 | # The number of failures.
26 | def self.count
27 | classes.first.count
28 | end
29 |
30 | # Returns a paginated array of failure objects.
31 | def self.all(start = 0, count = 1)
32 | classes.first.all(start,count)
33 | end
34 |
35 | # A URL where someone can go to view failures.
36 | def self.url
37 | classes.first.url
38 | end
39 |
40 | # Clear all failure objects
41 | def self.clear
42 | classes.first.clear
43 | end
44 |
45 | def self.requeue(*args)
46 | classes.first.requeue(*args)
47 | end
48 | end
49 | end
50 | end
--------------------------------------------------------------------------------
/lib/resque/server/views/layout.erb:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Resque.
5 |
6 |
7 |
9 |
10 |
11 |
12 |
13 |
14 | <% tabs.each do |tab_name| %>
15 | <%= tab tab_name %>
16 | <% end %>
17 |
59 |
60 | <% else %>
61 |
62 | <% end %>
63 |
--------------------------------------------------------------------------------
/lib/resque/stat.rb:
--------------------------------------------------------------------------------
1 | module Resque
2 | # The stat subsystem. Used to keep track of integer counts.
3 | #
4 | # Get a stat: Stat[name]
5 | # Incr a stat: Stat.incr(name)
6 | # Decr a stat: Stat.decr(name)
7 | # Kill a stat: Stat.clear(name)
8 | module Stat
9 | extend self
10 | extend Helpers
11 |
12 | # Returns the int value of a stat, given a string stat name.
13 | def get(stat)
14 | res = mongo_stats.find_one(:stat => stat)
15 | return 0 unless res
16 | res['value'].to_i
17 | end
18 |
19 | # Alias of `get`
20 | def [](stat)
21 | get(stat)
22 | end
23 |
24 | # For a string stat name, increments the stat by one.
25 | #
26 | # Can optionally accept a second int parameter. The stat is then
27 | # incremented by that amount.
28 | def incr(stat, by = 1)
29 | mongo_stats.update({:stat => stat}, {'$inc' => {:value => by}}, :upsert => true)
30 | end
31 |
32 | # Increments a stat by one.
33 | def <<(stat)
34 | incr stat
35 | end
36 |
37 | # For a string stat name, decrements the stat by one.
38 | #
39 | # Can optionally accept a second int parameter. The stat is then
40 | # decremented by that amount.
41 | def decr(stat, by = 1)
42 | mongo_stats.update({:stat => stat}, {'$inc' => {:value => -by}})
43 | end
44 |
45 | # Decrements a stat by one.
46 | def >>(stat)
47 | decr stat
48 | end
49 |
50 | # Removes a stat from the store, effectively setting it to 0.
51 | def clear(stat)
52 | mongo_stats.remove(:stat => stat)
53 | end
54 | end
55 | end
56 |
--------------------------------------------------------------------------------
/examples/god/resque.god:
--------------------------------------------------------------------------------
1 | rails_env = ENV['RAILS_ENV'] || "production"
2 | rails_root = ENV['RAILS_ROOT'] || "/data/github/current"
3 | num_workers = rails_env == 'production' ? 5 : 2
4 |
5 | num_workers.times do |num|
6 | God.watch do |w|
7 | w.name = "resque-#{num}"
8 | w.group = 'resque'
9 | w.interval = 30.seconds
10 | w.env = {"QUEUE"=>"critical,high,low", "RAILS_ENV"=>rails_env}
11 | w.start = "/usr/bin/rake -f #{rails_root}/Rakefile environment resque:work"
12 |
13 | w.uid = 'git'
14 | w.gid = 'git'
15 |
16 | # restart if memory gets too high
17 | w.transition(:up, :restart) do |on|
18 | on.condition(:memory_usage) do |c|
19 | c.above = 350.megabytes
20 | c.times = 2
21 | end
22 | end
23 |
24 | # determine the state on startup
25 | w.transition(:init, { true => :up, false => :start }) do |on|
26 | on.condition(:process_running) do |c|
27 | c.running = true
28 | end
29 | end
30 |
31 | # determine when process has finished starting
32 | w.transition([:start, :restart], :up) do |on|
33 | on.condition(:process_running) do |c|
34 | c.running = true
35 | c.interval = 5.seconds
36 | end
37 |
38 | # failsafe
39 | on.condition(:tries) do |c|
40 | c.times = 5
41 | c.transition = :start
42 | c.interval = 5.seconds
43 | end
44 | end
45 |
46 | # start if process is not running
47 | w.transition(:up, :start) do |on|
48 | on.condition(:process_running) do |c|
49 | c.running = false
50 | end
51 | end
52 | end
53 | end
54 |
--------------------------------------------------------------------------------
/lib/resque/failure/base.rb:
--------------------------------------------------------------------------------
1 | module Resque
2 | module Failure
3 | # All Failure classes are expected to subclass Base.
4 | #
5 | # When a job fails, a new instance of your Failure backend is created
6 | # and #save is called.
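#
# A minimal custom backend might look like this (a sketch only; the
# WebhookFailure name and endpoint URL are made up for illustration):
#
#   require 'net/http'
#
#   class WebhookFailure < Resque::Failure::Base
#     def save
#       Net::HTTP.post_form(URI.parse('http://example.com/failures'),
#         'queue'   => queue.to_s,
#         'error'   => exception.to_s,
#         'payload' => payload.inspect)
#     end
#   end
#
#   Resque::Failure.backend = WebhookFailure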
7 | class Base
8 | # The exception object raised by the failed job
9 | attr_accessor :exception
10 |
11 | # The worker object who detected the failure
12 | attr_accessor :worker
13 |
14 | # The string name of the queue from which the failed job was pulled
15 | attr_accessor :queue
16 |
17 | # The payload object associated with the failed job
18 | attr_accessor :payload
19 |
20 | def initialize(exception, worker, queue, payload)
21 | @exception = exception
22 | @worker = worker
23 | @queue = queue
24 | @payload = payload
25 | end
26 |
27 | # When a job fails, a new instance of your Failure backend is created
28 | # and #save is called.
29 | #
30 | # This is where you POST or PUT or whatever to your Failure service.
31 | def save
32 | end
33 |
34 | # The number of failures.
35 | def self.count
36 | 0
37 | end
38 |
39 | # Returns a paginated array of failure objects.
40 | def self.all(start = 0, count = 1)
41 | []
42 | end
43 |
44 | # A URL where someone can go to view failures.
45 | def self.url
46 | end
47 |
48 | # Clear all failure objects
49 | def self.clear
50 | end
51 |
52 | def self.requeue(index)
53 | end
54 |
55 | # Logging!
56 | def log(message)
57 | @worker.log(message)
58 | end
59 | end
60 | end
61 | end
62 |
--------------------------------------------------------------------------------
/lib/resque/server/views/queues.erb:
--------------------------------------------------------------------------------
1 | <% @subtabs = resque.queues unless partial? %>
2 |
3 | <% if queue = params[:id] %>
4 |
5 |
Pending jobs on <%= queue %>
6 |
9 |
Showing <%= start = params[:start].to_i %> to <%= start + 20 %> of <%=size = resque.size(queue)%> jobs
10 |
11 |
12 |
Class
13 |
Args
14 |
15 | <% for job in (jobs = resque.peek(queue, start, 20)) %>
16 |
The list below contains all the registered queues with the number of jobs currently in the queue. Select a queue from above to view all jobs currently pending on the queue.
32 |
33 |
34 |
Name
35 |
Jobs
36 |
37 | <% for queue in resque.queues.sort_by { |q| q.to_s } %>
38 |
61 | <% data = worker.processing || {} %>
62 | <% if data['queue'] %>
63 | <%= data['payload']['class'] %>
64 | "><%= data['run_at'] %>
65 | <% else %>
66 | Waiting for a job...
67 | <% end %>
68 |
69 |
70 | <% end %>
71 | <% if workers.empty? %>
72 |
73 |
There are no registered workers
74 |
75 | <% end %>
76 |
77 | <%=poll%>
78 | <% end %>
79 |
--------------------------------------------------------------------------------
/test/plugin_test.rb:
--------------------------------------------------------------------------------
1 | require File.dirname(__FILE__) + '/test_helper'
2 |
3 | context "Resque::Plugin finding hooks" do
4 | module SimplePlugin
5 | extend self
6 | def before_perform1; end
7 | def before_perform; end
8 | def before_perform2; end
9 | def after_perform1; end
10 | def after_perform; end
11 | def after_perform2; end
12 | def perform; end
13 | def around_perform1; end
14 | def around_perform; end
15 | def around_perform2; end
16 | def on_failure1; end
17 | def on_failure; end
18 | def on_failure2; end
19 | end
20 |
21 | test "before_perform hooks are found and sorted" do
22 | assert_equal ["before_perform", "before_perform1", "before_perform2"], Resque::Plugin.before_hooks(SimplePlugin).map {|m| m.to_s}
23 | end
24 |
25 | test "after_perform hooks are found and sorted" do
26 | assert_equal ["after_perform", "after_perform1", "after_perform2"], Resque::Plugin.after_hooks(SimplePlugin).map {|m| m.to_s}
27 | end
28 |
29 | test "around_perform hooks are found and sorted" do
30 | assert_equal ["around_perform", "around_perform1", "around_perform2"], Resque::Plugin.around_hooks(SimplePlugin).map {|m| m.to_s}
31 | end
32 |
33 | test "on_failure hooks are found and sorted" do
34 | assert_equal ["on_failure", "on_failure1", "on_failure2"], Resque::Plugin.failure_hooks(SimplePlugin).map {|m| m.to_s}
35 | end
36 | end
37 |
38 | context "Resque::Plugin linting" do
39 | module ::BadBefore
40 | def self.before_perform; end
41 | end
42 | module ::BadAfter
43 | def self.after_perform; end
44 | end
45 | module ::BadAround
46 | def self.around_perform; end
47 | end
48 | module ::BadFailure
49 | def self.on_failure; end
50 | end
51 |
52 | test "before_perform must be namespaced" do
53 | begin
54 | Resque::Plugin.lint(BadBefore)
55 | assert false, "should have failed"
56 | rescue Resque::Plugin::LintError => e
57 | assert_equal "BadBefore.before_perform is not namespaced", e.message
58 | end
59 | end
60 |
61 | test "after_perform must be namespaced" do
62 | begin
63 | Resque::Plugin.lint(BadAfter)
64 | assert false, "should have failed"
65 | rescue Resque::Plugin::LintError => e
66 | assert_equal "BadAfter.after_perform is not namespaced", e.message
67 | end
68 | end
69 |
70 | test "around_perform must be namespaced" do
71 | begin
72 | Resque::Plugin.lint(BadAround)
73 | assert false, "should have failed"
74 | rescue Resque::Plugin::LintError => e
75 | assert_equal "BadAround.around_perform is not namespaced", e.message
76 | end
77 | end
78 |
79 | test "on_failure must be namespaced" do
80 | begin
81 | Resque::Plugin.lint(BadFailure)
82 | assert false, "should have failed"
83 | rescue Resque::Plugin::LintError => e
84 | assert_equal "BadFailure.on_failure is not namespaced", e.message
85 | end
86 | end
87 |
88 | module GoodBefore
89 | def self.before_perform1; end
90 | end
91 | module GoodAfter
92 | def self.after_perform1; end
93 | end
94 | module GoodAround
95 | def self.around_perform1; end
96 | end
97 | module GoodFailure
98 | def self.on_failure1; end
99 | end
100 |
101 | test "before_perform1 is an ok name" do
102 | Resque::Plugin.lint(GoodBefore)
103 | end
104 |
105 | test "after_perform1 is an ok name" do
106 | Resque::Plugin.lint(GoodAfter)
107 | end
108 |
109 | test "around_perform1 is an ok name" do
110 | Resque::Plugin.lint(GoodAround)
111 | end
112 |
113 | test "on_failure1 is an ok name" do
114 | Resque::Plugin.lint(GoodFailure)
115 | end
116 | end
117 |
--------------------------------------------------------------------------------
/docs/HOOKS.md:
--------------------------------------------------------------------------------
1 | Resque Hooks
2 | ============
3 |
4 | You can customize Resque or write plugins using its hook API. In many
5 | cases you can use a hook rather than mess with Resque's internals.
6 |
7 | For a list of available plugins see
8 | the plugins page on the Resque wiki.
9 |
10 |
11 | Worker Hooks
12 | ------------
13 |
14 | If you wish to have a Proc called before the worker forks for the
15 | first time, you can add it in the initializer like so:
16 |
17 | Resque.before_first_fork do
18 | puts "Call me once before the worker forks the first time"
19 | end
20 |
21 | You can also run a hook before _every_ fork:
22 |
23 | Resque.before_fork do |job|
24 | puts "Call me before the worker forks"
25 | end
26 |
27 | The `before_fork` hook will be run in the **parent** process. So, be
28 | careful - any changes you make will be permanent for the lifespan of
29 | the worker.
30 |
31 | And after forking:
32 |
33 | Resque.after_fork do |job|
34 | puts "Call me after the worker forks"
35 | end
36 |
37 | The `after_fork` hook will be run in the child process and is passed
38 | the current job. Any changes you make, therefore, will only live as
39 | long as the job currently being processed.
40 |
41 | All worker hooks can also be set using a setter, e.g.
42 |
43 | Resque.after_fork = proc { puts "called" }
44 |
45 |
46 | Job Hooks
47 | ---------
48 |
49 | Plugins can utilize job hooks to provide additional behavior. A job
50 | hook is a method name in the following format:
51 |
52 | HOOKNAME_IDENTIFIER
53 |
54 | For example, a `before_perform` hook which adds locking may be defined
55 | like this:
56 |
57 | def before_perform_with_lock(*args)
58 | set_lock!
59 | end
60 |
61 | Once this hook is made available to your job (either by way of
62 | inheritance or `extend`), it will be run before the job's `perform`
63 | method is called. Hooks of each type are executed in alphabetical order,
64 | so `before_perform_a` will always be executed before `before_perform_b`.
65 | An unnamed hook (`before_perform`) will be executed first.
66 |
67 | The available hooks are:
68 |
69 | * `before_perform`: Called with the job args before perform. If it raises
70 | `Resque::Job::DontPerform`, the job is aborted. If other exceptions
71 | are raised, they will be propagated up to the `Resque::Failure`
72 | backend.
73 |
74 | * `after_perform`: Called with the job args after it performs. Uncaught
75 | exceptions will propagate up to the `Resque::Failure` backend.
76 |
77 | * `around_perform`: Called with the job args. It is expected to yield in order
78 | to perform the job (but is not required to do so). It may handle exceptions
79 | thrown by `perform`, but any that are not caught will propagate up to the
80 | `Resque::Failure` backend. A short sketch of such a hook follows this list.
81 |
82 | * `on_failure`: Called with the exception and job args if any exception occurs
83 | while performing the job (or hooks).
84 |
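As a minimal sketch (the timing module below is illustrative, not part of
Resque), an `around_perform` hook that wraps the job might look like:

    module TimedJob
      # yield runs the job's own perform
      def around_perform_timing(*args)
        started = Time.now
        yield
      ensure
        Logger.info "#{self} finished in #{Time.now - started} seconds"
      end
    end

    class MyJob
      extend TimedJob
      def self.perform(*args)
        # ... do work ...
      end
    end
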
85 | Hooks are easily implemented with superclasses or modules. A superclass could
86 | look something like this.
87 |
88 | class LoggedJob
89 | def self.before_perform_log_job(*args)
90 | Logger.info "About to perform #{self} with #{args.inspect}"
91 | end
92 | end
93 |
94 | class MyJob < LoggedJob
95 | def self.perform(*args)
96 | ...
97 | end
98 | end
99 |
100 | Modules are even better because jobs can use many of them.
101 |
102 | module LoggedJob
103 | def before_perform_log_job(*args)
104 | Logger.info "About to perform #{self} with #{args.inspect}"
105 | end
106 | end
107 |
108 | module RetriedJob
109 | def on_failure_retry(e, *args)
110 | Logger.info "Performing #{self} caused an exception (#{e}). Retrying..."
111 | Resque.enqueue self, *args
112 | end
113 | end
114 |
115 | class MyJob
116 | extend LoggedJob
117 | extend RetriedJob
118 | def self.perform(*args)
119 | ...
120 | end
121 | end
122 |
--------------------------------------------------------------------------------
/lib/resque/server/public/jquery.relatize_date.js:
--------------------------------------------------------------------------------
1 | // All credit goes to Rick Olson.
2 | (function($) {
3 | $.fn.relatizeDate = function() {
4 | return $(this).each(function() {
5 | if ($(this).hasClass( 'relatized' )) return
6 | $(this).text( $.relatizeDate(this) ).addClass( 'relatized' )
7 | })
8 | }
9 |
10 | $.relatizeDate = function(element) {
11 | return $.relatizeDate.timeAgoInWords( new Date($(element).text()) )
12 | }
13 |
14 | // shortcut
15 | $r = $.relatizeDate
16 |
17 | $.extend($.relatizeDate, {
18 | shortDays: [ 'Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat' ],
19 | days: ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'],
20 | shortMonths: [ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec' ],
21 | months: [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December' ],
22 |
23 | /**
24 | * Given a formatted string, replace the necessary items and return.
25 | * Example: Time.now().strftime("%B %d, %Y") => February 11, 2008
26 | * @param {String} format The formatted string used to format the results
27 | */
28 | strftime: function(date, format) {
29 | var day = date.getDay(), month = date.getMonth();
30 | var hours = date.getHours(), minutes = date.getMinutes();
31 |
32 | var pad = function(num) {
33 | var string = num.toString(10);
34 | return new Array((2 - string.length) + 1).join('0') + string
35 | };
36 |
37 | return format.replace(/\%([aAbBcdHImMpSwyY])/g, function(part) {
38 | switch(part[1]) {
39 | case 'a': return $r.shortDays[day]; break;
40 | case 'A': return $r.days[day]; break;
41 | case 'b': return $r.shortMonths[month]; break;
42 | case 'B': return $r.months[month]; break;
43 | case 'c': return date.toString(); break;
44 | case 'd': return pad(date.getDate()); break;
45 | case 'H': return pad(hours); break;
46 | case 'I': return pad(hours % 12 == 0 ? 12 : hours % 12); break;
47 | case 'm': return pad(month + 1); break;
48 | case 'M': return pad(minutes); break;
49 | case 'p': return hours >= 12 ? 'PM' : 'AM'; break;
50 | case 'S': return pad(date.getSeconds()); break;
51 | case 'w': return day; break;
52 | case 'y': return pad(date.getFullYear() % 100); break;
53 | case 'Y': return date.getFullYear().toString(); break;
54 | }
55 | })
56 | },
57 |
58 | timeAgoInWords: function(targetDate, includeTime) {
59 | return $r.distanceOfTimeInWords(targetDate, new Date(), includeTime);
60 | },
61 |
62 | /**
63 | * Return the distance of time in words between two Date's
64 | * Example: '5 days ago', 'about an hour ago'
65 | * @param {Date} fromTime The start date to use in the calculation
66 | * @param {Date} toTime The end date to use in the calculation
67 | * @param {Boolean} Include the time in the output
68 | */
69 | distanceOfTimeInWords: function(fromTime, toTime, includeTime) {
70 | var delta = parseInt((toTime.getTime() - fromTime.getTime()) / 1000);
71 | if (delta < 60) {
72 | return 'just now';
73 | } else if (delta < 120) {
74 | return 'about a minute ago';
75 | } else if (delta < (45*60)) {
76 | return (parseInt(delta / 60)).toString() + ' minutes ago';
77 | } else if (delta < (120*60)) {
78 | return 'about an hour ago';
79 | } else if (delta < (24*60*60)) {
80 | return 'about ' + (parseInt(delta / 3600)).toString() + ' hours ago';
81 | } else if (delta < (48*60*60)) {
82 | return '1 day ago';
83 | } else {
84 | var days = (parseInt(delta / 86400)).toString();
85 | if (days > 5) {
86 | var fmt = '%B %d, %Y'
87 | if (includeTime) fmt += ' %I:%M %p'
88 | return $r.strftime(fromTime, fmt);
89 | } else {
90 | return days + " days ago"
91 | }
92 | }
93 | }
94 | })
95 | })(jQuery);
96 |
--------------------------------------------------------------------------------
/lib/resque/failure/hoptoad.rb:
--------------------------------------------------------------------------------
1 | require 'net/https'
2 | require 'builder'
3 | require 'uri'
4 |
5 | module Resque
6 | module Failure
7 | # A Failure backend that sends exceptions raised by jobs to Hoptoad.
8 | #
9 | # To use it, put this code in an initializer, Rake task, or wherever:
10 | #
11 | # require 'resque/failure/hoptoad'
12 | #
13 | # Resque::Failure::Hoptoad.configure do |config|
14 | # config.api_key = 'blah'
15 | # config.secure = true
16 | #
17 | # # optional proxy support
18 | # config.proxy_host = 'x.y.z.t'
19 | # config.proxy_port = 8080
20 | #
21 | # # server env support, defaults to RAILS_ENV or RACK_ENV
22 | # config.server_environment = "test"
23 | # end
24 | class Hoptoad < Base
25 | # From the hoptoad plugin
26 | INPUT_FORMAT = /^([^:]+):(\d+)(?::in `([^']+)')?$/
27 |
28 | class << self
29 | attr_accessor :secure, :api_key, :proxy_host, :proxy_port
30 | attr_accessor :server_environment
31 | end
32 |
33 | def self.count
34 | # We can't get the total # of errors from Hoptoad so we fake it
35 | # by asking Resque how many errors it has seen.
36 | Stat[:failed]
37 | end
38 |
39 | def self.configure
40 | yield self
41 | Resque::Failure.backend = self
42 | end
43 |
44 | def save
45 | http = use_ssl? ? :https : :http
46 | url = URI.parse("#{http}://hoptoadapp.com/notifier_api/v2/notices")
47 |
48 | request = Net::HTTP::Proxy(self.class.proxy_host, self.class.proxy_port)
49 | http = request.new(url.host, url.port)
50 | headers = {
51 | 'Content-type' => 'text/xml',
52 | 'Accept' => 'text/xml, application/xml'
53 | }
54 |
55 | http.read_timeout = 5 # seconds
56 | http.open_timeout = 2 # seconds
57 |
58 | http.use_ssl = use_ssl?
59 |
60 | begin
61 | response = http.post(url.path, xml, headers)
62 | rescue TimeoutError => e
63 | log "Timeout while contacting the Hoptoad server."
64 | end
65 |
66 | case response
67 | when Net::HTTPSuccess then
68 | log "Hoptoad Success: #{response.class}"
69 | else
70 | body = response.body if response.respond_to? :body
71 | log "Hoptoad Failure: #{response.class}\n#{body}"
72 | end
73 | end
74 |
75 | def xml
76 | x = Builder::XmlMarkup.new
77 | x.instruct!
78 | x.notice :version=>"2.0" do
79 | x.tag! "api-key", api_key
80 | x.notifier do
81 | x.name "Resqueue"
82 | x.version "0.1"
83 | x.url "http://github.com/defunkt/resque"
84 | end
85 | x.error do
86 | x.tag! "class", exception.class.name
87 | x.message "#{exception.class.name}: #{exception.message}"
88 | x.backtrace do
89 | fill_in_backtrace_lines(x)
90 | end
91 | end
92 | x.request do
93 | x.url queue.to_s
94 | x.component worker.to_s
95 | x.params do
96 | x.var :key=>"payload_class" do
97 | x.text! payload["class"].to_s
98 | end
99 | x.var :key=>"payload_args" do
100 | x.text! payload["args"].to_s
101 | end
102 | end
103 | end
104 | x.tag!("server-environment") do
105 | x.tag!("environment-name",server_environment)
106 | end
107 |
108 | end
109 | end
110 |
111 | def fill_in_backtrace_lines(x)
112 | Array(exception.backtrace).each do |unparsed_line|
113 | _, file, number, method = unparsed_line.match(INPUT_FORMAT).to_a
114 | x.line :file => file,:number => number
115 | end
116 | end
117 |
118 | def use_ssl?
119 | self.class.secure
120 | end
121 |
122 | def api_key
123 | self.class.api_key
124 | end
125 |
126 | def server_environment
127 | return self.class.server_environment if self.class.server_environment
128 | defined?(RAILS_ENV) ? RAILS_ENV : (ENV['RACK_ENV'] || 'development')
129 | end
130 | end
131 | end
132 | end
133 |
--------------------------------------------------------------------------------
/tasks/redis.rake:
--------------------------------------------------------------------------------
1 | # Inspired by rabbitmq.rake from the Redbox project at http://github.com/rick/redbox/tree/master
2 | require 'fileutils'
3 | require 'open-uri'
4 | require 'pathname'
5 |
6 | class RedisRunner
7 |
8 | def self.redis_dir
9 | @redis_dir ||= if ENV['PREFIX']
10 | Pathname.new(ENV['PREFIX'])
11 | else
12 | Pathname.new(`which redis-server`) + '..' + '..'
13 | end
14 | end
15 |
16 | def self.bin_dir
17 | redis_dir + 'bin'
18 | end
19 |
20 | def self.config
21 | @config ||= if File.exists?(redis_dir + 'etc/redis.conf')
22 | redis_dir + 'etc/redis.conf'
23 | else
24 | redis_dir + '../etc/redis.conf'
25 | end
26 | end
27 |
28 | def self.dtach_socket
29 | '/tmp/redis.dtach'
30 | end
31 |
32 | # Just check for existence of dtach socket
33 | def self.running?
34 | File.exists? dtach_socket
35 | end
36 |
37 | def self.start
38 | puts 'Detach with Ctrl+\ Re-attach with rake redis:attach'
39 | sleep 1
40 | command = "#{bin_dir}/dtach -A #{dtach_socket} #{bin_dir}/redis-server #{config}"
41 | sh command
42 | end
43 |
44 | def self.attach
45 | exec "#{bin_dir}/dtach -a #{dtach_socket}"
46 | end
47 |
48 | def self.stop
49 | sh 'echo "SHUTDOWN" | nc localhost 6379'
50 | end
51 |
52 | end
53 |
54 | namespace :redis do
55 |
56 | desc 'About redis'
57 | task :about do
58 | puts "\nSee http://code.google.com/p/redis/ for information about redis.\n\n"
59 | end
60 |
61 | desc 'Start redis'
62 | task :start do
63 | RedisRunner.start
64 | end
65 |
66 | desc 'Stop redis'
67 | task :stop do
68 | RedisRunner.stop
69 | end
70 |
71 | desc 'Restart redis'
72 | task :restart do
73 | RedisRunner.stop
74 | RedisRunner.start
75 | end
76 |
77 | desc 'Attach to redis dtach socket'
78 | task :attach do
79 | RedisRunner.attach
80 | end
81 |
82 | desc 'Install the latest version of Redis from Github (requires git, duh)'
83 | task :install => [:about, :download, :make] do
84 | bin_dir = '/usr/bin'
85 | conf_dir = '/etc'
86 |
87 | if ENV['PREFIX']
88 | bin_dir = "#{ENV['PREFIX']}/bin"
89 | sh "mkdir -p #{bin_dir}" unless File.exists?("#{bin_dir}")
90 |
91 | conf_dir = "#{ENV['PREFIX']}/etc"
92 | sh "mkdir -p #{conf_dir}" unless File.exists?("#{conf_dir}")
93 | end
94 |
95 | %w(redis-benchmark redis-cli redis-server).each do |bin|
96 | sh "cp /tmp/redis/src/#{bin} #{bin_dir}"
97 | end
98 |
99 | puts "Installed redis-benchmark, redis-cli and redis-server to #{bin_dir}"
100 |
101 | unless File.exists?("#{conf_dir}/redis.conf")
102 | sh "cp /tmp/redis/redis.conf #{conf_dir}/redis.conf"
103 | puts "Installed redis.conf to #{conf_dir} \n You should look at this file!"
104 | end
105 | end
106 |
107 | task :make do
108 | sh "cd /tmp/redis/src && make clean"
109 | sh "cd /tmp/redis/src && make"
110 | end
111 |
112 | desc "Download package"
113 | task :download do
114 | sh 'rm -rf /tmp/redis/' if File.exists?("/tmp/redis/.svn")
115 | sh 'git clone git://github.com/antirez/redis.git /tmp/redis' unless File.exists?('/tmp/redis')
116 | sh "cd /tmp/redis && git pull" if File.exists?("/tmp/redis/.git")
117 | end
118 |
119 | end
120 |
121 | namespace :dtach do
122 |
123 | desc 'About dtach'
124 | task :about do
125 | puts "\nSee http://dtach.sourceforge.net/ for information about dtach.\n\n"
126 | end
127 |
128 | desc 'Install dtach 0.8 from source'
129 | task :install => [:about, :download, :make] do
130 |
131 | bin_dir = "/usr/bin"
132 |
133 | if ENV['PREFIX']
134 | bin_dir = "#{ENV['PREFIX']}/bin"
135 | sh "mkdir -p #{bin_dir}" unless File.exists?("#{bin_dir}")
136 | end
137 |
138 | sh "cp /tmp/dtach-0.8/dtach #{bin_dir}"
139 | end
140 |
141 | task :make do
142 | sh 'cd /tmp/dtach-0.8/ && ./configure && make'
143 | end
144 |
145 | desc "Download package"
146 | task :download do
147 | unless File.exists?('/tmp/dtach-0.8.tar.gz')
148 | require 'net/http'
149 |
150 | url = 'http://downloads.sourceforge.net/project/dtach/dtach/0.8/dtach-0.8.tar.gz'
151 | open('/tmp/dtach-0.8.tar.gz', 'wb') do |file| file.write(open(url).read) end
152 | end
153 |
154 | unless File.directory?('/tmp/dtach-0.8')
155 | sh 'cd /tmp && tar xzf dtach-0.8.tar.gz'
156 | end
157 | end
158 | end
159 |
160 |
--------------------------------------------------------------------------------
/test/redis-test.conf:
--------------------------------------------------------------------------------
1 | # Redis configuration file example
2 |
3 | # By default Redis does not run as a daemon. Use 'yes' if you need it.
4 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
5 | daemonize yes
6 |
7 | # When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
8 | # You can specify a custom pid file location here.
9 | pidfile ./test/redis-test.pid
10 |
11 | # Accept connections on the specified port, default is 6379
12 | port 9736
13 |
14 | # If you want you can bind a single interface, if the bind option is not
15 | # specified all the interfaces will listen for connections.
16 | #
17 | # bind 127.0.0.1
18 |
19 | # Close the connection after a client is idle for N seconds (0 to disable)
20 | timeout 300
21 |
22 | # Save the DB on disk:
23 | #
24 | # save <seconds> <changes>
25 | #
26 | # Will save the DB if both the given number of seconds and the given
27 | # number of write operations against the DB occurred.
28 | #
29 | # In the example below the behaviour will be to save:
30 | # after 900 sec (15 min) if at least 1 key changed
31 | # after 300 sec (5 min) if at least 10 keys changed
32 | # after 60 sec if at least 10000 keys changed
33 | save 900 1
34 | save 300 10
35 | save 60 10000
36 |
37 | # The filename where to dump the DB
38 | dbfilename dump.rdb
39 |
40 | # For default save/load DB in/from the working directory
41 | # Note that you must specify a directory not a file name.
42 | dir ./test/
43 |
44 | # Set server verbosity to 'debug'
45 | # it can be one of:
46 | # debug (a lot of information, useful for development/testing)
47 | # notice (moderately verbose, what you want in production probably)
48 | # warning (only very important / critical messages are logged)
49 | loglevel debug
50 |
51 | # Specify the log file name. Also 'stdout' can be used to force
52 | # the daemon to log on the standard output. Note that if you use standard
53 | # output for logging but daemonize, logs will be sent to /dev/null
54 | logfile stdout
55 |
56 | # Set the number of databases. The default database is DB 0, you can select
57 | # a different one on a per-connection basis using SELECT <dbid> where
58 | # dbid is a number between 0 and 'databases'-1
59 | databases 16
60 |
61 | ################################# REPLICATION #################################
62 |
63 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of
64 | # another Redis server. Note that the configuration is local to the slave
65 | # so for example it is possible to configure the slave to save the DB with a
66 | # different interval, or to listen to another port, and so on.
67 |
68 | # slaveof <masterip> <masterport>
69 |
70 | ################################## SECURITY ###################################
71 |
72 | # Require clients to issue AUTH before processing any other
73 | # commands. This might be useful in environments in which you do not trust
74 | # others with access to the host running redis-server.
75 | #
76 | # This should stay commented out for backward compatibility and because most
77 | # people do not need auth (e.g. they run their own servers).
78 |
79 | # requirepass foobared
80 |
81 | ################################### LIMITS ####################################
82 |
83 | # Set the max number of connected clients at the same time. By default there
84 | # is no limit, and it's up to the number of file descriptors the Redis process
85 | # is able to open. The special value '0' means no limits.
86 | # Once the limit is reached Redis will close all the new connections sending
87 | # an error 'max number of clients reached'.
88 |
89 | # maxclients 128
90 |
91 | # Don't use more memory than the specified amount of bytes.
92 | # When the memory limit is reached Redis will try to remove keys with an
93 | # EXPIRE set. It will try to start freeing keys that are going to expire
94 | # in little time and preserve keys with a longer time to live.
95 | # Redis will also try to remove objects from free lists if possible.
96 | #
97 | # If all this fails, Redis will start to reply with errors to commands
98 | # that will use more memory, like SET, LPUSH, and so on, and will continue
99 | # to reply to most read-only commands like GET.
100 | #
101 | # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
102 | # 'state' server or cache, not as a real DB. When Redis is used as a real
103 | # database the memory usage will grow over the weeks, it will be obvious if
104 | # it is going to use too much memory in the long run, and you'll have the time
105 | # to upgrade. With maxmemory after the limit is reached you'll start to get
106 | # errors for write operations, and this may even lead to DB inconsistency.
107 |
108 | # maxmemory <bytes>
109 |
110 | ############################### ADVANCED CONFIG ###############################
111 |
112 | # Glue small output buffers together in order to send small replies in a
113 | # single TCP packet. Uses a bit more CPU but most of the times it is a win
114 | # in terms of number of queries per second. Use 'yes' if unsure.
115 | glueoutputbuf yes
116 |
--------------------------------------------------------------------------------
/lib/resque/server/public/style.css:
--------------------------------------------------------------------------------
1 | html { background:#efefef; font-family:Arial, Verdana, sans-serif; font-size:13px; }
2 | body { padding:0; margin:0; }
3 |
4 | .header { background:#000; padding:8px 5% 0 5%; border-bottom:1px solid #444;border-bottom:5px solid #429234;}
5 | .header h1 { color:#333; font-size:90%; font-weight:bold; margin-bottom:6px;}
6 | .header ul li { display:inline;}
7 | .header ul li a { color:#fff; text-decoration:none; margin-right:10px; display:inline-block; padding:8px; -webkit-border-top-right-radius:6px; -webkit-border-top-left-radius:6px; -moz-border-radius-topleft:6px; -moz-border-radius-topright:6px; }
8 | .header ul li a:hover { background:#333;}
9 | .header ul li.current a { background:#429234; font-weight:bold; color:#fff;}
10 |
11 | .header .namespace { position: absolute; right: 75px; top: 10px; color: #7A7A7A; }
12 |
13 | .subnav { padding:2px 5% 7px 5%; background:#429234; font-size:90%;}
14 | .subnav li { display:inline;}
15 | .subnav li a { color:#fff; text-decoration:none; margin-right:10px; display:inline-block; background:#55ad46; padding:5px; -webkit-border-radius:3px; -moz-border-radius:3px;}
16 | .subnav li.current a { background:#fff; font-weight:bold; color:#429234;}
17 | .subnav li a:active { background:#b00909;}
18 |
19 | #main { padding:10px 5%; background:#fff; overflow:hidden; }
20 | #main .logo { float:right; margin:10px;}
21 | #main span.hl { background:#efefef; padding:2px;}
22 | #main h1 { margin:10px 0; font-size:190%; font-weight:bold; color:#429234;}
23 | #main h2 { margin:10px 0; font-size:130%;}
24 | #main table { width:100%; margin:10px 0;}
25 | #main table tr td, #main table tr th { border:1px solid #ccc; padding:6px;}
26 | #main table tr th { background:#efefef; color:#888; font-size:80%; font-weight:bold;}
27 | #main table tr td.no-data { text-align:center; padding:40px 0; color:#999; font-style:italic; font-size:130%;}
28 | #main a { color:#111;}
29 | #main p { margin:5px 0;}
30 | #main p.intro { margin-bottom:15px; font-size:85%; color:#999; margin-top:0; line-height:1.3;}
31 | #main h1.wi { margin-bottom:5px;}
32 | #main p.sub { font-size:95%; color:#999;}
33 |
34 | #main table.queues { width:40%;}
35 | #main table.queues td.queue { font-weight:bold; width:50%;}
36 | #main table.queues tr.failed td { border-top:2px solid; font-size:90%; }
37 |
38 | #main table.queues tr.failed td { background:#ebffed; border-top:2px solid #6fd380; font-size:90%; color:#6fd380;}
39 | #main table.queues tr.failed td a{ color:#6fd380;}
40 |
41 | #main table.jobs td.class { font-family:Monaco, "Courier New", monospace; font-size:90%; width:50%;}
42 | #main table.jobs td.args{ width:50%;}
43 |
44 | #main table.workers td.icon {width:1%; background:#efefef;text-align:center;}
45 | #main table.workers td.where { width:25%;}
46 | #main table.workers td.queues { width:35%;}
47 | #main .queue-tag { background:#b1d2e9; padding:2px; margin:0 3px; font-size:80%; text-decoration:none; text-transform:uppercase; font-weight:bold; color:#3274a2; -webkit-border-radius:4px; -moz-border-radius:4px;}
48 | #main table.workers td.queues.queue { width:10%;}
49 | #main table.workers td.process { width:35%;}
50 | #main table.workers td.process span.waiting { color:#999; font-size:90%;}
51 | #main table.workers td.process small { font-size:80%; margin-left:5px;}
52 | #main table.workers td.process code { font-family:Monaco, "Courier New", monospace; font-size:90%;}
53 | #main table.workers td.process small a { color:#999;}
54 | #main.polling table.workers tr.working td { background:#f4ffe4; color:#7ac312;}
55 | #main.polling table.workers tr.working td.where a { color:#7ac312;}
56 | #main.polling table.workers tr.working td.process code { font-weight:bold;}
57 |
58 |
59 | #main table.stats th { font-size:100%; width:40%; color:#000;}
60 | #main hr { border:0; border-top:5px solid #efefef; margin:15px 0;}
61 |
62 | #footer { padding:10px 5%; background:#efefef; color:#999; font-size:85%; line-height:1.5; border-top:5px solid #ccc; padding-top:10px;}
63 | #footer p a { color:#999;}
64 |
65 | #main p.poll { background:url(poll.png) no-repeat 0 2px; padding:3px 0; padding-left:23px; float:right; font-size:85%; }
66 |
67 | #main ul.failed {}
68 | #main ul.failed li {background:-webkit-gradient(linear, left top, left bottom, from(#efefef), to(#fff)) #efefef; margin-top:10px; padding:10px; overflow:hidden; -webkit-border-radius:5px; border:1px solid #ccc; }
69 | #main ul.failed li dl dt {font-size:80%; color:#999; width:60px; float:left; padding-top:1px; text-align:right;}
70 | #main ul.failed li dl dd {margin-bottom:10px; margin-left:70px;}
71 | #main ul.failed li dl dd .retry { float: right; }
72 | #main ul.failed li dl dd code, #main ul.failed li dl dd pre { font-family:Monaco, "Courier New", monospace; font-size:90%;}
73 | #main ul.failed li dl dd.error a {font-family:Monaco, "Courier New", monospace; font-size:90%; }
74 | #main ul.failed li dl dd.error pre { margin-top:3px; line-height:1.3;}
75 |
76 | #main p.pagination { background:#efefef; padding:10px; overflow:hidden;}
77 | #main p.pagination a.less { float:left;}
78 | #main p.pagination a.more { float:right;}
79 |
80 | #main form {float:right; margin-top:-10px;}
81 |
82 | #main .time a.toggle_format {text-decoration:none;}
--------------------------------------------------------------------------------
/lib/resque/server.rb:
--------------------------------------------------------------------------------
1 | require 'sinatra/base'
2 | require 'erb'
3 | require 'resque'
4 | require 'resque/version'
5 |
6 | module Resque
7 | class Server < Sinatra::Base
8 | dir = File.dirname(File.expand_path(__FILE__))
9 |
10 | set :views, "#{dir}/server/views"
11 | set :public, "#{dir}/server/public"
12 | set :static, true
13 |
14 | helpers do
15 | include Rack::Utils
16 | alias_method :h, :escape_html
17 |
18 | def current_section
19 | url request.path_info.sub('/','').split('/')[0].downcase
20 | end
21 |
22 | def current_page
23 | url request.path_info.sub('/','')
24 | end
25 |
26 | def url(*path_parts)
27 | [ path_prefix, path_parts ].join("/").squeeze('/')
28 | end
29 | alias_method :u, :url
30 |
31 | def path_prefix
32 | request.env['SCRIPT_NAME']
33 | end
34 |
35 | def class_if_current(path = '')
36 | 'class="current"' if current_page[0, path.size] == path
37 | end
38 |
39 | def tab(name)
40 | dname = name.to_s.downcase
41 | path = url(dname)
42 | "
"
43 | end
44 |
45 | def tabs
46 | Resque::Server.tabs
47 | end
48 |
49 | def redis_get_size(key)
50 | case Resque.redis.type(key)
51 | when 'none'
52 | []
53 | when 'list'
54 | Resque.redis.llen(key)
55 | when 'set'
56 | Resque.redis.scard(key)
57 | when 'string'
58 | Resque.redis.get(key).length
59 | when 'zset'
60 | Resque.redis.zcard(key)
61 | end
62 | end
63 |
64 | def redis_get_value_as_array(key, start=0)
65 | case Resque.redis.type(key)
66 | when 'none'
67 | []
68 | when 'list'
69 | Resque.redis.lrange(key, start, start + 20)
70 | when 'set'
71 | Resque.redis.smembers(key)[start..(start + 20)]
72 | when 'string'
73 | [Resque.redis.get(key)]
74 | when 'zset'
75 | Resque.redis.zrange(key, start, start + 20)
76 | end
77 | end
78 |
79 | def show_args(args)
80 | Array(args).map { |a| a.inspect }.join("\n")
81 | end
82 |
83 | def partial?
84 | @partial
85 | end
86 |
87 | def partial(template, local_vars = {})
88 | @partial = true
89 | erb(template.to_sym, {:layout => false}, local_vars)
90 | ensure
91 | @partial = false
92 | end
93 |
94 | def poll
95 | if @polling
96 | text = "Last Updated: #{Time.now.strftime("%H:%M:%S")}"
97 | else
98 | text = "Live Poll"
99 | end
100 | "
#{text}
"
101 | end
102 |
103 | end
104 |
105 | def show(page, layout = true)
106 | begin
107 | erb page.to_sym, {:layout => layout}, :resque => Resque
108 | rescue Errno::ECONNREFUSED
109 | erb :error, {:layout => false}, :error => "Can't connect to Mongo! (#{Resque.mongo.server})"
110 | end
111 | end
112 |
113 | # to make things easier on ourselves
114 | get "/?" do
115 | redirect url(:overview)
116 | end
117 |
118 | %w( overview queues working workers key ).each do |page|
119 | get "/#{page}" do
120 | show page
121 | end
122 |
123 | get "/#{page}/:id" do
124 | show page
125 | end
126 | end
127 |
128 | post "/queues/:id/remove" do
129 | Resque.remove_queue(params[:id])
130 | redirect u('queues')
131 | end
132 |
133 | %w( overview workers ).each do |page|
134 | get "/#{page}.poll" do
135 | content_type "text/plain"
136 | @polling = true
137 | show(page.to_sym, false).gsub(/\s{1,}/, ' ')
138 | end
139 | end
140 |
141 | get "/failed" do
142 | if Resque::Failure.url
143 | redirect Resque::Failure.url
144 | else
145 | show :failed
146 | end
147 | end
148 |
149 | post "/failed/clear" do
150 | Resque::Failure.clear
151 | redirect u('failed')
152 | end
153 |
154 | get "/failed/requeue/:index" do
155 | Resque::Failure.requeue(params[:index])
156 | if request.xhr?
157 | return Resque::Failure.all(params[:index])['retried_at']
158 | else
159 | redirect u('failed')
160 | end
161 | end
162 |
163 | get "/stats" do
164 | redirect url("/stats/resque")
165 | end
166 |
167 | get "/stats/:id" do
168 | show :stats
169 | end
170 |
171 | get "/stats/keys/:key" do
172 | show :stats
173 | end
174 |
175 | get "/stats.txt" do
176 | info = Resque.info
177 |
178 | stats = []
179 | stats << "resque.pending=#{info[:pending]}"
180 | stats << "resque.processed+=#{info[:processed]}"
181 | stats << "resque.failed+=#{info[:failed]}"
182 | stats << "resque.workers=#{info[:workers]}"
183 | stats << "resque.working=#{info[:working]}"
184 |
185 | Resque.queues.each do |queue|
186 | stats << "queues.#{queue}=#{Resque.size(queue)}"
187 | end
188 |
189 | content_type 'text/plain'
190 | stats.join "\n"
191 | end
192 |
193 | def resque
194 | Resque
195 | end
196 |
197 | def self.tabs
198 | @tabs ||= ["Overview", "Working", "Failed", "Queues", "Workers", "Stats"]
199 | end
200 | end
201 | end
202 |
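
Since Resque::Server.tabs is exposed above (and HISTORY.md notes the array exists so plugins can add their own tabs), a minimal sketch of a plugin registering an extra tab could look like the following; the "Durability" tab name and route are hypothetical:

    require 'resque/server'

    Resque::Server.tabs << "Durability"          # the tab helper links it to url("durability")

    Resque::Server.class_eval do
      get "/durability" do
        # a real plugin would render its own erb view; plain text keeps the sketch self-contained
        content_type "text/plain"
        "#{Resque.queues.size} queues known to Resque"
      end
    end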
--------------------------------------------------------------------------------
/HISTORY.md:
--------------------------------------------------------------------------------
1 | ## 1.9.8 (2010-07-20)
2 |
3 | * Bugfix: Worker.all should never return nil
4 | * monit example: Fixed Syntax Error and adding environment to the rake task
5 | * redis rake task: Fixed typo in copy command
6 |
7 | ## 1.9.7 (2010-07-09)
8 |
9 | * Improved memory usage in Job.destroy
10 | * redis-namespace 0.7.0 now required
11 | * Bugfix: Reverted $0 changes
12 | * Web Bugfix: Payload-less failures in the web ui work
13 |
14 | ## 1.9.6 (2010-06-22)
15 |
16 | * Bugfix: Rakefile logging works the same as all the other logging
17 |
18 | ## 1.9.5 (2010-06-16)
19 |
20 | * Web Bugfix: Display the configured namespace on the stats page
21 | * Revert Bugfix: Make ps -o more cross platform friendly
22 |
23 | ## 1.9.4 (2010-06-14)
24 |
25 | * Bugfix: Multiple failure backend gets exception information when created
26 |
27 | ## 1.9.3 (2010-06-14)
28 |
29 | * Bugfix: Resque#queues always returns an array
30 |
31 | ## 1.9.2 (2010-06-13)
32 |
33 | * Bugfix: Worker.all returning nil fix
34 | * Bugfix: Make ps -o more cross platform friendly
35 |
36 | ## 1.9.1 (2010-06-04)
37 |
38 | * Less strict JSON dependency
39 | * Included HISTORY.md in gem
40 |
41 | ## 1.9.0 (2010-06-04)
42 |
43 | * Redis 2 support
44 | * Depend on redis-namespace 0.5.0
45 | * Added Resque::VERSION constant (alias of Resque::Version)
46 | * Bugfix: Specify JSON dependency
47 | * Bugfix: Hoptoad plugin now works on 1.9
48 |
49 | ## 1.8.5 (2010-05-18)
50 |
51 | * Bugfix: Be more liberal in which Redis clients we accept.
52 |
53 | ## 1.8.4 (2010-05-18)
54 |
55 | * Try to resolve redis-namespace dependency issue
56 |
57 | ## 1.8.3 (2010-05-17)
58 |
59 | * Depend on redis-rb ~> 1.0.7
60 |
61 | ## 1.8.2 (2010-05-03)
62 |
63 | * Bugfix: Include "tasks/" dir in RubyGem
64 |
65 | ## 1.8.1 (2010-04-29)
66 |
67 | * Bugfix: Multiple failure backend did not support requeue-ing failed jobs
68 | * Bugfix: Fix /failed when error has no backtrace
69 | * Bugfix: Add `Redis::DistRedis` as a valid client
70 |
71 | ## 1.8.0 (2010-04-07)
72 |
73 | * Jobs that never complete due to killed worker are now failed.
74 | * Worker "working" state is now maintained by the parent, not the child.
75 | * Stopped using deprecated redis.rb methods
76 | * `Worker.working` race condition fixed
77 | * `Worker#process` has been deprecated.
78 | * Monit example fixed
79 | * Redis::Client and Redis::Namespace can be passed to `Resque.redis=`
80 |
81 | ## 1.7.1 (2010-04-02)
82 |
83 | * Bugfix: Make job hook execution order consistent
84 | * Bugfix: stdout buffering in child process
85 |
86 | ## 1.7.0 (2010-03-31)
87 |
88 | * Job hooks API. See docs/HOOKS.md.
89 | * web: Hovering over dates shows a timestamp
90 | * web: AJAXify retry action for failed jobs
91 | * web bugfix: Fix pagination bug
92 |
93 | ## 1.6.1 (2010-03-25)
94 |
95 | * Bugfix: Workers may not be clearing their state correctly on
96 | shutdown
97 | * Added example monit config.
98 | * Exception class is now recorded when an error is raised in a
99 | worker.
100 | * web: Unit tests
101 | * web: Show namespace in header and footer
102 | * web: Remove a queue
103 | * web: Retry failed jobs
104 |
105 | ## 1.6.0 (2010-03-09)
106 |
107 | * Added `before_first_fork`, `before_fork`, and `after_fork` hooks.
108 | * Hoptoad: Added server_environment config setting
109 | * Hoptoad bugfix: Don't depend on RAILS_ROOT
110 | * 1.8.6 compat fixes
111 |
112 | ## 1.5.2 (2010-03-03)
113 |
114 | * Bugfix: JSON check was crazy.
115 |
116 | ## 1.5.1 (2010-03-03)
117 |
118 | * `Job.destroy` and `Resque.dequeue` return the # of destroyed jobs.
119 | * Hoptoad notifier improvements
120 | * Specify the namespace with `resque-web` by passing `-N namespace`
121 | * Bugfix: Don't crash when trying to parse invalid JSON.
122 | * Bugfix: Non-standard namespace support
123 | * Web: Red background for queue "failed" only shown if there are failed jobs.
124 | * Web bugfix: Tabs highlight properly now
125 | * Web bugfix: ZSET partial support in stats
126 | * Web bugfix: Deleting failed jobs works again
127 | * Web bugfix: Sets (or zsets, lists, etc) now paginate.
128 |
129 | ## 1.5.0 (2010-02-17)
130 |
131 | * Version now included in procline, e.g. `resque-1.5.0: Message`
132 | * Web bugfix: Ignore idle workers in the "working" page
133 | * Added `Resque::Job.destroy(queue, klass, *args)`
134 | * Added `Resque.dequeue(klass, *args)`
135 |
136 | ## 1.4.0 (2010-02-11)
137 |
138 | * Fallback when unable to bind QUIT and USR1 for Windows and JRuby.
139 | * Fallback when no `Kernel.fork` is provided (for IronRuby).
140 | * Web: Rounded corners in Firefox
141 | * Cut down system calls in `Worker#prune_dead_workers`
142 | * Enable switching DB in a Redis server from config
143 | * Support USR2 and CONT to stop and start job processing.
144 | * Web: Add example failing job
145 | * Bugfix: `Worker#unregister_worker` shouldn't call `done_working`
146 | * Bugfix: Example god config now restarts Resque properly.
147 | * Multiple failure backends now permitted.
148 | * Hoptoad failure backend updated to new API
149 |
150 | ## 1.3.1 (2010-01-11)
151 |
152 | * Vegas bugfix: Don't error without a config
153 |
154 | ## 1.3.0 (2010-01-11)
155 |
156 | * Use Vegas for resque-web
157 | * Web Bugfix: Show proper date/time value for failed_at on Failures
158 | * Web Bugfix: Make the / route more flexible
159 | * Add Resque::Server.tabs array (so plugins can add their own tabs)
160 | * Start using [Semantic Versioning](http://semver.org/)
161 |
162 | ## 1.2.4 (2009-12-15)
163 |
164 | * Web Bugfix: fix key links on stat page
165 |
166 | ## 1.2.3 (2009-12-15)
167 |
168 | * Bugfix: Fixed `rand` seeding in child processes.
169 | * Bugfix: Better JSON encoding/decoding without Yajl.
170 | * Bugfix: Avoid `ps` flag error on Linux
171 | * Add `PREFIX` observance to `rake` install tasks.
172 |
173 | ## 1.2.2 (2009-12-08)
174 |
175 | * Bugfix: Job equality was not properly implemented.
176 |
177 | ## 1.2.1 (2009-12-07)
178 |
179 | * Added `rake resque:workers` task for starting multiple workers.
180 | * 1.9.x compatibility
181 | * Bugfix: Yajl decoder doesn't care about valid UTF-8
182 | * config.ru loads RESQUECONFIG if the ENV variable is set.
183 | * `resque-web` now sets RESQUECONFIG
184 | * Job objects know if they are equal.
185 | * Jobs can be re-queued using `Job#recreate`
186 |
187 | ## 1.2.0 (2009-11-25)
188 |
189 | * If USR1 is sent and no child is found, shutdown.
190 | * Raise when a job class does not respond to `perform`.
191 | * Added `Resque.remove_queue` for deleting a queue
192 |
193 | ## 1.1.0 (2009-11-04)
194 |
195 | * Bugfix: Broken ERB tag in failure UI
196 | * Bugfix: Save the worker's ID, not the worker itself, in the failure module
197 | * Redesigned the sinatra web interface
198 | * Added option to clear failed jobs
199 |
200 | ## 1.0.0 (2009-11-03)
201 |
202 | * First release.
203 |
--------------------------------------------------------------------------------
/test/job_plugins_test.rb:
--------------------------------------------------------------------------------
1 | require File.dirname(__FILE__) + '/test_helper'
2 |
3 | context "Multiple plugins with multiple hooks" do
4 | include PerformJob
5 |
6 | module Plugin1
7 | def before_perform_record_history1(history)
8 | history << :before1
9 | end
10 | def after_perform_record_history1(history)
11 | history << :after1
12 | end
13 | end
14 |
15 | module Plugin2
16 | def before_perform_record_history2(history)
17 | history << :before2
18 | end
19 | def after_perform_record_history2(history)
20 | history << :after2
21 | end
22 | end
23 |
24 | class ::ManyBeforesJob
25 | extend Plugin1
26 | extend Plugin2
27 | def self.perform(history)
28 | history << :perform
29 | end
30 | end
31 |
32 | test "hooks of each type are executed in alphabetical order" do
33 | result = perform_job(ManyBeforesJob, history=[])
34 | assert_equal true, result, "perform returned true"
35 | assert_equal [:before1, :before2, :perform, :after1, :after2], history
36 | end
37 | end
38 |
39 | context "Resque::Plugin ordering before_perform" do
40 | include PerformJob
41 |
42 | module BeforePerformPlugin
43 | def before_perform1(history)
44 | history << :before_perform1
45 | end
46 | end
47 |
48 | class ::BeforePerformJob
49 | extend BeforePerformPlugin
50 | def self.perform(history)
51 | history << :perform
52 | end
53 | def self.before_perform(history)
54 | history << :before_perform
55 | end
56 | end
57 |
58 | test "before_perform hooks are executed in order" do
59 | result = perform_job(BeforePerformJob, history=[])
60 | assert_equal true, result, "perform returned true"
61 | assert_equal [:before_perform, :before_perform1, :perform], history
62 | end
63 | end
64 |
65 | context "Resque::Plugin ordering after_perform" do
66 | include PerformJob
67 |
68 | module AfterPerformPlugin
69 | def after_perform_record_history(history)
70 | history << :after_perform1
71 | end
72 | end
73 |
74 | class ::AfterPerformJob
75 | extend AfterPerformPlugin
76 | def self.perform(history)
77 | history << :perform
78 | end
79 | def self.after_perform(history)
80 | history << :after_perform
81 | end
82 | end
83 |
84 | test "after_perform hooks are executed in order" do
85 | result = perform_job(AfterPerformJob, history=[])
86 | assert_equal true, result, "perform returned true"
87 | assert_equal [:perform, :after_perform, :after_perform1], history
88 | end
89 | end
90 |
91 | context "Resque::Plugin ordering around_perform" do
92 | include PerformJob
93 |
94 | module AroundPerformPlugin1
95 | def around_perform1(history)
96 | history << :around_perform_plugin1
97 | yield
98 | end
99 | end
100 |
101 | class ::AroundPerformJustPerformsJob
102 | extend AroundPerformPlugin1
103 | def self.perform(history)
104 | history << :perform
105 | end
106 | end
107 |
108 | test "around_perform hooks are executed before the job" do
109 | result = perform_job(AroundPerformJustPerformsJob, history=[])
110 | assert_equal true, result, "perform returned true"
111 | assert_equal [:around_perform_plugin1, :perform], history
112 | end
113 |
114 | class ::AroundPerformJob
115 | extend AroundPerformPlugin1
116 | def self.perform(history)
117 | history << :perform
118 | end
119 | def self.around_perform(history)
120 | history << :around_perform
121 | yield
122 | end
123 | end
124 |
125 | test "around_perform hooks are executed in order" do
126 | result = perform_job(AroundPerformJob, history=[])
127 | assert_equal true, result, "perform returned true"
128 | assert_equal [:around_perform, :around_perform_plugin1, :perform], history
129 | end
130 |
131 | module AroundPerformPlugin2
132 | def around_perform2(history)
133 | history << :around_perform_plugin2
134 | yield
135 | end
136 | end
137 |
138 | class ::AroundPerformJob2
139 | extend AroundPerformPlugin1
140 | extend AroundPerformPlugin2
141 | def self.perform(history)
142 | history << :perform
143 | end
144 | def self.around_perform(history)
145 | history << :around_perform
146 | yield
147 | end
148 | end
149 |
150 | test "many around_perform are executed in order" do
151 | result = perform_job(AroundPerformJob2, history=[])
152 | assert_equal true, result, "perform returned true"
153 | assert_equal [:around_perform, :around_perform_plugin1, :around_perform_plugin2, :perform], history
154 | end
155 |
156 | module AroundPerformDoesNotYield
157 | def around_perform0(history)
158 | history << :around_perform0
159 | end
160 | end
161 |
162 | class ::AroundPerformJob3
163 | extend AroundPerformPlugin1
164 | extend AroundPerformPlugin2
165 | extend AroundPerformDoesNotYield
166 | def self.perform(history)
167 | history << :perform
168 | end
169 | def self.around_perform(history)
170 | history << :around_perform
171 | yield
172 | end
173 | end
174 |
175 | test "the job is aborted if an around_perform hook does not yield" do
176 | result = perform_job(AroundPerformJob3, history=[])
177 | assert_equal false, result, "perform returned false"
178 | assert_equal [:around_perform, :around_perform0], history
179 | end
180 |
181 | module AroundPerformGetsJobResult
182 | @@result = nil
183 | def last_job_result
184 | @@result
185 | end
186 |
187 | def around_perform_gets_job_result(*args)
188 | @@result = yield
189 | end
190 | end
191 |
192 | class ::AroundPerformJobWithReturnValue < GoodJob
193 | extend AroundPerformGetsJobResult
194 | end
195 |
196 | test "the job is aborted if an around_perform hook does not yield" do
197 | result = perform_job(AroundPerformJobWithReturnValue, 'Bob')
198 | assert_equal true, result, "perform returned true"
199 | assert_equal 'Good job, Bob', AroundPerformJobWithReturnValue.last_job_result
200 | end
201 | end
202 |
203 | context "Resque::Plugin ordering on_failure" do
204 | include PerformJob
205 |
206 | module OnFailurePlugin
207 | def on_failure1(exception, history)
208 | history << "#{exception.message} plugin"
209 | end
210 | end
211 |
212 | class ::FailureJob
213 | extend OnFailurePlugin
214 | def self.perform(history)
215 | history << :perform
216 | raise StandardError, "oh no"
217 | end
218 | def self.on_failure(exception, history)
219 | history << exception.message
220 | end
221 | end
222 |
223 | test "on_failure hooks are executed in order" do
224 | history = []
225 | assert_raises StandardError do
226 | perform_job(FailureJob, history)
227 | end
228 | assert_equal [:perform, "oh no", "oh no plugin"], history
229 | end
230 | end
231 |
--------------------------------------------------------------------------------
/lib/resque/job.rb:
--------------------------------------------------------------------------------
1 | module Resque
2 | # A Resque::Job represents a unit of work. Each job lives on a
3 | # single queue and has an associated payload object. The payload
4 | # is a hash with two attributes: `class` and `args`. The `class` is
5 | # the name of the Ruby class which should be used to run the
6 | # job. The `args` are an array of arguments which should be passed
7 | # to the Ruby class's `perform` class-level method.
8 | #
9 | # You can manually run a job using this code:
10 | #
11 | # job = Resque::Job.reserve(:high)
12 | # klass = Resque::Job.constantize(job.payload['class'])
13 | # klass.perform(*job.payload['args'])
14 | class Job
15 | include Helpers
16 | extend Helpers
17 |
18 | # Raise Resque::Job::DontPerform from a before_perform hook to
19 | # abort the job.
20 | DontPerform = Class.new(StandardError)
21 |
22 | # The worker object which is currently processing this job.
23 | attr_accessor :worker
24 |
25 | # The name of the queue from which this job was pulled (or is to be
26 | # placed)
27 | attr_reader :queue
28 |
29 | # This job's associated payload object.
30 | attr_reader :payload
31 |
32 | def initialize(queue, payload)
33 | @queue = queue
34 | @payload = payload
35 | end
36 |
37 | # Creates a job by placing it on a queue. Expects a string queue
38 | # name, a string class name, and an optional array of arguments to
39 | # pass to the class' `perform` method.
40 | #
41 | # Raises an exception if no queue or class is given.
42 | def self.create(queue, klass, *args)
43 | if !queue
44 | raise NoQueueError.new("Jobs must be placed onto a queue.")
45 | end
46 |
47 | if klass.to_s.empty?
48 | raise NoClassError.new("Jobs must be given a class.")
49 | end
50 |
51 | Resque.push(queue, :class => klass.to_s, :args => args)
52 | end
53 |
54 | # Removes a job from a queue. Expects a string queue name, a
55 | # string class name, and, optionally, args.
56 | #
57 | # Returns the number of jobs destroyed.
58 | #
59 | # If no args are provided, it will remove all jobs of the class
60 | # provided.
61 | #
62 | # That is, for these two jobs:
63 | #
64 | # { 'class' => 'UpdateGraph', 'args' => ['defunkt'] }
65 | # { 'class' => 'UpdateGraph', 'args' => ['mojombo'] }
66 | #
67 | # The following call will remove both:
68 | #
69 | # Resque::Job.destroy(queue, 'UpdateGraph')
70 | #
71 | # Whereas specifying args will only remove the 2nd job:
72 | #
73 | # Resque::Job.destroy(queue, 'UpdateGraph', 'mojombo')
74 | #
75 | # This method can be potentially very slow and memory intensive,
76 | # depending on the size of your queue, as it loads all jobs into
77 | # a Ruby array before processing.
78 | def self.destroy(queue, klass, *args)
79 | klass = klass.to_s
80 |
81 | destroyed = 0
82 |
83 | mongo.find(:queue => queue).each do |rec|
84 | json = decode(rec['item'])
85 |
86 | match = json['class'] == klass
87 | match &= json['args'] == args unless args.empty?
88 |
89 | if match
90 | destroyed += 1
91 | mongo.remove(:_id => rec['_id'])
92 | end
93 | end
94 |
95 | destroyed
96 | end
97 |
98 | # Given a string queue name, returns an instance of Resque::Job
99 | # if any jobs are available. If not, returns nil.
100 | def self.reserve(queue)
101 | return unless payload = Resque.pop(queue)
102 | new(queue, payload)
103 | end
104 |
105 | # Attempts to perform the work represented by this job instance.
106 | # Calls #perform on the class given in the payload with the
107 | # arguments given in the payload.
108 | def perform
109 | job = payload_class
110 | job_args = args || []
111 | job_was_performed = false
112 |
113 | before_hooks = Plugin.before_hooks(job)
114 | around_hooks = Plugin.around_hooks(job)
115 | after_hooks = Plugin.after_hooks(job)
116 | failure_hooks = Plugin.failure_hooks(job)
117 |
118 | begin
119 | # Execute before_perform hook. Abort the job gracefully if
120 | # Resque::Job::DontPerform is raised.
121 | begin
122 | before_hooks.each do |hook|
123 | job.send(hook, *job_args)
124 | end
125 | rescue DontPerform
126 | return false
127 | end
128 |
129 | # Execute the job. Do it in an around_perform hook if available.
130 | if around_hooks.empty?
131 | job.perform(*job_args)
132 | job_was_performed = true
133 | else
134 | # We want to nest all around_perform plugins, with the last one
135 | # finally calling perform
136 | stack = around_hooks.reverse.inject(nil) do |last_hook, hook|
137 | if last_hook
138 | lambda do
139 | job.send(hook, *job_args) { last_hook.call }
140 | end
141 | else
142 | lambda do
143 | job.send(hook, *job_args) do
144 | result = job.perform(*job_args)
145 | job_was_performed = true
146 | result
147 | end
148 | end
149 | end
150 | end
151 | stack.call
152 | end
153 |
154 | # Execute after_perform hook
155 | after_hooks.each do |hook|
156 | job.send(hook, *job_args)
157 | end
158 |
159 | # Return true if the job was performed
160 | return job_was_performed
161 |
162 | # If an exception occurs during the job execution, look for an
163 | # on_failure hook then re-raise.
164 | rescue Object => e
165 | failure_hooks.each { |hook| job.send(hook, e, *job_args) }
166 | raise e
167 | end
168 | end
169 |
170 | # Returns the actual class constant represented in this job's payload.
171 | def payload_class
172 | @payload_class ||= constantize(@payload['class'])
173 | end
174 |
175 | # Returns an array of args represented in this job's payload.
176 | def args
177 | @payload['args']
178 | end
179 |
180 | # Given an exception object, hands off the needed parameters to
181 | # the Failure module.
182 | def fail(exception)
183 | Failure.create \
184 | :payload => payload,
185 | :exception => exception,
186 | :worker => worker,
187 | :queue => queue
188 | end
189 |
190 | # Creates an identical job, essentially placing this job back on
191 | # the queue.
192 | def recreate
193 | self.class.create(queue, payload_class, *args)
194 | end
195 |
196 | # String representation
197 | def inspect
198 | obj = @payload
199 | "(Job{%s} | %s | %s)" % [ @queue, obj['class'], obj['args'].inspect ]
200 | end
201 |
202 | # Equality
203 | def ==(other)
204 | queue == other.queue &&
205 | payload_class == other.payload_class &&
206 | args == other.args
207 | end
208 | end
209 | end
210 |
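
To make the around_perform nesting in Resque::Job#perform concrete, here is a small sketch; the module, job, and hook names are hypothetical, and the hook names are chosen so they sort (and therefore run) in the intended order:

    module LoggingPlugin
      def around_perform_0_log(*args)
        # outermost wrapper: runs first; its yield drives the rest of the chain
        puts "starting #{self}"
        yield
        puts "finished #{self}"
      end
    end

    module TimingPlugin
      def around_perform_1_time(*args)
        started = Time.now
        yield                                   # innermost wrapper: this yield calls perform
        puts "took #{Time.now - started}s"
      end
    end

    class ArchiveJob
      extend LoggingPlugin
      extend TimingPlugin
      @queue = :default

      def self.perform(*args)
        # actual work goes here
      end
    end

    # Resque::Job#perform builds, in effect:
    #   around_perform_0_log { around_perform_1_time { ArchiveJob.perform(*args) } }
    # and returns false (job aborted) if any wrapper never yields.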
--------------------------------------------------------------------------------
/test/resque_test.rb:
--------------------------------------------------------------------------------
1 | require File.dirname(__FILE__) + '/test_helper'
2 |
3 | context "Resque" do
4 | setup do
5 | Resque.drop
6 |
7 | Resque.push(:people, { 'name' => 'chris' })
8 | Resque.push(:people, { 'name' => 'bob' })
9 | Resque.push(:people, { 'name' => 'mark' })
10 | end
11 |
12 | test "can put jobs on a queue" do
13 | assert Resque::Job.create(:jobs, 'SomeJob', 20, '/tmp')
14 | assert Resque::Job.create(:jobs, 'SomeJob', 20, '/tmp')
15 | end
16 |
17 | test "can grab jobs off a queue" do
18 | Resque::Job.create(:jobs, 'some-job', 20, '/tmp')
19 |
20 | job = Resque.reserve(:jobs)
21 |
22 | assert_kind_of Resque::Job, job
23 | assert_equal SomeJob, job.payload_class
24 | assert_equal 20, job.args[0]
25 | assert_equal '/tmp', job.args[1]
26 | end
27 |
28 | test "can re-queue jobs" do
29 | Resque::Job.create(:jobs, 'some-job', 20, '/tmp')
30 |
31 | job = Resque.reserve(:jobs)
32 | job.recreate
33 |
34 | assert_equal job, Resque.reserve(:jobs)
35 | end
36 |
37 | test "can put jobs on a queue by way of an ivar" do
38 | assert_equal 0, Resque.size(:ivar)
39 | assert Resque.enqueue(SomeIvarJob, 20, '/tmp')
40 | assert Resque.enqueue(SomeIvarJob, 20, '/tmp')
41 |
42 | job = Resque.reserve(:ivar)
43 |
44 | assert_kind_of Resque::Job, job
45 | assert_equal SomeIvarJob, job.payload_class
46 | assert_equal 20, job.args[0]
47 | assert_equal '/tmp', job.args[1]
48 |
49 | assert Resque.reserve(:ivar)
50 | assert_equal nil, Resque.reserve(:ivar)
51 | end
52 |
53 | test "can remove jobs from a queue by way of an ivar" do
54 | assert_equal 0, Resque.size(:ivar)
55 | assert Resque.enqueue(SomeIvarJob, 20, '/tmp')
56 | assert Resque.enqueue(SomeIvarJob, 30, '/tmp')
57 | assert Resque.enqueue(SomeIvarJob, 20, '/tmp')
58 | assert Resque::Job.create(:ivar, 'blah-job', 20, '/tmp')
59 | assert Resque.enqueue(SomeIvarJob, 20, '/tmp')
60 | assert_equal 5, Resque.size(:ivar)
61 |
62 | assert Resque.dequeue(SomeIvarJob, 30, '/tmp')
63 | assert_equal 4, Resque.size(:ivar)
64 | assert Resque.dequeue(SomeIvarJob)
65 | assert_equal 1, Resque.size(:ivar)
66 | end
67 |
68 | test "jobs have a nice #inspect" do
69 | assert Resque::Job.create(:jobs, 'SomeJob', 20, '/tmp')
70 | job = Resque.reserve(:jobs)
71 | assert_equal '(Job{jobs} | SomeJob | [20, "/tmp"])', job.inspect
72 | end
73 |
74 | test "jobs can be destroyed" do
75 | assert Resque::Job.create(:jobs, 'SomeJob', 20, '/tmp')
76 | assert Resque::Job.create(:jobs, 'BadJob', 20, '/tmp')
77 | assert Resque::Job.create(:jobs, 'SomeJob', 20, '/tmp')
78 | assert Resque::Job.create(:jobs, 'BadJob', 30, '/tmp')
79 | assert Resque::Job.create(:jobs, 'BadJob', 20, '/tmp')
80 |
81 | assert_equal 5, Resque.size(:jobs)
82 | assert_equal 2, Resque::Job.destroy(:jobs, 'SomeJob')
83 | assert_equal 3, Resque.size(:jobs)
84 | assert_equal 1, Resque::Job.destroy(:jobs, 'BadJob', 30, '/tmp')
85 | assert_equal 2, Resque.size(:jobs)
86 | end
87 |
88 | test "jobs can test for equality" do
89 | assert Resque::Job.create(:jobs, 'SomeJob', 20, '/tmp')
90 | assert Resque::Job.create(:jobs, 'some-job', 20, '/tmp')
91 | assert_equal Resque.reserve(:jobs), Resque.reserve(:jobs)
92 |
93 | assert Resque::Job.create(:jobs, 'SomeMethodJob', 20, '/tmp')
94 | assert Resque::Job.create(:jobs, 'SomeJob', 20, '/tmp')
95 | assert_not_equal Resque.reserve(:jobs), Resque.reserve(:jobs)
96 |
97 | assert Resque::Job.create(:jobs, 'SomeJob', 20, '/tmp')
98 | assert Resque::Job.create(:jobs, 'SomeJob', 30, '/tmp')
99 | assert_not_equal Resque.reserve(:jobs), Resque.reserve(:jobs)
100 | end
101 |
102 | test "can put jobs on a queue by way of a method" do
103 | assert_equal 0, Resque.size(:method)
104 | assert Resque.enqueue(SomeMethodJob, 20, '/tmp')
105 | assert Resque.enqueue(SomeMethodJob, 20, '/tmp')
106 |
107 | job = Resque.reserve(:method)
108 |
109 | assert_kind_of Resque::Job, job
110 | assert_equal SomeMethodJob, job.payload_class
111 | assert_equal 20, job.args[0]
112 | assert_equal '/tmp', job.args[1]
113 |
114 | assert Resque.reserve(:method)
115 | assert_equal nil, Resque.reserve(:method)
116 | end
117 |
118 | test "needs to infer a queue with enqueue" do
119 | assert_raises Resque::NoQueueError do
120 | Resque.enqueue(SomeJob, 20, '/tmp')
121 | end
122 | end
123 |
124 | test "can put items on a queue" do
125 | assert Resque.push(:people, { 'name' => 'jon' })
126 | end
127 |
128 | test "can pull items off a queue" do
129 | assert_equal({ 'name' => 'chris' }, Resque.pop(:people))
130 | assert_equal({ 'name' => 'bob' }, Resque.pop(:people))
131 | assert_equal({ 'name' => 'mark' }, Resque.pop(:people))
132 | assert_equal nil, Resque.pop(:people)
133 | end
134 |
135 | test "knows how big a queue is" do
136 | assert_equal 3, Resque.size(:people)
137 |
138 | assert_equal({ 'name' => 'chris' }, Resque.pop(:people))
139 | assert_equal 2, Resque.size(:people)
140 |
141 | assert_equal({ 'name' => 'bob' }, Resque.pop(:people))
142 | assert_equal({ 'name' => 'mark' }, Resque.pop(:people))
143 | assert_equal 0, Resque.size(:people)
144 | end
145 |
146 | test "can peek at a queue" do
147 | assert_equal({ 'name' => 'chris' }, Resque.peek(:people))
148 | assert_equal 3, Resque.size(:people)
149 | end
150 |
151 | test "can peek multiple items on a queue" do
152 | assert_equal({ 'name' => 'bob' }, Resque.peek(:people, 1, 1))
153 |
154 | assert_equal([{ 'name' => 'bob' }, { 'name' => 'mark' }], Resque.peek(:people, 1, 2))
155 | assert_equal([{ 'name' => 'chris' }, { 'name' => 'bob' }], Resque.peek(:people, 0, 2))
156 | assert_equal([{ 'name' => 'chris' }, { 'name' => 'bob' }, { 'name' => 'mark' }], Resque.peek(:people, 0, 3))
157 | assert_equal({ 'name' => 'mark' }, Resque.peek(:people, 2, 1))
158 | assert_equal nil, Resque.peek(:people, 3)
159 | assert_equal [], Resque.peek(:people, 3, 2)
160 | end
161 |
162 | test "knows what queues it is managing" do
163 | assert_equal %w( people ), Resque.queues
164 | Resque.push(:cars, { 'make' => 'bmw' })
165 | assert_equal %w( cars people ), Resque.queues
166 | end
167 |
168 | test "queues are always a list" do
169 | Resque.drop
170 | assert_equal [], Resque.queues
171 | end
172 |
173 | test "can delete a queue" do
174 | Resque.push(:cars, { 'make' => 'bmw' })
175 | assert_equal %w( cars people ), Resque.queues
176 | Resque.remove_queue(:people)
177 | assert_equal %w( cars ), Resque.queues
178 | assert_equal nil, Resque.pop(:people)
179 | end
180 |
181 | test "keeps track of resque keys" do
182 | assert_equal ["people"], Resque.keys
183 | end
184 |
185 | test "badly wants a class name, too" do
186 | assert_raises Resque::NoClassError do
187 | Resque::Job.create(:jobs, nil)
188 | end
189 | end
190 |
191 | test "keeps stats" do
192 | Resque::Job.create(:jobs, SomeJob, 20, '/tmp')
193 | Resque::Job.create(:jobs, BadJob)
194 | Resque::Job.create(:jobs, GoodJob)
195 |
196 | Resque::Job.create(:others, GoodJob)
197 | Resque::Job.create(:others, GoodJob)
198 |
199 | stats = Resque.info
200 | assert_equal 8, stats[:pending]
201 |
202 | @worker = Resque::Worker.new(:jobs)
203 | @worker.register_worker
204 | 2.times { @worker.process }
205 |
206 | job = @worker.reserve
207 | @worker.working_on job
208 |
209 | stats = Resque.info
210 | assert_equal 1, stats[:working]
211 | assert_equal 1, stats[:workers]
212 |
213 | @worker.done_working
214 |
215 | stats = Resque.info
216 | assert_equal 2, stats[:queues]
217 | assert_equal 3, stats[:processed]
218 | assert_equal 1, stats[:failed]
219 | assert_equal ['localhost:27017'], stats[:servers]
220 | end
221 |
222 | test "decode bad json" do
223 | assert_nil Resque.decode("{\"error\":\"Module not found \\u002\"}")
224 | end
225 | end
226 |
--------------------------------------------------------------------------------
/test/worker_test.rb:
--------------------------------------------------------------------------------
1 | require File.dirname(__FILE__) + '/test_helper'
2 |
3 | context "Resque::Worker" do
4 | setup do
5 | Resque.drop
6 |
7 | Resque.before_first_fork = nil
8 | Resque.before_fork = nil
9 | Resque.after_fork = nil
10 |
11 | @worker = Resque::Worker.new(:jobs)
12 | Resque::Job.create(:jobs, SomeJob, 20, '/tmp')
13 | end
14 |
15 | test "can fail jobs" do
16 | Resque::Job.create(:jobs, BadJob)
17 | @worker.work(0)
18 | assert_equal 1, Resque::Failure.count
19 | end
20 |
21 | test "failed jobs report exception and message" do
22 | Resque::Job.create(:jobs, BadJobWithSyntaxError)
23 | @worker.work(0)
24 | assert_equal('SyntaxError', Resque::Failure.all['exception'])
25 | assert_equal('Extra Bad job!', Resque::Failure.all['error'])
26 | end
27 |
28 | test "fails uncompleted jobs on exit" do
29 | job = Resque::Job.new(:jobs, [GoodJob, "blah"])
30 | @worker.working_on(job)
31 | @worker.unregister_worker
32 | assert_equal 1, Resque::Failure.count
33 | end
34 |
35 | test "can peek at failed jobs" do
36 | 10.times { Resque::Job.create(:jobs, BadJob) }
37 | @worker.work(0)
38 |
39 | assert_equal 10, Resque::Failure.count
40 | assert_equal 10, Resque::Failure.all(0, 20).size
41 | end
42 |
43 | test "can clear failed jobs" do
44 | Resque::Job.create(:jobs, BadJob)
45 | @worker.work(0)
46 | assert_equal 1, Resque::Failure.count
47 | Resque::Failure.clear
48 | assert_equal 0, Resque::Failure.count
49 | end
50 |
51 | test "catches exceptional jobs" do
52 | Resque::Job.create(:jobs, BadJob)
53 | Resque::Job.create(:jobs, BadJob)
54 | @worker.process
55 | @worker.process
56 | @worker.process
57 | assert_equal 2, Resque::Failure.count
58 | end
59 |
60 | test "can work on multiple queues" do
61 | Resque::Job.create(:high, GoodJob)
62 | Resque::Job.create(:critical, GoodJob)
63 |
64 | worker = Resque::Worker.new(:critical, :high)
65 |
66 | worker.process
67 | assert_equal 1, Resque.size(:high)
68 | assert_equal 0, Resque.size(:critical)
69 |
70 | worker.process
71 | assert_equal 0, Resque.size(:high)
72 | end
73 |
74 | test "can work on all queues" do
75 | Resque::Job.create(:high, GoodJob)
76 | Resque::Job.create(:critical, GoodJob)
77 | Resque::Job.create(:blahblah, GoodJob)
78 |
79 | worker = Resque::Worker.new("*")
80 |
81 | worker.work(0)
82 | assert_equal 0, Resque.size(:high)
83 | assert_equal 0, Resque.size(:critical)
84 | assert_equal 0, Resque.size(:blahblah)
85 | end
86 |
87 | test "processes * queues in alphabetical order" do
88 | Resque::Job.create(:high, GoodJob)
89 | Resque::Job.create(:critical, GoodJob)
90 | Resque::Job.create(:blahblah, GoodJob)
91 |
92 | worker = Resque::Worker.new("*")
93 | processed_queues = []
94 |
95 | worker.work(0) do |job|
96 | processed_queues << job.queue
97 | end
98 |
99 | assert_equal %w( jobs high critical blahblah ).sort, processed_queues
100 | end
101 |
102 | test "has a unique id" do
103 | assert_equal "#{`hostname`.chomp}:#{$$}:jobs", @worker.to_s
104 | end
105 |
106 | test "complains if no queues are given" do
107 | assert_raise Resque::NoQueueError do
108 | Resque::Worker.new
109 | end
110 | end
111 |
112 | test "fails if a job class has no `perform` method" do
113 | worker = Resque::Worker.new(:perform_less)
114 | Resque::Job.create(:perform_less, Object)
115 |
116 | assert_equal 0, Resque::Failure.count
117 | worker.work(0)
118 | assert_equal 1, Resque::Failure.count
119 | end
120 |
121 | test "inserts itself into the 'workers' list on startup" do
122 | @worker.work(0) do
123 | assert_equal @worker, Resque.workers[0]
124 | end
125 | end
126 |
127 | test "removes itself from the 'workers' list on shutdown" do
128 | @worker.work(0) do
129 | assert_equal @worker, Resque.workers[0]
130 | end
131 |
132 | assert_equal [], Resque.workers
133 | end
134 |
135 | test "removes worker with stringified id" do
136 | @worker.work(0) do
137 | worker_id = Resque.workers[0].to_s
138 | Resque.remove_worker(worker_id)
139 | assert_equal [], Resque.workers
140 | end
141 | end
142 |
143 | test "records what it is working on" do
144 | @worker.work(0) do
145 | task = @worker.job
146 | assert_equal({"args"=>[20, "/tmp"], "class"=>"SomeJob"}, task['payload'])
147 | assert task['run_at']
148 | assert_equal 'jobs', task['queue']
149 | end
150 | end
151 |
152 | test "clears its status when not working on anything" do
153 | @worker.work(0)
154 | assert_equal Hash.new, @worker.job
155 | end
156 |
157 | test "knows when it is working" do
158 | @worker.work(0) do
159 | assert @worker.working?
160 | end
161 | end
162 |
163 | test "knows when it is idle" do
164 | @worker.work(0)
165 | assert @worker.idle?
166 | end
167 |
168 | test "knows who is working" do
169 | @worker.work(0) do
170 | assert_equal [@worker], Resque.working
171 | end
172 | end
173 |
174 | test "keeps track of how many jobs it has processed" do
175 | Resque::Job.create(:jobs, BadJob)
176 | Resque::Job.create(:jobs, BadJob)
177 |
178 | 3.times do
179 | job = @worker.reserve
180 | @worker.process job
181 | end
182 | assert_equal 3, @worker.processed
183 | end
184 |
185 | test "keeps track of how many failures it has seen" do
186 | Resque::Job.create(:jobs, BadJob)
187 | Resque::Job.create(:jobs, BadJob)
188 |
189 | 3.times do
190 | job = @worker.reserve
191 | @worker.process job
192 | end
193 | assert_equal 2, @worker.failed
194 | end
195 |
196 | test "stats are erased when the worker goes away" do
197 | @worker.work(0)
198 | assert_equal 0, @worker.processed
199 | assert_equal 0, @worker.failed
200 | end
201 |
202 | test "knows when it started" do
203 | time = Time.now
204 | @worker.work(0) do
205 | assert_equal time.to_s, @worker.started.to_s
206 | end
207 | end
208 |
209 | test "knows whether it exists or not" do
210 | @worker.work(0) do
211 | assert Resque::Worker.exists?(@worker)
212 | assert !Resque::Worker.exists?('blah-blah')
213 | end
214 | end
215 |
216 | test "sets $0 while working" do
217 | @worker.work(0) do
218 | ver = Resque::Version
219 | assert_equal "resque-#{ver}: Processing jobs since #{Time.now.to_i}", $0
220 | end
221 | end
222 |
223 | test "can be found" do
224 | @worker.work(0) do
225 | found = Resque::Worker.find(@worker.to_s)
226 | assert_equal @worker.to_s, found.to_s
227 | assert found.working?
228 | assert_equal @worker.job, found.job
229 | end
230 | end
231 |
232 | test "doesn't find fakes" do
233 | @worker.work(0) do
234 | found = Resque::Worker.find('blah-blah')
235 | assert_equal nil, found
236 | end
237 | end
238 |
239 | test "cleans up dead worker info on start (crash recovery)" do
240 | # first we fake out two dead workers
241 | workerA = Resque::Worker.new(:jobs)
242 | workerA.instance_variable_set(:@to_s, "#{`hostname`.chomp}:1:jobs")
243 | workerA.register_worker
244 |
245 | workerB = Resque::Worker.new(:high, :low)
246 | workerB.instance_variable_set(:@to_s, "#{`hostname`.chomp}:2:high,low")
247 | workerB.register_worker
248 |
249 | assert_equal 2, Resque.workers.size
250 |
251 | # then we prune them
252 | @worker.work(0) do
253 | assert_equal 1, Resque.workers.size
254 | end
255 | end
256 |
257 | test "Processed jobs count" do
258 | @worker.work(0)
259 | assert_equal 1, Resque.info[:processed]
260 | end
261 |
262 | test "Will call a before_first_fork hook only once" do
263 | Resque.drop
264 | $BEFORE_FORK_CALLED = 0
265 | Resque.before_first_fork = Proc.new { $BEFORE_FORK_CALLED += 1 }
266 | workerA = Resque::Worker.new(:jobs)
267 | Resque::Job.create(:jobs, SomeJob, 20, '/tmp')
268 |
269 | assert_equal 0, $BEFORE_FORK_CALLED
270 |
271 | workerA.work(0)
272 | assert_equal 1, $BEFORE_FORK_CALLED
273 |
274 | # TODO: Verify it's only run once. Not easy.
275 | # workerA.work(0)
276 | # assert_equal 1, $BEFORE_FORK_CALLED
277 | end
278 |
279 | test "Will call a before_fork hook before forking" do
280 | Resque.drop
281 | $BEFORE_FORK_CALLED = false
282 | Resque.before_fork = Proc.new { $BEFORE_FORK_CALLED = true }
283 | workerA = Resque::Worker.new(:jobs)
284 |
285 | assert !$BEFORE_FORK_CALLED
286 | Resque::Job.create(:jobs, SomeJob, 20, '/tmp')
287 | workerA.work(0)
288 | assert $BEFORE_FORK_CALLED
289 | end
290 |
291 | test "Will call an after_fork hook after forking" do
292 | Resque.drop
293 | $AFTER_FORK_CALLED = false
294 | Resque.after_fork = Proc.new { $AFTER_FORK_CALLED = true }
295 | workerA = Resque::Worker.new(:jobs)
296 |
297 | assert !$AFTER_FORK_CALLED
298 | Resque::Job.create(:jobs, SomeJob, 20, '/tmp')
299 | workerA.work(0)
300 | assert $AFTER_FORK_CALLED
301 | end
302 | end
303 |
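
The tests above drive Resque::Worker directly with a zero-second interval. Outside the test suite a worker is normally given an ordered list of queues and a polling interval; a minimal sketch (the queue names are illustrative only):

    require 'resque'

    worker = Resque::Worker.new(:critical, :high, :low)   # queues are checked in this order on every pass
    worker.verbose = true                                  # log basic info to STDOUT
    worker.work(5)                                         # poll every 5 seconds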
--------------------------------------------------------------------------------
/test/job_hooks_test.rb:
--------------------------------------------------------------------------------
1 | require File.dirname(__FILE__) + '/test_helper'
2 |
3 | context "Resque::Job before_perform" do
4 | include PerformJob
5 |
6 | class ::BeforePerformJob
7 | def self.before_perform_record_history(history)
8 | history << :before_perform
9 | end
10 |
11 | def self.perform(history)
12 | history << :perform
13 | end
14 | end
15 |
16 | test "it runs before_perform before perform" do
17 | result = perform_job(BeforePerformJob, history=[])
18 | assert_equal true, result, "perform returned true"
19 | assert_equal history, [:before_perform, :perform]
20 | end
21 |
22 | class ::BeforePerformJobFails
23 | def self.before_perform_fail_job(history)
24 | history << :before_perform
25 | raise StandardError
26 | end
27 | def self.perform(history)
28 | history << :perform
29 | end
30 | end
31 |
32 | test "raises an error and does not perform if before_perform fails" do
33 | history = []
34 | assert_raises StandardError do
35 | perform_job(BeforePerformJobFails, history)
36 | end
37 | assert_equal history, [:before_perform], "Only before_perform was run"
38 | end
39 |
40 | class ::BeforePerformJobAborts
41 | def self.before_perform_abort(history)
42 | history << :before_perform
43 | raise Resque::Job::DontPerform
44 | end
45 | def self.perform(history)
46 | history << :perform
47 | end
48 | end
49 |
50 | test "does not perform if before_perform raises Resque::Job::DontPerform" do
51 | result = perform_job(BeforePerformJobAborts, history=[])
52 | assert_equal false, result, "perform returned false"
53 | assert_equal history, [:before_perform], "Only before_perform was run"
54 | end
55 | end
56 |
57 | context "Resque::Job after_perform" do
58 | include PerformJob
59 |
60 | class ::AfterPerformJob
61 | def self.perform(history)
62 | history << :perform
63 | end
64 | def self.after_perform_record_history(history)
65 | history << :after_perform
66 | end
67 | end
68 |
69 | test "it runs after_perform after perform" do
70 | result = perform_job(AfterPerformJob, history=[])
71 | assert_equal true, result, "perform returned true"
72 | assert_equal history, [:perform, :after_perform]
73 | end
74 |
75 | class ::AfterPerformJobFails
76 | def self.perform(history)
77 | history << :perform
78 | end
79 | def self.after_perform_fail_job(history)
80 | history << :after_perform
81 | raise StandardError
82 | end
83 | end
84 |
85 | test "raises an error but has already performed if after_perform fails" do
86 | history = []
87 | assert_raises StandardError do
88 | perform_job(AfterPerformJobFails, history)
89 | end
90 | assert_equal history, [:perform, :after_perform], "Only after_perform was run"
91 | end
92 | end
93 |
94 | context "Resque::Job around_perform" do
95 | include PerformJob
96 |
97 | class ::AroundPerformJob
98 | def self.perform(history)
99 | history << :perform
100 | end
101 | def self.around_perform_record_history(history)
102 | history << :start_around_perform
103 | yield
104 | history << :finish_around_perform
105 | end
106 | end
107 |
108 | test "it runs around_perform then yields in order to perform" do
109 | result = perform_job(AroundPerformJob, history=[])
110 | assert_equal true, result, "perform returned true"
111 | assert_equal history, [:start_around_perform, :perform, :finish_around_perform]
112 | end
113 |
114 | class ::AroundPerformJobFailsBeforePerforming
115 | def self.perform(history)
116 | history << :perform
117 | end
118 | def self.around_perform_fail(history)
119 | history << :start_around_perform
120 | raise StandardError
121 | yield
122 | history << :finish_around_perform
123 | end
124 | end
125 |
126 | test "raises an error and does not perform if around_perform fails before yielding" do
127 | history = []
128 | assert_raises StandardError do
129 | perform_job(AroundPerformJobFailsBeforePerforming, history)
130 | end
131 | assert_equal history, [:start_around_perform], "Only part of around_perform was run"
132 | end
133 |
134 | class ::AroundPerformJobFailsWhilePerforming
135 | def self.perform(history)
136 | history << :perform
137 | raise StandardError
138 | end
139 | def self.around_perform_fail_in_yield(history)
140 | history << :start_around_perform
141 | begin
142 | yield
143 | ensure
144 | history << :ensure_around_perform
145 | end
146 | history << :finish_around_perform
147 | end
148 | end
149 |
150 | test "raises an error but may handle exceptions if perform fails" do
151 | history = []
152 | assert_raises StandardError do
153 | perform_job(AroundPerformJobFailsWhilePerforming, history)
154 | end
155 | assert_equal history, [:start_around_perform, :perform, :ensure_around_perform], "Only part of around_perform was run"
156 | end
157 |
158 | class ::AroundPerformJobDoesNotHaveToYield
159 | def self.perform(history)
160 | history << :perform
161 | end
162 | def self.around_perform_dont_yield(history)
163 | history << :start_around_perform
164 | history << :finish_around_perform
165 | end
166 | end
167 |
168 | test "around_perform is not required to yield" do
169 | history = []
170 | result = perform_job(AroundPerformJobDoesNotHaveToYield, history)
171 | assert_equal false, result, "perform returns false"
172 | assert_equal history, [:start_around_perform, :finish_around_perform], "perform was not run"
173 | end
174 | end
175 |
176 | context "Resque::Job on_failure" do
177 | include PerformJob
178 |
179 | class ::FailureJobThatDoesNotFail
180 | def self.perform(history)
181 | history << :perform
182 | end
183 | def self.on_failure_record_failure(exception, history)
184 | history << exception.message
185 | end
186 | end
187 |
188 | test "it does not call on_failure if no failures occur" do
189 | result = perform_job(FailureJobThatDoesNotFail, history=[])
190 | assert_equal true, result, "perform returned true"
191 | assert_equal history, [:perform]
192 | end
193 |
194 | class ::FailureJobThatFails
195 | def self.perform(history)
196 | history << :perform
197 | raise StandardError, "oh no"
198 | end
199 | def self.on_failure_record_failure(exception, history)
200 | history << exception.message
201 | end
202 | end
203 |
204 | test "it calls on_failure with the exception and then re-raises the exception" do
205 | history = []
206 | assert_raises StandardError do
207 | perform_job(FailureJobThatFails, history)
208 | end
209 | assert_equal history, [:perform, "oh no"]
210 | end
211 |
212 | class ::FailureJobThatFailsBadly
213 | def self.perform(history)
214 | history << :perform
215 | raise SyntaxError, "oh no"
216 | end
217 | def self.on_failure_record_failure(exception, history)
218 | history << exception.message
219 | end
220 | end
221 |
222 | test "it calls on_failure even with bad exceptions" do
223 | history = []
224 | assert_raises SyntaxError do
225 | perform_job(FailureJobThatFailsBadly, history)
226 | end
227 | assert_equal history, [:perform, "oh no"]
228 | end
229 | end
230 |
231 | context "Resque::Job all hooks" do
232 | include PerformJob
233 |
234 | class ::VeryHookyJob
235 | def self.before_perform_record_history(history)
236 | history << :before_perform
237 | end
238 | def self.around_perform_record_history(history)
239 | history << :start_around_perform
240 | yield
241 | history << :finish_around_perform
242 | end
243 | def self.perform(history)
244 | history << :perform
245 | end
246 | def self.after_perform_record_history(history)
247 | history << :after_perform
248 | end
249 | def self.on_failure_record_history(exception, history)
250 | history << exception.message
251 | end
252 | end
253 |
254 | test "the complete hook order" do
255 | result = perform_job(VeryHookyJob, history=[])
256 | assert_equal true, result, "perform returned true"
257 | assert_equal history, [
258 | :before_perform,
259 | :start_around_perform,
260 | :perform,
261 | :finish_around_perform,
262 | :after_perform
263 | ]
264 | end
265 |
266 | class ::VeryHookyJobThatFails
267 | def self.before_perform_record_history(history)
268 | history << :before_perform
269 | end
270 | def self.around_perform_record_history(history)
271 | history << :start_around_perform
272 | yield
273 | history << :finish_around_perform
274 | end
275 | def self.perform(history)
276 | history << :perform
277 | end
278 | def self.after_perform_record_history(history)
279 | history << :after_perform
280 | raise StandardError, "oh no"
281 | end
282 | def self.on_failure_record_history(exception, history)
283 | history << exception.message
284 | end
285 | end
286 |
287 | test "the complete hook order with a failure at the last minute" do
288 | history = []
289 | assert_raises StandardError do
290 | perform_job(VeryHookyJobThatFails, history)
291 | end
292 | assert_equal history, [
293 | :before_perform,
294 | :start_around_perform,
295 | :perform,
296 | :finish_around_perform,
297 | :after_perform,
298 | "oh no"
299 | ]
300 | end
301 | end
302 |
303 |
--------------------------------------------------------------------------------
/lib/resque.rb:
--------------------------------------------------------------------------------
1 | require 'mongo'
2 |
3 | begin
4 | require 'yajl'
5 | rescue LoadError
6 | require 'json'
7 | end
8 |
9 | require 'resque/version'
10 |
11 | require 'resque/errors'
12 |
13 | require 'resque/failure'
14 | require 'resque/failure/base'
15 |
16 | require 'resque/helpers'
17 | require 'resque/stat'
18 | require 'resque/job'
19 | require 'resque/worker'
20 | require 'resque/plugin'
21 |
22 | module Resque
23 | include Helpers
24 | extend self
25 |
26 | # Accepts a 'hostname:port' string pointing at the MongoDB server.
27 | def mongo=(server)
28 | case server
29 | when String
30 | host, port = server.split(':')
31 | @con = Mongo::Connection.new(host, port)
32 | @db = @con.db('monque')
33 | @mongo = @db.collection('monque')
34 | @workers = @db.collection('workers')
35 | @failures = @db.collection('failures')
36 | @stats = @db.collection('stats')
37 |
38 | add_indexes
39 | end
40 | end
41 |
42 |
43 | # Returns the current Mongo collection backing the queues. If none has been
44 | # created, will create a new one.
45 | def mongo
46 | return @mongo if @mongo
47 | self.mongo = 'localhost:27017'
48 | self.mongo
49 | end
50 |
51 | def mongo_workers
52 | return @workers if @workers
53 | self.mongo = 'localhost:27017'
54 | @workers
55 | end
56 |
57 | def mongo_failures
58 | return @failures if @failures
59 | self.mongo = 'localhost:27017'
60 | @failures
61 | end
62 |
63 | def mongo_stats
64 | return @stats if @stats
65 | self.mongo = 'localhost:27017'
66 | @stats
67 | end
68 |
69 | # The `before_first_fork` hook will be run in the **parent** process
70 | # only once, before forking to run the first job. Be careful- any
71 | # changes you make will be permanent for the lifespan of the
72 | # worker.
73 | #
74 | # Call with a block to set the hook.
75 | # Call with no arguments to return the hook.
76 | def before_first_fork(&block)
77 | block ? (@before_first_fork = block) : @before_first_fork
78 | end
79 |
80 | # Set a proc that will be called in the parent process before the
81 | # worker forks for the first time.
82 | def before_first_fork=(before_first_fork)
83 | @before_first_fork = before_first_fork
84 | end
85 |
86 | # The `before_fork` hook will be run in the **parent** process
87 | # before every job, so be careful- any changes you make will be
88 | # permanent for the lifespan of the worker.
89 | #
90 | # Call with a block to set the hook.
91 | # Call with no arguments to return the hook.
92 | def before_fork(&block)
93 | block ? (@before_fork = block) : @before_fork
94 | end
95 |
96 | # Set the before_fork proc.
97 | def before_fork=(before_fork)
98 | @before_fork = before_fork
99 | end
100 |
101 | # The `after_fork` hook will be run in the child process and is passed
102 | # the current job. Any changes you make, therefore, will only live as
103 | # long as the job currently being processed.
104 | #
105 | # Call with a block to set the hook.
106 | # Call with no arguments to return the hook.
107 | def after_fork(&block)
108 | block ? (@after_fork = block) : @after_fork
109 | end
110 |
111 | # Set the after_fork proc.
112 | def after_fork=(after_fork)
113 | @after_fork = after_fork
114 | end
115 |
116 | def to_s
117 | "Mongo Client connected to #{@con.host}"
118 | end
119 |
120 | def add_indexes
121 | @mongo.create_index :queue
122 | @workers.create_index :worker
123 | @stats.create_index :stat
124 | end
125 |
126 | def drop
127 | @mongo.drop if @mongo
128 | @workers.drop if @workers
129 | @failures.drop if @failures
130 | @stats.drop if @stats
131 | @mongo = nil
132 | end
133 |
134 | #
135 | # queue manipulation
136 | #
137 |
138 | # Pushes a job onto a queue. Queue name should be a string and the
139 | # item should be any JSON-able Ruby object.
140 | def push(queue, item)
141 | watch_queue(queue)
142 | mongo << { :queue => queue.to_s, :item => encode(item) }
143 | end
144 |
145 | # Pops a job off a queue. Queue name should be a string.
146 | #
147 | # Returns a Ruby object.
148 | def pop(queue)
149 | doc = mongo.find_and_modify( :query => { :queue => queue },
150 | :sort => [:natural, :desc],
151 | :remove => true )
152 | decode doc['item']
153 | rescue Mongo::OperationFailure => e
154 | return nil if e.message =~ /No matching object/
155 | raise e
156 | end
157 |
158 | # Returns an integer representing the size of a queue.
159 | # Queue name should be a string.
160 | def size(queue)
161 | mongo.find(:queue => queue).count
162 | end
163 |
164 | # Returns an array of items currently queued. Queue name should be
165 | # a string.
166 | #
167 | # start and count should be integer and can be used for pagination.
168 | # start is the item to begin, count is how many items to return.
169 | #
170 | # To get the 3rd page of a paginated list, 30 items per page, one would use:
171 | #   Resque.peek('my_list', 60, 30)
172 | def peek(queue, start = 0, count = 1)
173 | start, count = [start, count].map { |n| Integer(n) }
174 | res = mongo.find(:queue => queue).sort([:natural, :desc]).skip(start).limit(count).to_a
175 | res.collect! { |doc| decode(doc['item']) }
176 |
177 | if count == 1
178 | return nil if res.empty?
179 | res.first
180 | else
181 | return [] if res.empty?
182 | res
183 | end
184 | end
185 |
186 | # Returns an array of all known Resque queues as strings.
187 | def queues
188 | mongo.distinct(:queue)
189 | end
190 |
191 | # Given a queue name, completely deletes the queue.
192 | def remove_queue(queue)
193 | mongo.remove(:queue => queue)
194 | end
195 |
196 | # Used internally to keep track of which queues we've created.
197 | # Don't call this directly.
198 | def watch_queue(queue)
199 | # redis.sadd(:queues, queue.to_s)
200 | end
201 |
202 |
203 | #
204 | # job shortcuts
205 | #
206 |
207 | # This method can be used to conveniently add a job to a queue.
208 | # It assumes the class you're passing it is a real Ruby class (not
209 | # a string or reference) which either:
210 | #
211 | # a) has a @queue ivar set
212 | # b) responds to `queue`
213 | #
214 | # If either of those conditions are met, it will use the value obtained
215 | # from performing one of the above operations to determine the queue.
216 | #
217 | # If no queue can be inferred this method will raise a `Resque::NoQueueError`
218 | #
219 | # This method is considered part of the `stable` API.
220 | def enqueue(klass, *args)
221 | Job.create(queue_from_class(klass), klass, *args)
222 | end
223 |
224 | # This method can be used to conveniently remove a job from a queue.
225 | # It assumes the class you're passing it is a real Ruby class (not
226 | # a string or reference) which either:
227 | #
228 | # a) has a @queue ivar set
229 | # b) responds to `queue`
230 | #
231 | # If either of those conditions are met, it will use the value obtained
232 | # from performing one of the above operations to determine the queue.
233 | #
234 | # If no queue can be inferred this method will raise a `Resque::NoQueueError`
235 | #
236 | # If no args are given, this method will dequeue *all* jobs matching
237 | # the provided class. See `Resque::Job.destroy` for more
238 | # information.
239 | #
240 | # Returns the number of jobs destroyed.
241 | #
242 | # Example:
243 | #
244 | # # Removes all jobs of class `UpdateNetworkGraph`
245 | # Resque.dequeue(GitHub::Jobs::UpdateNetworkGraph)
246 | #
247 | # # Removes all jobs of class `UpdateNetworkGraph` with matching args.
248 | # Resque.dequeue(GitHub::Jobs::UpdateNetworkGraph, 'repo:135325')
249 | #
250 | # This method is considered part of the `stable` API.
251 | def dequeue(klass, *args)
252 | Job.destroy(queue_from_class(klass), klass, *args)
253 | end
254 |
255 | # Given a class, try to extrapolate an appropriate queue based on a
256 | # class instance variable or `queue` method.
257 | def queue_from_class(klass)
258 | klass.instance_variable_get(:@queue) ||
259 | (klass.respond_to?(:queue) and klass.queue)
260 | end
261 |
262 | # This method will return a `Resque::Job` object or a non-true value
263 | # depending on whether a job can be obtained. You should pass it the
264 | # precise name of a queue: case matters.
265 | #
266 | # This method is considered part of the `stable` API.
267 | def reserve(queue)
268 | Job.reserve(queue)
269 | end
270 |
271 |
272 | #
273 | # worker shortcuts
274 | #
275 |
276 | # A shortcut to Worker.all
277 | def workers
278 | Worker.all
279 | end
280 |
281 | # A shortcut to Worker.working
282 | def working
283 | Worker.working
284 | end
285 |
286 | # A shortcut to unregister_worker
287 | # useful for command line tool
288 | def remove_worker(worker_id)
289 | worker = Resque::Worker.find(worker_id)
290 | worker.unregister_worker
291 | end
292 |
293 | #
294 | # stats
295 | #
296 |
297 | # Returns a hash, similar to redis-rb's #info, of interesting stats.
298 | def info
299 | return {
300 | :pending => queues.inject(0) { |m,k| m + size(k) },
301 | :processed => Stat[:processed],
302 | :queues => queues.size,
303 | :workers => workers.size.to_i,
304 | :working => working.size,
305 | :failed => Stat[:failed],
306 | :servers => ["#{@con.host}:#{@con.port}"],
307 | :environment => ENV['RAILS_ENV'] || ENV['RACK_ENV'] || 'development'
308 | }
309 | end
310 |
311 | # Returns an array of all known Resque keys. In this Mongo-backed port the
312 | # keys are simply the known queue names.
313 | def keys
314 | queues
315 | end
316 | end
317 |
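
A short sketch of the queue shortcuts documented above (enqueue, peek, dequeue), assuming a job class that sets the @queue ivar; the class name and arguments are illustrative only:

    require 'resque'

    class Archive
      @queue = :file_serve                          # lets Resque.enqueue infer the queue

      def self.perform(repo_id, branch = 'master')
        # actual work goes here
      end
    end

    Resque.enqueue(Archive, 4653, 'mastered')       # pushes { 'class' => 'Archive', 'args' => [...] } onto :file_serve
    Resque.peek(:file_serve, 0, 10)                 # look at up to 10 queued payloads without popping them
    Resque.dequeue(Archive, 4653, 'mastered')       # removes matching jobs and returns the count destroyed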
--------------------------------------------------------------------------------
/lib/resque/worker.rb:
--------------------------------------------------------------------------------
1 | module Resque
2 | # A Resque Worker processes jobs. On platforms that support fork(2),
3 | # the worker will fork off a child to process each job. This ensures
4 | # a clean slate when beginning the next job and cuts down on gradual
5 | # memory growth as well as low level failures.
6 | #
7 | # It also ensures workers are always listening to signals from you,
8 | # their master, and can react accordingly.
9 | class Worker
10 | include Resque::Helpers
11 | extend Resque::Helpers
12 |
13 | # Whether the worker should log basic info to STDOUT
14 | attr_accessor :verbose
15 |
16 | # Whether the worker should log lots of info to STDOUT
17 | attr_accessor :very_verbose
18 |
19 | # Boolean indicating whether this worker can or can not fork.
20 | # Automatically set if a fork(2) fails.
21 | attr_accessor :cant_fork
22 |
23 | attr_writer :to_s
24 |
25 | # Returns an array of all worker objects.
26 | def self.all
27 | mongo_workers.distinct(:worker).map { |worker| find(worker) }.compact
28 | end
29 |
30 | # Returns an array of all worker objects currently processing
31 | # jobs.
32 | def self.working
33 | select = {}
34 | select['working_on'] = {"$exists" => true}
35 | working = mongo_workers.find(select).to_a
36 | working.map! {|w| w['worker'] }
37 | working.map {|w| find(w) }
38 | end
39 |
40 | # Returns a single worker object. Accepts a string id.
41 | def self.find(worker_id)
42 | worker = mongo_workers.find_one(:worker => worker_id)
43 | return nil unless worker
44 | queues = worker['worker'].split(',')
45 | worker = new(*queues)
46 | worker.to_s = worker_id
47 | worker
48 | end
49 |
50 | # Alias of `find`
51 | def self.attach(worker_id)
52 | find(worker_id)
53 | end
54 |
55 | # Given a string worker id, return a boolean indicating whether the
56 | # worker exists
57 | def self.exists?(worker_id)
58 | not mongo_workers.find_one(:worker => worker_id.to_s).nil?
59 | end
60 |
61 | # Workers should be initialized with an array of string queue
62 | # names. The order is important: a Worker will check the first
63 | # queue given for a job. If none is found, it will check the
64 | # second queue name given. If a job is found, it will be
65 | # processed. Upon completion, the Worker will again check the
66 | # first queue given, and so forth. In this way the queue list
67 | # passed to a Worker on startup defines the priorities of queues.
68 | #
69 | # If passed a single "*", this Worker will operate on all queues
70 | # in alphabetical order. Queues can be dynamically added or
71 | # removed without needing to restart workers using this method.
72 | def initialize(*queues)
73 | @queues = queues
74 | validate_queues
75 | end
76 |
77 | # A worker must be given a queue, otherwise it won't know what to
78 | # do with itself.
79 | #
80 | # You probably never need to call this.
81 | def validate_queues
82 | if @queues.nil? || @queues.empty?
83 | raise NoQueueError.new("Please give each worker at least one queue.")
84 | end
85 | end
86 |
87 | # This is the main workhorse method. Called on a Worker instance,
88 | # it begins the worker life cycle.
89 | #
90 | # The following events occur during a worker's life cycle:
91 | #
92 | # 1. Startup: Signals are registered, dead workers are pruned,
93 | # and this worker is registered.
94 | # 2. Work loop: Jobs are pulled from a queue and processed.
95 | # 3. Teardown: This worker is unregistered.
96 | #
97 | # Can be passed an integer representing the polling frequency.
98 | # The default is 5 seconds, but for a semi-active site you may
99 | # want to use a smaller value.
100 | #
101 | # Also accepts a block which will be passed the job as soon as it
102 | # has completed processing. Useful for testing.
103 | def work(interval = 5, &block)
104 | $0 = "resque: Starting"
105 | startup
106 |
107 | loop do
108 | break if @shutdown
109 |
110 | if not @paused and job = reserve
111 | log "got: #{job.inspect}"
112 | run_hook :before_fork
113 | working_on job
114 |
115 | if @child = fork
116 | rand # Reseeding
117 | procline "Forked #{@child} at #{Time.now.to_i}"
118 | Process.wait
119 | else
120 | procline "Processing #{job.queue} since #{Time.now.to_i}"
121 | perform(job, &block)
122 | exit! unless @cant_fork
123 | end
124 |
125 | done_working
126 | @child = nil
127 | else
128 | break if interval.to_i == 0
129 | log! "Sleeping for #{interval.to_i}"
130 | procline @paused ? "Paused" : "Waiting for #{@queues.join(',')}"
131 | sleep interval.to_i
132 | end
133 | end
134 |
135 | ensure
136 | unregister_worker
137 | end
138 |
139 | # DEPRECATED. Processes a single job. If none is given, it will
140 | # try to produce one. Usually run in the child.
141 | def process(job = nil, &block)
142 | return unless job ||= reserve
143 |
144 | working_on job
145 | perform(job, &block)
146 | ensure
147 | done_working
148 | end
149 |
150 | # Processes a given job in the child.
151 | def perform(job)
152 | begin
153 | run_hook :after_fork, job
154 | job.perform
155 | rescue Object => e
156 | log "#{job.inspect} failed: #{e.inspect}"
157 | job.fail(e)
158 | failed!
159 | else
160 | log "done: #{job.inspect}"
161 | ensure
162 | yield job if block_given?
163 | end
164 | end
165 |
166 | # Attempts to grab a job off one of the provided queues. Returns
167 | # nil if no job can be found.
168 | def reserve
169 | queues.each do |queue|
170 | log! "Checking #{queue}"
171 | if job = Resque::Job.reserve(queue)
172 | log! "Found job on #{queue}"
173 | return job
174 | end
175 | end
176 |
177 | nil
178 | end
179 |
180 | # Returns a list of queues to use when searching for a job.
181 | # A splat ("*") means you want every queue (in alpha order) - this
182 | # can be useful for dynamically adding new queues.
183 | def queues
184 | @queues[0] == "*" ? Resque.queues.sort : @queues
185 | end
186 |
187 | # Not every platform supports fork. Here we do our magic to
188 | # determine if yours does.
189 | def fork
190 | @cant_fork = true if $TESTING
191 |
192 | return if @cant_fork
193 |
194 | begin
195 | # IronRuby doesn't support `Kernel.fork` yet
196 | if Kernel.respond_to?(:fork)
197 | Kernel.fork
198 | else
199 | raise NotImplementedError
200 | end
201 | rescue NotImplementedError
202 | @cant_fork = true
203 | nil
204 | end
205 | end
206 |
207 | # Runs all the methods needed when a worker begins its lifecycle.
208 | def startup
209 | enable_gc_optimizations
210 | register_signal_handlers
211 | prune_dead_workers
212 | run_hook :before_first_fork
213 | register_worker
214 |
215 | # Fix buffering so we can `rake resque:work > resque.log` and
216 | # get output from the child in there.
217 | $stdout.sync = true
218 | end
219 |
220 | # Enables GC Optimizations if you're running REE.
221 | # http://www.rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
222 | def enable_gc_optimizations
223 | if GC.respond_to?(:copy_on_write_friendly=)
224 | GC.copy_on_write_friendly = true
225 | end
226 | end
227 |
228 | # Registers the various signal handlers a worker responds to.
229 | #
230 | # TERM: Shutdown immediately, stop processing jobs.
231 | # INT: Shutdown immediately, stop processing jobs.
232 | # QUIT: Shutdown after the current job has finished processing.
233 | # USR1: Kill the forked child immediately, continue processing jobs.
234 | # USR2: Don't process any new jobs
235 | # CONT: Start processing jobs again after a USR2
236 | def register_signal_handlers
237 | trap('TERM') { shutdown! }
238 | trap('INT') { shutdown! }
239 |
240 | begin
241 | trap('QUIT') { shutdown }
242 | trap('USR1') { kill_child }
243 | trap('USR2') { pause_processing }
244 | trap('CONT') { unpause_processing }
245 | rescue ArgumentError
246 | warn "Signals QUIT, USR1, USR2, and/or CONT not supported."
247 | end
248 |
249 | log! "Registered signals"
250 | end
251 |
252 | # Schedule this worker for shutdown. Will finish processing the
253 | # current job.
254 | def shutdown
255 | log 'Exiting...'
256 | @shutdown = true
257 | end
258 |
259 | # Kill the child and shutdown immediately.
260 | def shutdown!
261 | shutdown
262 | kill_child
263 | end
264 |
265 | # Kills the forked child immediately, without remorse. The job it
266 | # is processing will not be completed.
267 | def kill_child
268 | if @child
269 | log! "Killing child at #{@child}"
270 | if system("ps -o pid,state -p #{@child}")
271 | Process.kill("KILL", @child) rescue nil
272 | else
273 | log! "Child #{@child} not found, restarting."
274 | shutdown
275 | end
276 | end
277 | end
278 |
279 | # Stop processing jobs after the current one has completed (if we're
280 | # currently running one).
281 | def pause_processing
282 | log "USR2 received; pausing job processing"
283 | @paused = true
284 | end
285 |
286 | # Start processing jobs again after a pause
287 | def unpause_processing
288 | log "CONT received; resuming job processing"
289 | @paused = false
290 | end
291 |
292 | # Looks for any workers which should be running on this server
293 | # and, if they're not, removes them from Redis.
294 | #
295 | # This is a form of garbage collection. If a server is killed by a
296 | # hard shutdown, power failure, or something else beyond our
297 | # control, the Resque workers will not die gracefully and therefore
298 | # will leave stale state information in Redis.
299 | #
300 | # By checking the current Redis state against the actual
301 | # environment, we can determine if Redis is old and clean it up a bit.
302 | def prune_dead_workers
303 | all_workers = Worker.all
304 | known_workers = worker_pids unless all_workers.empty?
305 | all_workers.each do |worker|
306 | host, pid, queues = worker.to_s.split(':')
307 | next unless host == hostname
308 | next if known_workers.include?(pid)
309 | log! "Pruning dead worker: #{worker}"
310 | worker.unregister_worker
311 | end
312 | end
313 |
314 | # Registers ourself as a worker. Useful when entering the worker
315 | # lifecycle on startup.
316 | def register_worker
317 | mongo_workers.insert(:worker => self.to_s)
318 | started!
319 | end
320 |
321 | # Runs a named hook, passing along any arguments.
322 | def run_hook(name, *args)
323 | return unless hook = Resque.send(name)
324 | msg = "Running #{name} hook"
325 | msg << " with #{args.inspect}" if args.any?
326 | log msg
327 |
328 | args.any? ? hook.call(*args) : hook.call
329 | end
330 |
331 | # Unregisters ourself as a worker. Useful when shutting down.
332 | def unregister_worker
333 | # If we're still processing a job, make sure it gets logged as a
334 | # failure.
335 | if (hash = processing) && !hash.empty?
336 | job = Job.new(hash['queue'], hash['payload'])
337 | # Ensure the proper worker is attached to this job, even if
338 | # it's not the precise instance that died.
339 | job.worker = self
340 | job.fail(DirtyExit.new)
341 | end
342 |
343 | mongo_workers.remove(:worker => self.to_s)
344 |
345 | Stat.clear("processed:#{self}")
346 | Stat.clear("failed:#{self}")
347 | end
348 |
349 | # Given a job, tells Redis we're working on it. Useful for seeing
350 | # what workers are doing and when.
351 | def working_on(job)
352 | job.worker = self
353 | data = encode \
354 | :queue => job.queue,
355 | :run_at => Time.now.to_s,
356 | :payload => job.payload
357 | working_on = {'working_on' => data}
358 | mongo_workers.update({:worker => self.to_s}, {'$set' => working_on}, :upsert => true )
359 | end
360 |
361 | # Called when we are done working - clears our `working_on` state
362 | # and tells Redis we processed a job.
363 | def done_working
364 | processed!
365 | working_on = {'working_on' => 1}
366 | mongo_workers.update({:worker => self.to_s}, {'$unset' => working_on})
367 | end
368 |
369 | # How many jobs has this worker processed? Returns an int.
370 | def processed
371 | Stat["processed:#{self}"]
372 | end
373 |
374 | # Tell Redis we've processed a job.
375 | def processed!
376 | Stat << "processed"
377 | Stat << "processed:#{self}"
378 | end
379 |
380 | # How many failed jobs has this worker seen? Returns an int.
381 | def failed
382 | Stat["failed:#{self}"]
383 | end
384 |
385 | # Tells Redis we've failed a job.
386 | def failed!
387 | Stat << "failed"
388 | Stat << "failed:#{self}"
389 | end
390 |
391 | # What time did this worker start? Returns an instance of `Time`
392 | def started
393 | worker = mongo_workers.find_one(:worker => self.to_s)
394 | return nil if !worker
395 | worker['started']
396 | end
397 |
398 | # Tell Redis we've started
399 | def started!
400 | started = {'started' => Time.now.to_s}
401 | mongo_workers.update({:worker => self.to_s}, {'$set' => started})
402 | end
403 |
404 | # Returns a hash explaining the Job we're currently processing, if any.
405 | def job
406 | worker = mongo_workers.find_one(:worker => self.to_s)
407 | return {} if !worker
408 | decode(worker['working_on']) || {}
409 | end
410 | alias_method :processing, :job
411 |
412 | # Boolean - true if working, false if not
413 | def working?
414 | state == :working
415 | end
416 |
417 | # Boolean - true if idle, false if not
418 | def idle?
419 | state == :idle
420 | end
421 |
422 | # Returns a symbol representing the current worker state,
423 | # which can be either :working or :idle
424 | def state
425 | worker = mongo_workers.find_one(:worker => self.to_s)
426 | worker ? :working : :idle
427 | end
428 |
429 | # Is this worker the same as another worker?
430 | def ==(other)
431 | to_s == other.to_s
432 | end
433 |
434 | def inspect
435 | "#"
436 | end
437 |
438 | # The string representation is the same as the id for this worker
439 | # instance. Can be used with `Worker.find`.
440 | def to_s
441 | @to_s ||= "#{hostname}:#{Process.pid}:#{@queues.join(',')}"
442 | end
443 | alias_method :worker_id, :to_s
444 |
445 | # chomp'd hostname of this machine
446 | def hostname
447 | @hostname ||= `hostname`.chomp
448 | end
449 |
450 | # Returns an array of string pids of all the other workers on this
451 | # machine. Useful when pruning dead workers on startup.
452 | def worker_pids
453 | `ps -A -o pid,command | grep [r]esque`.split("\n").map do |line|
454 | line.split(' ')[0]
455 | end
456 | end
457 |
458 | # Given a string, sets the procline ($0) and logs.
459 | # Procline is always in the format of:
460 | # resque-VERSION: STRING
461 | def procline(string)
462 | $0 = "resque-#{Resque::Version}: #{string}"
463 | log! $0
464 | end
465 |
466 | # Log a message to STDOUT if we are verbose or very_verbose.
467 | def log(message)
468 | if verbose
469 | puts "*** #{message}"
470 | elsif very_verbose
471 | time = Time.now.strftime('%I:%M:%S %Y-%m-%d')
472 | puts "** [#{time}] #$$: #{message}"
473 | end
474 | end
475 |
476 | # Logs a very verbose message to STDOUT.
477 | def log!(message)
478 | log message if very_verbose
479 | end
480 | end
481 | end
482 |
--------------------------------------------------------------------------------
/README.markdown:
--------------------------------------------------------------------------------
1 | Resque-mongo
2 | ============
3 | Resque-mongo is a fork of [Resque][resque] that uses MongoDB as a
4 | backend instead of Redis. This fork is a work in progress: all the
5 | library functionality has been ported (all tests pass) and the
6 | monitoring Sinatra app works except for the "stats" panel, although
7 | plenty of details still need to be taken care of.
8 |
9 | Resque-mongo uses a fairly new feature of Mongo, [findAndModify()][fnr],
10 | which is only supported by version 0.20+ of the Ruby mongo driver.
11 |
12 | Also, check your MongoDB version: 1.3.0 or higher is required.
13 |
14 | [fnr]: http://www.mongodb.org/display/DOCS/findandmodify+Command
15 |
16 | gem install resque-mongo
17 | # Rails integration, config/environment.rb
18 | config.gem 'resque-mongo', :lib => 'resque'
19 |
20 |
21 | Resque
22 | ======
23 |
24 | Resque is a Redis-backed library for creating background jobs, placing
25 | those jobs on multiple queues, and processing them later.
26 |
27 | Background jobs can be any Ruby class or module that responds to
28 | `perform`. Your existing classes can easily be converted to background
29 | jobs or you can create new classes specifically to do work. Or, you
30 | can do both.
31 |
32 | Resque is heavily inspired by DelayedJob (which rocks) and is
33 | comprised of three parts:
34 |
35 | 1. A Ruby library for creating, querying, and processing jobs
36 | 2. A Rake task for starting a worker which processes jobs
37 | 3. A Sinatra app for monitoring queues, jobs, and workers.
38 |
39 | Resque workers can be distributed between multiple machines,
40 | support priorities, are resilient to memory bloat / "leaks," are
41 | optimized for REE (but work on MRI and JRuby), tell you what they're
42 | doing, and expect failure.
43 |
44 | Resque queues are persistent; support constant time, atomic push and
45 | pop (thanks to Redis); provide visibility into their contents; and
46 | store jobs as simple JSON packages.
47 |
48 | The Resque frontend tells you what workers are doing, what workers are
49 | not doing, what queues you're using, what's in those queues, provides
50 | general usage stats, and helps you track failures.
51 |
52 |
53 | The Blog Post
54 | -------------
55 |
56 | For the backstory, philosophy, and history of Resque's beginnings,
57 | please see [the blog post][0].
58 |
59 |
60 | Overview
61 | --------
62 |
63 | Resque allows you to create jobs and place them on a queue, then,
64 | later, pull those jobs off the queue and process them.
65 |
66 | Resque jobs are Ruby classes (or modules) which respond to the
67 | `perform` method. Here's an example:
68 |
69 | class Archive
70 | @queue = :file_serve
71 |
72 | def self.perform(repo_id, branch = 'master')
73 | repo = Repository.find(repo_id)
74 | repo.create_archive(branch)
75 | end
76 | end
77 |
78 | The `@queue` class instance variable determines which queue `Archive`
79 | jobs will be placed in. Queues are arbitrary and created on the fly -
80 | you can name them whatever you want and have as many as you want.
81 |
82 | To place an `Archive` job on the `file_serve` queue, we might add this
83 | to our application's pre-existing `Repository` class:
84 |
85 | class Repository
86 | def async_create_archive(branch)
87 | Resque.enqueue(Archive, self.id, branch)
88 | end
89 | end
90 |
91 | Now when we call `repo.async_create_archive('masterbrew')` in our
92 | application, a job will be created and placed on the `file_serve`
93 | queue.
94 |
95 | Later, a worker will run something like this code to process the job:
96 |
97 | klass, args = Resque.reserve(:file_serve)
98 | klass.perform(*args) if klass.respond_to? :perform
99 |
100 | Which translates to:
101 |
102 | Archive.perform(44, 'masterbrew')
103 |
104 | Let's start a worker to run `file_serve` jobs:
105 |
106 | $ cd app_root
107 | $ QUEUE=file_serve rake resque:work
108 |
109 | This starts one Resque worker and tells it to work off the
110 | `file_serve` queue. As soon as it's ready it'll try to run the
111 | `Resque.reserve` code snippet above and process jobs until it can't
112 | find any more, at which point it will sleep for a small period and
113 | repeatedly poll the queue for more jobs.
114 |
115 | Workers can be given multiple queues (a "queue list") and run on
116 | multiple machines. In fact they can be run anywhere with network
117 | access to the Redis server.
118 |
119 |
120 | Jobs
121 | ----
122 |
123 | What should you run in the background? Anything that takes any time at
124 | all. Slow INSERT statements, disk manipulation, data processing, etc.
125 |
126 | At GitHub we use Resque to process the following types of jobs:
127 |
128 | * Warming caches
129 | * Counting disk usage
130 | * Building tarballs
131 | * Building Rubygems
132 | * Firing off web hooks
133 | * Creating events in the db and pre-caching them
134 | * Building graphs
135 | * Deleting users
136 | * Updating our search index
137 |
138 | As of writing we have about 35 different types of background jobs.
139 |
140 | Keep in mind that you don't need a web app to use Resque - we just
141 | mention "foreground" and "background" because they make conceptual
142 | sense. You could easily be spidering sites and sticking data which
143 | needs to be crunched later into a queue.
144 |
145 |
146 | ### Persistence
147 |
148 | Jobs are persisted to queues as JSON objects. Let's take our `Archive`
149 | example from above. We'll run the following code to create a job:
150 |
151 | repo = Repository.find(44)
152 | repo.async_create_archive('masterbrew')
153 |
154 | The following JSON will be stored in the `file_serve` queue:
155 |
156 | {
157 | 'class': 'Archive',
158 | 'args': [ 44, 'masterbrew' ]
159 | }
160 |
161 | Because of this your jobs must only accept arguments that can be JSON encoded.
162 |
163 | So instead of doing this:
164 |
165 | Resque.enqueue(Archive, self, branch)
166 |
167 | do this:
168 |
169 | Resque.enqueue(Archive, self.id, branch)
170 |
171 | This is why our above example (and all the examples in `examples/`)
172 | uses object IDs instead of passing around the objects.
173 |
174 | While this is less convenient than just sticking a marshaled object
175 | in the database, it gives you a slight advantage: your jobs will be
176 | run against the most recent version of an object because they need to
177 | pull from the DB or cache.
178 |
179 | If your jobs were run against marshaled objects, they could
180 | potentially be operating on a stale record with out-of-date information.
181 |
182 |
183 | ### send_later / async
184 |
185 | Want something like DelayedJob's `send_later` or the ability to use
186 | instance methods instead of just methods for jobs? See the `examples/`
187 | directory for goodies.
188 |
189 | We plan to provide first class `async` support in a future release.
190 |
191 |
192 | ### Failure
193 |
194 | If a job raises an exception, it is logged and handed off to the
195 | `Resque::Failure` module. Failures are logged either locally in Redis
196 | or using some different backend.
197 |
198 | For example, Resque ships with Hoptoad support.
199 |
200 | Keep this in mind when writing your jobs: you may want to throw
201 | exceptions you would not normally throw in order to assist debugging.
202 |
203 |
204 | Workers
205 | -------
206 |
207 | Resque workers are rake tasks that run forever. They basically do this:
208 |
209 | start
210 | loop do
211 | if job = reserve
212 | job.process
213 | else
214 | sleep 5
215 | end
216 | end
217 | shutdown
218 |
219 | Starting a worker is simple. Here's our example from earlier:
220 |
221 | $ QUEUE=file_serve rake resque:work
222 |
223 | By default Resque won't know about your application's
224 | environment. That is, it won't be able to find and run your jobs - it
225 | needs to load your application into memory.
226 |
227 | If we've installed Resque as a Rails plugin, we might run this command
228 | from our RAILS_ROOT:
229 |
230 | $ QUEUE=file_serve rake environment resque:work
231 |
232 | This will load the environment before starting a worker. Alternately
233 | we can define a `resque:setup` task with a dependency on the
234 | `environment` rake task:
235 |
236 | task "resque:setup" => :environment
237 |
238 | GitHub's setup task looks like this:
239 |
240 | task "resque:setup" => :environment do
241 | Grit::Git.git_timeout = 10.minutes
242 | end
243 |
244 | We don't want the `git_timeout` as high as 10 minutes in our web app,
245 | but in the Resque workers it's fine.
246 |
247 |
248 | ### Logging
249 |
250 | Workers support basic logging to STDOUT. If you start them with the
251 | `VERBOSE` env variable set, they will print basic debugging
252 | information. You can also set the `VVERBOSE` (very verbose) env
253 | variable.
254 |
255 | $ VVERBOSE=1 QUEUE=file_serve rake environment resque:work
256 |
257 |
258 | ### Priorities and Queue Lists
259 |
260 | Resque doesn't support numeric priorities but instead uses the order
261 | of queues you give it. We call this list of queues the "queue list."
262 |
263 | Let's say we add a `warm_cache` queue in addition to our `file_serve`
264 | queue. We'd now start a worker like so:
265 |
266 | $ QUEUES=file_serve,warm_cache rake resque:work
267 |
268 | When the worker looks for new jobs, it will first check
269 | `file_serve`. If it finds a job, it'll process it then check
270 | `file_serve` again. It will keep checking `file_serve` until no more
271 | jobs are available. At that point, it will check `warm_cache`. If it
272 | finds a job it'll process it then check `file_serve` (repeating the
273 | whole process).
274 |
275 | In this way you can prioritize certain queues. At GitHub we start our
276 | workers with something like this:
277 |
278 | $ QUEUES=critical,archive,high,low rake resque:work
279 |
280 | Notice the `archive` queue - it is specialized and in our future
281 | architecture will only be run from a single machine.
282 |
283 | At that point we'll start workers on our generalized background
284 | machines with this command:
285 |
286 | $ QUEUES=critical,high,low rake resque:work
287 |
288 | And workers on our specialized archive machine with this command:
289 |
290 | $ QUEUE=archive rake resque:work
291 |
292 |
293 | ### Running All Queues
294 |
295 | If you want your workers to work off of every queue, including new
296 | queues created on the fly, you can use a splat:
297 |
298 | $ QUEUE=* rake resque:work
299 |
300 | Queues will be processed in alphabetical order.
301 |
302 |
303 | ### Running Multiple Workers
304 |
305 | At GitHub we use god to start and stop multiple workers. A sample god
306 | configuration file is included under `examples/god`. We recommend this
307 | method.
308 |
309 | If you'd like to run multiple workers in development mode, you can do
310 | so using the `resque:workers` rake task:
311 |
312 | $ COUNT=5 QUEUE=* rake resque:workers
313 |
314 | This will spawn five Resque workers, each in its own thread. Hitting
315 | ctrl-c should be sufficient to stop them all.
316 |
317 |
318 | ### Forking
319 |
320 | On certain platforms, when a Resque worker reserves a job it
321 | immediately forks a child process. The child processes the job then
322 | exits. When the child has exited successfully, the worker reserves
323 | another job and repeats the process.
324 |
325 | Why?
326 |
327 | Because Resque assumes chaos.
328 |
329 | Resque assumes your background workers will lock up, run too long, or
330 | have unwanted memory growth.
331 |
332 | If Resque workers processed jobs themselves, it'd be hard to whip them
333 | into shape. Let's say one is using too much memory: you send it a
334 | signal that says "shutdown after you finish processing the current
335 | job," and it does so. It then starts up again - loading your entire
336 | application environment. This adds useless CPU cycles and causes a
337 | delay in queue processing.
338 |
339 | Plus, what if it's using too much memory and has stopped responding to
340 | signals?
341 |
342 | Thanks to Resque's parent / child architecture, jobs that use too much memory
343 | release that memory upon completion. No unwanted growth.
344 |
345 | And what if a job is running too long? You'd need to `kill -9` it then
346 | start the worker again. With Resque's parent / child architecture you
347 | can tell the parent to forcefully kill the child then immediately
348 | start processing more jobs. No startup delay or wasted cycles.
349 |
350 | The parent / child architecture helps us keep tabs on what workers are
351 | doing, too. By eliminating the need to `kill -9` workers we can have
352 | parents remove themselves from the global listing of workers. If we
353 | just ruthlessly killed workers, we'd need a separate watchdog process
354 | to add and remove them to the global listing - which becomes
355 | complicated.
356 |
357 | Workers instead handle their own state.
358 |
359 |
360 | ### Parents and Children
361 |
362 | Here's a parent / child pair doing some work:
363 |
364 | $ ps -e -o pid,command | grep [r]esque
365 | 92099 resque: Forked 92102 at 1253142769
366 | 92102 resque: Processing file_serve since 1253142769
367 |
368 | You can clearly see that process 92099 forked 92102, which has been
369 | working since 1253142769.
370 |
371 | (By advertising the time they began processing you can easily use monit
372 | or god to kill stale workers.)
373 |
374 | When a parent process is idle, it lets you know what queues it is
375 | waiting for work on:
376 |
377 | $ ps -e -o pid,command | grep [r]esque
378 | 92099 resque: Waiting for file_serve,warm_cache
379 |
380 |
381 | ### Signals
382 |
383 | Resque workers respond to a few different signals:
384 |
385 | * `QUIT` - Wait for child to finish processing then exit
386 | * `TERM` / `INT` - Immediately kill child then exit
387 | * `USR1` - Immediately kill child but don't exit
388 | * `USR2` - Don't start to process any new jobs
389 | * `CONT` - Start to process new jobs again after a USR2
390 |
391 | If you want to gracefully shut down a Resque worker, use `QUIT`.
392 |
393 | If you want to kill a stale or stuck child, use `USR1`. Processing
394 | will continue as normal unless the child was not found. In that case
395 | Resque assumes the parent process is in a bad state and shuts down.
396 |
397 | If you want to kill a stale or stuck child and shut down, use `TERM`.
398 |
399 | If you want to stop processing jobs, but want to leave the worker running
400 | (for example, to temporarily alleviate load), use `USR2` to stop processing,
401 | then `CONT` to start it again.
402 |
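For example, using the pids shown by `ps` (as in the "Parents and Children" section above), you could pause, resume, and then gracefully stop the parent worker:

    $ ps -e -o pid,command | grep [r]esque
    92099 resque: Waiting for file_serve,warm_cache
    $ kill -s USR2 92099   # stop picking up new jobs
    $ kill -s CONT 92099   # start picking up jobs again
    $ kill -s QUIT 92099   # finish the current job (if any), then exit
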
403 | ### Mysql::Error: MySQL server has gone away
404 |
405 | If your workers remain idle for too long they may lose their MySQL
406 | connection. If that happens we recommend using [this
407 | Gist](http://gist.github.com/238999).
408 |
409 |
410 | The Front End
411 | -------------
412 |
413 | Resque comes with a Sinatra-based front end for seeing what's up with
414 | your queue.
415 |
416 | 
417 |
418 | ### Standalone
419 |
420 | If you've installed Resque as a gem, running the front end standalone is easy:
421 |
422 | $ resque-web
423 |
424 | It's a thin layer around `rackup` so it's configurable as well:
425 |
426 | $ resque-web -p 8282
427 |
428 | If you have a Resque config file you want evaluated, just pass it to
429 | the script as the final argument:
430 |
431 | $ resque-web -p 8282 rails_root/config/initializers/resque.rb
432 |
433 | You can also set the namespace directly using `resque-web`:
434 |
435 | $ resque-web -p 8282 -N myapp
436 |
437 | ### Passenger
438 |
439 | Using Passenger? Resque ships with a `config.ru` you can use. See
440 | Phusion's guide:
441 |
442 |
443 |
444 | ### Rack::URLMap
445 |
446 | If you want to load Resque on a subpath, possibly alongside other
447 | apps, it's easy to do with Rack's `URLMap`:
448 |
449 | require 'resque/server'
450 |
451 | run Rack::URLMap.new \
452 | "/" => Your::App.new,
453 | "/resque" => Resque::Server.new
454 |
455 | Check `examples/demo/config.ru` for a functional example (including
456 | HTTP basic auth).
457 |
458 |
459 | Resque vs DelayedJob
460 | --------------------
461 |
462 | How does Resque compare to DelayedJob, and why would you choose one
463 | over the other?
464 |
465 | * Resque supports multiple queues
466 | * DelayedJob supports finer grained priorities
467 | * Resque workers are resilient to memory leaks / bloat
468 | * DelayedJob workers are extremely simple and easy to modify
469 | * Resque requires Redis
470 | * DelayedJob requires ActiveRecord
471 | * Resque can only place JSONable Ruby objects on a queue as arguments
472 | * DelayedJob can place _any_ Ruby object on its queue as arguments
473 | * Resque includes a Sinatra app for monitoring what's going on
474 | * DelayedJob can be queried from within your Rails app if you want to
475 | add an interface
476 |
477 | If you're doing Rails development, you already have a database and
478 | ActiveRecord. DelayedJob is super easy to set up and works great.
479 | GitHub used it for many months to process almost 200 million jobs.
480 |
481 | Choose Resque if:
482 |
483 | * You need multiple queues
484 | * You don't care about / dislike numeric priorities
485 | * You don't need to persist every Ruby object ever
486 | * You have potentially huge queues
487 | * You want to see what's going on
488 | * You expect a lot of failure / chaos
489 | * You can set up Redis
490 | * You're not running short on RAM
491 |
492 | Choose DelayedJob if:
493 |
494 | * You like numeric priorities
495 | * You're not doing a gigantic amount of jobs each day
496 | * Your queue stays small and nimble
497 | * There is not a lot of failure / chaos
498 | * You want to easily throw anything on the queue
499 | * You don't want to set up Redis
500 |
501 | In no way is Resque a "better" DelayedJob, so make sure you pick the
502 | tool that's best for your app.
503 |
504 |
505 | Installing Redis
506 | ----------------
507 |
508 | Resque requires Redis 0.900 or higher.
509 |
510 | Resque uses Redis' lists for its queues. It also stores worker state
511 | data in Redis.
512 |
513 | #### Homebrew
514 |
515 | If you're on OS X, Homebrew is the simplest way to install Redis:
516 |
517 | $ brew install redis
518 | $ redis-server /usr/local/etc/redis.conf
519 |
520 | You now have a Redis daemon running on 6379.
521 |
522 | #### Via Resque
523 |
524 | Resque includes Rake tasks (thanks to Ezra's redis-rb) that will
525 | install and run Redis for you:
526 |
527 | $ git clone git://github.com/defunkt/resque.git
528 | $ cd resque
529 | $ rake redis:install dtach:install
530 | $ rake redis:start
531 |
532 | Or, if you don't have admin access on your machine:
533 |
534 | $ git clone git://github.com/defunkt/resque.git
535 | $ cd resque
536 | $ PREFIX= rake redis:install dtach:install
537 | $ rake redis:start
538 |
539 | You now have Redis running on 6379. Wait a second then hit ctrl-\ to
540 | detach and keep it running in the background.
541 |
542 | The demo is probably the best way to figure out how to put the parts
543 | together. But, it's not that hard.
544 |
545 |
546 | Resque Dependencies
547 | -------------------
548 |
549 | gem install redis redis-namespace yajl-ruby
550 |
551 | If you cannot install `yajl-ruby` (JRuby?), you can install the `json`
552 | gem and Resque will use it instead.
553 |
554 | When problems arise, make sure you have the newest versions of the
555 | `redis` and `redis-namespace` gems.
556 |
557 |
558 | Installing Resque
559 | -----------------
560 |
561 | ### In a Rack app, as a gem
562 |
563 | First install the gem.
564 |
565 | $ gem install resque
566 |
567 | Next include it in your application.
568 |
569 | require 'resque'
570 |
571 | Now start your application:
572 |
573 | rackup config.ru
574 |
575 | That's it! You can now create Resque jobs from within your app.
576 |
577 | To start a worker, create a Rakefile in your app's root (or add this
578 | to an existing Rakefile):
579 |
580 | require 'your/app'
581 | require 'resque/tasks'
582 |
583 | Now:
584 |
585 | $ QUEUE=* rake resque:work
586 |
587 | Alternately you can define a `resque:setup` hook in your Rakefile if you
588 | don't want to load your app every time rake runs.
589 |
590 |
591 | ### In a Rails app, as a gem
592 |
593 | First install the gem.
594 |
595 | $ gem install resque
596 |
597 | Next include it in your application.
598 |
599 | $ cat config/initializers/load_resque.rb
600 | require 'resque'
601 |
602 | Now start your application:
603 |
604 | $ ./script/server
605 |
606 | That's it! You can now create Resque jobs from within your app.
607 |
608 | To start a worker, add this to your Rakefile in `RAILS_ROOT`:
609 |
610 | require 'resque/tasks'
611 |
612 | Now:
613 |
614 | $ QUEUE=* rake environment resque:work
615 |
616 | Don't forget you can define a `resque:setup` hook in
617 | `lib/tasks/whatever.rake` that loads the `environment` task every time.
618 |
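For instance, a minimal `lib/tasks/resque.rake` (the file name is arbitrary) could look like this, mirroring the `resque:setup` example earlier in this README:

    # lib/tasks/resque.rake
    require 'resque/tasks'

    # Load the Rails environment whenever a worker starts.
    task "resque:setup" => :environment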
619 |
620 | ### In a Rails app, as a plugin
621 |
622 | $ ./script/plugin install git://github.com/defunkt/resque
623 |
624 | That's it! Resque will automatically be available when your Rails app
625 | loads.
626 |
627 | To start a worker:
628 |
629 | $ QUEUE=* rake environment resque:work
630 |
631 | Don't forget you can define a `resque:setup` hook in
632 | `lib/tasks/whatever.rake` that loads the `environment` task every time.
633 |
634 |
635 | Configuration
636 | -------------
637 |
638 | You may want to change the Redis host and port Resque connects to, or
639 | set various other options at startup.
640 |
641 | Resque has a `redis` setter which can be given a string or a Redis
642 | object. This means if you're already using Redis in your app, Resque
643 | can re-use the existing connection.
644 |
645 | String: `Resque.redis = 'localhost:6379'`
646 |
647 | Redis: `Resque.redis = $redis`
648 |
649 | For our rails app we have a `config/initializers/resque.rb` file where
650 | we load `config/resque.yml` by hand and set the Redis information
651 | appropriately.
652 |
653 | Here's our `config/resque.yml`:
654 |
655 | development: localhost:6379
656 | test: localhost:6379
657 | staging: redis1.se.github.com:6379
658 | fi: localhost:6379
659 | production: redis1.ae.github.com:6379
660 |
661 | And our initializer:
662 |
663 | rails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..'
664 | rails_env = ENV['RAILS_ENV'] || 'development'
665 |
666 | resque_config = YAML.load_file(rails_root + '/config/resque.yml')
667 | Resque.redis = resque_config[rails_env]
668 |
669 | Easy peasy! Why not just use `RAILS_ROOT` and `RAILS_ENV`? Because
670 | this way we can tell our Sinatra app about the config file:
671 |
672 | $ RAILS_ENV=production resque-web rails_root/config/initializers/resque.rb
673 |
674 | Now everyone is on the same page.
675 |
676 |
677 | Plugins and Hooks
678 | -----------------
679 |
680 | For a list of available plugins see
681 | .
682 |
683 | If you'd like to write your own plugin, or want to customize Resque
684 | using hooks (such as `Resque.after_fork`), see
685 | [docs/HOOKS.md](http://github.com/defunkt/resque/blob/master/docs/HOOKS.md).
686 |
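As a small taste, the worker runs an `after_fork` hook in the child before performing each job. A common use, sketched here under the assumption of a Rails app with ActiveRecord, is re-establishing connections that don't survive the fork:

    # config/initializers/resque.rb
    Resque.after_fork = Proc.new do |job|
      # Forked children inherit the parent's sockets; reconnect cleanly.
      ActiveRecord::Base.establish_connection
    end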
687 |
688 | Namespaces
689 | ----------
690 |
691 | If you're running multiple, separate instances of Resque you may want
692 | to namespace the keyspaces so they do not overlap. This is not unlike
693 | the approach taken by many memcached clients.
694 |
695 | This feature is provided by the [redis-namespace][rs] library, which
696 | Resque uses by default to separate the keys it manages from other keys
697 | in your Redis server.
698 |
699 | Simply use the `Resque.redis.namespace` accessor:
700 |
701 | Resque.redis.namespace = "resque:GitHub"
702 |
703 | We recommend sticking this in your initializer somewhere after Redis
704 | is configured.
705 |
706 |
707 | Demo
708 | ----
709 |
710 | Resque ships with a demo Sinatra app for creating jobs that are later
711 | processed in the background.
712 |
713 | Try it out by looking at the README, found at `examples/demo/README.markdown`.
714 |
715 |
716 | Monitoring
717 | ----------
718 |
719 | ### god
720 |
721 | If you're using god to monitor Resque, we have provided example
722 | configs in `examples/god/`. One is for starting / stopping workers,
723 | the other is for killing workers that have been running too long.
724 |
725 | ### monit
726 |
727 | If you're using monit, `examples/monit/resque.monit` is provided free
728 | of charge. This is **not** used by GitHub in production, so please
729 | send patches for any tweaks or improvements you can make to it.
730 |
731 |
732 | Development
733 | -----------
734 |
735 | Want to hack on Resque?
736 |
737 | First clone the repo and run the tests:
738 |
739 | git clone git://github.com/defunkt/resque.git
740 | cd resque
741 | rake test
742 |
743 | If the tests do not pass make sure you have Redis installed
744 | correctly (though we make an effort to tell you if we feel this is the
745 | case). The tests attempt to start an isolated instance of Redis to
746 | run against.
747 |
748 | Also make sure you've installed all the dependencies correctly. For
749 | example, try loading the `redis-namespace` gem after you've installed
750 | it:
751 |
752 | $ irb
753 | >> require 'rubygems'
754 | => true
755 | >> require 'redis/namespace'
756 | => true
757 |
758 | If you get an error requiring any of the dependencies, you may have
759 | failed to install them or be seeing load path issues.
760 |
761 | Feel free to ping the mailing list with your problem and we'll try to
762 | sort it out.
763 |
764 |
765 | Contributing
766 | ------------
767 |
768 | Once you've made your great commits:
769 |
770 | 1. [Fork][1] Resque
771 | 2. Create a topic branch - `git checkout -b my_branch`
772 | 3. Push to your branch - `git push origin my_branch`
773 | 4. Create an [Issue][2] with a link to your branch
774 | 5. That's it!
775 |
776 | You might want to checkout our [Contributing][cb] wiki page for information
777 | on coding standards, new features, etc.
778 |
779 |
780 | Mailing List
781 | ------------
782 |
783 | To join the list simply send an email to . This
784 | will subscribe you and send you information about your subscription,
785 | including unsubscribe information.
786 |
787 | The archive can be found at .
788 |
789 |
790 | Meta
791 | ----
792 |
793 | * Code: `git clone git://github.com/defunkt/resque.git`
794 | * Home: http://github.com/defunkt/resque
795 | * Docs:
796 | * Bugs: http://github.com/defunkt/resque/issues
797 | * List:
798 | * Chat:
799 | * Gems:
800 |
801 | This project uses [Semantic Versioning][sv].
802 |
803 |
804 | Author
805 | ------
806 |
807 | Chris Wanstrath :: chris@ozmm.org :: @defunkt
808 |
809 | [0]: http://github.com/blog/542-introducing-resque
810 | [1]: http://help.github.com/forking/
811 | [2]: http://github.com/defunkt/resque/issues
812 | [sv]: http://semver.org/
813 | [rs]: http://github.com/defunkt/redis-namespace
814 | [cb]: http://wiki.github.com/defunkt/resque/contributing
815 |
--------------------------------------------------------------------------------