├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── CONTRIBUTING.md ├── Gemfile ├── HOOKS.md ├── LICENSE ├── README.md ├── Rakefile ├── TODO ├── backburner.gemspec ├── bin └── backburner ├── circle.yml ├── examples ├── custom.rb ├── demo.rb ├── god.rb ├── hooked.rb ├── retried.rb ├── simple.rb └── stress.rb ├── lib ├── active_job │ └── queue_adapters │ │ └── backburner_adapter.rb ├── backburner.rb └── backburner │ ├── async_proxy.rb │ ├── cli.rb │ ├── configuration.rb │ ├── connection.rb │ ├── helpers.rb │ ├── hooks.rb │ ├── job.rb │ ├── logger.rb │ ├── performable.rb │ ├── queue.rb │ ├── tasks.rb │ ├── version.rb │ ├── worker.rb │ └── workers │ ├── forking.rb │ ├── simple.rb │ ├── threading.rb │ └── threads_on_fork.rb └── test ├── active_job_adapter_test.rb ├── async_proxy_test.rb ├── back_burner_test.rb ├── connection_test.rb ├── fixtures ├── active_jobs.rb ├── hooked.rb ├── test_fork_jobs.rb ├── test_forking_jobs.rb ├── test_jobs.rb └── test_queue_settings.rb ├── helpers └── templogger.rb ├── helpers_test.rb ├── hooks_test.rb ├── job_test.rb ├── logger_test.rb ├── performable_test.rb ├── queue_test.rb ├── test_helper.rb ├── worker_test.rb └── workers ├── forking_worker_test.rb ├── simple_worker_test.rb ├── threading_worker_test.rb └── threads_on_fork_worker_test.rb /.gitignore: -------------------------------------------------------------------------------- 1 | *.gem 2 | *.rbc 3 | .bundle 4 | .config 5 | .yardoc 6 | Gemfile.lock 7 | InstalledFiles 8 | _yardoc 9 | coverage 10 | doc/ 11 | lib/bundler/man 12 | pkg 13 | rdoc 14 | spec/reports 15 | test/tmp 16 | test/version_tmp 17 | tmp -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # http://about.travis-ci.org/docs/user/build-configuration/ 2 | rvm: 3 | - 1.9.3 4 | - 2.0.0 5 | - 2.1 6 | - 2.2 7 | - 2.3 8 | - 2.4 9 | - 2.5 10 | - rbx-2 11 | before_install: 12 | - curl -L 
https://github.com/kr/beanstalkd/archive/v1.9.tar.gz | tar xz -C /tmp 13 | - cd /tmp/beanstalkd-1.9/ 14 | - make 15 | - ./beanstalkd & 16 | - cd $TRAVIS_BUILD_DIR 17 | - gem update --system 18 | - gem update bundler 19 | matrix: 20 | allow_failures: 21 | - rvm: rbx-2 22 | - rvm: 2.0.0 23 | script: 24 | - bundle exec rake test 25 | gemfile: Gemfile 26 | notifications: 27 | recipients: 28 | - nesquena@gmail.com 29 | - therealdave.myron@gmail.com 30 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## Version 1.6.1 (Jan 16 2023) 4 | 5 | * Change `File.exists?`` to `File.exist?` as latest ruby versions have removed support (@ShrutiSaagar). 6 | 7 | ## Version 1.6.0 (December 30 2021) 8 | 9 | * TBD (please help backfill) 10 | 11 | ## Version 1.5.0 (September 10 2018) 12 | 13 | * TBD 14 | 15 | ## Version 1.4.1 (June 10 2017) 16 | 17 | * Fix warning for constant ::Fixnum is deprecated (@amatsuda) 18 | 19 | ## Version 1.4.0 (May 13 2017) 20 | 21 | * Fix unit tests to be more consistent (@eltone) 22 | * Ensure job supports body hash with symbol keys (@eltone) 23 | * Add support for custom serialization formats (@eltone) 24 | * Log the params when a job timeout occurs (@nathantsoi) 25 | 26 | ## Version 1.3.1 (April 21 2016) 27 | 28 | * Addition of thread-pool-based concurrency (@contentfree) 29 | 30 | ## Version 1.3.0 (February 05 2016) 31 | 32 | * Enqueue command now responds with beanstalk response details 33 | 34 | ## Version 1.2.0 (November 01 2015) 35 | 36 | * FIX Made connections to beanstalkd more resilient (@contentfree) 37 | 38 | ## Version 1.2.0.pre (October 24 2015) 39 | 40 | * FIX Replace static Beaneater connection with individual connections per worker instance/thread (@contentfree) 41 | * FIX Beaneater connections try really hard to repair themselves if disconnected accidentally (@contentfree) 42 | * NEW Event 
hook for workers: on_reconnect (@contentfree) 43 | 44 | ## Version 1.1.0 (September 14 2015) 45 | 46 | * NEW Ability to configure namespace separator (@bfolkens) 47 | * NEW Avoid timeouts altogether by setting queue_respond_timeout to 0 (@zacviandier) 48 | * NEW Event hooks for on_retry and on_bury (@contentfree) 49 | * NEW Support lambdas for queue names (@contentfree) 50 | * NEW Allow for control of delay calculation (@contentfree) 51 | * NEW Ability to specify environment when running the CLI (@contentfree) 52 | * NEW Control default async behavior of methods (@contentfree) 53 | 54 | ## Version 1.0.0 (April 26 2015) 55 | 56 | * NEW Updating to Beaneater 1.0 (@alup) 57 | 58 | ## Version 0.4.6 (October 26 2014) 59 | 60 | * NEW Add job to on_error handler if the handler has a 4th argument (@Nitrodist) 61 | * NEW Use a timeout when looking for a job to reserve (@EasyPost) 62 | * NEW Support configuring settings on threads on fork class (@silentshade) 63 | * FIX queue override by existing queues (@silentshade) 64 | * FIX Use thread to log exit message (@silentshade) 65 | 66 | ## Version 0.4.5 (December 16 2013) 67 | 68 | * FIX #47 Create a backburner connection per thread (Thanks @thcrock) 69 | 70 | ## Version 0.4.4 (October 27 2013) 71 | 72 | * NEW #51 Added ability to set per-queue default ttr's (Thanks @ryanjohns) 73 | 74 | ## Version 0.4.3 (July 19 2013) 75 | 76 | * FIX #44 Additional fix to issue introduced in 0.4.2 77 | * FIX #45 More graceful shutdown using Kernel.exit and rescuing SystemExit. (Thanks @ryanjohns) 78 | 79 | ## Version 0.4.2 (July 3 2013) 80 | 81 | * FIX #44 Properly retry to connect to beanstalkd when connection fails. 82 | 83 | ## Version 0.4.1 (June 28 2013) 84 | 85 | * FIX #43 Properly support CLI options and smart load the app environment. 
86 | 87 | ## Version 0.4.0 (June 28 2013) 88 | 89 | NOTE: This is the start of working with @bradgessler to improve backburner and merge with quebert 90 | 91 | * NEW #26 #27 Remove need for Queue mixin, allow plain ruby objects 92 | * NEW Default all jobs to a single general queue rather than separate queues 93 | * NEW Add support for named priorities, allowing shorthand names for priority values 94 | 95 | ## Version 0.3.4 (April 23 2013) 96 | 97 | * FIX #22 Adds signal handlers for worker to manage proper shutdown (Thanks @tkiley) 98 | 99 | ## Version 0.3.3 (April 19 2013) 100 | 101 | * Fix naming conflict rename 'config' to 'queue_config' 102 | 103 | ## Version 0.3.2 (Jan 23 2013) 104 | 105 | * Bump version of beaneater to 0.3.0 (better socket handling) 106 | 107 | ## Version 0.3.1 (Dec 28 2012) 108 | 109 | * Adds basic forking processing strategy and rake tasks (Thanks @danielfarrell) 110 | 111 | ## Version 0.3.0 (Nov 14 2012) 112 | 113 | * Major update with support for a 'threads_on_fork' processing strategy (Thanks @ShadowBelmolve) 114 | * Different workers have different rake tasks (Thanks @ShadowBelmolve) 115 | * Added processing strategy specific examples i.e stress.rb and adds new unit tests. (Thanks @ShadowBelmolve) 116 | 117 | ## Version 0.2.6 (Nov 12 2012) 118 | 119 | * Upgrade to beaneater 0.2.0 120 | 121 | ## Version 0.2.5 (Nov 9 2012) 122 | 123 | * Add support for multiple worker processing strategies through subclassing. 124 | 125 | ## Version 0.2.0 (Nov 7 2012) 126 | 127 | * Add new plugin hooks feature (see HOOKS.md) 128 | 129 | ## Version 0.1.2 (Nov 7 2012) 130 | 131 | * Adds ability to specify a custom logger. 132 | * Adds job retry configuration and worker support. 
133 | 134 | ## Version 0.1.1 (Nov 6 2012) 135 | 136 | * Fix issue with timed out reserves 137 | 138 | ## Version 0.1.0 (Nov 4 2012) 139 | 140 | * Switch to beaneater as new ruby beanstalkd client 141 | * Add support for array of connections in `beanstalk_url` 142 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | We love pull requests. Here's a quick guide: 2 | 3 | 1. Fork the repo. 4 | 5 | 2. Run the tests. We only take pull requests with passing tests, and it's great 6 | to know that you have a clean slate: `bundle && rake test` 7 | 8 | 3. Add a test for your change. Only refactoring and documentation changes 9 | require no new tests. If you are adding functionality or fixing a bug, we need 10 | a test! 11 | 12 | 4. Make the test pass. 13 | 14 | 5. Push to your fork and submit a pull request. 15 | 16 | At this point you're waiting on us. We like to at least comment on, if not 17 | accept, pull requests within three business days (and, typically, one business 18 | day). We may suggest some changes or improvements or alternatives. 19 | 20 | Some things that will increase the chance that your pull request is accepted: 21 | 22 | * Use Rails idioms and helpers 23 | * Include tests that fail without your code, and pass with it 24 | * Update the documentation and README for anything affected by your contribution 25 | 26 | Syntax: 27 | 28 | * Two spaces, no tabs. 29 | * No trailing whitespace. Blank lines should not have any space. 30 | * Prefer &&/|| over and/or. 31 | * MyClass.my_method(my_arg) not my_method( my_arg ) or my_method my_arg. 32 | * a = b and not a=b. 33 | * Follow the conventions you see used in the source already. 34 | 35 | And in case we didn't emphasize it enough: we love tests! 
36 | 37 | NOTE: Adapted from https://raw.github.com/thoughtbot/factory_girl_rails/master/CONTRIBUTING.md -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | # Specify your gem's dependencies in backburner.gemspec 4 | gemspec -------------------------------------------------------------------------------- /HOOKS.md: -------------------------------------------------------------------------------- 1 | # Backburner Hooks 2 | 3 | You can customize Backburner or write plugins using its hook API. 4 | In many cases you can use a hook rather than mess around with Backburner's internals. 5 | 6 | ## Job Hooks 7 | 8 | Hooks are transparently adapted from [Resque](https://github.com/resque/resque/blob/master/docs/HOOKS.md), so 9 | if you are familiar with their hook API, now you can use nearly the same ones with beanstalkd and backburner! 10 | 11 | There are a variety of hooks available that are triggered during the lifecycle of a job: 12 | 13 | * `before_enqueue`: Called with the job args before a job is placed on the queue. 14 | If the hook returns `false`, the job will not be placed on the queue. 15 | 16 | * `after_enqueue`: Called with the job args after a job is placed on the queue. 17 | Any exception raised propagates up to the code which queued the job. 18 | 19 | * `before_perform`: Called with the job args before perform. If a hook returns false, 20 | the job is aborted. Other exceptions are treated like regular job exceptions. 21 | 22 | * `after_perform`: Called with the job args after it performs. Uncaught 23 | exceptions will be treated like regular job exceptions. 24 | 25 | * `around_perform`: Called with the job args. It is expected to yield in order 26 | to perform the job (but is not required to do so). 
It may handle exceptions 27 | thrown by perform, but uncaught exceptions will be treated like regular job exceptions. 28 | 29 | * `on_retry`: Called with the retry count, the delay and the job args whenever a job is retried. 30 | 31 | * `on_bury`: Called with the job args when the job is buried. 32 | 33 | * `on_failure`: Called with the exception and job args if any exception occurs 34 | while performing the job (or hooks). 35 | 36 | Hooks are just methods prefixed with the hook type. For example: 37 | 38 | ```ruby 39 | class SomeJob 40 | def self.before_perform_log_job(*args) 41 | logger.info "About to perform #{self} with #{args.inspect}" 42 | end 43 | 44 | def self.on_failure_bury(e, *args) 45 | logger.info "Performing #{self} caused an exception (#{e})" 46 | self.bury 47 | end 48 | 49 | def self.perform(*args) 50 | # ... 51 | end 52 | 53 | def self.logger 54 | @_logger ||= Logger.new(STDOUT) 55 | end 56 | end 57 | ``` 58 | 59 | You can also setup modules to create compose-able and reusable hooks for your jobs. For example: 60 | 61 | ```ruby 62 | module LoggedJob 63 | def before_perform_log_job(*args) 64 | Logger.info "About to perform #{self} with #{args.inspect}" 65 | end 66 | end 67 | 68 | module BuriedJob 69 | def on_failure_bury(e, *args) 70 | Logger.info "Performing #{self} caused an exception (#{e}). Retrying..." 71 | self.bury 72 | end 73 | end 74 | 75 | class MyJob 76 | extend LoggedJob 77 | extend BuriedJob 78 | 79 | def self.perform(*args) 80 | # ... 81 | end 82 | end 83 | ``` 84 | 85 | ## Worker Hooks 86 | 87 | Currently, there is just one hook: 88 | 89 | * `on_reconnect`: Called on the worker whose connection has been reset. 
The connection 90 | is given as the argument 91 | 92 | An example: 93 | 94 | ```ruby 95 | class MyWorker < Backburner::Worker 96 | def on_reconnect(conn) 97 | prepare 98 | end 99 | end 100 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 Nathan Esquenazi 2 | 3 | MIT License 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining 6 | a copy of this software and associated documentation files (the 7 | "Software"), to deal in the Software without restriction, including 8 | without limitation the rights to use, copy, modify, merge, publish, 9 | distribute, sublicense, and/or sell copies of the Software, and to 10 | permit persons to whom the Software is furnished to do so, subject to 11 | the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be 14 | included in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env rake 2 | require "bundler/gem_tasks" 3 | require 'rake/testtask' 4 | # require 'yard' 5 | 6 | task :test do 7 | Rake::TestTask.new do |t| 8 | t.libs.push "lib" 9 | t.test_files = FileList[File.expand_path('../test/**/*_test.rb', __FILE__)] 10 | t.verbose = true 11 | end 12 | end 13 | 14 | task :default => :test 15 | 16 | # task :doc do 17 | # YARD::CLI::Yardoc.new.run -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | - Custom front-end in sinatra for viewing beanstalk jobs 2 | - Refer to https://github.com/denniskuczynski/beanstalkd_view 3 | - Fork jobs to control memory 4 | - https://github.com/michaeldwan/stalker/commit/386267690a7c03e11d1a8b7b6f08b7c9c7cd2c0d -------------------------------------------------------------------------------- /backburner.gemspec: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | require File.expand_path('../lib/backburner/version', __FILE__) 3 | 4 | Gem::Specification.new do |s| 5 | s.authors = ["Nathan Esquenazi"] 6 | s.email = ["nesquena@gmail.com"] 7 | s.description = %q{Beanstalk background job processing made easy} 8 | s.summary = %q{Reliable beanstalk background job processing made easy for Ruby and Sinatra} 9 | s.homepage = "http://github.com/nesquena/backburner" 10 | 11 | s.files = `git ls-files`.split($\) 12 | s.executables = s.files.grep(%r{^bin/}).map{ |f| File.basename(f) } 13 | s.test_files = s.files.grep(%r{^(test|spec|features)/}) 14 | s.name = "backburner" 15 | s.require_paths = ["lib"] 16 | s.version = Backburner::VERSION 17 | s.license = 'MIT' 18 | 19 | s.add_runtime_dependency 'beaneater', '~> 1.0' 20 | s.add_runtime_dependency 'dante', '> 
0.1.5' 21 | s.add_runtime_dependency 'concurrent-ruby', '~> 1.0', '>= 1.0.1' 22 | 23 | s.add_development_dependency 'rake' 24 | s.add_development_dependency 'minitest', '5.1.0' 25 | s.add_development_dependency 'mocha' 26 | s.add_development_dependency 'activejob', '>= 6.0' 27 | s.add_development_dependency 'activesupport', '>= 6.0' 28 | end 29 | -------------------------------------------------------------------------------- /bin/backburner: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require 'backburner' 4 | require 'backburner/cli' 5 | 6 | # bundle exec backburner foo,bar 7 | Backburner::CLI.start(ARGV) -------------------------------------------------------------------------------- /circle.yml: -------------------------------------------------------------------------------- 1 | machine: 2 | services: 3 | - beanstalkd -------------------------------------------------------------------------------- /examples/custom.rb: -------------------------------------------------------------------------------- 1 | $:.unshift "lib" 2 | require 'backburner' 3 | 4 | # Define ruby job 5 | class TestJob 6 | include Backburner::Queue 7 | # queue "test-job" 8 | 9 | def self.perform(value, user) 10 | puts "[TestJob] Running perform with args: [#{value}, #{user}]" 11 | end 12 | end 13 | 14 | # Configure Backburner 15 | Backburner.configure do |config| 16 | config.beanstalk_url = "beanstalk://127.0.0.1" 17 | config.tube_namespace = "demo.production" 18 | end 19 | 20 | # Enqueue tasks 21 | Backburner.enqueue TestJob, 5, 3 22 | Backburner.enqueue TestJob, 10, 6 23 | 24 | # Work tasks using threaded worker 25 | Backburner.work("test-job", :worker => Backburner::Workers::ThreadsOnFork) -------------------------------------------------------------------------------- /examples/demo.rb: -------------------------------------------------------------------------------- 1 | $:.unshift "lib" 2 | require 'backburner' 3 | 4 | 
module Tester 5 | class TestJob 6 | include Backburner::Queue 7 | queue "test.job" 8 | 9 | def self.perform(value, user) 10 | p [value, user] 11 | end 12 | end 13 | 14 | class UserModel 15 | include Backburner::Performable 16 | 17 | attr_accessor :id, :name 18 | 19 | def self.first 20 | self.find(3, "John") 21 | end 22 | 23 | def self.find(id, name="Fetched") 24 | self.new(id, name) 25 | end 26 | 27 | def initialize(id, name) 28 | @id, @name = id, name 29 | end 30 | 31 | def hello(x, y) 32 | puts "Instance #{x} and #{y} and my id is #{id}" 33 | end 34 | 35 | def self.foo(x, y) 36 | puts "Class #{x} and #{y}" 37 | end 38 | end 39 | end 40 | 41 | # connection = Backburner::Connection.new("beanstalk://127.0.0.1") 42 | 43 | Backburner.configure do |config| 44 | config.beanstalk_url = "beanstalk://127.0.0.1" 45 | config.tube_namespace = "myblog.production" 46 | end 47 | 48 | # p Backburner.configuration.beanstalk_url 49 | # p Backburner::Worker.connection 50 | 51 | Backburner.enqueue Tester::TestJob, 5, 3 52 | Backburner.enqueue Tester::TestJob, 10, 6 53 | @user = Tester::UserModel.first 54 | @user.async.hello("foo", "bar") 55 | Tester::UserModel.async.foo("bar", "baz") 56 | 57 | Backburner.default_queues.concat([Tester::TestJob.queue, Tester::UserModel.queue]) 58 | Backburner.work 59 | # Backburner.work("test.job") 60 | # Backburner.work("tester/user-model") 61 | -------------------------------------------------------------------------------- /examples/god.rb: -------------------------------------------------------------------------------- 1 | God.watch do |w| 2 | w.name = "backburner-worker-1" 3 | w.dir = '/path/to/app/dir' 4 | w.env = { 'PADRINO_ENV' => 'production', 'QUEUES' => 'newsletter-sender,push-message' } 5 | w.group = 'backburner-workers' 6 | w.interval = 30.seconds 7 | w.start = "bundle exec rake -f Rakefile backburner:work" 8 | w.log = "/var/log/god/backburner-worker-1.log" 9 | 10 | # restart if memory gets too high 11 | w.transition(:up, :restart) do |on| 
12 | on.condition(:memory_usage) do |c| 13 | c.above = 50.megabytes 14 | c.times = 3 15 | end 16 | end 17 | 18 | # determine the state on startup 19 | w.transition(:init, { true => :up, false => :start }) do |on| 20 | on.condition(:process_running) do |c| 21 | c.running = true 22 | end 23 | end 24 | 25 | # determine when process has finished starting 26 | w.transition([:start, :restart], :up) do |on| 27 | on.condition(:process_running) do |c| 28 | c.running = true 29 | c.interval = 5.seconds 30 | end 31 | 32 | # failsafe 33 | on.condition(:tries) do |c| 34 | c.times = 5 35 | c.transition = :start 36 | c.interval = 5.seconds 37 | end 38 | end 39 | 40 | # start if process is not running 41 | w.transition(:up, :start) do |on| 42 | on.condition(:process_running) do |c| 43 | c.running = false 44 | end 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /examples/hooked.rb: -------------------------------------------------------------------------------- 1 | $:.unshift "lib" 2 | require 'backburner' 3 | 4 | $fail = 0 5 | class User 6 | include Backburner::Performable 7 | 8 | # Called with the job args before a job is placed on the queue. 9 | # !! If the hook returns `false`, the job will not be placed on the queue. 10 | def self.before_enqueue_foo(*args) 11 | puts "[before_enqueue] Just about to enqueue #{self} with #{args.inspect}" 12 | end 13 | 14 | # Called with the job args after a job is placed on the queue. 15 | # !! Any exception raised propagates up to the code which queued the job. 16 | def self.after_enqueue_foo(*args) 17 | puts "[after_enqueue] Finished enqueuing #{self} with #{args.inspect}" 18 | end 19 | 20 | # Called with the job args before perform. If it raises 21 | # `Backburner::Job::DontPerform`, the job is aborted. Other exceptions 22 | # are treated like regular job exceptions. 
23 | def self.before_perform_foo(*args) 24 | puts "[before_perform] Just about to perform #{self} with #{args.inspect}" 25 | end 26 | 27 | # Called with the job args after it performs. Uncaught 28 | # exceptions will be treated like regular job exceptions. 29 | def self.after_perform_foo(*args) 30 | puts "[after_perform] Just finished performing #{self} with #{args.inspect}" 31 | end 32 | 33 | # Called with the job args. It is expected to yield in order 34 | # to perform the job (but is not required to do so). It may handle exceptions 35 | # thrown by perform, but uncaught exceptions will be treated like regular job exceptions. 36 | def self.around_perform_bar(*args) 37 | puts "[around_perform_bar before] About to perform #{self} with #{args.inspect}" 38 | yield 39 | puts "[around_perform_bar after] Just after performing #{self} with #{args.inspect}" 40 | end 41 | 42 | # Called with the job args. It is expected to yield in order 43 | # to perform the job (but is not required to do so). It may handle exceptions 44 | # thrown by perform, but uncaught exceptions will be treated like regular job exceptions. 45 | def self.around_perform_cat(*args) 46 | puts "[around_perform_cat before] About to perform #{self} with #{args.inspect}" 47 | yield 48 | puts "[around_perform_cat after] Just after performing #{self} with #{args.inspect}" 49 | end 50 | 51 | # Called with the job args. It is expected to yield in order 52 | # to perform the job (but is not required to do so). It may handle exceptions 53 | # thrown by perform, but uncaught exceptions will be treated like regular job exceptions. 54 | def self.around_perform_foo(*args) 55 | puts "[around_perform_foo before] About to perform #{self} with #{args.inspect}" 56 | yield 57 | puts "[around_perform_foo after] Just after performing #{self} with #{args.inspect}" 58 | end 59 | 60 | # Called with the exception and job args if any exception occurs 61 | # while performing the job (or hooks). 
62 | def self.on_failure_foo(ex, *args) 63 | puts "[on_failure] Failure #{ex.inspect} occurred for job #{self} with #{args.inspect}" 64 | end 65 | 66 | def self.foo 67 | $fail += 1 68 | raise "Fail!" if $fail == 1 69 | puts "This is the job running successfully!!" 70 | end 71 | end 72 | 73 | # Configure Backburner 74 | Backburner.configure do |config| 75 | config.beanstalk_url = "beanstalk://127.0.0.1" 76 | config.tube_namespace = "demo.production" 77 | config.on_error = lambda { |e| puts "HEY!!! #{e.class}" } 78 | config.max_job_retries = 1 79 | config.retry_delay = 0 80 | end 81 | 82 | # Enqueue tasks 83 | User.async.foo 84 | 85 | # Run work 86 | # Backburner.default_queues << "user" 87 | Backburner.work -------------------------------------------------------------------------------- /examples/retried.rb: -------------------------------------------------------------------------------- 1 | $:.unshift "lib" 2 | require 'backburner' 3 | 4 | $error = 0 5 | 6 | class User 7 | include Backburner::Performable 8 | attr_accessor :id, :name 9 | 10 | def self.foo(x, y) 11 | $error += 1 12 | raise "fail #{$error}" unless $error > 3 13 | puts "User #foo args [#{x}, #{y}] Success!!" 14 | end 15 | end 16 | 17 | # Configure Backburner 18 | Backburner.configure do |config| 19 | config.beanstalk_url = "beanstalk://127.0.0.1" 20 | config.tube_namespace = "demo.production" 21 | config.on_error = lambda { |e| puts "HEY!!! 
#{e.class}" } 22 | config.max_job_retries = 3 23 | config.retry_delay = 0 24 | end 25 | 26 | # Enqueue tasks 27 | User.async(:queue => "retried").foo("bar", "baz") 28 | 29 | # Run work 30 | # Backburner.default_queues << "user" 31 | Backburner.work -------------------------------------------------------------------------------- /examples/simple.rb: -------------------------------------------------------------------------------- 1 | $:.unshift "lib" 2 | require 'backburner' 3 | 4 | class User 5 | include Backburner::Performable 6 | attr_accessor :id, :name 7 | 8 | def self.first 9 | User.find(3, "John") 10 | end 11 | 12 | def self.find(id, name="Fetched") 13 | User.new(id, name) 14 | end 15 | 16 | def initialize(id, name) 17 | @id, @name = id, name 18 | end 19 | 20 | def hello(x, y) 21 | puts "User(id=#{id}) #hello args: [#{x}, #{y}] (Instance method)" 22 | end 23 | 24 | def self.foo(x, y) 25 | puts "User #foo args [#{x}, #{y}] (Class method)" 26 | end 27 | end 28 | 29 | # Configure Backburner 30 | Backburner.configure do |config| 31 | config.beanstalk_url = "beanstalk://127.0.0.1" 32 | config.tube_namespace = "demo.production" 33 | config.on_error = lambda { |e| puts "HEY!!! 
#{e.class}" } 34 | end 35 | 36 | # Enqueue tasks 37 | @user = User.first 38 | @user.async(:pri => 1000).hello("foo", "bar") 39 | User.async.foo("bar", "baz") 40 | 41 | # Run work 42 | # Backburner.default_queues << "user" 43 | Backburner.work -------------------------------------------------------------------------------- /examples/stress.rb: -------------------------------------------------------------------------------- 1 | $:.unshift "lib" 2 | require 'backburner' 3 | 4 | $values = [] 5 | 6 | # Define ruby job 7 | class TestJob 8 | include Backburner::Queue 9 | queue "test-job" 10 | 11 | def self.perform(value) 12 | puts "[TestJob] Running perform with args: [#{value}]" 13 | $values << value 14 | puts "#{$values.size} total jobs processed" 15 | end 16 | end 17 | 18 | # Configure Backburner 19 | Backburner.configure do |config| 20 | config.beanstalk_url = "beanstalk://127.0.0.1" 21 | config.tube_namespace = "demo.production" 22 | end 23 | 24 | # Enqueue tasks 25 | 1.upto(1000) do |i| 26 | Backburner.enqueue TestJob, i 27 | end 28 | 29 | # Work tasks using threads_on_fork worker 30 | # twitter tube will have 10 threads, garbage after 1000 executions and retry jobs 1 times. 31 | Backburner.work("test-job:10:100:1", :worker => Backburner::Workers::ThreadsOnFork) -------------------------------------------------------------------------------- /lib/active_job/queue_adapters/backburner_adapter.rb: -------------------------------------------------------------------------------- 1 | module ActiveJob 2 | module QueueAdapters 3 | # Explicitly remove the implementation existing in older rails'. 4 | remove_const(:BackburnerAdapter) if const_defined?(:BackburnerAdapter) 5 | 6 | # = Backburner adapter for Active Job 7 | # 8 | # To use Backburner set the queue_adapter config to +:backburner+. 
9 | # 10 | # Rails.application.config.active_job.queue_adapter = :backburner 11 | class BackburnerAdapter < ::ActiveJob::QueueAdapters::AbstractAdapter 12 | def enqueue(job) 13 | response = Backburner::Worker.enqueue(JobWrapper, [job.serialize], queue: job.queue_name, pri: job.priority) 14 | job.provider_job_id = response[:id] if response.is_a?(Hash) 15 | response 16 | end 17 | 18 | def enqueue_at(job, timestamp) 19 | delay = timestamp - Time.current.to_f 20 | response = Backburner::Worker.enqueue(JobWrapper, [job.serialize], queue: job.queue_name, pri: job.priority, delay: delay) 21 | job.provider_job_id = response[:id] if response.is_a?(Hash) 22 | response 23 | end 24 | 25 | class JobWrapper 26 | class << self 27 | def perform(job_data) 28 | Base.execute job_data 29 | end 30 | end 31 | end 32 | end 33 | end 34 | end 35 | -------------------------------------------------------------------------------- /lib/backburner.rb: -------------------------------------------------------------------------------- 1 | require 'beaneater' 2 | require 'json' 3 | require 'uri' 4 | require 'timeout' 5 | require 'backburner/version' 6 | require 'backburner/helpers' 7 | require 'backburner/configuration' 8 | require 'backburner/logger' 9 | require 'backburner/connection' 10 | require 'backburner/hooks' 11 | require 'backburner/performable' 12 | require 'backburner/worker' 13 | require 'backburner/workers/simple' 14 | require 'backburner/workers/forking' 15 | require 'backburner/workers/threads_on_fork' 16 | require 'backburner/workers/threading' 17 | require 'backburner/queue' 18 | require 'active_job/queue_adapters/backburner_adapter' if defined?(ActiveJob) 19 | 20 | module Backburner 21 | class << self 22 | 23 | # Enqueues a job to be performed with given arguments. 
24 | # 25 | # @example 26 | # Backburner.enqueue NewsletterSender, self.id, user.id 27 | # 28 | def enqueue(job_class, *args) 29 | Backburner::Worker.enqueue(job_class, args, {}) 30 | end 31 | 32 | # Begins working on jobs enqueued with optional tubes specified 33 | # 34 | # @example 35 | # Backburner.work('newsletter_sender', 'test_job') 36 | # Backburner.work('newsletter_sender', 'test_job', :worker => NotSimpleWorker) 37 | # 38 | def work(*tubes) 39 | options = tubes.last.is_a?(Hash) ? tubes.pop : {} 40 | worker_class = options[:worker] || configuration.default_worker 41 | worker_class.start(tubes) 42 | end 43 | 44 | # Yields a configuration block 45 | # 46 | # @example 47 | # Backburner.configure do |config| 48 | # config.beanstalk_url = "beanstalk://..." 49 | # end 50 | # 51 | def configure(&block) 52 | yield(configuration) 53 | configuration 54 | end 55 | 56 | # Returns the configuration options set for Backburner 57 | # 58 | # @example 59 | # Backburner.configuration.beanstalk_url => false 60 | # 61 | def configuration 62 | @_configuration ||= Configuration.new 63 | end 64 | 65 | # Returns the queues that are processed by default if none are specified 66 | # 67 | # @example 68 | # Backburner.default_queues << "foo" 69 | # Backburner.default_queues => ["foo", "bar"] 70 | # 71 | def default_queues 72 | configuration.default_queues 73 | end 74 | end 75 | end 76 | -------------------------------------------------------------------------------- /lib/backburner/async_proxy.rb: -------------------------------------------------------------------------------- 1 | module Backburner 2 | # BasicObject for 1.8.7 3 | class BasicObject 4 | instance_methods.each do |m| 5 | undef_method(m) if m.to_s !~ /(?:^__|^nil?$|^send$|^object_id$)/ 6 | end 7 | end unless defined?(::BasicObject) 8 | 9 | # Class allows async task to be proxied 10 | class AsyncProxy < BasicObject 11 | # Options include `pri` (priority), `delay` (delay in secs), `ttr` (time to respond) 12 | # 13 | # 
@example 14 | # AsyncProxy.new(User, 10, :pri => 1000, :ttr => 1000) 15 | # 16 | def initialize(klazz, id=nil, opts={}) 17 | @klazz, @id, @opts = klazz, id, opts 18 | end 19 | 20 | # Enqueue as job when a method is invoked 21 | def method_missing(method, *args, &block) 22 | ::Backburner::Worker.enqueue(@klazz, [@id, method, *args], @opts) 23 | end 24 | end # AsyncProxy 25 | end # Backburner -------------------------------------------------------------------------------- /lib/backburner/cli.rb: -------------------------------------------------------------------------------- 1 | require 'dante' 2 | 3 | module Backburner 4 | class CLI 5 | 6 | def self.start(args) 7 | runner = Dante::Runner.new('backburner') 8 | runner.description = "Execute a backburner worker process" 9 | runner.with_options do |opts| 10 | opts.on("-r", "--require PATH", String, "The path to load as the environment.") do |req| 11 | options[:require] = req 12 | end 13 | opts.on("-q", "--queues PATH", String, "The specific queues to work.") do |queues| 14 | options[:queues] = queues 15 | end 16 | opts.on("-e", "--environment ENVIRONMENT", String, "The environment to run Backburner within") do |environment| 17 | options[:environment] = environment 18 | end 19 | end 20 | runner.execute do |opts| 21 | queues = (opts[:queues] ? opts[:queues].split(',') : nil) rescue nil 22 | load_environment(opts[:require], opts[:environment]) 23 | Backburner.work(queues) 24 | end 25 | end 26 | 27 | protected 28 | 29 | def self.load_environment(file = nil, environment = nil) 30 | file ||= "." 31 | if File.directory?(file) && File.exist?(File.expand_path("#{file}/config/environment.rb")) 32 | ENV["RAILS_ENV"] = environment if environment && ENV["RAILS_ENV"].nil? 33 | require "rails" 34 | require File.expand_path("#{file}/config/environment.rb") 35 | if defined?(::Rails) && ::Rails.respond_to?(:application) 36 | # Rails 3 37 | ::Rails.application.eager_load! 
38 | elsif defined?(::Rails::Initializer) 39 | # Rails 2.3 40 | $rails_rake_task = false 41 | ::Rails::Initializer.run :load_application_classes 42 | end 43 | elsif File.directory?(file) && File.exist?(File.expand_path("#{file}/config/boot.rb")) 44 | ENV["RACK_ENV"] = environment if environment && ENV["RACK_ENV"].nil? 45 | ENV["PADRINO_ROOT"] = file 46 | require File.expand_path("#{file}/config/boot.rb") 47 | elsif File.file?(file) 48 | require File.expand_path(file) 49 | end 50 | end 51 | 52 | end 53 | end 54 | -------------------------------------------------------------------------------- /lib/backburner/configuration.rb: -------------------------------------------------------------------------------- 1 | module Backburner 2 | class Configuration 3 | PRIORITY_LABELS = { :high => 0, :medium => 100, :low => 200 } 4 | 5 | attr_accessor :beanstalk_url # beanstalk url connection 6 | attr_accessor :tube_namespace # namespace prefix for every queue 7 | attr_reader :namespace_separator # namespace separator 8 | attr_accessor :default_priority # default job priority 9 | attr_accessor :respond_timeout # default job timeout 10 | attr_accessor :on_error # error handler 11 | attr_accessor :max_job_retries # max job retries 12 | attr_accessor :retry_delay # (minimum) retry delay in seconds 13 | attr_accessor :retry_delay_proc # proc to calculate delay (and allow for back-off) 14 | attr_accessor :default_queues # default queues 15 | attr_accessor :logger # logger 16 | attr_accessor :default_worker # default worker class 17 | attr_accessor :primary_queue # the general queue 18 | attr_accessor :priority_labels # priority labels 19 | attr_accessor :reserve_timeout # duration to wait to reserve on a single server 20 | attr_accessor :job_serializer_proc # proc to write the job body to a string 21 | attr_accessor :job_parser_proc # proc to parse a job body from a string 22 | 23 | def initialize 24 | @beanstalk_url = "beanstalk://127.0.0.1" 25 | @tube_namespace = 
"backburner.worker.queue" 26 | @namespace_separator = "." 27 | @default_priority = 65536 28 | @respond_timeout = 120 29 | @on_error = nil 30 | @max_job_retries = 0 31 | @retry_delay = 5 32 | @retry_delay_proc = lambda { |min_retry_delay, num_retries| min_retry_delay + (num_retries ** 3) } 33 | @default_queues = [] 34 | @logger = nil 35 | @default_worker = Backburner::Workers::Simple 36 | @primary_queue = "backburner-jobs" 37 | @priority_labels = PRIORITY_LABELS 38 | @reserve_timeout = nil 39 | @job_serializer_proc = lambda { |body| body.to_json } 40 | @job_parser_proc = lambda { |body| JSON.parse(body) } 41 | end 42 | 43 | def namespace_separator=(val) 44 | raise 'Namespace separator cannot used reserved queue configuration separator ":"' if val == ':' 45 | @namespace_separator = val 46 | end 47 | end # Configuration 48 | end # Backburner 49 | -------------------------------------------------------------------------------- /lib/backburner/connection.rb: -------------------------------------------------------------------------------- 1 | require 'delegate' 2 | 3 | module Backburner 4 | class Connection < SimpleDelegator 5 | class BadURL < RuntimeError; end 6 | 7 | attr_accessor :url, :beanstalk 8 | 9 | # If a proc is provided, it will be called (and given this connection as an 10 | # argument) whenever the connection is reconnected. 11 | # @example 12 | # connection.on_reconnect = lambda { |conn| puts 'reconnected!' } 13 | attr_accessor :on_reconnect 14 | 15 | # Constructs a backburner connection 16 | # `url` can be a string i.e '127.0.0.1:3001' or an array of 17 | # addresses (however, only the first element in the array will 18 | # be used) 19 | def initialize(url, &on_reconnect) 20 | @url = url 21 | @beanstalk = nil 22 | @on_reconnect = on_reconnect 23 | connect! 
24 | end 25 | 26 | # Close the connection, if it exists 27 | def close 28 | @beanstalk.close if @beanstalk 29 | @beanstalk = nil 30 | __setobj__(@beanstalk) 31 | end 32 | 33 | # Determines if the connection to Beanstalk is currently open 34 | def connected? 35 | begin 36 | !!(@beanstalk && @beanstalk.connection && @beanstalk.connection.connection && !@beanstalk.connection.connection.closed?) # Would be nice if beaneater provided a connected? method 37 | rescue 38 | false 39 | end 40 | end 41 | 42 | # Attempt to reconnect to Beanstalk. Note: the connection will not be watching 43 | # or using the tubes it was before it was reconnected (as it's actually a 44 | # completely new connection) 45 | # @raise [Beaneater::NotConnected] If beanstalk fails to connect 46 | def reconnect! 47 | close 48 | connect! 49 | @on_reconnect.call(self) if @on_reconnect.respond_to?(:call) 50 | end 51 | 52 | # Yield to a block that will be retried several times if the connection to 53 | # beanstalk goes down and is able to be re-established. 54 | # 55 | # @param options Hash Options. Valid options are: 56 | # :max_retries Integer The maximum number of times the block will be yielded to. 57 | # Defaults to 4 58 | # :on_retry Proc An optional proc that will be called for each retry. Will be 59 | # called after the connection is re-established and :retry_delay 60 | # has passed but before the block is yielded to again 61 | # :retry_delay Float The amount to sleep before retrying. Defaults to 1.0 62 | # @raise Beaneater::NotConnected If a connection is unable to be re-established 63 | def retryable(options = {}, &block) 64 | options = {:max_retries => 4, :on_retry => nil, :retry_delay => 1.0}.merge!(options) 65 | retry_count = options[:max_retries] 66 | 67 | begin 68 | yield 69 | 70 | rescue Beaneater::NotConnected 71 | if retry_count > 0 72 | reconnect! 
73 | retry_count -= 1 74 | sleep options[:retry_delay] 75 | options[:on_retry].call if options[:on_retry].respond_to?(:call) 76 | retry 77 | else # stop retrying 78 | raise e 79 | end 80 | end 81 | end 82 | 83 | protected 84 | 85 | # Attempt to ensure we're connected to Beanstalk if the missing method is 86 | # present in the delegate and we haven't shut down the connection on purpose 87 | # @raise [Beaneater::NotConnected] If beanstalk fails to connect after multiple attempts. 88 | def method_missing(m, *args, &block) 89 | ensure_connected! if respond_to_missing?(m, false) 90 | super 91 | end 92 | 93 | # Connects to a beanstalk queue 94 | # @raise Beaneater::NotConnected if the connection cannot be established 95 | def connect! 96 | @beanstalk = Beaneater.new(beanstalk_addresses) 97 | __setobj__(@beanstalk) 98 | @beanstalk 99 | end 100 | 101 | # Attempts to ensure a connection to beanstalk is established but only if 102 | # we're not connected already 103 | # @param max_retries Integer The maximum number of times to attempt connecting. Defaults to 4 104 | # @param retry_delay Float The time to wait between retrying to connect. Defaults to 1.0 105 | # @raise [Beaneater::NotConnected] If beanstalk fails to connect after multiple attempts. 106 | # @return Connection This Connection is returned if the connection to beanstalk is open or was able to be reconnected 107 | def ensure_connected!(max_retries = 4, retry_delay = 1.0) 108 | return self if connected? 109 | 110 | begin 111 | reconnect! 112 | return self 113 | 114 | rescue Beaneater::NotConnected => e 115 | if max_retries > 0 116 | max_retries -= 1 117 | sleep retry_delay 118 | retry 119 | else # stop retrying 120 | raise e 121 | end 122 | end 123 | end 124 | 125 | # Returns the beanstalk queue addresses 126 | # 127 | # @example 128 | # beanstalk_addresses => ["127.0.0.1:11300"] 129 | # 130 | def beanstalk_addresses 131 | uri = self.url.is_a?(Array) ? 
self.url.first : self.url 132 | beanstalk_host_and_port(uri) 133 | end 134 | 135 | # Returns a host and port based on the uri_string given 136 | # 137 | # @example 138 | # beanstalk_host_and_port("beanstalk://127.0.0.1") => "127.0.0.1:11300" 139 | # 140 | def beanstalk_host_and_port(uri_string) 141 | uri = URI.parse(uri_string) 142 | raise(BadURL, uri_string) if uri.scheme != 'beanstalk'.freeze 143 | "#{uri.host}:#{uri.port || 11300}" 144 | end 145 | end # Connection 146 | end # Backburner 147 | -------------------------------------------------------------------------------- /lib/backburner/helpers.rb: -------------------------------------------------------------------------------- 1 | module Backburner 2 | module Helpers 3 | # Loads in instance and class levels 4 | def self.included(base) 5 | base.extend self 6 | end 7 | 8 | # Prints out exception_message based on specified e 9 | def exception_message(e) 10 | msg = [ "Exception #{e.class} -> #{e.message}" ] 11 | 12 | base = File.expand_path(Dir.pwd) + '/' 13 | e.backtrace.each do |t| 14 | msg << " #{File.expand_path(t).gsub(/#{base}/, '')}" 15 | end if e.backtrace 16 | 17 | msg.join("\n") 18 | end 19 | 20 | # Given a word with dashes, returns a camel cased version of it. 21 | # 22 | # @example 23 | # classify('job-name') # => 'JobName' 24 | # 25 | def classify(dashed_word) 26 | dashed_word.to_s.split('-').each { |part| part[0] = part[0].chr.upcase }.join 27 | end 28 | 29 | # Given a class, dasherizes the name, used for getting tube names 30 | # 31 | # @example 32 | # dasherize('JobName') # => "job-name" 33 | # 34 | def dasherize(word) 35 | classify(word).to_s.gsub(/::/, '/'). 36 | gsub(/([A-Z]+)([A-Z][a-z])/,'\1_\2'). 37 | gsub(/([a-z\d])([A-Z])/,'\1_\2'). 
38 | tr("_", "-").downcase 39 | end 40 | 41 | # Tries to find a constant with the name specified in the argument string: 42 | # 43 | # @example 44 | # constantize("Module") # => Module 45 | # constantize("Test::Unit") # => Test::Unit 46 | # 47 | # NameError is raised when the constant is unknown. 48 | def constantize(camel_cased_word) 49 | camel_cased_word = camel_cased_word.to_s 50 | 51 | if camel_cased_word.include?('-') 52 | camel_cased_word = classify(camel_cased_word) 53 | end 54 | 55 | names = camel_cased_word.split('::') 56 | names.shift if names.empty? || names.first.empty? 57 | 58 | constant = Object 59 | names.each do |name| 60 | args = Module.method(:const_get).arity != 1 ? [false] : [] 61 | 62 | if constant.const_defined?(name, *args) 63 | constant = constant.const_get(name) 64 | else 65 | constant = constant.const_missing(name) 66 | end 67 | end 68 | constant 69 | end 70 | 71 | # Returns configuration options for backburner 72 | # 73 | # @example 74 | # queue_config.max_job_retries => 3 75 | # 76 | def queue_config 77 | Backburner.configuration 78 | end 79 | 80 | # Expands a tube to include the prefix 81 | # 82 | # @example 83 | # expand_tube_name("foo_with_settings:3:100:6") # => .foo_with_settings 84 | # expand_tube_name("foo") # => .foo 85 | # expand_tube_name(FooJob) # => .foo-job 86 | # 87 | def expand_tube_name(tube) 88 | prefix = queue_config.tube_namespace 89 | separator = queue_config.namespace_separator 90 | queue_name = if tube.is_a?(String) 91 | tube 92 | elsif tube.respond_to?(:queue) # use queue name 93 | queue = tube.queue 94 | queue.is_a?(Proc) ? 
queue.call(tube) : queue 95 | elsif tube.is_a?(Proc) 96 | tube.call 97 | elsif tube.is_a?(Class) # no queue name, use default 98 | queue_config.primary_queue # tube.name 99 | else # turn into a string 100 | tube.to_s 101 | end 102 | [prefix.gsub(/\.$/, ''), dasherize(queue_name).gsub(/^#{prefix}/, '')].join(separator).gsub(/#{Regexp::escape(separator)}+/, separator).split(':').first 103 | end 104 | 105 | # Resolves job priority based on the value given. Can be integer, a class or nothing 106 | # 107 | # @example 108 | # resolve_priority(1000) => 1000 109 | # resolve_priority(FooBar) => 110 | # resolve_priority(nil) => 111 | # 112 | def resolve_priority(pri) 113 | if pri.respond_to?(:queue_priority) 114 | resolve_priority(pri.queue_priority) 115 | elsif pri.is_a?(String) || pri.is_a?(Symbol) # named priority 116 | resolve_priority(Backburner.configuration.priority_labels[pri.to_sym]) 117 | elsif pri.is_a?(Integer) # numerical 118 | pri 119 | else # default 120 | Backburner.configuration.default_priority 121 | end 122 | end 123 | 124 | # Resolves job respond timeout based on the value given. Can be integer, a class or nothing 125 | # 126 | # @example 127 | # resolve_respond_timeout(1000) => 1000 128 | # resolve_respond_timeout(FooBar) => 129 | # resolve_respond_timeout(nil) => 130 | # 131 | def resolve_respond_timeout(ttr) 132 | if ttr.respond_to?(:queue_respond_timeout) 133 | resolve_respond_timeout(ttr.queue_respond_timeout) 134 | elsif ttr.is_a?(Integer) # numerical 135 | ttr 136 | else # default 137 | Backburner.configuration.respond_timeout 138 | end 139 | end 140 | 141 | # Resolves max retries based on the value given. 
Can be integer, a class or nothing 142 | # 143 | # @example 144 | # resolve_max_job_retries(5) => 5 145 | # resolve_max_job_retries(FooBar) => 146 | # resolve_max_job_retries(nil) => 147 | # 148 | def resolve_max_job_retries(retries) 149 | if retries.respond_to?(:queue_max_job_retries) 150 | resolve_max_job_retries(retries.queue_max_job_retries) 151 | elsif retries.is_a?(Integer) # numerical 152 | retries 153 | else # default 154 | Backburner.configuration.max_job_retries 155 | end 156 | end 157 | 158 | # Resolves retry delay based on the value given. Can be integer, a class or nothing 159 | # 160 | # @example 161 | # resolve_retry_delay(5) => 5 162 | # resolve_retry_delay(FooBar) => 163 | # resolve_retry_delay(nil) => 164 | # 165 | def resolve_retry_delay(delay) 166 | if delay.respond_to?(:queue_retry_delay) 167 | resolve_retry_delay(delay.queue_retry_delay) 168 | elsif delay.is_a?(Integer) # numerical 169 | delay 170 | else # default 171 | Backburner.configuration.retry_delay 172 | end 173 | end 174 | 175 | # Resolves retry delay proc based on the value given. Can be proc, a class or nothing 176 | # 177 | # @example 178 | # resolve_retry_delay_proc(proc) => proc 179 | # resolve_retry_delay_proc(FooBar) => 180 | # resolve_retry_delay_proc(nil) => 181 | # 182 | def resolve_retry_delay_proc(proc) 183 | if proc.respond_to?(:queue_retry_delay_proc) 184 | resolve_retry_delay_proc(proc.queue_retry_delay_proc) 185 | elsif proc.is_a?(Proc) 186 | proc 187 | else # default 188 | Backburner.configuration.retry_delay_proc 189 | end 190 | end 191 | 192 | end # Helpers 193 | end # Backburner 194 | -------------------------------------------------------------------------------- /lib/backburner/hooks.rb: -------------------------------------------------------------------------------- 1 | module Backburner 2 | class Hooks 3 | class << self 4 | # Triggers all method hooks that match the given event type with specified arguments. 
5 | # 6 | # @example 7 | # invoke_hook_events(hookable, :before_enqueue, 'some', 'args') 8 | # invoke_hook_events(hookable, :after_perform, 5) 9 | # 10 | def invoke_hook_events(hookable, event, *args) 11 | res = find_hook_events(hookable, event).map { |e| hookable.send(e, *args) } 12 | return false if res.any? { |result| result == false } 13 | res 14 | end 15 | 16 | # Triggers all method hooks that match given around event type. Used for 'around' hooks 17 | # that stack over the original task cumulatively onto one another. 18 | # 19 | # The final block will be the one that actually invokes the 20 | # original task after calling all other around blocks. 21 | # 22 | # @example 23 | # around_hook_events(hookable, :around_perform) { hookable.perform } 24 | # 25 | def around_hook_events(hookable, event, *args, &block) 26 | raise "Please pass a block to hook events!" unless block_given? 27 | around_hooks = find_hook_events(hookable, event).reverse 28 | aggregate_filter = Proc.new { |&blk| blk.call } 29 | around_hooks.each do |ah| 30 | prior_around_filter = aggregate_filter 31 | aggregate_filter = Proc.new do |&blk| 32 | hookable.method(ah).call(*args) do 33 | prior_around_filter.call(&blk) 34 | end 35 | end 36 | end 37 | aggregate_filter.call(&block) 38 | end 39 | 40 | protected 41 | 42 | # Returns all methods that match given hook type 43 | # 44 | # @example 45 | # find_hook_events(:before_enqueue) 46 | # # => ['before_enqueue_foo', 'before_enqueue_bar'] 47 | # 48 | def find_hook_events(hookable, event) 49 | (hookable.methods - Object.methods).grep(/^#{event}/).sort 50 | end 51 | end 52 | end # Hooks 53 | end # Backburner 54 | -------------------------------------------------------------------------------- /lib/backburner/job.rb: -------------------------------------------------------------------------------- 1 | module Backburner 2 | # A single backburner job which can be processed and removed by the worker 3 | class Job < SimpleDelegator 4 | include 
Backburner::Helpers 5 | 6 | # Raises when a job times out 7 | class JobTimeout < RuntimeError; end 8 | class JobNotFound < RuntimeError; end 9 | class JobFormatInvalid < RuntimeError; end 10 | class RetryJob < RuntimeError; end 11 | 12 | attr_accessor :task, :body, :name, :args 13 | 14 | # Construct a job to be parsed and processed 15 | # 16 | # task is a reserved object containing the json body in the form of 17 | # { :class => "NewsletterSender", :args => ["foo@bar.com"] } 18 | # 19 | # @example 20 | # Backburner::Job.new(payload) 21 | # 22 | def initialize(task) 23 | @hooks = Backburner::Hooks 24 | @task = task 25 | @body = task.body.is_a?(Hash) ? task.body : Backburner.configuration.job_parser_proc.call(task.body) 26 | @name = body["class"] || body[:class] 27 | @args = body["args"] || body[:args] 28 | rescue => ex # Job was not valid format 29 | self.bury 30 | raise JobFormatInvalid, "Job body could not be parsed: #{ex.inspect}" 31 | end 32 | 33 | # Sets the delegator object to the underlying beaneater job 34 | # self.bury 35 | def __getobj__ 36 | __setobj__(@task) 37 | super 38 | end 39 | 40 | # Processes a job and handles any failure, deleting the job once complete 41 | # 42 | # @example 43 | # @task.process 44 | # 45 | def process 46 | # Invoke before hook and stop if false 47 | res = @hooks.invoke_hook_events(job_name, :before_perform, *args) 48 | return false unless res 49 | # Execute the job 50 | @hooks.around_hook_events(job_name, :around_perform, *args) do 51 | # We subtract one to ensure we timeout before beanstalkd does, except if: 52 | # a) ttr == 0, to support never timing out 53 | # b) ttr == 1, so that we don't accidentally set it to never time out 54 | # NB: A ttr of 1 will likely result in race conditions between 55 | # Backburner and beanstalkd and should probably be avoided 56 | timeout_job_after(task.ttr > 1 ? 
task.ttr - 1 : task.ttr) { job_class.perform(*args) } 57 | end 58 | task.delete 59 | # Invoke after perform hook 60 | @hooks.invoke_hook_events(job_name, :after_perform, *args) 61 | rescue => e 62 | @hooks.invoke_hook_events(job_name, :on_failure, e, *args) 63 | raise e 64 | end 65 | 66 | def bury 67 | @hooks.invoke_hook_events(job_name, :on_bury, *args) 68 | task.bury 69 | end 70 | 71 | def retry(count, delay) 72 | @hooks.invoke_hook_events(job_name, :on_retry, count, delay, *args) 73 | task.release(delay: delay) 74 | end 75 | 76 | # Returns the class for the job handler 77 | # 78 | # @example 79 | # job_class # => NewsletterSender 80 | # 81 | def job_class 82 | handler = try_job_class 83 | raise(JobNotFound, self.name) unless handler 84 | handler 85 | end 86 | 87 | protected 88 | 89 | # Attempts to return a constantized job name, otherwise reverts to the name string 90 | # 91 | # @example 92 | # job_name # => "SomeUnknownJob" 93 | def job_name 94 | handler = try_job_class 95 | handler ? handler : self.name 96 | end 97 | 98 | def try_job_class 99 | constantize(self.name) 100 | rescue NameError 101 | nil 102 | end 103 | 104 | # Timeout job within specified block after given time. 105 | # 106 | # @example 107 | # timeout_job_after(3) { do_something! 
} 108 | # 109 | def timeout_job_after(secs, &block) 110 | begin 111 | Timeout::timeout(secs) { yield } 112 | rescue Timeout::Error => e 113 | raise JobTimeout, "#{name}(#{(@args||[]).join(', ')}) hit #{secs}s timeout.\nbacktrace: #{e.backtrace}" 114 | end 115 | end 116 | 117 | end # Job 118 | end # Backburner 119 | -------------------------------------------------------------------------------- /lib/backburner/logger.rb: -------------------------------------------------------------------------------- 1 | require 'logger' 2 | 3 | module Backburner 4 | module Logger 5 | # Loads in instance and class levels 6 | def self.included(base) 7 | base.extend self 8 | end 9 | 10 | # Print out when a job is about to begin 11 | def log_job_begin(name, args) 12 | log_info "Work job #{name} with #{args.inspect}" 13 | Thread.current[:job_started_at] = Time.now 14 | end 15 | 16 | # Print out when a job completed 17 | # If message is nil, job is considered complete 18 | def log_job_end(name, message = nil) 19 | ellapsed = Time.now - job_started_at 20 | ms = (ellapsed.to_f * 1000).to_i 21 | action_word = message ? 'Finished' : 'Completed' 22 | log_info("#{action_word} #{name} in #{ms}ms #{message}") 23 | end 24 | 25 | # Returns true if the job logging started 26 | def job_started_at 27 | Thread.current[:job_started_at] 28 | end 29 | 30 | # Print a message to stdout 31 | # 32 | # @example 33 | # log_info("Working on task") 34 | # 35 | def log_info(msg) 36 | logger ? logger.info(msg) : puts(msg) 37 | end 38 | 39 | # Print an error to stderr 40 | # 41 | # @example 42 | # log_error("Task failed!") 43 | # 44 | def log_error(msg) 45 | logger ? 
logger.error(msg) : $stderr.puts(msg) 46 | end 47 | 48 | # Return logger if specified 49 | def logger 50 | Backburner.configuration.logger 51 | end 52 | end 53 | end 54 | -------------------------------------------------------------------------------- /lib/backburner/performable.rb: -------------------------------------------------------------------------------- 1 | require 'backburner/async_proxy' 2 | 3 | module Backburner 4 | module Performable 5 | def self.included(base) 6 | base.send(:include, InstanceMethods) 7 | base.send(:include, Backburner::Queue) 8 | base.extend ClassMethods 9 | end 10 | 11 | module InstanceMethods 12 | # Return proxy object to enqueue jobs for object 13 | # Options: `pri` (priority), `delay` (delay in secs), `ttr` (time to respond), `queue` (queue name) 14 | # @example 15 | # @model.async(:pri => 1000).do_something("foo") 16 | # 17 | def async(opts={}) 18 | Backburner::AsyncProxy.new(self.class, self.id, opts) 19 | end 20 | end # InstanceMethods 21 | 22 | module ClassMethods 23 | # Return proxy object to enqueue jobs for object 24 | # Options: `pri` (priority), `delay` (delay in secs), `ttr` (time to respond), `queue` (queue name) 25 | # @example 26 | # Model.async(:ttr => 300).do_something("foo") 27 | def async(opts={}) 28 | Backburner::AsyncProxy.new(self, nil, opts) 29 | end 30 | 31 | # Defines perform method for job processing 32 | # @example 33 | # perform(55, :do_something, "foo", "bar") 34 | def perform(id, method, *args) 35 | if id # instance 36 | find(id).send(method, *args) 37 | else # class method 38 | send(method, *args) 39 | end 40 | end # perform 41 | 42 | # Always handle an instance method asynchronously 43 | # @example 44 | # User.handle_asynchronously :send_welcome_email, queue: 'send-mail', delay: 10 45 | def handle_asynchronously(method, opts={}) 46 | Backburner::Performable.handle_asynchronously(self, method, opts) 47 | end 48 | 49 | # Always handle a class method asynchronously 50 | # @example 51 | # 
User.handle_static_asynchronously :update_recent_visitors, ttr: 300 52 | def handle_static_asynchronously(method, opts={}) 53 | Backburner::Performable.handle_static_asynchronously(self, method, opts) 54 | end 55 | end # ClassMethods 56 | 57 | 58 | # Make all calls to an instance method asynchronous. The given opts will be passed 59 | # to the async method. 60 | # @example 61 | # Backburner::Performable.handle_asynchronously(MyObject, :long_task, queue: 'long-tasks') 62 | # NB: The method called on the async proxy will be ""#{method}_without_async". This 63 | # will also be what's given to the Worker.enqueue method so your workers need 64 | # to know about that. It shouldn't be a problem unless the producer and consumer are 65 | # from different codebases (or anywhere they don't both call the handle_asynchronously 66 | # method when booting up) 67 | def self.handle_asynchronously(klass, method, opts={}) 68 | _handle_asynchronously(klass, klass, method, opts) 69 | end 70 | 71 | # Make all calls to a class method asynchronous. The given opts will be passed 72 | # to the async method. 
Please see the NB on #handle_asynchronously 73 | def self.handle_static_asynchronously(klass, method, opts={}) 74 | _handle_asynchronously(klass, klass.singleton_class, method, opts) 75 | end 76 | 77 | def self._handle_asynchronously(klass, klass_eval_scope, method, opts={}) 78 | aliased_method, punctuation = method.to_s.sub(/([?!=])$/, ''), $1 79 | with_async_name = :"#{aliased_method}_with_async#{punctuation}" 80 | without_async_name = :"#{aliased_method}_without_async#{punctuation}" 81 | 82 | klass.send(:include, Performable) unless included_modules.include?(Performable) 83 | klass_eval_scope.class_eval do 84 | define_method with_async_name do |*args| 85 | async(opts).__send__ without_async_name, *args 86 | end 87 | alias_method without_async_name, method.to_sym 88 | alias_method method.to_sym, with_async_name 89 | end 90 | end 91 | private_class_method :_handle_asynchronously 92 | 93 | 94 | end # Performable 95 | end # Backburner 96 | -------------------------------------------------------------------------------- /lib/backburner/queue.rb: -------------------------------------------------------------------------------- 1 | module Backburner 2 | module Queue 3 | def self.included(base) 4 | base.instance_variable_set(:@queue_name, nil) 5 | base.instance_variable_set(:@queue_priority, nil) 6 | base.instance_variable_set(:@queue_respond_timeout, nil) 7 | base.instance_variable_set(:@queue_max_job_retries, nil) 8 | base.instance_variable_set(:@queue_retry_delay, nil) 9 | base.instance_variable_set(:@queue_retry_delay_proc, nil) 10 | base.instance_variable_set(:@queue_jobs_limit, nil) 11 | base.instance_variable_set(:@queue_garbage_limit, nil) 12 | base.instance_variable_set(:@queue_retry_limit, nil) 13 | base.extend ClassMethods 14 | Backburner::Worker.known_queue_classes << base 15 | end 16 | 17 | module ClassMethods 18 | # Returns or assigns queue name for this job. 
19 | # 20 | # @example 21 | # queue "some.task.name" 22 | # @klass.queue # => "some.task.name" 23 | # 24 | def queue(name=nil) 25 | if name 26 | @queue_name = name 27 | else # accessor 28 | (@queue_name.is_a?(Proc) ? @queue_name.call(self) : @queue_name) || Backburner.configuration.primary_queue 29 | end 30 | end 31 | 32 | # Returns or assigns queue priority for this job 33 | # 34 | # @example 35 | # queue_priority 120 36 | # @klass.queue_priority # => 120 37 | # 38 | def queue_priority(pri=nil) 39 | if pri 40 | @queue_priority = pri 41 | else # accessor 42 | @queue_priority 43 | end 44 | end 45 | 46 | # Returns or assigns queue respond_timeout for this job 47 | # 48 | # @example 49 | # queue_respond_timeout 120 50 | # @klass.queue_respond_timeout # => 120 51 | # 52 | def queue_respond_timeout(ttr=nil) 53 | if ttr 54 | @queue_respond_timeout = ttr 55 | else # accessor 56 | @queue_respond_timeout 57 | end 58 | end 59 | 60 | # Returns or assigns queue max_job_retries for this job 61 | # 62 | # @example 63 | # queue_max_job_retries 120 64 | # @klass.queue_max_job_retries # => 120 65 | # 66 | def queue_max_job_retries(delay=nil) 67 | if delay 68 | @queue_max_job_retries = delay 69 | else # accessor 70 | @queue_max_job_retries 71 | end 72 | end 73 | 74 | # Returns or assigns queue retry_delay for this job 75 | # 76 | # @example 77 | # queue_retry_delay 120 78 | # @klass.queue_retry_delay # => 120 79 | # 80 | def queue_retry_delay(delay=nil) 81 | if delay 82 | @queue_retry_delay = delay 83 | else # accessor 84 | @queue_retry_delay 85 | end 86 | end 87 | 88 | # Returns or assigns queue retry_delay_proc for this job 89 | # 90 | # @example 91 | # queue_retry_delay_proc lambda { |min_retry_delay, num_retries| min_retry_delay + (num_retries ** 2) } 92 | # @klass.queue_retry_delay_proc # => lambda { |min_retry_delay, num_retries| min_retry_delay + (num_retries ** 2) } 93 | # 94 | def queue_retry_delay_proc(proc=nil) 95 | if proc 96 | @queue_retry_delay_proc = proc 97 | else # 
accessor 98 | @queue_retry_delay_proc 99 | end 100 | end 101 | 102 | # Returns or assigns queue parallel active jobs limit (only ThreadsOnFork and Threading workers) 103 | # 104 | # @example 105 | # queue_jobs_limit 5 106 | # @klass.queue_jobs_limit # => 5 107 | # 108 | def queue_jobs_limit(limit=nil) 109 | if limit 110 | @queue_jobs_limit = limit 111 | else #accessor 112 | @queue_jobs_limit 113 | end 114 | end 115 | 116 | # Returns or assigns queue jobs garbage limit (only ThreadsOnFork Worker) 117 | # 118 | # @example 119 | # queue_garbage_limit 1000 120 | # @klass.queue_garbage_limit # => 1000 121 | # 122 | def queue_garbage_limit(limit=nil) 123 | if limit 124 | @queue_garbage_limit = limit 125 | else #accessor 126 | @queue_garbage_limit 127 | end 128 | end 129 | 130 | # Returns or assigns queue retry limit (only ThreadsOnFork worker) 131 | # 132 | # @example 133 | # queue_retry_limit 6 134 | # @klass.queue_retry_limit # => 6 135 | # 136 | def queue_retry_limit(limit=nil) 137 | if limit 138 | @queue_retry_limit = limit 139 | else #accessor 140 | @queue_retry_limit 141 | end 142 | end 143 | end # ClassMethods 144 | end # Queue 145 | end # Backburner 146 | -------------------------------------------------------------------------------- /lib/backburner/tasks.rb: -------------------------------------------------------------------------------- 1 | # require 'backburner/tasks' 2 | # will give you the backburner tasks 3 | 4 | namespace :backburner do 5 | # QUEUE=foo,bar,baz rake backburner:work 6 | desc "Start backburner worker using default worker" 7 | task :work => :environment do 8 | Backburner.work get_queues 9 | end 10 | 11 | namespace :simple do 12 | # QUEUE=foo,bar,baz rake backburner:simple:work 13 | desc "Starts backburner worker using simple processing" 14 | task :work => :environment do 15 | Backburner.work get_queues, :worker => Backburner::Workers::Simple 16 | end 17 | end # simple 18 | 19 | namespace :forking do 20 | # QUEUE=foo,bar,baz rake 
# require 'backburner/tasks'
# will give you the backburner tasks

namespace :backburner do
  # QUEUE=foo,bar,baz rake backburner:work
  desc "Start backburner worker using default worker"
  task :work => :environment do
    Backburner.work get_queues
  end

  namespace :simple do
    # QUEUE=foo,bar,baz rake backburner:simple:work
    desc "Starts backburner worker using simple processing"
    task :work => :environment do
      Backburner.work get_queues, :worker => Backburner::Workers::Simple
    end
  end # simple

  namespace :forking do
    # QUEUE=foo,bar,baz rake backburner:forking:work
    desc "Starts backburner worker using fork processing"
    task :work => :environment do
      Backburner.work get_queues, :worker => Backburner::Workers::Forking
    end
  end # forking

  namespace :threads_on_fork do
    # QUEUE=twitter:10:5000:5,parse_page,send_mail,verify_birthday THREADS=2 GARBAGE=1000 rake backburner:threads_on_fork:work
    # twitter tube will have 10 threads, garbage after 5k executions and retry 5 times.
    # THREADS/GARBAGE env vars set the global defaults; the per-tube
    # "name:threads:garbage:retries" syntax overrides them per queue.
    desc "Starts backburner worker using threads_on_fork processing"
    task :work => :environment do
      threads = ENV['THREADS'].to_i
      garbage = ENV['GARBAGE'].to_i
      Backburner::Workers::ThreadsOnFork.threads_number = threads if threads > 0
      Backburner::Workers::ThreadsOnFork.garbage_after = garbage if garbage > 0
      Backburner.work get_queues, :worker => Backburner::Workers::ThreadsOnFork
    end
  end # threads_on_fork

  namespace :threading do
    # QUEUE=twitter:10,parse_page,send_mail,verify_birthday THREADS=2 rake backburner:threading:work
    # twitter tube will have 10 threads
    desc "Starts backburner worker using threading processing"
    task :work => :environment do
      threads = ENV['THREADS'].to_i
      Backburner::Workers::Threading.threads_number = threads if threads > 0
      Backburner.work get_queues, :worker => Backburner::Workers::Threading
    end
  end # threading

  # Returns the queue names from the QUEUE env var (comma-separated),
  # or nil when unset so workers fall back to their default queues.
  # NOTE(review): the inline `rescue nil` silently swallows any error here —
  # consider removing it, since the ternary already guards against nil.
  def get_queues
    (ENV["QUEUE"] ? ENV["QUEUE"].split(',') : nil) rescue nil
  end
end
ENV["QUEUE"].split(',') : nil) rescue nil 53 | end 54 | end 55 | -------------------------------------------------------------------------------- /lib/backburner/version.rb: -------------------------------------------------------------------------------- 1 | module Backburner 2 | VERSION = "1.6.1" 3 | end 4 | -------------------------------------------------------------------------------- /lib/backburner/worker.rb: -------------------------------------------------------------------------------- 1 | require 'backburner/job' 2 | 3 | module Backburner 4 | # 5 | # @abstract Subclass and override {#process_tube_names}, {#prepare} and {#start} to implement 6 | # a custom Worker class. 7 | # 8 | class Worker 9 | include Backburner::Helpers 10 | include Backburner::Logger 11 | 12 | # Backburner::Worker.known_queue_classes 13 | # List of known_queue_classes 14 | class << self 15 | attr_writer :known_queue_classes 16 | def known_queue_classes; @known_queue_classes ||= []; end 17 | end 18 | 19 | # Enqueues a job to be processed later by a worker. 20 | # Options: `pri` (priority), `delay` (delay in secs), `ttr` (time to respond), `queue` (queue name) 21 | # 22 | # @raise [Beaneater::NotConnected] If beanstalk fails to connect. 23 | # @example 24 | # Backburner::Worker.enqueue NewsletterSender, [self.id, user.id], :ttr => 1000 25 | # 26 | def self.enqueue(job_class, args=[], opts={}) 27 | pri = resolve_priority(opts[:pri] || job_class) 28 | delay = [0, opts[:delay].to_i].max 29 | ttr = resolve_respond_timeout(opts[:ttr] || job_class) 30 | res = Backburner::Hooks.invoke_hook_events(job_class, :before_enqueue, *args) 31 | 32 | return nil unless res # stop if hook is false 33 | 34 | data = { :class => job_class.name, :args => args } 35 | queue = opts[:queue] && (Proc === opts[:queue] ? 
opts[:queue].call(job_class) : opts[:queue]) 36 | 37 | begin 38 | response = nil 39 | connection = Backburner::Connection.new(Backburner.configuration.beanstalk_url) 40 | connection.retryable do 41 | tube = connection.tubes[expand_tube_name(queue || job_class)] 42 | serialized_data = Backburner.configuration.job_serializer_proc.call(data) 43 | response = tube.put(serialized_data, :pri => pri, :delay => delay, :ttr => ttr) 44 | end 45 | return nil unless Backburner::Hooks.invoke_hook_events(job_class, :after_enqueue, *args) 46 | ensure 47 | connection.close if connection 48 | end 49 | 50 | response 51 | end 52 | 53 | # Starts processing jobs with the specified tube_names. 54 | # 55 | # @example 56 | # Backburner::Worker.start(["foo.tube.name"]) 57 | # 58 | def self.start(tube_names=nil) 59 | begin 60 | self.new(tube_names).start 61 | rescue SystemExit 62 | # do nothing 63 | end 64 | end 65 | 66 | # List of tube names to be watched and processed 67 | attr_accessor :tube_names, :connection 68 | 69 | # Constructs a new worker for processing jobs within specified tubes. 70 | # 71 | # @example 72 | # Worker.new(['test.job']) 73 | def initialize(tube_names=nil) 74 | @connection = new_connection 75 | @tube_names = self.process_tube_names(tube_names) 76 | register_signal_handlers! 77 | end 78 | 79 | # Starts processing ready jobs indefinitely. 80 | # Primary way to consume and process jobs in specified tubes. 81 | # 82 | # @example 83 | # @worker.start 84 | # 85 | def start 86 | raise NotImplementedError 87 | end 88 | 89 | # Used to prepare the job queues before job processing is initiated. 90 | # 91 | # @raise [Beaneater::NotConnected] If beanstalk fails to connect. 92 | # @example 93 | # @worker.prepare 94 | # 95 | # @abstract Define this in your worker subclass 96 | # to be run once before processing. 
Recommended to watch tubes 97 | # or print a message to the logs with 'log_info' 98 | # 99 | def prepare 100 | raise NotImplementedError 101 | end 102 | 103 | # Triggers this worker to shutdown 104 | def shutdown 105 | Thread.new do 106 | log_info 'Worker exiting...' 107 | end 108 | Kernel.exit 109 | end 110 | 111 | # Processes tube_names given tube_names array. 112 | # Should return normalized tube_names as an array of strings. 113 | # 114 | # @example 115 | # process_tube_names([['foo'], ['bar']]) 116 | # => ['foo', 'bar', 'baz'] 117 | # 118 | # @note This method can be overridden in inherited workers 119 | # to add more complex tube name processing. 120 | def process_tube_names(tube_names) 121 | compact_tube_names(tube_names) 122 | end 123 | 124 | # Performs a job by reserving a job from beanstalk and processing it 125 | # 126 | # @example 127 | # @worker.work_one_job 128 | # @raise [Beaneater::NotConnected] If beanstalk fails to connect multiple times. 129 | def work_one_job(conn = connection) 130 | begin 131 | job = reserve_job(conn) 132 | rescue Beaneater::TimedOutError => e 133 | return 134 | end 135 | 136 | self.log_job_begin(job.name, job.args) 137 | job.process 138 | self.log_job_end(job.name) 139 | 140 | rescue Backburner::Job::JobFormatInvalid => e 141 | self.log_error self.exception_message(e) 142 | rescue => e # Error occurred processing job 143 | self.log_error self.exception_message(e) unless e.is_a?(Backburner::Job::RetryJob) 144 | 145 | unless job 146 | self.log_error "Error occurred before we were able to assign a job. Giving up without retrying!" 147 | return 148 | end 149 | 150 | # NB: There's a slight chance here that the connection to beanstalkd has 151 | # gone down between the time we reserved / processed the job and here. 
152 | num_retries = job.stats.releases 153 | max_job_retries = resolve_max_job_retries(job.job_class) 154 | retry_status = "failed: attempt #{num_retries+1} of #{max_job_retries+1}" 155 | if num_retries < max_job_retries # retry again 156 | retry_delay = resolve_retry_delay(job.job_class) 157 | delay = resolve_retry_delay_proc(job.job_class).call(retry_delay, num_retries) rescue retry_delay 158 | job.retry(num_retries + 1, delay) 159 | self.log_job_end(job.name, "#{retry_status}, retrying in #{delay}s") if job_started_at 160 | else # retries failed, bury 161 | job.bury 162 | self.log_job_end(job.name, "#{retry_status}, burying") if job_started_at 163 | end 164 | 165 | handle_error(e, job.name, job.args, job) 166 | end 167 | 168 | 169 | protected 170 | 171 | # Return a new connection instance 172 | def new_connection 173 | Connection.new(Backburner.configuration.beanstalk_url) { |conn| Backburner::Hooks.invoke_hook_events(self, :on_reconnect, conn) } 174 | end 175 | 176 | # Reserve a job from the watched queues 177 | def reserve_job(conn, reserve_timeout = Backburner.configuration.reserve_timeout) 178 | Backburner::Job.new(conn.tubes.reserve(reserve_timeout)) 179 | end 180 | 181 | # Returns a list of all tubes known within the system 182 | # Filtered for tubes that match the known prefix 183 | def all_existing_queues 184 | known_queues = Backburner::Worker.known_queue_classes.map(&:queue) 185 | existing_tubes = self.connection.tubes.all.map(&:name).select { |tube| tube =~ /^#{queue_config.tube_namespace}/ } 186 | existing_tubes + known_queues + [queue_config.primary_queue] 187 | end 188 | 189 | 190 | # Handles an error according to custom definition 191 | # Used when processing a job that errors out 192 | def handle_error(e, name, args, job) 193 | if error_handler = Backburner.configuration.on_error 194 | if error_handler.arity == 1 195 | error_handler.call(e) 196 | elsif error_handler.arity == 3 197 | error_handler.call(e, name, args) 198 | else 199 | 
error_handler.call(e, name, args, job) 200 | end 201 | end 202 | end 203 | 204 | # Normalizes tube names given array of tube_names 205 | # Compacts nil items, flattens arrays, sets tubes to nil if no valid names 206 | # Loads default tubes when no tubes given. 207 | def compact_tube_names(tube_names) 208 | tube_names = tube_names.first if tube_names && tube_names.size == 1 && tube_names.first.is_a?(Array) 209 | tube_names = Array(tube_names).compact if tube_names && Array(tube_names).compact.size > 0 210 | tube_names = nil if tube_names && tube_names.compact.empty? 211 | tube_names ||= Backburner.default_queues.any? ? Backburner.default_queues : all_existing_queues 212 | Array(tube_names).uniq 213 | end 214 | 215 | # Registers signal handlers TERM and INT to trigger 216 | def register_signal_handlers! 217 | trap('TERM') { shutdown } 218 | trap('INT') { shutdown } 219 | end 220 | end # Worker 221 | end # Backburner 222 | -------------------------------------------------------------------------------- /lib/backburner/workers/forking.rb: -------------------------------------------------------------------------------- 1 | module Backburner 2 | module Workers 3 | class Forking < Worker 4 | # Used to prepare job queues before processing jobs. 5 | # Setup beanstalk tube_names and watch all specified tubes for jobs. 6 | # 7 | # @raise [Beaneater::NotConnected] If beanstalk fails to connect. 8 | # @example 9 | # @worker.prepare 10 | # 11 | def prepare 12 | self.tube_names.map! { |name| expand_tube_name(name) }.uniq! 13 | log_info "Working #{tube_names.size} queues: [ #{tube_names.join(', ')} ]" 14 | self.connection.tubes.watch!(*self.tube_names) 15 | end 16 | 17 | # Starts processing new jobs indefinitely. 18 | # Primary way to consume and process jobs in specified tubes. 
19 | # 20 | # @example 21 | # @worker.start 22 | # 23 | def start 24 | prepare 25 | loop { fork_one_job } 26 | end 27 | 28 | # Need to re-establish the connection to the server(s) after forking 29 | # Waits for a job, works the job, and exits 30 | def fork_one_job 31 | pid = Process.fork do 32 | work_one_job 33 | coolest_exit 34 | end 35 | Process.wait(pid) 36 | end 37 | 38 | def on_reconnect(conn) 39 | @connection = conn 40 | prepare 41 | end 42 | 43 | # Exit with Kernel.exit! to avoid at_exit callbacks that should belongs to 44 | # parent process 45 | # We will use exitcode 99 that means the fork reached the garbage number 46 | def coolest_exit 47 | Kernel.exit! 99 48 | end 49 | 50 | end 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /lib/backburner/workers/simple.rb: -------------------------------------------------------------------------------- 1 | module Backburner 2 | module Workers 3 | class Simple < Worker 4 | # Used to prepare job queues before processing jobs. 5 | # Setup beanstalk tube_names and watch all specified tubes for jobs. 6 | # 7 | # @raise [Beaneater::NotConnected] If beanstalk fails to connect. 8 | # @example 9 | # @worker.prepare 10 | # 11 | def prepare 12 | self.tube_names.map! { |name| expand_tube_name(name) }.uniq! 13 | log_info "Working #{tube_names.size} queues: [ #{tube_names.join(', ')} ]" 14 | self.connection.tubes.watch!(*self.tube_names) 15 | end 16 | 17 | # Starts processing new jobs indefinitely. 18 | # Primary way to consume and process jobs in specified tubes. 
19 | # 20 | # @example 21 | # @worker.start 22 | # 23 | def start 24 | prepare 25 | loop { work_one_job } 26 | end 27 | end # Basic 28 | end # Workers 29 | end # Backburner 30 | -------------------------------------------------------------------------------- /lib/backburner/workers/threading.rb: -------------------------------------------------------------------------------- 1 | require 'concurrent' 2 | 3 | module Backburner 4 | module Workers 5 | class Threading < Worker 6 | attr_accessor :self_read, :self_write, :exit_on_shutdown 7 | 8 | @shutdown_timeout = 10 9 | 10 | class << self 11 | attr_accessor :threads_number 12 | attr_accessor :shutdown_timeout 13 | end 14 | 15 | # Custom initializer just to set @tubes_data 16 | def initialize(*args) 17 | @tubes_data = {} 18 | super 19 | self.process_tube_options 20 | @exit_on_shutdown = true 21 | end 22 | 23 | # Used to prepare job queues before processing jobs. 24 | # Setup beanstalk tube_names and watch all specified tubes for jobs. 25 | # 26 | # @raise [Beaneater::NotConnected] If beanstalk fails to connect. 27 | # @example 28 | # @worker.prepare 29 | # 30 | def prepare 31 | self.tube_names.map! { |name| expand_tube_name(name) }.uniq! 32 | log_info "Working #{tube_names.size} queues: [ #{tube_names.join(', ')} ]" 33 | @thread_pools = {} 34 | @tubes_data.each do |name, config| 35 | max_threads = (config[:threads] || self.class.threads_number || ::Concurrent.processor_count).to_i 36 | @thread_pools[name] = (::Concurrent::ThreadPoolExecutor.new(min_threads: 1, max_threads: max_threads)) 37 | end 38 | end 39 | 40 | # Starts processing new jobs indefinitely. 41 | # Primary way to consume and process jobs in specified tubes. 
      # Starts processing new jobs indefinitely.
      # Primary way to consume and process jobs in specified tubes.
      # Spins up one connection+worker loop per thread in each tube's pool;
      # each loop runs until @in_shutdown is flagged.
      #
      # @example
      #   @worker.start
      #
      def start(wait=true)
        prepare

        @thread_pools.each do |tube_name, pool|
          pool.max_length.times do
            # Create a new connection and set it up to listen on this tube name
            connection = new_connection.tap{ |conn| conn.tubes.watch!(tube_name) }
            connection.on_reconnect = lambda { |conn| conn.tubes.watch!(tube_name) }

            # Make it work jobs using its own connection per thread
            pool.post(connection) do |memo_connection|
              # TODO: use read-write lock?
              loop do
                begin
                  break if @in_shutdown
                  work_one_job(memo_connection)
                rescue => e
                  log_error("Exception caught in thread pool loop. Continuing. -> #{e.message}\nBacktrace: #{e.backtrace}")
                end
              end

              connection.close
            end
          end
        end

        wait_for_shutdown! if wait
      end

      # FIXME: We can't use this on_reconnect method since we don't know which thread
      # pool the connection belongs to (and therefore we can't re-watch the right tubes).
      # However, we set the individual connections' on_reconnect method in #start
      # def on_reconnect(conn)
      #   watch_tube(@watching_tube, conn) if @watching_tube
      # end

      # Process the special tube_names of Threading worker:
      # The format is tube_name:custom_threads_limit
      #
      # @example
      #   process_tube_names(['foo:10', 'lol'])
      #   => ['foo', lol']
      def process_tube_names(tube_names)
        names = compact_tube_names(tube_names)
        if names.nil?
          nil
        else
          names.map do |name|
            data = name.split(":")
            tube_name = data.first
            # `rescue nil` covers names without a ":limit" suffix (data[1] is nil)
            threads_number = data[1].empty? ? nil : data[1].to_i rescue nil
            @tubes_data[expand_tube_name(tube_name)] = {
              :threads => threads_number
            }
            tube_name
          end
        end
      end

      # Process the tube settings
      # This overrides @tubes_data set by process_tube_names method. So a tube has name 'super_job:5'
      # and the tube class has setting queue_jobs_limit 10, the result limit will be 10
      # If the tube is known by existing beanstalkd queue, but not by class - skip it
      #
      def process_tube_options
        Backburner::Worker.known_queue_classes.each do |queue|
          next if @tubes_data[expand_tube_name(queue)].nil?
          queue_settings = {
            :threads => queue.queue_jobs_limit
          }
          # keep the class-level setting only when it is non-nil
          @tubes_data[expand_tube_name(queue)].merge!(queue_settings){|k, v1, v2| v2.nil? ? v1 : v2 }
        end
      end

      # Wait for the shutdown signal.
      # Blocks on the self-pipe; a trapped TERM/INT writes to self_write
      # (see register_signal_handlers!), which wakes IO.select here, and the
      # raised Interrupt is converted into a graceful shutdown.
      def wait_for_shutdown!
        raise Interrupt while IO.select([self_read])
      rescue Interrupt
        shutdown
      end

      # Graceful pool shutdown: stop accepting work, then wait up to
      # shutdown_timeout (shared across all pools) for termination;
      # kill any pool that did not finish in time.
      def shutdown_threadpools
        @thread_pools.each { |_name, pool| pool.shutdown }
        shutdown_time = Time.now
        @in_shutdown = true
        all_shutdown = @thread_pools.all? do |_name, pool|
          time_to_wait = self.class.shutdown_timeout - (Time.now - shutdown_time).to_i
          pool.wait_for_termination(time_to_wait) if time_to_wait > 0
        end
      rescue Interrupt
        # a second signal during graceful shutdown forces an immediate kill
        log_info "graceful shutdown aborted, shutting down immediately"
      ensure
        kill unless all_shutdown
      end

      # Hard-kill any pools that are still running.
      def kill
        @thread_pools.each { |_name, pool| pool.kill unless pool.shutdown? }
      end

      # Full worker shutdown; exits the process unless @exit_on_shutdown
      # was disabled (useful for tests/embedding).
      def shutdown
        log_info "beginning graceful worker shutdown"
        shutdown_threadpools
        super if @exit_on_shutdown
      end
module Backburner
  module Workers
    class ThreadsOnFork < Worker
      class << self
        attr_accessor :shutdown       # true once the parent is shutting down; stops re-forking
        attr_accessor :threads_number # default threads per forked child
        attr_accessor :garbage_after  # default job count after which a child exits (code 99)
        attr_accessor :is_child       # set inside forks so child processes skip parent-only logic

        # return the pids of all alive children/forks
        # (probes each pid with signal 0 and prunes dead ones)
        def child_pids
          return [] if is_child
          @child_pids ||= []
          tmp_ids = []
          for id in @child_pids
            next if id.to_i == Process.pid
            begin
              Process.kill(0, id)
              tmp_ids << id
            rescue Errno::ESRCH
            end
          end
          @child_pids = tmp_ids if @child_pids != tmp_ids
          @child_pids
        end

        # Send a SIGTERM signal to all children
        # This is the same of a normal exit
        # We are simply asking the children to exit
        def stop_forks
          for id in child_pids
            begin
              Process.kill("SIGTERM", id)
            rescue Errno::ESRCH
            end
          end
        end

        # Send a SIGKILL signal to all children
        # This is the same of assassinate
        # We are KILLING those folks that don't obey us
        def kill_forks
          for id in child_pids
            begin
              Process.kill("SIGKILL", id)
            rescue Errno::ESRCH
            end
          end
        end

        # Two-phase teardown from the parent: SIGTERM, grace period of 1s,
        # then SIGKILL + reap whatever is left.
        def finish_forks
          return if is_child

          ids = child_pids
          if ids.length > 0
            puts "[ThreadsOnFork workers] Stopping forks: #{ids.join(", ")}"
            stop_forks
            Kernel.sleep 1
            ids = child_pids
            if ids.length > 0
              puts "[ThreadsOnFork workers] Killing remaining forks: #{ids.join(", ")}"
              kill_forks
              Process.waitall
            end
          end
        end
      end

      # Custom initializer just to set @tubes_data
      def initialize(*args)
        @tubes_data = {}
        super
        self.process_tube_options
      end

      # Process the special tube_names of ThreadsOnFork worker
      # The idea is tube_name:custom_threads_limit:custom_garbage_limit:custom_retries
      # Any custom can be ignore. So if you want to set just the custom_retries
      # you will need to write this 'tube_name:::10'
      #
      # @example
      #   process_tube_names(['foo:10:5:1', 'bar:2::3', 'lol'])
      #   => ['foo', 'bar', 'lol']
      def process_tube_names(tube_names)
        names = compact_tube_names(tube_names)
        if names.nil?
          nil
        else
          names.map do |name|
            data = name.split(":")
            tube_name = data.first
            # `rescue nil` covers missing segments (data[n] is nil)
            threads_number = data[1].empty? ? nil : data[1].to_i rescue nil
            garbage_number = data[2].empty? ? nil : data[2].to_i rescue nil
            retries_number = data[3].empty? ? nil : data[3].to_i rescue nil
            @tubes_data[expand_tube_name(tube_name)] = {
              :threads => threads_number,
              :garbage => garbage_number,
              :retries => retries_number
            }
            tube_name
          end
        end
      end

      # Process the tube settings
      # This overrides @tubes_data set by process_tube_names method. So a tube has name 'super_job:5:20:10'
      # and the tube class has setting queue_jobs_limit 10, the result limit will be 10
      # If the tube is known by existing beanstalkd queue, but not by class - skip it
      #
      def process_tube_options
        Backburner::Worker.known_queue_classes.each do |queue|
          next if @tubes_data[expand_tube_name(queue)].nil?
          queue_settings = {
            :threads => queue.queue_jobs_limit,
            :garbage => queue.queue_garbage_limit,
            :retries => queue.queue_retry_limit
          }
          # keep class-level settings only when they are non-nil
          @tubes_data[expand_tube_name(queue)].merge!(queue_settings){|k, v1, v2| v2.nil? ? v1 : v2 }
        end
      end

      # Normalizes tube names and logs what will be worked; unlike other
      # workers this does not watch tubes here (each fork watches its own).
      def prepare
        self.tube_names ||= Backburner.default_queues.any? ? Backburner.default_queues : all_existing_queues
        self.tube_names = Array(self.tube_names)
        tube_names.map! { |name| expand_tube_name(name) }.uniq!
        tube_display_names = tube_names.map{|name| "#{name}:#{@tubes_data[name].values}"}
        log_info "Working #{tube_names.size} queues: [ #{tube_display_names.join(', ')} ]"
      end

      # For each tube we will call fork_and_watch to create the fork
      # The lock argument define if this method should block or no
      def start(lock=true)
        prepare
        tube_names.each do |name|
          fork_and_watch(name)
        end

        if lock
          sleep 0.1 while true
        end
      end

      # Make the fork and create a thread to watch the child process
      # The exit code '99' means that the fork exited because of the garbage limit
      # Any other code is an error; the supervising thread re-forks either way
      # until the class-level shutdown flag is set.
      def fork_and_watch(name)
        create_thread(name) do |tube_name|
          until self.class.shutdown
            pid = fork_tube(tube_name)
            _, status = wait_for_process(pid)

            # 99 = garbaged
            if status.exitstatus != 99
              log_error("Catastrophic failure: tube #{tube_name} exited with code #{status.exitstatus}.")
            end
          end
        end
      end

      # This makes easy to test
      def fork_tube(name)
        fork_it do
          fork_inner(name)
        end
      end

      # Here we are already on the forked child
      # We will watch just the selected tube and change the configuration of
      # queue_config.max_job_retries if needed
      #
      # If we limit the number of threads to 1 it will just run in a loop without
      # creating any extra thread.
      def fork_inner(name)
        if @tubes_data[name]
          queue_config.max_job_retries = @tubes_data[name][:retries] if @tubes_data[name][:retries]
        else
          @tubes_data[name] = {}
        end
        @garbage_after = @tubes_data[name][:garbage] || self.class.garbage_after
        @threads_number = (@tubes_data[name][:threads] || self.class.threads_number || 1).to_i

        @runs = 0

        if @threads_number == 1
          watch_tube(name)
          run_while_can
        else
          threads_count = Thread.list.count
          @threads_number.times do
            create_thread do
              begin
                conn = new_connection
                watch_tube(name, conn)
                run_while_can(conn)
              ensure
                conn.close if conn
              end
            end
          end
          # wait until all worker threads have finished before exiting the fork
          sleep 0.1 while Thread.list.count > threads_count
        end

        coolest_exit
      end

      # Run work_one_job while we can
      # (until @runs reaches the garbage limit, if one is set)
      def run_while_can(conn = connection)
        while @garbage_after.nil? or @garbage_after > @runs
          @runs += 1 # FIXME: Likely race condition
          work_one_job(conn)
        end
      end

      # Shortcut for watching a tube on our beanstalk connection
      def watch_tube(name, conn = connection)
        @watching_tube = name
        conn.tubes.watch!(name)
      end

      # Re-watch the current tube on the repaired connection after a reconnect.
      def on_reconnect(conn)
        watch_tube(@watching_tube, conn) if @watching_tube
      end

      # Exit with Kernel.exit! to avoid at_exit callbacks that should belongs to
      # parent process
      # We will use exitcode 99 that means the fork reached the garbage number
      def coolest_exit
        Kernel.exit! 99
      end

      # Create a thread. Easy to test
      def create_thread(*args, &block)
        Thread.new(*args, &block)
      end

      # Wait for a specific process. Easy to test
      def wait_for_process(pid)
        out = Process.wait2(pid)
        self.class.child_pids.delete(pid)
        out
      end

      # Forks the specified block and adds the process to the child process pool
      # FIXME: If blk.call breaks then the pid isn't added to child_pids and is
      # never shutdown
      def fork_it(&blk)
        pid = Kernel.fork do
          self.class.is_child = true
          $0 = "[ThreadsOnFork worker] parent: #{Process.ppid}"
          blk.call
        end
        self.class.child_pids << pid
        pid
      end
    end
  end
end

# Parent-process exit hook: mark shutdown (so supervising threads stop
# re-forking) and tear down any remaining children.
at_exit do
  unless Backburner::Workers::ThreadsOnFork.is_child
    Backburner::Workers::ThreadsOnFork.shutdown = true
  end
  Backburner::Workers::ThreadsOnFork.finish_forks
end
scheduled job' do 30 | active_job = TestJob.set(wait: 5.seconds).perform_later('first') 31 | 32 | assert_raises(Timeout::Error) do 33 | pop_one_job('default') 34 | end 35 | 36 | sleep 5 37 | pop_one_job('default') do |job, body| 38 | assert_equal active_job.job_id, body['args'].first['job_id'] 39 | end 40 | end 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /test/async_proxy_test.rb: -------------------------------------------------------------------------------- 1 | require File.expand_path('../test_helper', __FILE__) 2 | 3 | class AsyncUser; end 4 | 5 | describe "Backburner::AsyncProxy class" do 6 | before do 7 | Backburner.default_queues.clear 8 | clear_jobs!(Backburner.configuration.primary_queue) 9 | end 10 | 11 | describe "for method_missing enqueue" do 12 | should "enqueue job onto worker with no args" do 13 | @async = Backburner::AsyncProxy.new(AsyncUser, 10, :pri => 1000, :ttr => 100) 14 | @async.foo 15 | pop_one_job do |job, body| 16 | assert_equal "AsyncUser", body["class"] 17 | assert_equal [10, "foo"], body["args"] 18 | assert_equal 100, job.ttr 19 | assert_equal 1000, job.pri 20 | job.delete 21 | end 22 | end 23 | 24 | should "enqueue job onto worker with args" do 25 | @async = Backburner::AsyncProxy.new(AsyncUser, 10, :pri => 1000, :ttr => 100) 26 | @async.bar(1, 2, 3) 27 | pop_one_job do |job, body| 28 | assert_equal "AsyncUser", body["class"] 29 | assert_equal [10, "bar", 1, 2, 3], body["args"] 30 | assert_equal 100, job.ttr 31 | assert_equal 1000, job.pri 32 | job.delete 33 | end 34 | end 35 | end # method_missing 36 | end # AsyncProxy 37 | -------------------------------------------------------------------------------- /test/back_burner_test.rb: -------------------------------------------------------------------------------- 1 | require File.expand_path('../test_helper', __FILE__) 2 | 3 | $backburner_sum = 0 4 | $backburner_numbers = [] 5 | 6 | class TestBackburnerJob 7 | include 
Backburner::Queue 8 | queue "test.jobber" 9 | 10 | def self.perform(value, number) 11 | $backburner_sum += value 12 | $backburner_numbers << number 13 | end 14 | end 15 | 16 | class TestWorker < Backburner::Worker; end 17 | 18 | describe "Backburner module" do 19 | before { Backburner.default_queues.clear } 20 | 21 | describe "for enqueue method" do 22 | before do 23 | Backburner.enqueue TestBackburnerJob, 5, 6 24 | Backburner.enqueue TestBackburnerJob, 15, 10 25 | silenced(2) do 26 | worker = Backburner::Workers::Simple.new('test.jobber') 27 | worker.prepare 28 | 2.times { worker.work_one_job } 29 | end 30 | end 31 | 32 | it "can run jobs using #run method" do 33 | assert_equal 20, $backburner_sum 34 | assert_same_elements [6, 10], $backburner_numbers 35 | end 36 | end # enqueue 37 | 38 | describe "for work method" do 39 | it "invokes worker simple start" do 40 | Backburner::Workers::Simple.expects(:start).with(["foo", "bar"]) 41 | Backburner.work("foo", "bar") 42 | end 43 | 44 | it "invokes other worker if specified in configuration" do 45 | Backburner.configure { |config| config.default_worker = TestWorker } 46 | TestWorker.expects(:start).with(["foo", "bar"]) 47 | Backburner.work("foo", "bar") 48 | end 49 | 50 | it "invokes other worker if specified in work method as options" do 51 | TestWorker.expects(:start).with(["foo", "bar"]) 52 | Backburner.work("foo", "bar", :worker => TestWorker) 53 | end 54 | 55 | it "invokes worker start with no args" do 56 | Backburner::Workers::Simple.expects(:start).with([]) 57 | Backburner.work 58 | end 59 | end # work! 
60 | 61 | describe "for configuration" do 62 | it "remembers the tube_namespace" do 63 | assert_equal "demo.test", Backburner.configuration.tube_namespace 64 | end 65 | 66 | it "remembers the namespace_separator" do 67 | assert_equal ".", Backburner.configuration.namespace_separator 68 | end 69 | 70 | it "disallows a reserved separator" do 71 | assert_raises RuntimeError do 72 | Backburner.configuration.namespace_separator = ':' 73 | end 74 | end 75 | end # configuration 76 | 77 | describe "for default_queues" do 78 | it "supports assignment" do 79 | Backburner.default_queues << "foo" 80 | Backburner.default_queues << "bar" 81 | assert_same_elements ["foo", "bar"], Backburner.default_queues 82 | end 83 | end 84 | 85 | after do 86 | Backburner.configure { |config| config.default_worker = Backburner::Workers::Simple } 87 | end 88 | end # Backburner -------------------------------------------------------------------------------- /test/connection_test.rb: -------------------------------------------------------------------------------- 1 | require File.expand_path('../test_helper', __FILE__) 2 | 3 | describe "Backburner::Connection class" do 4 | describe "for initialize with single url" do 5 | before do 6 | @connection = Backburner::Connection.new("beanstalk://127.0.0.1") 7 | end 8 | 9 | it "should store url in accessor" do 10 | assert_equal "beanstalk://127.0.0.1", @connection.url 11 | end 12 | 13 | it "should setup beanstalk connection" do 14 | assert_kind_of Beaneater, @connection.beanstalk 15 | end 16 | end # initialize single connection 17 | 18 | describe "for initialize with url" do 19 | it "should delegate the address url correctly" do 20 | @connection = Backburner::Connection.new("beanstalk://127.0.0.1") 21 | connection = @connection.beanstalk.connection 22 | assert_equal '127.0.0.1:11300', connection.address 23 | end 24 | end # initialize 25 | 26 | describe "for bad uri" do 27 | it "should raise a BadUrl" do 28 | assert_raises(Backburner::Connection::BadURL) { 
29 | @connection = Backburner::Connection.new("fake://foo") 30 | } 31 | end 32 | end 33 | 34 | describe "for initialize with on_reconnect block" do 35 | it "should store the block for use upon reconnect" do 36 | callback = proc {} 37 | connection = Backburner::Connection.new('beanstalk://127.0.0.1', &callback) 38 | assert_equal callback, connection.on_reconnect 39 | end 40 | end 41 | 42 | describe "dealing with connecting and reconnecting" do 43 | before do 44 | @connection = Backburner::Connection.new('beanstalk://127.0.0.1') 45 | end 46 | 47 | it "should know if its connection is open" do 48 | assert_equal true, @connection.connected? 49 | @connection.close 50 | assert_equal false, @connection.connected? 51 | end 52 | 53 | it "should be able to attempt reconnecting to beanstalk" do 54 | @connection.close 55 | assert_equal false, @connection.connected? 56 | @connection.reconnect! 57 | assert_equal true, @connection.connected? 58 | end 59 | 60 | it "should allow for retryable commands" do 61 | @result = false 62 | @connection.close 63 | @connection.retryable { @result = true } 64 | assert_equal true, @result 65 | end 66 | 67 | it "should provide a hook when a retryable command successfully retries" do 68 | @result = false 69 | @retried = false 70 | @connection.close 71 | callback = proc { @result = true } 72 | @connection.retryable(:on_retry => callback) do 73 | unless @retried 74 | @retried = true 75 | raise Beaneater::NotConnected.new 76 | end 77 | end 78 | assert_equal true, @result 79 | end 80 | 81 | it "should provide a hook when the connection successfully reconnects" do 82 | reconnected = false 83 | retried = false 84 | @connection.close 85 | @connection.on_reconnect = proc { reconnected = true } 86 | @connection.retryable do 87 | unless retried 88 | retried = true 89 | raise Beaneater::NotConnected.new 90 | end 91 | end 92 | assert_equal true, reconnected 93 | end 94 | 95 | it "should call the on_reconnect hook before the on_retry hook" do 96 | @result = [] 
97 | @retried = false 98 | @connection.close 99 | @connection.on_reconnect = proc { @result << "reconnect" } 100 | on_retry = proc { @result << "retry" } 101 | @connection.retryable(:on_retry => on_retry) do 102 | unless @retried 103 | @retried = true 104 | raise Beaneater::NotConnected.new 105 | end 106 | end 107 | assert_equal %w(reconnect retry), @result 108 | end 109 | 110 | describe "ensuring the connection is open" do 111 | it "should reattempt the connection to beanstalk several times" do 112 | stats = @connection.stats 113 | simulate_disconnect(@connection) 114 | new_connection = Beaneater.new('127.0.0.1:11300') 115 | Beaneater.expects(:new).twice.raises(Beaneater::NotConnected).then.returns(new_connection) 116 | @connection.tubes 117 | assert_equal true, @connection.connected? 118 | end 119 | 120 | it "should not attempt reconnecting if the current connection is open" do 121 | assert_equal true, @connection.connected? 122 | Beaneater.expects(:new).never 123 | @connection.tubes 124 | end 125 | 126 | describe "when reconnecting is successful" do 127 | it "should allow for a callback" do 128 | @result = false 129 | simulate_disconnect(@connection) 130 | @connection.on_reconnect = proc { @result = true } 131 | @connection.tubes 132 | assert_equal true, @result 133 | end 134 | 135 | it "should pass self to the callback" do 136 | result = nil 137 | simulate_disconnect(@connection) 138 | @connection.on_reconnect = lambda { |conn| result = conn } 139 | @connection.tubes 140 | assert_equal result, @connection 141 | end 142 | end 143 | end 144 | 145 | describe "when unable to ensure its connected" do 146 | it "should raise Beaneater::NotConnected" do 147 | Beaneater.stubs(:new).raises(Beaneater::NotConnected) 148 | simulate_disconnect(@connection, 1) # since we're stubbing Beaneater.new above we only to simlulate the disconnect of our current connection 149 | assert_raises Beaneater::NotConnected do 150 | @connection.tubes 151 | end 152 | end 153 | end 154 | 155 | 
# Fixture wiring ActiveJob to the Backburner queue adapter for the
# adapter tests (test/active_job_adapter_test.rb).
require 'active_job'
require 'active_job/queue_adapters/backburner_adapter'

ActiveJob::Base.queue_adapter = :backburner
ActiveJob::Base.logger = nil # keep test output quiet

# Minimal ActiveJob subclass enqueued by the adapter tests.
class TestJob < ActiveJob::Base
  # Accepts a single argument and always succeeds.
  def perform(arg)
    true
  end
end
# Fixture: before_perform hooks where one hook returns false, which should
# cancel the perform entirely. `foo` raises so a test fails loudly if the
# job body runs despite the cancelled hook.
class HookedObjectBeforePerformFail
  include Backburner::Performable

  def self.before_perform_abe(*args)
    puts "!!before_perform_foo!! #{args.inspect}"
  end

  def self.before_perform_foo(*args)
    false
  end

  def self.foo(x)
    puts "Fail ran!!"
    raise HookFailError, "HookedObjectJobFailure on foo!"
  end
end

# Fixture: after_perform hooks where one hook raises after the job runs.
class HookedObjectAfterPerformFail
  def self.after_perform_abe(*args)
    puts "!!after_perform_foo!! #{args.inspect}"
  end

  def self.after_perform_bar(*args)
    raise HookFailError, "Fail HookedObjectAfterEnqueueFail"
  end
end

# Fixture: a job whose body always raises.
class HookedObjectJobFailure
  def self.foo(x)
    raise HookFailError, "HookedObjectJobFailure on foo!"
  end
end
# Fixture jobs for the ThreadsOnFork worker tests. Since these jobs run in
# forked child processes, they report results back through ResponseJob on
# the 'response' queue, which mutates the globals the assertions read.
class ResponseJob
  include Backburner::Queue
  queue_priority 1000

  # Applies whichever directives are present in the payload to the globals.
  def self.perform(data)
    $worker_test_count += data['worker_test_count'].to_i if data['worker_test_count']
    $worker_success = data['worker_success'] if data['worker_success']
    $worker_test_count = data['worker_test_count_set'].to_i if data['worker_test_count_set']
    $worker_raise = data['worker_raise'] if data['worker_raise']
  end
end

class TestJobFork
  include Backburner::Queue
  queue "test-job-fork"
  queue_priority 1000

  # Reports x + y via the response queue.
  def self.perform(x, y)
    Backburner::Workers::ThreadsOnFork.enqueue ResponseJob, [{ worker_test_count_set: x + y }], queue: 'response'
  end
end

class TestFailJobFork
  include Backburner::Queue
  queue "test-fail-job-fork"

  # Signals the test harness (via $worker_raise) that this job ran.
  def self.perform(x, y)
    Backburner::Workers::ThreadsOnFork.enqueue ResponseJob, [{ worker_raise: true }], queue: 'response'
  end
end

class TestRetryJobFork
  include Backburner::Queue

  # Fails on the first two attempts, then succeeds; bumps the observed
  # count on every attempt so the retry machinery can be verified.
  def self.perform(x, y)
    $worker_test_count += 1

    if $worker_test_count <= 2
      Backburner::Workers::ThreadsOnFork.enqueue ResponseJob, [{ worker_test_count: 1 }], queue: 'response'
      raise RuntimeError
    else
      Backburner::Workers::ThreadsOnFork.enqueue ResponseJob, [{ worker_test_count: 1, worker_success: true }], queue: 'response'
    end
  end
end

class TestAsyncJobFork
  include Backburner::Performable

  # Reports x * y via the response queue.
  def self.foo(x, y)
    Backburner::Workers::ThreadsOnFork.enqueue ResponseJob, [{ worker_test_count_set: x * y }], queue: 'response'
  end
end

class TestJobMultithreadFork
  include Backburner::Queue
  queue "test-job-multithread-fork"
  queue_priority 1000

  # Sleeps to simulate work so several threads overlap, then reports x + y.
  def self.perform(x, y)
    sleep 1 # simulate work
    Backburner::Workers::ThreadsOnFork.enqueue ResponseJob, [{ worker_test_count_set: x + y }], queue: 'response'
  end
end
# Globals the worker tests inspect to verify that fixture jobs actually ran.
$worker_test_count = 0
$worker_success = false

# A bare job class (no Backburner::Queue mixin): the worker resolves its
# tube from the explicit .queue class method instead.
class TestPlainJob
  def self.queue
    "test-plain"
  end

  def self.perform(x, y)
    $worker_test_count += x + y + 1
  end
end
# Fixture: tube with inline numeric settings encoded in its name
# ("job-settings:5:10:6" — presumably limits parsed by the ThreadsOnFork
# tube-name handling; confirm against the worker tests that use it).
class TestJobSettings
  include Backburner::Queue
  queue "job-settings:5:10:6"

  def self.perform; end
end

# Fixture: explicit queue_* declarations that override the inline
# settings embedded in the tube name.
class TestJobSettingsOverride
  include Backburner::Queue
  queue "job-settings-override:5:10:12"
  queue_jobs_limit 10
  queue_garbage_limit 1000
  queue_retry_limit 2

  def self.perform; end
end
# Logs to a Tempfile so tests can poll a worker's log output on disk.
class Templogger
  attr_reader :logger, :log_path

  # root_path - directory in which to create the temporary log file.
  def initialize(root_path)
    @file = Tempfile.new('foo', root_path)
    @log_path = @file.path
    @logger = Logger.new(@log_path)
  end

  # Polls the log file until its contents match +match_pattern+.
  # e.g. wait_for_match /Completed TestJobFork/m
  #
  # match_pattern - Regexp to wait for in the log body.
  # timeout:      - optional number of seconds after which a RuntimeError is
  #                 raised instead of spinning forever; the default (nil)
  #                 preserves the original wait-indefinitely behavior.
  def wait_for_match(match_pattern, timeout: nil)
    deadline = timeout && Time.now + timeout
    until body =~ match_pattern
      if deadline && Time.now > deadline
        raise "Timed out after #{timeout}s waiting for #{match_pattern.inspect} in #{@log_path}"
      end
      sleep 0.1
    end
  end

  # Entire contents of the log written so far.
  def body
    File.read(@log_path)
  end

  # Closes the underlying tempfile (the file itself is removed when the
  # Tempfile object is finalized).
  def close
    @file.close
  end
end
out message about failure" do 42 | output = exception_message(RuntimeError.new("test")) 43 | assert_match(/Exception RuntimeError/, output) 44 | end 45 | end # exception_message 46 | 47 | describe "for queue_config" do 48 | before { Backburner.expects(:configuration).returns(stub(:tube_namespace => "test.foo.job", :namespace_separator => '.')) } 49 | 50 | it "accesses correct value for namespace" do 51 | assert_equal "test.foo.job", queue_config.tube_namespace 52 | end 53 | end # config 54 | 55 | describe "for expand_tube_name method" do 56 | before { Backburner.stubs(:configuration).returns(stub(:tube_namespace => "test.foo.job.", :namespace_separator => '.', :primary_queue => "backburner-jobs")) } 57 | 58 | it "supports base strings" do 59 | assert_equal "test.foo.job.email/send-news", expand_tube_name("email/send_news") 60 | end # simple string 61 | 62 | it "supports qualified strings" do 63 | assert_equal "test.foo.job.email/send-news", expand_tube_name("test.foo.job.email/send_news") 64 | end # qualified string 65 | 66 | it "supports base symbols" do 67 | assert_equal "test.foo.job.email/send-news", expand_tube_name(:"email/send_news") 68 | end # symbols 69 | 70 | it "supports queue names" do 71 | test = stub(:queue => "email/send_news") 72 | assert_equal "test.foo.job.email/send-news", expand_tube_name(test) 73 | end # queue names 74 | 75 | it "supports class names" do 76 | assert_equal "test.foo.job.backburner-jobs", expand_tube_name(RuntimeError) 77 | end # class names 78 | 79 | it "supports lambda in queue object" do 80 | test = stub(:queue => lambda { |job_class| "email/send_news" }) 81 | assert_equal "test.foo.job.email/send-news", expand_tube_name(test) 82 | end # lambdas in queue object 83 | 84 | it "supports lambdas" do 85 | test = lambda { "email/send_news" } 86 | assert_equal "test.foo.job.email/send-news", expand_tube_name(test) 87 | end #lambdas 88 | end # expand_tube_name 89 | 90 | describe "for alternative namespace separator" do 91 | before { 
Backburner.stubs(:configuration).returns(stub(:tube_namespace => "test", :namespace_separator => '-', :primary_queue => "backburner-jobs")) } 92 | 93 | it "uses alternative namespace separator" do 94 | assert_equal "test-queue-name", expand_tube_name("queue_name") 95 | end # simple string 96 | end 97 | 98 | describe "for resolve_priority method" do 99 | before do 100 | @original_queue_priority = Backburner.configuration.default_priority 101 | Backburner.configure { |config| config.default_priority = 1000 } 102 | end 103 | after do 104 | Backburner.configure { |config| config.default_priority = @original_queue_priority } 105 | Backburner.configure { |config| config.priority_labels = Backburner::Configuration::PRIORITY_LABELS } 106 | end 107 | 108 | it "supports fix num priority" do 109 | assert_equal 500, resolve_priority(500) 110 | end 111 | 112 | it "supports baked in priority alias" do 113 | assert_equal 200, resolve_priority(:low) 114 | assert_equal 0, resolve_priority(:high) 115 | end 116 | 117 | it "supports custom priority alias" do 118 | Backburner.configure { |config| config.priority_labels = { :foo => 5 } } 119 | assert_equal 5, resolve_priority(:foo) 120 | end 121 | 122 | it "supports aliased priority alias" do 123 | Backburner.configure { |config| config.priority_labels = { :foo => 5, :bar => 'foo' } } 124 | assert_equal 5, resolve_priority(:bar) 125 | end 126 | 127 | it "supports classes which respond to queue_priority" do 128 | job = stub(:queue_priority => 600) 129 | assert_equal 600, resolve_priority(job) 130 | end 131 | 132 | it "supports classes which respond to queue_priority with named alias" do 133 | job = stub(:queue_priority => :low) 134 | assert_equal 200, resolve_priority(job) 135 | end 136 | 137 | it "supports classes which returns null queue_priority" do 138 | job = stub(:queue_priority => nil) 139 | assert_equal 1000, resolve_priority(job) 140 | end 141 | 142 | it "supports classes which don't respond to queue_priority" do 143 | job = 
stub(:fake => true) 144 | assert_equal 1000, resolve_priority(job) 145 | end 146 | 147 | it "supports default pri for null values" do 148 | assert_equal 1000, resolve_priority(nil) 149 | end 150 | end # resolve_priority 151 | 152 | describe "for resolve_respond_timeout method" do 153 | before do 154 | @original_respond_timeout = Backburner.configuration.respond_timeout 155 | Backburner.configure { |config| config.respond_timeout = 300 } 156 | end 157 | after { Backburner.configure { |config| config.respond_timeout = @original_respond_timeout } } 158 | 159 | it "supports fix num respond_timeout" do 160 | assert_equal 500, resolve_respond_timeout(500) 161 | end 162 | 163 | it "supports classes which respond to queue_respond_timeout" do 164 | job = stub(:queue_respond_timeout => 600) 165 | assert_equal 600, resolve_respond_timeout(job) 166 | end 167 | 168 | it "supports classes which returns null queue_respond_timeout" do 169 | job = stub(:queue_respond_timeout => nil) 170 | assert_equal 300, resolve_respond_timeout(job) 171 | end 172 | 173 | it "supports classes which don't respond to queue_respond_timeout" do 174 | job = stub(:fake => true) 175 | assert_equal 300, resolve_respond_timeout(job) 176 | end 177 | 178 | it "supports default ttr for null values" do 179 | assert_equal 300, resolve_respond_timeout(nil) 180 | end 181 | end # resolve_respond_timeout 182 | 183 | describe "for resolve_max_job_retries method" do 184 | before do 185 | @original_max_job_retries = Backburner.configuration.max_job_retries 186 | Backburner.configure { |config| config.max_job_retries = 300 } 187 | end 188 | after { Backburner.configure { |config| config.max_job_retries = @original_max_job_retries } } 189 | 190 | it "supports fix num max_job_retries" do 191 | assert_equal 500, resolve_max_job_retries(500) 192 | end 193 | 194 | it "supports classes which respond to queue_max_job_retries" do 195 | job = stub(:queue_max_job_retries => 600) 196 | assert_equal 600, 
resolve_max_job_retries(job) 197 | end 198 | 199 | it "supports classes which return null queue_max_job_retries" do 200 | job = stub(:queue_max_job_retries => nil) 201 | assert_equal 300, resolve_max_job_retries(job) 202 | end 203 | 204 | it "supports classes which don't respond to queue_max_job_retries" do 205 | job = stub(:fake => true) 206 | assert_equal 300, resolve_max_job_retries(job) 207 | end 208 | 209 | it "supports default max_job_retries for null values" do 210 | assert_equal 300, resolve_max_job_retries(nil) 211 | end 212 | end # resolve_max_job_retries 213 | 214 | describe "for resolve_retry_delay method" do 215 | before do 216 | @original_retry_delay = Backburner.configuration.retry_delay 217 | Backburner.configure { |config| config.retry_delay = 300 } 218 | end 219 | after { Backburner.configure { |config| config.retry_delay = @original_retry_delay } } 220 | 221 | it "supports fix num retry_delay" do 222 | assert_equal 500, resolve_retry_delay(500) 223 | end 224 | 225 | it "supports classes which respond to queue_retry_delay" do 226 | job = stub(:queue_retry_delay => 600) 227 | assert_equal 600, resolve_retry_delay(job) 228 | end 229 | 230 | it "supports classes which return null queue_retry_delay" do 231 | job = stub(:queue_retry_delay => nil) 232 | assert_equal 300, resolve_retry_delay(job) 233 | end 234 | 235 | it "supports classes which don't respond to queue_retry_delay" do 236 | job = stub(:fake => true) 237 | assert_equal 300, resolve_retry_delay(job) 238 | end 239 | 240 | it "supports default retry_delay for null values" do 241 | assert_equal 300, resolve_retry_delay(nil) 242 | end 243 | end # resolve_retry_delay 244 | 245 | describe "for resolve_retry_delay_proc method" do 246 | before do 247 | @config_retry_delay_proc = lambda { |x, y| x + y } # Default config proc adds two values 248 | @override_delay_proc = lambda { |x, y| x - y } # Overriden proc subtracts values 249 | @original_retry_delay_proc = 
Backburner.configuration.retry_delay_proc 250 | Backburner.configure { |config| config.retry_delay_proc = @config_retry_delay_proc } 251 | end 252 | after { Backburner.configure { |config| config.retry_delay_proc = @original_retry_delay_proc } } 253 | 254 | # Rather than compare Procs execute them and compare the output 255 | it "supports proc retry_delay_proc" do 256 | assert_equal @override_delay_proc.call(2, 1), resolve_retry_delay_proc(@override_delay_proc).call(2, 1) 257 | end 258 | 259 | it "supports classes which respond to queue_retry_delay_proc" do 260 | job = stub(:queue_retry_delay_proc => @override_delay_proc) 261 | assert_equal @override_delay_proc.call(2, 1), resolve_retry_delay_proc(job).call(2, 1) 262 | end 263 | 264 | it "supports classes which return null queue_retry_delay_proc" do 265 | job = stub(:queue_retry_delay_proc => nil) 266 | assert_equal @original_retry_delay_proc.call(2, 1), resolve_retry_delay_proc(job).call(2, 1) 267 | end 268 | 269 | it "supports classes which don't respond to queue_retry_delay_proc" do 270 | job = stub(:fake => true) 271 | assert_equal @original_retry_delay_proc.call(2, 1), resolve_retry_delay_proc(job).call(2, 1) 272 | end 273 | 274 | it "supports default retry_delay_proc for null values" do 275 | assert_equal @original_retry_delay_proc.call(2, 1), resolve_retry_delay_proc(nil).call(2, 1) 276 | end 277 | end # resolve_retry_delay_proc 278 | end 279 | -------------------------------------------------------------------------------- /test/hooks_test.rb: -------------------------------------------------------------------------------- 1 | require File.expand_path('../test_helper', __FILE__) 2 | require File.expand_path('../fixtures/hooked', __FILE__) 3 | 4 | describe "Backburner::Hooks module" do 5 | before do 6 | $hooked_fail_count = 0 7 | @hooks = Backburner::Hooks 8 | end 9 | 10 | describe "for invoke_hook_events method" do 11 | describe "with before_enqueue" do 12 | it "should support successful invocation" do 13 | 
out = silenced { @res = @hooks.invoke_hook_events(HookedObjectSuccess, :before_enqueue, 5, 6) } 14 | assert_equal [nil, nil], @res 15 | assert_match(/!!before_enqueue_foo!! \[5\, 6\]/, out) 16 | assert_match(/!!before_enqueue_bar!! \[5\, 6\]/, out) 17 | end 18 | 19 | it "should support fail case" do 20 | out = silenced { @res = @hooks.invoke_hook_events(HookedObjectBeforeEnqueueFail, :before_enqueue, 5, 6) } 21 | assert_equal false, @res 22 | assert_match(/!!before_enqueue_foo!! \[5\, 6\]/, out) 23 | end 24 | end # before_enqueue 25 | 26 | describe "with after_enqueue" do 27 | it "should support successful invocation" do 28 | out = silenced { @hooks.invoke_hook_events(HookedObjectSuccess, :after_enqueue, 7, 8) } 29 | assert_match(/!!after_enqueue_foo!! \[7\, 8\]/, out) 30 | assert_match(/!!after_enqueue_bar!! \[7\, 8\]/, out) 31 | end 32 | 33 | it "should support fail case" do 34 | assert_raises(HookFailError) do 35 | silenced { @res = @hooks.invoke_hook_events(HookedObjectAfterEnqueueFail, :after_enqueue, 5, 6) } 36 | end 37 | end 38 | end # after_enqueue 39 | 40 | describe "with before_perform" do 41 | it "should support successful invocation" do 42 | out = silenced { @hooks.invoke_hook_events(HookedObjectSuccess, :before_perform, 1, 2) } 43 | assert_match(/!!before_perform_foo!! \[1\, 2\]/, out) 44 | end 45 | 46 | it "should support fail case" do 47 | out = silenced { @res = @hooks.invoke_hook_events(HookedObjectBeforePerformFail, :before_perform, 5, 6) } 48 | assert_equal false, @res 49 | assert_match(/!!before_perform_foo!! \[5\, 6\]/, out) 50 | end 51 | end # before_perform 52 | 53 | describe "with after_perform" do 54 | it "should support successful invocation" do 55 | out = silenced { @hooks.invoke_hook_events(HookedObjectSuccess, :after_perform, 3, 4) } 56 | assert_match(/!!after_perform_foo!! 
\[3\, 4\]/, out) 57 | end 58 | 59 | it "should support fail case" do 60 | assert_raises(HookFailError) do 61 | silenced { @res = @hooks.invoke_hook_events(HookedObjectAfterPerformFail, :after_perform, 5, 6) } 62 | end 63 | end 64 | end # after_perform 65 | 66 | describe "with on_failure" do 67 | it "should support successful invocation" do 68 | out = silenced { @hooks.invoke_hook_events(HookedObjectSuccess, :on_failure, RuntimeError, 10) } 69 | assert_match(/!!on_failure_foo!! RuntimeError \[10\]/, out) 70 | end 71 | end # on_failure 72 | 73 | describe 'with on_retry' do 74 | it "should support successful invocation" do 75 | out = silenced { @hooks.invoke_hook_events(HookedObjectSuccess, :on_retry, 1, 0, 10) } 76 | assert_match(/!!on_retry_foo!! 1 0 \[10\]/, out) 77 | end 78 | end 79 | 80 | describe 'with on_bury' do 81 | it "should support successful invocation" do 82 | out = silenced { @hooks.invoke_hook_events(HookedObjectSuccess, :on_bury, 10) } 83 | assert_match(/!!on_bury_foo!! \[10\]/, out) 84 | end 85 | end 86 | 87 | describe "with on_reconnect" do 88 | it "should support successful invocation" do 89 | out = silenced { @hooks.invoke_hook_events(HookedWorker.new, :on_reconnect)} 90 | assert_match(/!!on_reconnect!!/, out) 91 | end 92 | end 93 | end # invoke_hook_events 94 | 95 | describe "for around_hook_events method" do 96 | describe "with around_perform" do 97 | it "should support successful invocation" do 98 | out = silenced do 99 | @hooks.around_hook_events(HookedObjectSuccess, :around_perform, 7, 8) { 100 | puts "!!FIRED!!" 101 | } 102 | end 103 | assert_match(/BEGIN.*?bar.*BEGIN.*cat.*FIRED.*END.*cat.*END.*bar/m, out) 104 | assert_match(/!!BEGIN around_perform_bar!! \[7\, 8\]/, out) 105 | assert_match(/!!BEGIN around_perform_cat!! \[7\, 8\]/, out) 106 | assert_match(/!!FIRED!!/, out) 107 | assert_match(/!!END around_perform_cat!! \[7\, 8\]/, out) 108 | assert_match(/!!END around_perform_bar!! 
\[7\, 8\]/, out) 109 | end 110 | end # successful 111 | end # around_hook_events 112 | end # Hooks 113 | -------------------------------------------------------------------------------- /test/job_test.rb: -------------------------------------------------------------------------------- 1 | require File.expand_path('../test_helper', __FILE__) 2 | 3 | module NestedDemo 4 | class TestJobC 5 | def self.perform(x); puts "Performed #{x} in #{self}"; end 6 | end 7 | 8 | class TestJobD 9 | include Backburner::Queue 10 | def self.perform(x); raise RuntimeError; end 11 | end 12 | end 13 | 14 | describe "Backburner::Job module" do 15 | describe "for initialize" do 16 | describe "with hash" do 17 | before do 18 | @task = stub(:body => task_body, :ttr => 120, :delete => true, :bury => true) 19 | end 20 | 21 | describe "with string keys" do 22 | let(:task_body) { { "class" => "NewsletterSender", "args" => ["foo@bar.com", "bar@foo.com"] } } 23 | it "should create job with correct task data" do 24 | @job = Backburner::Job.new(@task) 25 | assert_equal @task, @job.task 26 | assert_equal ["class", "args"], @job.body.keys 27 | assert_equal task_body["class"], @job.name 28 | assert_equal task_body["args"], @job.args 29 | end 30 | end 31 | 32 | describe "with symbol keys" do 33 | let(:task_body) { { :class => "NewsletterSender", :args => ["foo@bar.com", "bar@foo.com"] } } 34 | it "should create job with correct task data" do 35 | @job = Backburner::Job.new(@task) 36 | assert_equal @task, @job.task 37 | assert_equal [:class, :args], @job.body.keys 38 | assert_equal task_body[:class], @job.name 39 | assert_equal task_body[:args], @job.args 40 | end 41 | end 42 | end # with hash 43 | 44 | describe "with json string" do 45 | before do 46 | @task_body = { "class" => "NewsletterSender", "args" => ["foo@bar.com", "bar@foo.com"] } 47 | @task = stub(:body => @task_body.to_json, :ttr => 120, :delete => true, :bury => true) 48 | end 49 | 50 | it "should create job with correct task data" do 51 | 
@job = Backburner::Job.new(@task) 52 | assert_equal @task, @job.task 53 | assert_equal ["class", "args"], @job.body.keys 54 | assert_equal @task_body["class"], @job.name 55 | assert_equal @task_body["args"], @job.args 56 | end 57 | end # with json 58 | 59 | describe "with invalid string" do 60 | before do 61 | @task_body = "^%$*&^*" 62 | @task = stub(:body => @task_body, :ttr => 120, :delete => true, :bury => true) 63 | end 64 | 65 | it "should raise a job format exception" do 66 | assert_raises(Backburner::Job::JobFormatInvalid) { 67 | @job = Backburner::Job.new(@task) 68 | } 69 | end 70 | end # invalid 71 | end # initialize 72 | 73 | describe "for process method" do 74 | describe "with valid task" do 75 | before do 76 | @task_body = { "class" => "NestedDemo::TestJobC", "args" => [56] } 77 | @task = stub(:body => @task_body, :ttr => 120, :delete => true, :bury => true) 78 | @task.expects(:delete).once 79 | end 80 | 81 | it "should process task" do 82 | @job = Backburner::Job.new(@task) 83 | out = silenced(1) { @job.process } 84 | assert_match(/Performed 56 in NestedDemo::TestJobC/, out) 85 | end # process 86 | end # valid 87 | 88 | describe "with invalid task" do 89 | before do 90 | @task_body = { "class" => "NestedDemo::TestJobD", "args" => [56] } 91 | @task = stub(:body => @task_body, :ttr => 120, :delete => true, :bury => true) 92 | @task.expects(:delete).never 93 | end 94 | 95 | it "should raise an exception" do 96 | @job = Backburner::Job.new(@task) 97 | assert_raises(RuntimeError) { @job.process } 98 | end # error invalid 99 | end # invalid 100 | 101 | describe "with invalid class" do 102 | before do 103 | @task_body = { "class" => "NestedDemo::TestJobY", "args" => [56] } 104 | @task = stub(:body => @task_body, :ttr => 120, :delete => true, :bury => true) 105 | @task.expects(:delete).never 106 | end 107 | 108 | it "should raise an exception" do 109 | @job = Backburner::Job.new(@task) 110 | assert_raises(Backburner::Job::JobNotFound) { @job.process } 111 | 
end # error class 112 | end # invalid 113 | end # process 114 | 115 | describe "for simple delegation method" do 116 | describe "with valid class" do 117 | before do 118 | @task_body = { "class" => "NestedDemo::TestJobC", "args" => [56] } 119 | @task = stub(:body => @task_body, :ttr => 120, :delete => true, :bury => true) 120 | @task.expects(:bury).once 121 | end 122 | 123 | it "should call bury for task" do 124 | @job = Backburner::Job.new(@task) 125 | @job.bury 126 | end # bury 127 | end 128 | 129 | describe "with invalid class" do 130 | before do 131 | @task_body = { "class" => "AnUnknownClass", "args" => [] } 132 | @task = stub(:body => @task_body, :ttr => 120, :delete => true, :bury => true, :release => true) 133 | end 134 | 135 | it "should call bury for task" do 136 | @task.expects(:bury).once 137 | @job = Backburner::Job.new(@task) 138 | Backburner::Hooks.expects(:invoke_hook_events) 139 | .with("AnUnknownClass", :on_bury, anything) 140 | @job.bury 141 | end 142 | 143 | it "should call retry for task" do 144 | @task.expects(:release).once 145 | @job = Backburner::Job.new(@task) 146 | Backburner::Hooks.expects(:invoke_hook_events) 147 | .with("AnUnknownClass", :on_retry, 0, is_a(Integer), anything) 148 | @job.retry(0, 0) 149 | end 150 | end 151 | end # simple delegation 152 | 153 | describe "timing out for various values of ttr" do 154 | before do 155 | @task_body = { "class" => "NestedDemo::TestJobC", "args" => [56] } 156 | end 157 | 158 | describe "when ttr == 0" do 159 | it "should use 0 for the timeout" do 160 | @task = stub(:body => @task_body, :delete => true, :ttr => 0) 161 | @job = Backburner::Job.new(@task) 162 | Timeout.expects(:timeout).with(0) 163 | @job.process 164 | end 165 | end 166 | 167 | describe "when ttr == 1" do 168 | it "should use 1 for the timeout" do 169 | @task = stub(:body => @task_body, :delete => true, :ttr => 1) 170 | @job = Backburner::Job.new(@task) 171 | Timeout.expects(:timeout).with(1) 172 | @job.process 173 | end 174 | end 
175 | 176 | describe "when ttr > 1" do 177 | it "should use ttr-1 for the timeout" do 178 | @task = stub(:body => @task_body, :delete => true, :ttr => 2) 179 | @job = Backburner::Job.new(@task) 180 | Timeout.expects(:timeout).with(1) 181 | @job.process 182 | end 183 | end 184 | end 185 | end 186 | -------------------------------------------------------------------------------- /test/logger_test.rb: -------------------------------------------------------------------------------- 1 | require File.expand_path('../test_helper', __FILE__) 2 | 3 | describe "Backburner::Logger module" do 4 | include Backburner::Logger 5 | 6 | before do 7 | @strio = StringIO.new 8 | @logger = Logger.new(@strio) 9 | end 10 | 11 | describe "for log_info method" do 12 | it "prints out to std out" do 13 | output = capture_stdout { log_info("foo") } 14 | assert_equal "foo\n", output 15 | end 16 | 17 | it "can be configured to log to logger" do 18 | Backburner.configure { |config| config.logger = @logger } 19 | log_info("foo") 20 | assert_match(/I,.*?foo/, @strio.string) 21 | end 22 | 23 | after do 24 | Backburner.configure { |config| config.logger = nil } 25 | end 26 | end # log_info 27 | 28 | describe "for log_error method" do 29 | it "prints out to std err" do 30 | output = capture_stdout { log_error("bar") } 31 | assert_equal "bar\n", output 32 | end 33 | 34 | it "can be configured to log to logger" do 35 | Backburner.configure { |config| config.logger = @logger } 36 | log_error("bar") 37 | assert_match(/E,.*?bar/, @strio.string) 38 | end 39 | 40 | after do 41 | Backburner.configure { |config| config.logger = nil } 42 | end 43 | end # log_error 44 | end 45 | -------------------------------------------------------------------------------- /test/performable_test.rb: -------------------------------------------------------------------------------- 1 | require File.expand_path('../test_helper', __FILE__) 2 | 3 | class TestObj 4 | ID = 56 5 | def id; ID; end 6 | def self.find(id); TestObj.new if 
id == ID; end 7 | def foo(state, state2); "bar #{state} #{state2}"; end 8 | def self.bar(state, state2); "baz #{state} #{state2}"; end 9 | end 10 | 11 | class PerformableTestObj < TestObj 12 | include Backburner::Performable 13 | end 14 | 15 | class AutomagicTestObj < TestObj 16 | # Don't include Backburner::Performable because it should be automagically included 17 | def qux(state, state2); "garply #{state} #{state2}" end 18 | def self.garply(state, state2); "thud #{state} #{state2}" end 19 | def qux?; "garply!" end 20 | end 21 | 22 | class AsyncInstanceMethodsTestObj < PerformableTestObj; end 23 | class AsyncStaticMethodsTestObj < PerformableTestObj; end 24 | 25 | 26 | 27 | describe "Backburner::Performable module" do 28 | after { ENV["TEST"] = nil } 29 | 30 | describe "for async instance method" do 31 | it "should invoke worker enqueue" do 32 | Backburner::Worker.expects(:enqueue).with(PerformableTestObj, [56, :foo, true, false], has_entries(:pri => 5000, :queue => "foo")) 33 | PerformableTestObj.new.async(:pri => 5000, :queue => "foo").foo(true, false) 34 | end 35 | end # async instance 36 | 37 | describe "for async class method" do 38 | it "should invoke worker enqueue" do 39 | Backburner::Worker.expects(:enqueue).with(PerformableTestObj, [nil, :bar, true, false], has_entries(:pri => 5000, :queue => "foo")) 40 | PerformableTestObj.async(:pri => 5000, :queue => "foo").bar(true, false) 41 | end 42 | end # async class 43 | 44 | describe "for perform class method" do 45 | it "should work for instance" do 46 | assert_equal "bar true false", PerformableTestObj.perform(PerformableTestObj::ID, :foo, true, false) 47 | end # instance 48 | 49 | it "should work for class level" do 50 | assert_equal "baz false true", PerformableTestObj.perform(nil, :bar, false, true) 51 | end # class 52 | end # perform 53 | 54 | describe "for handle_asynchronously class method" do 55 | it "should automagically asynchronously proxy calls to the method" do 56 | 
Backburner::Performable.handle_asynchronously(AutomagicTestObj, :qux, :pri => 5000, :queue => "qux") 57 | 58 | Backburner::Worker.expects(:enqueue).with(AutomagicTestObj, [56, :qux_without_async, true, false], has_entries(:pri => 5000, :queue => "qux")) 59 | AutomagicTestObj.new.qux(true, false) 60 | end 61 | 62 | it "should work for class methods, too" do 63 | Backburner::Performable.handle_static_asynchronously(AutomagicTestObj, :garply, :pri => 5000, :queue => "garply") 64 | 65 | Backburner::Worker.expects(:enqueue).with(AutomagicTestObj, [nil, :garply_without_async, true, false], has_entries(:pri => 5000, :queue => "garply")) 66 | AutomagicTestObj.garply(true, false) 67 | end 68 | 69 | it "should correctly handle punctuation" do 70 | Backburner::Performable.handle_asynchronously(AutomagicTestObj, :qux?) 71 | 72 | Backburner::Worker.expects(:enqueue).with(AutomagicTestObj, [56, :qux_without_async?], {}) 73 | AutomagicTestObj.new.qux? 74 | end 75 | 76 | it "should be available for instance methods on any class that includes the Performable module" do 77 | AsyncInstanceMethodsTestObj.handle_asynchronously :foo, pri: 5000, queue: 'qux' 78 | Backburner::Worker.expects(:enqueue).with(AsyncInstanceMethodsTestObj, [56, :foo_without_async, true, false], has_entries(:pri => 5000, :queue => "qux")) 79 | AsyncInstanceMethodsTestObj.new.foo(true, false) 80 | end 81 | 82 | it "should be available for class methods on any class that includes the Performable module" do 83 | AsyncStaticMethodsTestObj.handle_static_asynchronously :bar, pri: 5000, queue: 'garply' 84 | Backburner::Worker.expects(:enqueue).with(AsyncStaticMethodsTestObj, [nil, :bar_without_async, true, false], has_entries(:pri => 5000, :queue => "garply")) 85 | AsyncStaticMethodsTestObj.bar(true, false) 86 | end 87 | end 88 | end 89 | -------------------------------------------------------------------------------- /test/queue_test.rb: -------------------------------------------------------------------------------- 
1 | require File.expand_path('../test_helper', __FILE__) 2 | 3 | module NestedDemo 4 | class TestJobA; include Backburner::Queue; end 5 | class TestJobB; include Backburner::Queue; end 6 | end 7 | 8 | describe "Backburner::Queue module" do 9 | describe "contains known_queue_classes" do 10 | it "has all defined known queues" do 11 | assert_contains Backburner::Worker.known_queue_classes, NestedDemo::TestJobA 12 | assert_contains Backburner::Worker.known_queue_classes, NestedDemo::TestJobB 13 | end 14 | end 15 | 16 | describe "for queue method accessor" do 17 | it "should return the queue name" do 18 | assert_equal Backburner.configuration.primary_queue, NestedDemo::TestJobA.queue 19 | end 20 | end # queue_name 21 | 22 | describe "for queue assignment method" do 23 | it "should allow queue name to be assigned" do 24 | NestedDemo::TestJobB.queue("nested/job") 25 | assert_equal "nested/job", NestedDemo::TestJobB.queue 26 | end 27 | 28 | it "should allow lambdas" do 29 | NestedDemo::TestJobB.queue(lambda { |klass| klass.name }) 30 | assert_equal "NestedDemo::TestJobB", NestedDemo::TestJobB.queue 31 | end 32 | end # queue 33 | 34 | describe "for queue_priority assignment method" do 35 | it "should allow queue priority to be assigned" do 36 | NestedDemo::TestJobB.queue_priority(1000) 37 | assert_equal 1000, NestedDemo::TestJobB.queue_priority 38 | end 39 | end # queue_priority 40 | 41 | describe "for queue_respond_timeout assignment method" do 42 | it "should allow queue respond_timeout to be assigned" do 43 | NestedDemo::TestJobB.queue_respond_timeout(300) 44 | assert_equal 300, NestedDemo::TestJobB.queue_respond_timeout 45 | end 46 | end # queue_respond_timeout 47 | 48 | describe "for queue_max_job_retries assignment method" do 49 | it "should allow queue max_job_retries to be assigned" do 50 | NestedDemo::TestJobB.queue_max_job_retries(5) 51 | assert_equal 5, NestedDemo::TestJobB.queue_max_job_retries 52 | end 53 | end # queue_max_job_retries 54 | 55 | describe "for 
queue_retry_delay assignment method" do 56 | it "should allow queue retry_delay to be assigned" do 57 | NestedDemo::TestJobB.queue_retry_delay(300) 58 | assert_equal 300, NestedDemo::TestJobB.queue_retry_delay 59 | end 60 | end # queue_retry_delay 61 | 62 | describe "for queue_retry_delay_proc assignment method" do 63 | it "should allow queue retry_delay_proc to be assigned" do 64 | retry_delay_proc = lambda { |x, y| x - y } 65 | NestedDemo::TestJobB.queue_retry_delay_proc(retry_delay_proc) 66 | assert_equal retry_delay_proc.call(2, 1), NestedDemo::TestJobB.queue_retry_delay_proc.call(2, 1) 67 | end 68 | end # queue_retry_delay_proc 69 | end # Backburner::Queue 70 | -------------------------------------------------------------------------------- /test/test_helper.rb: -------------------------------------------------------------------------------- 1 | require 'rubygems' 2 | require 'tempfile' 3 | require 'minitest/autorun' 4 | begin 5 | require 'mocha/setup' 6 | rescue LoadError 7 | require 'mocha' 8 | end 9 | $LOAD_PATH.unshift File.expand_path("lib") 10 | require 'backburner' 11 | require File.expand_path('../helpers/templogger', __FILE__) 12 | 13 | # Configure Backburner 14 | Backburner.configure do |config| 15 | config.beanstalk_url = "beanstalk://127.0.0.1" 16 | config.tube_namespace = "demo.test" 17 | end 18 | 19 | ## Kernel Extensions 20 | require 'stringio' 21 | 22 | module Kernel 23 | # Redirect standard out, standard error and the buffered logger for sprinkle to StringIO 24 | # capture_stdout { any_commands; you_want } => "all output from the commands" 25 | def capture_stdout 26 | if ENV['DEBUG'] # Skip if debug mode 27 | yield 28 | return "" 29 | end 30 | 31 | out = StringIO.new 32 | $stdout = out 33 | $stderr = out 34 | yield 35 | return out.string 36 | ensure 37 | $stdout = STDOUT 38 | $stderr = STDERR 39 | end 40 | end 41 | 42 | class User 43 | attr_accessor :id, :name 44 | 45 | def initialize(id, name) 46 | @id, @name = id, name 47 | end 48 | end 49 | 
50 | class MiniTest::Spec 51 | class << self 52 | alias :should :it 53 | alias :context :describe 54 | end 55 | alias :assert_no_match :refute_match 56 | alias :assert_not_nil :refute_nil 57 | alias :assert_not_equal :refute_equal 58 | 59 | # assert_same_elements([:a, :b, :c], [:c, :a, :b]) => passes 60 | def assert_same_elements(a1, a2, msg = nil) 61 | [:select, :inject, :size].each do |m| 62 | [a1, a2].each {|a| assert_respond_to(a, m, "Are you sure that #{a.inspect} is an array? It doesn't respond to #{m}.") } 63 | end 64 | 65 | assert a1h = a1.inject({}) { |h,e| h[e] ||= a1.select { |i| i == e }.size; h } 66 | assert a2h = a2.inject({}) { |h,e| h[e] ||= a2.select { |i| i == e }.size; h } 67 | 68 | assert_equal(a1h, a2h, msg) 69 | end 70 | 71 | # assert_contains(['a', '1'], /\d/) => passes 72 | # assert_contains(['a', '1'], 'a') => passes 73 | # assert_contains(['a', '1'], /not there/) => fails 74 | def assert_contains(collection, x, extra_msg = "") 75 | collection = [collection] unless collection.is_a?(Array) 76 | msg = "#{x.inspect} not found in #{collection.to_a.inspect} #{extra_msg}" 77 | case x 78 | when Regexp 79 | assert(collection.detect { |e| e =~ x }, msg) 80 | else 81 | assert(collection.include?(x), msg) 82 | end 83 | end 84 | 85 | # silenced(5) { ... 
} 86 | def silenced(time=3, &block) 87 | Timeout::timeout(time) { capture_stdout(&block) } 88 | end 89 | 90 | def beanstalk_connection 91 | Backburner::Connection.new(Backburner.configuration.beanstalk_url) 92 | end 93 | 94 | # pop_one_job(tube_name) 95 | def pop_one_job(tube_name=Backburner.configuration.primary_queue) 96 | tube_name = [Backburner.configuration.tube_namespace, tube_name].join(".") 97 | connection = beanstalk_connection 98 | connection.tubes.watch!(tube_name) 99 | silenced(3) { @res = connection.tubes.reserve } 100 | yield @res, Backburner.configuration.job_parser_proc.call(@res.body) 101 | ensure 102 | connection.close if connection 103 | end 104 | 105 | # clear_jobs!('foo') 106 | def clear_jobs!(*tube_names) 107 | connection = beanstalk_connection 108 | tube_names.each do |tube_name| 109 | expanded_name = [Backburner.configuration.tube_namespace, tube_name].join(".") 110 | connection.tubes.find(expanded_name).clear 111 | end 112 | ensure 113 | connection.close if connection 114 | end 115 | 116 | # Simulates a broken connection for any Beaneater::Connection. Will 117 | # simulate a restored connection after `reconnects_after`. This is expected 118 | # to be used when ensuring a Beaneater connection is open, therefore 119 | def simulate_disconnect(connection, reconnects_after = 2) 120 | connection.beanstalk.connection.connection.expects(:closed? => true) 121 | returns = Array.new(reconnects_after - 1, stub('TCPSocket')) 122 | returns.each do |socket| 123 | result = (socket != returns.last) 124 | socket.stubs(:closed? 
=> result) 125 | end 126 | TCPSocket.expects(:new).times(returns.size).returns(*returns) 127 | end 128 | end # MiniTest::Spec 129 | -------------------------------------------------------------------------------- /test/worker_test.rb: -------------------------------------------------------------------------------- 1 | require File.expand_path('../test_helper', __FILE__) 2 | require File.expand_path('../fixtures/test_jobs', __FILE__) 3 | require File.expand_path('../fixtures/hooked', __FILE__) 4 | 5 | describe "Backburner::Worker module" do 6 | before do 7 | Backburner.default_queues.clear 8 | clear_jobs!(Backburner.configuration.primary_queue, "test-plain", "test.bar", "bar.baz.foo") 9 | end 10 | 11 | describe "for enqueue class method" do 12 | it "should support enqueuing plain job" do 13 | Backburner::Worker.enqueue TestPlainJob, [7, 9], :ttr => 100, :pri => 2000 14 | pop_one_job("test-plain") do |job, body| 15 | assert_equal "TestPlainJob", body["class"] 16 | assert_equal [7, 9], body["args"] 17 | assert_equal 100, job.ttr 18 | assert_equal 2000, job.pri 19 | end 20 | end # plain 21 | 22 | it "should support enqueuing job with class queue priority" do 23 | Backburner::Worker.enqueue TestJob, [3, 4], :ttr => 100 24 | pop_one_job do |job, body| 25 | assert_equal "TestJob", body["class"] 26 | assert_equal [3, 4], body["args"] 27 | assert_equal 100, job.ttr 28 | assert_equal 100, job.pri 29 | end 30 | end # queue priority 31 | 32 | it "should support enqueuing job with specified named priority" do 33 | Backburner::Worker.enqueue TestJob, [3, 4], :ttr => 100, :pri => 'high' 34 | pop_one_job do |job, body| 35 | assert_equal "TestJob", body["class"] 36 | assert_equal [3, 4], body["args"] 37 | assert_equal 100, job.ttr 38 | assert_equal 0, job.pri 39 | end 40 | end # queue named priority 41 | 42 | it "should support enqueuing job with class queue respond_timeout" do 43 | Backburner::Worker.enqueue TestJob, [3, 4] 44 | pop_one_job do |job, body| 45 | assert_equal 
"TestJob", body["class"] 46 | assert_equal [3, 4], body["args"] 47 | assert_equal 300, job.ttr 48 | assert_equal 100, job.pri 49 | end 50 | end # queue respond_timeout 51 | 52 | it "should support enqueuing job with custom queue" do 53 | Backburner::Worker.enqueue TestJob, [6, 7], :queue => "test.bar", :pri => 5000 54 | pop_one_job("test.bar") do |job, body| 55 | assert_equal "TestJob", body["class"] 56 | assert_equal [6, 7], body["args"] 57 | assert_equal 0, job.delay 58 | assert_equal 5000, job.pri 59 | assert_equal 300, job.ttr 60 | end 61 | end # custom 62 | 63 | it "should support async job" do 64 | TestAsyncJob.async(:ttr => 100, :queue => "bar.baz.foo").foo(10, 5) 65 | pop_one_job("bar.baz.foo") do |job, body| 66 | assert_equal "TestAsyncJob", body["class"] 67 | assert_equal [nil, "foo", 10, 5], body["args"] 68 | assert_equal 100, job.ttr 69 | assert_equal Backburner.configuration.default_priority, job.pri 70 | end 71 | end # async 72 | 73 | it "should support enqueueing job with lambda queue" do 74 | expected_queue_name = TestLambdaQueueJob.calculated_queue_name 75 | Backburner::Worker.enqueue TestLambdaQueueJob, [6, 7], :queue => lambda { |klass| klass.calculated_queue_name } 76 | pop_one_job(expected_queue_name) do |job, body| 77 | assert_equal "TestLambdaQueueJob", body["class"] 78 | assert_equal [6, 7], body["args"] 79 | end 80 | end 81 | end # enqueue 82 | 83 | describe "for start class method" do 84 | it "should initialize and start the worker instance" do 85 | ech = stub 86 | Backburner::Worker.expects(:new).with("foo").returns(ech) 87 | ech.expects(:start) 88 | Backburner::Worker.start("foo") 89 | end 90 | end # start 91 | 92 | describe "for tube_names accessor" do 93 | before do 94 | Backburner.default_queues << "baz" 95 | Backburner.default_queues << "bam" 96 | end 97 | 98 | it "supports retrieving tubes" do 99 | worker = Backburner::Worker.new(["foo", "bar"]) 100 | assert_equal ["foo", "bar"], worker.tube_names 101 | end 102 | 103 | it "supports 
single tube array arg" do 104 | worker = Backburner::Worker.new([["foo", "bar"]]) 105 | assert_equal ["foo", "bar"], worker.tube_names 106 | end 107 | 108 | it "supports empty nil array arg with default values" do 109 | worker = Backburner::Worker.new([nil]) 110 | assert_equal ['baz', 'bam'], worker.tube_names 111 | end 112 | 113 | it "supports single tube arg" do 114 | worker = Backburner::Worker.new("foo") 115 | assert_equal ["foo"], worker.tube_names 116 | end 117 | 118 | it "supports empty array arg with default values" do 119 | worker = Backburner::Worker.new([]) 120 | assert_equal ['baz', 'bam'], worker.tube_names 121 | end 122 | 123 | it "supports nil arg with default values" do 124 | worker = Backburner::Worker.new(nil) 125 | assert_equal ['baz', 'bam'], worker.tube_names 126 | end 127 | end # tube_names 128 | 129 | describe "for custom serialization" do 130 | before do 131 | Backburner.configure do |config| 132 | @old_parser = config.job_parser_proc 133 | @old_serializer = config.job_serializer_proc 134 | config.job_parser_proc = lambda { |body| Marshal.load(body) } 135 | config.job_serializer_proc = lambda { |body| Marshal.dump(body) } 136 | end 137 | end 138 | 139 | after do 140 | clear_jobs!('test-plain') 141 | Backburner.configure do |config| 142 | config.job_parser_proc = @old_parser 143 | config.job_serializer_proc = @old_serializer 144 | end 145 | end 146 | 147 | it "should support enqueuing a job" do 148 | Backburner::Worker.enqueue TestPlainJob, [7, 9], :ttr => 100, :pri => 2000 149 | pop_one_job("test-plain") do |job, body| 150 | assert_equal "TestPlainJob", body[:class] 151 | assert_equal [7, 9], body[:args] 152 | assert_equal 100, job.ttr 153 | assert_equal 2000, job.pri 154 | end 155 | end 156 | end # custom serialization 157 | end # Backburner::Worker 158 | -------------------------------------------------------------------------------- /test/workers/forking_worker_test.rb: 
-------------------------------------------------------------------------------- 1 | require File.expand_path('../../test_helper', __FILE__) 2 | require File.expand_path('../../fixtures/test_forking_jobs', __FILE__) 3 | 4 | describe "Backburner::Workers::Forking module" do 5 | 6 | before do 7 | Backburner.default_queues.clear 8 | @worker_class = Backburner::Workers::Forking 9 | end 10 | 11 | describe "for prepare method" do 12 | it "should make tube names array always unique to avoid duplication" do 13 | worker = @worker_class.new(["foo", "demo.test.foo"]) 14 | capture_stdout { worker.prepare } 15 | assert_equal ["demo.test.foo"], worker.tube_names 16 | end 17 | 18 | it "should watch specified tubes" do 19 | worker = @worker_class.new(["foo", "bar"]) 20 | out = capture_stdout { worker.prepare } 21 | assert_equal ["demo.test.foo", "demo.test.bar"], worker.tube_names 22 | assert_same_elements ["demo.test.foo", "demo.test.bar"], worker.connection.tubes.watched.map(&:name) 23 | assert_match(/demo\.test\.foo/, out) 24 | end # multiple 25 | 26 | it "should watch single tube" do 27 | worker = @worker_class.new("foo") 28 | out = capture_stdout { worker.prepare } 29 | assert_equal ["demo.test.foo"], worker.tube_names 30 | assert_same_elements ["demo.test.foo"], worker.connection.tubes.watched.map(&:name) 31 | assert_match(/demo\.test\.foo/, out) 32 | end # single 33 | 34 | it "should respect default_queues settings" do 35 | Backburner.default_queues.concat(["foo", "bar"]) 36 | worker = @worker_class.new 37 | out = capture_stdout { worker.prepare } 38 | assert_equal ["demo.test.foo", "demo.test.bar"], worker.tube_names 39 | assert_same_elements ["demo.test.foo", "demo.test.bar"], worker.connection.tubes.watched.map(&:name) 40 | assert_match(/demo\.test\.foo/, out) 41 | end 42 | 43 | it "should assign based on all tubes" do 44 | @worker_class.any_instance.expects(:all_existing_queues).once.returns("bar") 45 | worker = @worker_class.new 46 | out = capture_stdout { 
worker.prepare } 47 | assert_equal ["demo.test.bar"], worker.tube_names 48 | assert_same_elements ["demo.test.bar"], worker.connection.tubes.watched.map(&:name) 49 | assert_match(/demo\.test\.bar/, out) 50 | end # all assign 51 | 52 | it "should properly retrieve all tubes" do 53 | worker = @worker_class.new 54 | out = capture_stdout { worker.prepare } 55 | assert_contains worker.tube_names, "demo.test.backburner-jobs" 56 | assert_contains worker.connection.tubes.watched.map(&:name), "demo.test.backburner-jobs" 57 | assert_match(/demo\.test\.backburner-jobs/, out) 58 | end # all read 59 | end # prepare 60 | 61 | describe "for fork_one_job method" do 62 | 63 | it "should fork, work job, and exit" do 64 | clear_jobs!("bar.foo") 65 | @worker_class.enqueue TestJobForking, [1, 2], :queue => "bar.foo" 66 | 67 | fake_pid = 45 68 | Process.expects(:fork).returns(fake_pid).yields 69 | @worker_class.any_instance.expects(:work_one_job) 70 | @worker_class.any_instance.expects(:coolest_exit) 71 | Process.expects(:wait).with(fake_pid) 72 | 73 | silenced(2) do 74 | worker = @worker_class.new('bar.foo') 75 | worker.prepare 76 | worker.fork_one_job 77 | end 78 | end 79 | 80 | end # fork_one_job 81 | 82 | describe "practical tests" do 83 | 84 | before do 85 | @templogger = Templogger.new('/tmp') 86 | Backburner.configure { |config| config.logger = @templogger.logger } 87 | $worker_test_count = 0 88 | $worker_success = false 89 | $worker_raise = false 90 | clear_jobs!('response') 91 | clear_jobs!('bar.foo.1', 'bar.foo.2', 'bar.foo.3', 'bar.foo.4', 'bar.foo.5') 92 | silenced do 93 | @response_worker = @worker_class.new('response') 94 | end 95 | end 96 | 97 | after do 98 | @templogger.close 99 | Backburner.configuration.logger = nil 100 | clear_jobs!('response') 101 | clear_jobs!('bar.foo.1', 'bar.foo.2', 'bar.foo.3', 'bar.foo.4', 'bar.foo.5') 102 | end 103 | 104 | 105 | it "should work an enqueued job" do 106 | @worker = @worker_class.new('bar.foo.1') 107 | @worker_class.enqueue 
TestJobForking, [1, 2], :queue => "bar.foo.1" 108 | @worker.prepare 109 | silenced(2) do 110 | @worker.fork_one_job 111 | @templogger.wait_for_match(/Completed TestJobFork/m) 112 | @response_worker.prepare 113 | @response_worker.work_one_job 114 | end 115 | assert_equal 3, $worker_test_count 116 | end # enqueue 117 | 118 | it "should work for an async job" do 119 | @worker = @worker_class.new('bar.foo.2') 120 | TestAsyncJobForking.async(:queue => 'bar.foo.2').foo(3, 5) 121 | @worker.prepare 122 | silenced(4) do 123 | @worker.fork_one_job 124 | @templogger.wait_for_match(/Completed TestAsyncJobFork/m) 125 | @response_worker.prepare 126 | @response_worker.work_one_job 127 | end 128 | assert_equal 15, $worker_test_count 129 | end # async 130 | 131 | it "should fail quietly if there's an argument error" do 132 | Backburner.configure { |config| config.max_job_retries = 0 } 133 | @worker = @worker_class.new('bar.foo.3') 134 | @worker_class.enqueue TestJobForking, ["bam", "foo", "bar"], :queue => "bar.foo.3" 135 | @worker.prepare 136 | silenced(5) do 137 | @worker.fork_one_job 138 | @templogger.wait_for_match(/Finished TestJobFork.*attempt 1 of 1/m) 139 | end 140 | assert_match(/Exception ArgumentError/, @templogger.body) 141 | assert_equal 0, $worker_test_count 142 | end # fail, argument 143 | 144 | it "should support retrying jobs and burying" do 145 | Backburner.configure { |config| config.max_job_retries = 1; config.retry_delay = 0 } 146 | @worker = @worker_class.new('bar.foo.4') 147 | @worker_class.enqueue TestRetryJobForking, ["bam", "foo"], :queue => 'bar.foo.4' 148 | @worker.prepare 149 | silenced(4) do 150 | 2.times do 151 | $worker_test_count += 1 152 | @worker.fork_one_job 153 | end 154 | @templogger.wait_for_match(/Finished TestRetryJobFork.*attempt 2 of 2/m) 155 | @response_worker.prepare 156 | 2.times { @response_worker.work_one_job } 157 | end 158 | assert_equal 4, $worker_test_count 159 | assert_equal false, $worker_success 160 | end # retry, bury 161 | 
162 | it "should support retrying jobs and succeeds" do 163 | Backburner.configure { |config| config.max_job_retries = 2; config.retry_delay = 0 } 164 | @worker = @worker_class.new('bar.foo.5') 165 | @worker_class.enqueue TestRetryJobForking, ["bam", "foo"], :queue => 'bar.foo.5' 166 | @worker.prepare 167 | silenced(4) do 168 | 3.times do 169 | $worker_test_count += 1 170 | @worker.fork_one_job 171 | end 172 | @templogger.wait_for_match(/Completed TestRetryJobFork/m) 173 | @response_worker.prepare 174 | 3.times { @response_worker.work_one_job } 175 | end 176 | assert_equal 6, $worker_test_count 177 | assert_equal true, $worker_success 178 | end # retrying, succeeds 179 | 180 | end # practical tests 181 | end 182 | -------------------------------------------------------------------------------- /test/workers/simple_worker_test.rb: -------------------------------------------------------------------------------- 1 | require File.expand_path('../../test_helper', __FILE__) 2 | require File.expand_path('../../fixtures/test_jobs', __FILE__) 3 | require File.expand_path('../../fixtures/hooked', __FILE__) 4 | 5 | describe "Backburner::Workers::Simple module" do 6 | before do 7 | Backburner.default_queues.clear 8 | @worker_class = Backburner::Workers::Simple 9 | end 10 | 11 | describe "for prepare method" do 12 | it "should make tube names array always unique to avoid duplication" do 13 | worker = @worker_class.new(["foo", "demo.test.foo"]) 14 | capture_stdout { worker.prepare } 15 | assert_equal ["demo.test.foo"], worker.tube_names 16 | end 17 | 18 | it "should watch specified tubes" do 19 | worker = @worker_class.new(["foo", "bar"]) 20 | out = capture_stdout { worker.prepare } 21 | assert_equal ["demo.test.foo", "demo.test.bar"], worker.tube_names 22 | assert_same_elements ["demo.test.foo", "demo.test.bar"], worker.connection.tubes.watched.map(&:name) 23 | assert_match(/demo\.test\.foo/, out) 24 | end # multiple 25 | 26 | it "should watch single tube" do 27 | worker = 
@worker_class.new("foo") 28 | out = capture_stdout { worker.prepare } 29 | assert_equal ["demo.test.foo"], worker.tube_names 30 | assert_same_elements ["demo.test.foo"], worker.connection.tubes.watched.map(&:name) 31 | assert_match(/demo\.test\.foo/, out) 32 | end # single 33 | 34 | it "should respect default_queues settings" do 35 | Backburner.default_queues.concat(["foo", "bar"]) 36 | worker = @worker_class.new 37 | out = capture_stdout { worker.prepare } 38 | assert_equal ["demo.test.foo", "demo.test.bar"], worker.tube_names 39 | assert_same_elements ["demo.test.foo", "demo.test.bar"], worker.connection.tubes.watched.map(&:name) 40 | assert_match(/demo\.test\.foo/, out) 41 | end 42 | 43 | it "should assign based on all tubes" do 44 | @worker_class.any_instance.expects(:all_existing_queues).once.returns("bar") 45 | worker = @worker_class.new 46 | out = capture_stdout { worker.prepare } 47 | assert_equal ["demo.test.bar"], worker.tube_names 48 | assert_same_elements ["demo.test.bar"], worker.connection.tubes.watched.map(&:name) 49 | assert_match(/demo\.test\.bar/, out) 50 | end # all assign 51 | 52 | it "should properly retrieve all tubes" do 53 | worker = @worker_class.new 54 | out = capture_stdout { worker.prepare } 55 | assert_contains worker.tube_names, "demo.test.backburner-jobs" 56 | assert_contains worker.connection.tubes.watched.map(&:name), "demo.test.backburner-jobs" 57 | assert_match(/demo\.test\.backburner-jobs/, out) 58 | end # all read 59 | end # prepare 60 | 61 | describe "for work_one_job method" do 62 | before do 63 | $worker_test_count = 0 64 | $worker_success = false 65 | end 66 | 67 | it "should work a plain enqueued job" do 68 | clear_jobs!("foo.bar") 69 | @worker_class.enqueue TestPlainJob, [1, 2], :queue => "foo.bar" 70 | silenced(2) do 71 | worker = @worker_class.new('foo.bar') 72 | worker.prepare 73 | worker.work_one_job 74 | end 75 | assert_equal 4, $worker_test_count 76 | end # plain enqueue 77 | 78 | it "should work an enqueued job" do 
79 | clear_jobs!("foo.bar") 80 | @worker_class.enqueue TestJob, [1, 2], :queue => "foo.bar" 81 | silenced(2) do 82 | worker = @worker_class.new('foo.bar') 83 | worker.prepare 84 | worker.work_one_job 85 | end 86 | assert_equal 3, $worker_test_count 87 | end # enqueue 88 | 89 | it "should fail quietly if there's an argument error" do 90 | clear_jobs!("foo.bar") 91 | @worker_class.enqueue TestJob, ["bam", "foo", "bar"], :queue => "foo.bar" 92 | out = silenced(2) do 93 | worker = @worker_class.new('foo.bar') 94 | worker.prepare 95 | worker.work_one_job 96 | end 97 | assert_match(/Exception ArgumentError/, out) 98 | assert_equal 0, $worker_test_count 99 | end # fail, argument 100 | 101 | it "should work an enqueued failing job" do 102 | # NB: The #bury expectation below leaves the job in the queue (as reserved!) 103 | # since bury is never actually called on the task. Therefore, clear_jobs!() 104 | # can't remove it which can break a lot of things depending on the order the 105 | # tests are run. So we ensure that it's using a unique queue name. 
Mocha 106 | # lacks expectations with proxies (where we could actually call bury) 107 | clear_jobs!('foo.bar.failed') 108 | @worker_class.enqueue TestFailJob, [1, 2], :queue => 'foo.bar.failed' 109 | Backburner::Job.any_instance.expects(:bury).once 110 | out = silenced(2) do 111 | worker = @worker_class.new('foo.bar.failed') 112 | worker.prepare 113 | worker.work_one_job 114 | end 115 | assert_match(/Exception RuntimeError/, out) 116 | assert_equal 0, $worker_test_count 117 | end # fail, runtime error 118 | 119 | it "should work an invalid job parsed" do 120 | Beaneater::Tubes.any_instance.expects(:reserve).returns(stub(:body => "{%$^}", :bury => true)) 121 | out = silenced(2) do 122 | worker = @worker_class.new('foo.bar') 123 | worker.prepare 124 | worker.work_one_job 125 | end 126 | assert_match(/Exception Backburner::Job::JobFormatInvalid/, out) 127 | assert_equal 0, $worker_test_count 128 | end # fail, runtime error 129 | 130 | it "should work for an async job" do 131 | clear_jobs!('foo.bar') 132 | TestAsyncJob.async(:queue => 'foo.bar').foo(3, 5) 133 | silenced(2) do 134 | worker = @worker_class.new('foo.bar') 135 | worker.prepare 136 | worker.work_one_job 137 | end 138 | assert_equal 15, $worker_test_count 139 | end # async 140 | 141 | it "should support retrying jobs and burying" do 142 | clear_jobs!('foo.bar') 143 | Backburner.configure { |config| config.max_job_retries = 1; config.retry_delay = 0 } 144 | @worker_class.enqueue TestRetryJob, ["bam", "foo"], :queue => 'foo.bar' 145 | out = [] 146 | 2.times do 147 | out << silenced(2) do 148 | worker = @worker_class.new('foo.bar') 149 | worker.prepare 150 | worker.work_one_job 151 | end 152 | end 153 | assert_match(/attempt 1 of 2, retrying/, out.first) 154 | assert_match(/Finished TestRetryJob/m, out.last) 155 | assert_match(/attempt 2 of 2, burying/m, out.last) 156 | assert_equal 2, $worker_test_count 157 | assert_equal false, $worker_success 158 | end # retry, bury 159 | 160 | it "should support retrying 
jobs and succeeds" do 161 | clear_jobs!('foo.bar') 162 | Backburner.configure { |config| config.max_job_retries = 2; config.retry_delay = 0 } 163 | @worker_class.enqueue TestRetryJob, ["bam", "foo"], :queue => 'foo.bar' 164 | out = [] 165 | 3.times do 166 | out << silenced(2) do 167 | worker = @worker_class.new('foo.bar') 168 | worker.prepare 169 | worker.work_one_job 170 | end 171 | end 172 | assert_match(/attempt 1 of 3, retrying/, out.first) 173 | assert_match(/attempt 2 of 3, retrying/, out[1]) 174 | assert_match(/Completed TestRetryJob/m, out.last) 175 | refute_match(/failed/, out.last) 176 | assert_equal 3, $worker_test_count 177 | assert_equal true, $worker_success 178 | end # retrying, succeeds 179 | 180 | it "should back off retries exponentially" do 181 | max_job_retries = 3 182 | clear_jobs!('foo.bar') 183 | Backburner.configure do |config| 184 | config.max_job_retries = max_job_retries 185 | config.retry_delay = 0 186 | #config.retry_delay_proc = lambda { |min_retry_delay, num_retries| min_retry_delay + (num_retries ** 3) } # default retry_delay_proc 187 | end 188 | @worker_class.enqueue TestConfigurableRetryJob, [max_job_retries], :queue => 'foo.bar' 189 | out = [] 190 | (max_job_retries + 1).times do 191 | out << silenced(10) do 192 | worker = @worker_class.new('foo.bar') 193 | worker.prepare 194 | worker.work_one_job 195 | end 196 | end 197 | assert_match(/attempt 1 of 4, retrying in 0/, out.first) 198 | assert_match(/attempt 2 of 4, retrying in 1/, out[1]) 199 | assert_match(/attempt 3 of 4, retrying in 8/, out[2]) 200 | assert_match(/Completed TestConfigurableRetryJob/m, out.last) 201 | refute_match(/failed/, out.last) 202 | assert_equal 4, $worker_test_count 203 | assert_equal true, $worker_success 204 | end 205 | 206 | it "should allow configurable back off retry delays" do 207 | max_job_retries = 3 208 | clear_jobs!('foo.bar') 209 | Backburner.configure do |config| 210 | config.max_job_retries = max_job_retries 211 | config.retry_delay = 0 212 | 
config.retry_delay_proc = lambda { |min_retry_delay, num_retries| min_retry_delay + (num_retries ** 2) } 213 | end 214 | @worker_class.enqueue TestConfigurableRetryJob, [max_job_retries], :queue => 'foo.bar' 215 | out = [] 216 | (max_job_retries + 1).times do 217 | out << silenced(5) do 218 | worker = @worker_class.new('foo.bar') 219 | worker.prepare 220 | worker.work_one_job 221 | end 222 | end 223 | assert_match(/attempt 1 of 4, retrying in 0/, out.first) 224 | assert_match(/attempt 2 of 4, retrying in 1/, out[1]) 225 | assert_match(/attempt 3 of 4, retrying in 4/, out[2]) 226 | assert_match(/Completed TestConfigurableRetryJob/m, out.last) 227 | refute_match(/failed/, out.last) 228 | assert_equal 4, $worker_test_count 229 | assert_equal true, $worker_success 230 | end 231 | 232 | it "should allow queue override of retries" do 233 | max_job_retries = TestRetryWithQueueOverridesJob.queue_max_job_retries 234 | clear_jobs!('foo.bar') 235 | Backburner.configure do |config| 236 | # Config should be overridden by queue overrides 237 | config.max_job_retries = 20 238 | config.retry_delay = 60 239 | #config.retry_delay_proc = lambda { |min_retry_delay, num_retries| min_retry_delay + (num_retries ** 3) } # default retry_delay_proc 240 | end 241 | @worker_class.enqueue TestRetryWithQueueOverridesJob, [max_job_retries], :queue => 'foo.bar' 242 | out = [] 243 | (max_job_retries + 1).times do 244 | out << silenced(5) do 245 | worker = @worker_class.new('foo.bar') 246 | worker.prepare 247 | worker.work_one_job 248 | end 249 | end 250 | assert_match(/attempt 1 of 4, retrying in 0/, out.first) 251 | assert_match(/attempt 2 of 4, retrying in 1/, out[1]) 252 | assert_match(/attempt 3 of 4, retrying in 4/, out[2]) 253 | assert_match(/Completed TestRetryWithQueueOverridesJob/m, out.last) 254 | refute_match(/failed/, out.last) 255 | assert_equal 4, $worker_test_count 256 | assert_equal true, $worker_success 257 | end 258 | 259 | it "should support event hooks without retry" do 260 | 
$hooked_fail_count = 0 261 | clear_jobs!('foo.bar.events') 262 | out = silenced(2) do 263 | HookedObjectSuccess.async(:queue => 'foo.bar.events').foo(5) 264 | worker = @worker_class.new('foo.bar.events') 265 | worker.prepare 266 | worker.work_one_job 267 | end 268 | assert_match(/before_enqueue.*after_enqueue.*Working 1 queues/m, out) 269 | assert_match(/!!before_enqueue_bar!! \[nil, :foo, 5\]/, out) 270 | assert_match(/!!after_enqueue_bar!! \[nil, :foo, 5\]/, out) 271 | assert_match(/!!before_perform_foo!! \[nil, "foo", 5\]/, out) 272 | assert_match(/!!BEGIN around_perform_bar!! \[nil, "foo", 5\]/, out) 273 | assert_match(/!!BEGIN around_perform_cat!! \[nil, "foo", 5\]/, out) 274 | assert_match(/!!on_failure_foo!!.*HookFailError/, out) 275 | assert_match(/!!on_bury_foo!! \[nil, "foo", 5\]/, out) 276 | assert_match(/attempt 1 of 1, burying/, out) 277 | end # event hooks, no retry 278 | 279 | it "should support event hooks with retry" do 280 | $hooked_fail_count = 0 281 | clear_jobs!('foo.bar.events.retry') 282 | Backburner.configure { |config| config.max_job_retries = 1; config.retry_delay = 0 } 283 | out = silenced(2) do 284 | HookedObjectSuccess.async(:queue => 'foo.bar.events.retry').foo(5) 285 | worker = @worker_class.new('foo.bar.events.retry') 286 | worker.prepare 287 | 2.times do 288 | worker.work_one_job 289 | end 290 | end 291 | assert_match(/before_enqueue.*after_enqueue.*Working 1 queues/m, out) 292 | assert_match(/!!before_enqueue_bar!! \[nil, :foo, 5\]/, out) 293 | assert_match(/!!after_enqueue_bar!! \[nil, :foo, 5\]/, out) 294 | assert_match(/!!before_perform_foo!! \[nil, "foo", 5\]/, out) 295 | assert_match(/!!BEGIN around_perform_bar!! \[nil, "foo", 5\]/, out) 296 | assert_match(/!!BEGIN around_perform_cat!! \[nil, "foo", 5\]/, out) 297 | assert_match(/!!on_failure_foo!!.*HookFailError/, out) 298 | assert_match(/!!on_failure_foo!!.*retrying.*around_perform_bar.*around_perform_cat/m, out) 299 | assert_match(/!!on_retry_foo!! 
1 0 \[nil, "foo", 5\]/, out) 300 | assert_match(/attempt 1 of 2, retrying/, out) 301 | assert_match(/!!before_perform_foo!! \[nil, "foo", 5\]/, out) 302 | assert_match(/!!END around_perform_bar!! \[nil, "foo", 5\]/, out) 303 | assert_match(/!!END around_perform_cat!! \[nil, "foo", 5\]/, out) 304 | assert_match(/!!after_perform_foo!! \[nil, "foo", 5\]/, out) 305 | assert_match(/Finished HookedObjectSuccess/, out) 306 | end # event hooks, with retry 307 | 308 | it "should support event hooks with stopping enqueue" do 309 | $hooked_fail_count = 0 310 | worker = @worker_class.new('foo.bar.events.retry2') 311 | clear_jobs!('foo.bar.events.retry2') 312 | silenced(2) do 313 | HookedObjectBeforeEnqueueFail.async(:queue => 'foo.bar.events.retry2').foo(5) 314 | end 315 | expanded_tube = [Backburner.configuration.tube_namespace, 'foo.bar.events.retry2'].join(".") 316 | assert_nil worker.connection.tubes[expanded_tube].peek(:ready) 317 | end # stopping enqueue 318 | 319 | it "should support event hooks with stopping perform" do 320 | $hooked_fail_count = 0 321 | clear_jobs!('foo.bar.events.retry3') 322 | [Backburner.configuration.tube_namespace, 'foo.bar.events.retry3'].join(".") 323 | out = silenced(2) do 324 | HookedObjectBeforePerformFail.async(:queue => 'foo.bar.events.retry3').foo(10) 325 | worker = @worker_class.new('foo.bar.events.retry3') 326 | worker.prepare 327 | worker.work_one_job 328 | end 329 | assert_match(/!!before_perform_foo!! 
\[nil, "foo", 10\]/, out) 330 | assert_match(/before_perform_foo.*Completed/m, out) 331 | refute_match(/Fail ran!!/, out) 332 | refute_match(/HookFailError/, out) 333 | end # stopping perform 334 | 335 | it "should use the connection given as an argument" do 336 | worker = @worker_class.new('foo.bar') 337 | connection = mock('connection') 338 | worker.expects(:reserve_job).with(connection).returns(stub_everything('job')) 339 | capture_stdout { worker.work_one_job(connection) } 340 | end 341 | 342 | after do 343 | Backburner.configure do |config| 344 | config.max_job_retries = 0 345 | config.retry_delay = 5 346 | config.retry_delay_proc = lambda { |min_retry_delay, num_retries| min_retry_delay + (num_retries ** 3) } 347 | end 348 | end 349 | end # work_one_job 350 | end # Worker 351 | -------------------------------------------------------------------------------- /test/workers/threading_worker_test.rb: -------------------------------------------------------------------------------- 1 | require File.expand_path('../../test_helper', __FILE__) 2 | require File.expand_path('../../fixtures/test_jobs', __FILE__) 3 | require File.expand_path('../../fixtures/hooked', __FILE__) 4 | 5 | describe "Backburner::Workers::Threading worker" do 6 | before do 7 | Backburner.default_queues.clear 8 | @worker_class = Backburner::Workers::Threading 9 | @worker_class.shutdown_timeout = 2 10 | end 11 | 12 | describe "for prepare method" do 13 | it "should make tube names array always unique to avoid duplication" do 14 | worker = @worker_class.new(["foo", "demo.test.foo"]) 15 | capture_stdout { worker.prepare } 16 | assert_equal ["demo.test.foo"], worker.tube_names 17 | end 18 | 19 | it 'creates a thread pool per queue' do 20 | worker = @worker_class.new(%w(foo bar)) 21 | capture_stdout { worker.prepare } 22 | assert_equal 2, worker.instance_variable_get("@thread_pools").keys.size 23 | end 24 | 25 | it 'uses Concurrent.processor_count if no custom thread count is provided' do 26 | worker = 
@worker_class.new("foo") 27 | capture_stdout { worker.prepare } 28 | assert_equal ::Concurrent.processor_count, worker.instance_variable_get("@thread_pools")["demo.test.foo"].max_length 29 | end 30 | end # prepare 31 | 32 | describe "for process_tube_names method" do 33 | it "should interpret the job_name:threads_limit format" do 34 | worker = @worker_class.new(["foo:4"]) 35 | assert_equal ["foo"], worker.tube_names 36 | end 37 | 38 | it "should interpret correctly even if missing values" do 39 | tubes = %W(foo1:2 foo2) 40 | worker = @worker_class.new(tubes) 41 | assert_equal %W(foo1 foo2), worker.tube_names 42 | end 43 | 44 | it "should store interpreted values correctly" do 45 | tubes = %W(foo1 foo2:2) 46 | worker = @worker_class.new(tubes) 47 | assert_equal({ 48 | "demo.test.foo1" => { :threads => nil }, 49 | "demo.test.foo2" => { :threads => 2 } 50 | }, worker.instance_variable_get("@tubes_data")) 51 | end 52 | end # process_tube_names 53 | 54 | describe 'working a queue' do 55 | before do 56 | @worker = @worker_class.new(["foo:3"]) 57 | capture_stdout { @worker.prepare } 58 | $worker_test_count = 0 59 | $worker_success = false 60 | end 61 | 62 | it 'runs work_on_job per thread' do 63 | clear_jobs!("foo") 64 | job_count=10 65 | # TestJob adds the given arguments together and then to $worker_test_count 66 | job_count.times { @worker_class.enqueue TestJob, [1, 0], :queue => "foo" } 67 | capture_stdout do 68 | @worker.start(false) # don't wait for shutdown 69 | sleep 0.5 # Wait for threads to do their work 70 | end 71 | assert_equal job_count, $worker_test_count 72 | end 73 | end # working a queue 74 | 75 | describe 'shutting down' do 76 | before do 77 | @thread_count = 3 78 | @worker = @worker_class.new(["threaded-shutdown:#{@thread_count}"]) 79 | @worker.exit_on_shutdown = false 80 | $worker_test_count = 0 81 | clear_jobs!("threaded-shutdown") 82 | end 83 | 84 | it 'gracefully exits and completes all in-flight jobs' do 85 | 6.times { @worker_class.enqueue 
TestSlowJob, [1, 0], :queue => "threaded-shutdown" } 86 | Thread.new { sleep 0.1; @worker.self_write.puts("TERM") } 87 | capture_stdout do 88 | @worker.start 89 | end 90 | 91 | assert_equal @thread_count, $worker_test_count 92 | end 93 | 94 | it 'forces an exit when a job is stuck' do 95 | 6.times { @worker_class.enqueue TestStuckJob, [1, 0], :queue => "threaded-shutdown" } 96 | Thread.new { sleep 0.1; @worker.self_write.puts("TERM") } 97 | capture_stdout do 98 | @worker.start 99 | end 100 | 101 | assert_equal 0, $worker_test_count 102 | end 103 | end 104 | end 105 | -------------------------------------------------------------------------------- /test/workers/threads_on_fork_worker_test.rb: -------------------------------------------------------------------------------- 1 | require File.expand_path('../../test_helper', __FILE__) 2 | require File.expand_path('../../fixtures/test_fork_jobs', __FILE__) 3 | require File.expand_path('../../fixtures/test_queue_settings', __FILE__) 4 | 5 | describe "Backburner::Workers::ThreadsOnFork module" do 6 | 7 | before do 8 | Backburner.default_queues.clear 9 | @worker_class = Backburner::Workers::ThreadsOnFork 10 | @worker_class.shutdown = false 11 | @worker_class.is_child = false 12 | @worker_class.threads_number = 1 13 | @worker_class.garbage_after = 1 14 | @ignore_forks = false 15 | end 16 | 17 | after do 18 | Backburner.configure { |config| config.max_job_retries = 0; config.retry_delay = 5; config.logger = nil } 19 | unless @ignore_forks 20 | cpids = @worker_class.instance_variable_get("@child_pids") 21 | if cpids && cpids.length > 0 22 | raise "Why is there forks alive?" 
23 | end 24 | end 25 | end 26 | 27 | describe "for process_tube_names method" do 28 | it "should interpreter the job_name:threads_limit:garbage_after:retries format" do 29 | worker = @worker_class.new(["foo:1:2:3"]) 30 | assert_equal ["foo"], worker.tube_names 31 | end 32 | 33 | it "should interpreter event if is missing values" do 34 | tubes = %W(foo1:1:2:3 foo2:4:5 foo3:6 foo4 foo5::7:8 foo6:::9 foo7::10) 35 | worker = @worker_class.new(tubes) 36 | assert_equal %W(foo1 foo2 foo3 foo4 foo5 foo6 foo7), worker.tube_names 37 | end 38 | 39 | it "should store interpreted values correctly" do 40 | tubes = %W(foo1:1:2:3 foo2:4:5 foo3:6 foo4 foo5::7:8 foo6:::9 foo7::10) 41 | worker = @worker_class.new(tubes) 42 | assert_equal({ 43 | "demo.test.foo1" => { :threads => 1, :garbage => 2, :retries => 3 }, 44 | "demo.test.foo2" => { :threads => 4, :garbage => 5, :retries => nil }, 45 | "demo.test.foo3" => { :threads => 6, :garbage => nil, :retries => nil }, 46 | "demo.test.foo4" => { :threads => nil, :garbage => nil, :retries => nil }, 47 | "demo.test.foo5" => { :threads => nil, :garbage => 7, :retries => 8 }, 48 | "demo.test.foo6" => { :threads => nil, :garbage => nil, :retries => 9 }, 49 | "demo.test.foo7" => { :threads => nil, :garbage => 10, :retries => nil } 50 | }, worker.instance_variable_get("@tubes_data")) 51 | end 52 | end 53 | 54 | describe "for process_tube_settings" do 55 | it "should set the settings specified by queue name in class" do 56 | worker = @worker_class.new 57 | assert_equal(worker.instance_variable_get("@tubes_data")['demo.test.job-settings'], { :threads => 5, :garbage => 10, :retries => 6 }) 58 | end 59 | 60 | it 'should override the tube settings if they are specified directly at class level' do 61 | worker = @worker_class.new 62 | assert_equal(worker.instance_variable_get("@tubes_data")['demo.test.job-settings-override'], { :threads => 10, :garbage => 1000, :retries => 2 }) 63 | end 64 | end 65 | 66 | describe "for prepare method" do 67 | before do 
68 | Backburner.configure { |config| config.logger = false } 69 | end 70 | 71 | it "should make tube names array always unique to avoid duplication" do 72 | worker = @worker_class.new(["foo", "demo.test.foo"]) 73 | capture_stdout { worker.prepare } 74 | assert_equal ["demo.test.foo"], worker.tube_names 75 | end 76 | 77 | it "should watch specified tubes" do 78 | worker = @worker_class.new(["foo", "bar"]) 79 | out = capture_stdout { worker.prepare } 80 | assert_equal ["demo.test.foo", "demo.test.bar"], worker.tube_names 81 | assert_match(/demo\.test\.foo/, out) 82 | end # multiple 83 | 84 | it "should watch single tube" do 85 | worker = @worker_class.new("foo") 86 | out = capture_stdout { worker.prepare } 87 | assert_equal ["demo.test.foo"], worker.tube_names 88 | assert_match(/demo\.test\.foo/, out) 89 | end # single 90 | 91 | it "should respect default_queues settings" do 92 | Backburner.default_queues.concat(["foo", "bar"]) 93 | worker = @worker_class.new 94 | out = capture_stdout { worker.prepare } 95 | assert_equal ["demo.test.foo", "demo.test.bar"], worker.tube_names 96 | assert_match(/demo\.test\.foo/, out) 97 | end 98 | 99 | it "should assign based on all tubes" do 100 | @worker_class.any_instance.expects(:all_existing_queues).once.returns("bar") 101 | worker = @worker_class.new 102 | out = capture_stdout { worker.prepare } 103 | assert_equal ["demo.test.bar"], worker.tube_names 104 | assert_match(/demo\.test\.bar/, out) 105 | end # all assign 106 | 107 | it "should properly retrieve all tubes" do 108 | worker = @worker_class.new 109 | out = capture_stdout { worker.prepare } 110 | assert_contains worker.tube_names, "demo.test.test-job-fork" 111 | assert_match(/demo\.test\.test-job-fork/, out) 112 | end # all read 113 | end # prepare 114 | 115 | describe "forking and threading" do 116 | 117 | it "start should call fork_and_watch for each tube" do 118 | worker = @worker_class.new(%W(foo bar)) 119 | worker.expects(:fork_and_watch).with("demo.test.foo").once 120 
| worker.expects(:fork_and_watch).with("demo.test.bar").once 121 | silenced { worker.start(false) } 122 | end 123 | 124 | it "fork_and_watch should create a thread to fork and watch" do 125 | worker = @worker_class.new(%(foo)) 126 | worker.expects(:create_thread).once.with("demo.test.foo") 127 | silenced { worker.start(false) } 128 | end 129 | 130 | it "fork_and_watch thread should wait with wait_for_process" do 131 | process_exit = stub('process_exit') 132 | process_exit.expects(:exitstatus).returns(99) 133 | worker = @worker_class.new(%(foo)) 134 | worker.expects(:wait_for_process).with(12).returns([nil, process_exit]) 135 | 136 | wc = @worker_class 137 | # TODO: Is there a better way to do this? 138 | worker.define_singleton_method :fork_it do 139 | wc.shutdown = true 140 | 12 141 | end 142 | def worker.create_thread(*args, &block); block.call(*args) end 143 | 144 | out = silenced(2) { worker.start(false) } 145 | refute_match(/Catastrophic failure/, out) 146 | end 147 | 148 | it "fork_and_watch thread should log an error if exitstatus is != 99" do 149 | process_exit = stub('process_exit') 150 | process_exit.expects(:exitstatus).twice.returns(0) 151 | worker = @worker_class.new(%(foo)) 152 | worker.expects(:wait_for_process).with(12).returns([nil, process_exit]) 153 | 154 | wc = @worker_class 155 | # TODO: Is there a better way to do this?
156 | worker.define_singleton_method :fork_it do 157 | wc.shutdown = true 158 | 12 159 | end 160 | def worker.create_thread(*args, &block); block.call(*args) end 161 | out = silenced(2) { worker.start(false) } 162 | assert_match(/Catastrophic failure: tube demo\.test\.foo exited with code 0\./, out) 163 | end 164 | 165 | describe "fork_inner" do 166 | 167 | before do 168 | @worker_class.any_instance.expects(:coolest_exit).once 169 | end 170 | 171 | it "should watch just the channel it receive as argument" do 172 | worker = @worker_class.new(%(foo)) 173 | @worker_class.expects(:threads_number).returns(1) 174 | worker.expects(:run_while_can).once 175 | silenced do 176 | worker.prepare 177 | worker.fork_inner('demo.test.bar') 178 | end 179 | assert_same_elements %W(demo.test.bar), worker.connection.tubes.watched.map(&:name) 180 | end 181 | 182 | it "should not create threads if the number of threads is 1" do 183 | worker = @worker_class.new(%(foo)) 184 | @worker_class.expects(:threads_number).returns(1) 185 | worker.expects(:run_while_can).once 186 | worker.expects(:create_thread).never 187 | silenced do 188 | worker.prepare 189 | worker.fork_inner('demo.test.foo') 190 | end 191 | end 192 | 193 | it "should create threads if the number of threads is > 1" do 194 | worker = @worker_class.new(%(foo)) 195 | @worker_class.expects(:threads_number).returns(5) 196 | worker.expects(:create_thread).times(5) 197 | silenced do 198 | worker.prepare 199 | worker.fork_inner('demo.test.foo') 200 | end 201 | end 202 | 203 | it "should create threads that call run_while_can" do 204 | worker = @worker_class.new(%(foo)) 205 | @worker_class.expects(:threads_number).returns(5) 206 | worker.expects(:run_while_can).times(5) 207 | def worker.create_thread(*args, &block); block.call(*args) end 208 | silenced do 209 | worker.prepare 210 | worker.fork_inner('demo.test.foo') 211 | end 212 | end 213 | 214 | it "should create a connection for each thread" do 215 | name = 'demo.test.foo' 216 | 
num_threads = 3 217 | 218 | worker = @worker_class.new(%(foo)) 219 | @worker_class.expects(:threads_number).returns(num_threads) 220 | 221 | invocations = Array(1..num_threads).map do |i| 222 | conn = OpenStruct.new(:num => i) 223 | conn.expects(:close) 224 | conn 225 | end 226 | Backburner::Connection.expects(:new).times(num_threads).returns(*invocations) 227 | 228 | # ensure each invocation of run_while_can is with a different connection 229 | num_conns = states('num_conns').starts_as(0) 230 | invocations.each do |conn| 231 | worker.expects(:watch_tube).with(name, conn) 232 | worker.expects(:run_while_can).with(conn).when(num_conns.is(conn.num-1)).then(num_conns.is(conn.num)) 233 | end 234 | 235 | def worker.create_thread(*args, &block); block.call(*args) end 236 | silenced do 237 | worker.prepare 238 | worker.fork_inner(name) 239 | end 240 | 241 | assert_equal(num_threads, num_conns.current_state) 242 | end 243 | 244 | it "should set @garbage_after, @threads_number and set retries if needed" do 245 | worker = @worker_class.new(%W(foo1 foo2:10 foo3:20:30 foo4:40:50:60)) 246 | default_threads = 1 247 | default_garbage = 5 248 | default_retries = 100 249 | @worker_class.expects(:threads_number).times(1).returns(default_threads) 250 | @worker_class.expects(:garbage_after).times(2).returns(default_garbage) 251 | @worker_class.any_instance.expects(:coolest_exit).times(3) 252 | Backburner.configuration.max_job_retries = default_retries 253 | 254 | worker.expects(:create_thread).times(70) 255 | worker.expects(:run_while_can).once 256 | 257 | silenced do 258 | worker.prepare 259 | worker.fork_inner('demo.test.foo1') 260 | end 261 | 262 | assert_equal worker.instance_variable_get("@threads_number"), default_threads 263 | assert_equal worker.instance_variable_get("@garbage_after"), default_garbage 264 | assert_equal Backburner.configuration.max_job_retries, default_retries 265 | 266 | silenced do 267 | worker.fork_inner('demo.test.foo2') 268 | end 269 | 270 | assert_equal 
worker.instance_variable_get("@threads_number"), 10 271 | assert_equal worker.instance_variable_get("@garbage_after"), default_garbage 272 | assert_equal Backburner.configuration.max_job_retries, default_retries 273 | 274 | silenced do 275 | worker.fork_inner('demo.test.foo3') 276 | end 277 | 278 | assert_equal worker.instance_variable_get("@threads_number"), 20 279 | assert_equal worker.instance_variable_get("@garbage_after"), 30 280 | assert_equal Backburner.configuration.max_job_retries, default_retries 281 | 282 | silenced do 283 | worker.fork_inner('demo.test.foo4') 284 | end 285 | 286 | assert_equal worker.instance_variable_get("@threads_number"), 40 287 | assert_equal worker.instance_variable_get("@garbage_after"), 50 288 | assert_equal Backburner.configuration.max_job_retries, 60 289 | end 290 | 291 | end 292 | 293 | describe "cleanup on parent" do 294 | 295 | it "child_pids should return a list of alive children pids" do 296 | worker = @worker_class.new(%W(foo)) 297 | Kernel.expects(:fork).once.returns(12345) 298 | Process.expects(:kill).with(0, 12345).once 299 | Process.expects(:pid).once.returns(12346) 300 | assert_equal [], @worker_class.child_pids 301 | worker.fork_it {} 302 | child_pids = @worker_class.child_pids 303 | assert_equal [12345], child_pids 304 | child_pids.clear 305 | end 306 | 307 | it "child_pids should return an empty array if is_child" do 308 | Process.expects(:pid).never 309 | @worker_class.is_child = true 310 | @worker_class.child_pids << 12345 311 | assert_equal [], @worker_class.child_pids 312 | end 313 | 314 | it "stop_forks should send a SIGTERM for every child" do 315 | Process.expects(:pid).returns(12346).at_least(1) 316 | Process.expects(:kill).with(0, 12345).at_least(1) 317 | Process.expects(:kill).with(0, 12347).at_least(1) 318 | Process.expects(:kill).with("SIGTERM", 12345) 319 | Process.expects(:kill).with("SIGTERM", 12347) 320 | @worker_class.child_pids << 12345 321 | @worker_class.child_pids << 12347 322 | assert_equal 
[12345, 12347], @worker_class.child_pids 323 | @worker_class.stop_forks 324 | @worker_class.child_pids.clear 325 | end 326 | 327 | it "kill_forks should send a SIGKILL for every child" do 328 | Process.expects(:pid).returns(12346).at_least(1) 329 | Process.expects(:kill).with(0, 12345).at_least(1) 330 | Process.expects(:kill).with(0, 12347).at_least(1) 331 | Process.expects(:kill).with("SIGKILL", 12345) 332 | Process.expects(:kill).with("SIGKILL", 12347) 333 | @worker_class.child_pids << 12345 334 | @worker_class.child_pids << 12347 335 | assert_equal [12345, 12347], @worker_class.child_pids 336 | @worker_class.kill_forks 337 | @worker_class.child_pids.clear 338 | end 339 | 340 | it "finish_forks should call stop_forks, kill_forks and Process.waitall" do 341 | Process.expects(:pid).returns(12346).at_least(1) 342 | Process.expects(:kill).with(0, 12345).at_least(1) 343 | Process.expects(:kill).with(0, 12347).at_least(1) 344 | Process.expects(:kill).with("SIGTERM", 12345) 345 | Process.expects(:kill).with("SIGTERM", 12347) 346 | Process.expects(:kill).with("SIGKILL", 12345) 347 | Process.expects(:kill).with("SIGKILL", 12347) 348 | Kernel.expects(:sleep).with(1) 349 | Process.expects(:waitall) 350 | @worker_class.child_pids << 12345 351 | @worker_class.child_pids << 12347 352 | assert_equal [12345, 12347], @worker_class.child_pids 353 | silenced do 354 | @worker_class.finish_forks 355 | end 356 | @worker_class.child_pids.clear 357 | end 358 | 359 | it "finish_forks should not do anything if is_child" do 360 | @worker_class.expects(:stop_forks).never 361 | @worker_class.is_child = true 362 | @worker_class.child_pids << 12345 363 | silenced do 364 | @worker_class.finish_forks 365 | end 366 | end 367 | 368 | end # cleanup on parent 369 | 370 | describe "practical tests" do 371 | 372 | before do 373 | @templogger = Templogger.new('/tmp') 374 | Backburner.configure { |config| config.logger = @templogger.logger } 375 | $worker_test_count = 0 376 | $worker_success = false 377 
| $worker_raise = false 378 | clear_jobs!('response') 379 | clear_jobs!('foo.bar.1', 'foo.bar.2', 'foo.bar.3', 'foo.bar.4', 'foo.bar.5', 'foo.bar.6', 'foo.bar.7') 380 | @worker_class.threads_number = 1 381 | @worker_class.garbage_after = 10 382 | 383 | silenced do 384 | @response_worker = @worker_class.new('response') 385 | @response_worker.watch_tube('demo.test.response') 386 | end 387 | 388 | @ignore_forks = true 389 | end 390 | 391 | after do 392 | @templogger.close 393 | clear_jobs!('response') 394 | clear_jobs!('foo.bar.1', 'foo.bar.2', 'foo.bar.3', 'foo.bar.4', 'foo.bar.5', 'foo.bar.6', 'foo.bar.7') 395 | @worker_class.threads_number = 1 396 | @worker_class.shutdown = true 397 | silenced do 398 | @worker_class.stop_forks 399 | Timeout::timeout(2) { sleep 0.1 while @worker_class.child_pids.length > 0 } 400 | @worker_class.kill_forks 401 | Timeout::timeout(2) { sleep 0.1 while @worker_class.child_pids.length > 0 } 402 | end 403 | end 404 | 405 | it "should work an enqueued job" do 406 | @worker = @worker_class.new('foo.bar.1') 407 | @worker.start(false) 408 | @worker_class.enqueue TestJobFork, [1, 2], :queue => "foo.bar.1" 409 | 410 | silenced do 411 | @templogger.wait_for_match(/Completed TestJobFork/m) 412 | @response_worker.work_one_job 413 | end 414 | assert_equal 3, $worker_test_count 415 | end # enqueue 416 | 417 | it "should work for an async job" do 418 | @worker = @worker_class.new('foo.bar.2') 419 | @worker.start(false) 420 | TestAsyncJobFork.async(:queue => 'foo.bar.2').foo(3, 5) 421 | silenced(2) do 422 | @templogger.wait_for_match(/Completed TestAsyncJobFork/m) 423 | @response_worker.work_one_job 424 | end 425 | assert_equal 15, $worker_test_count 426 | end # async 427 | 428 | it "should fail quietly if there's an argument error" do 429 | @worker = @worker_class.new('foo.bar.3') 430 | @worker.start(false) 431 | @worker_class.enqueue TestJobFork, ["bam", "foo", "bar"], :queue => "foo.bar.3" 432 | silenced(5) do 433 | 
@templogger.wait_for_match(/Finished TestJobFork.*attempt 1 of 1/m) 434 | end 435 | assert_match(/Exception ArgumentError/, @templogger.body) 436 | assert_equal 0, $worker_test_count 437 | end # fail, argument 438 | 439 | it "should support retrying jobs and burying" do 440 | Backburner.configure { |config| config.max_job_retries = 1; config.retry_delay = 0 } 441 | @worker = @worker_class.new('foo.bar.4') 442 | @worker.start(false) 443 | @worker_class.enqueue TestRetryJobFork, ["bam", "foo"], :queue => 'foo.bar.4' 444 | silenced(2) do 445 | @templogger.wait_for_match(/Finished TestRetryJobFork.*attempt 2 of 2/m) 446 | 2.times { @response_worker.work_one_job } 447 | end 448 | assert_equal 2, $worker_test_count 449 | assert_equal false, $worker_success 450 | end # retry, bury 451 | 452 | it "should support retrying jobs and succeeds" do 453 | Backburner.configure { |config| config.max_job_retries = 2; config.retry_delay = 0 } 454 | @worker = @worker_class.new('foo.bar.5') 455 | @worker.start(false) 456 | @worker_class.enqueue TestRetryJobFork, ["bam", "foo"], :queue => 'foo.bar.5' 457 | silenced(2) do 458 | @templogger.wait_for_match(/Completed TestRetryJobFork/m) 459 | 3.times { @response_worker.work_one_job } 460 | end 461 | assert_equal 3, $worker_test_count 462 | assert_equal true, $worker_success 463 | end # retrying, succeeds 464 | 465 | it "should support a multithreaded worker without deadlocks" do 466 | num_threads = 15 467 | num_jobs = 8 468 | num_jobs.times do |i| 469 | @worker_class.enqueue TestJobMultithreadFork, [6,2], :queue => 'foo.bar.6' 470 | end 471 | 472 | @worker_class.threads_number = num_threads 473 | @worker = @worker_class.new('foo.bar.6') 474 | @worker.start(false) 475 | 476 | silenced do 477 | @templogger.wait_for_match(/Completed TestJobMultithreadFork/m) 478 | num_jobs.times { @response_worker.work_one_job } 479 | end 480 | assert_equal num_jobs, $worker_test_count 481 | end # multithreaded 482 | end # practical tests 483 | end # forking 
and threading 484 | end # Backburner::Workers::ThreadsOnFork module 485 | --------------------------------------------------------------------------------