├── frontend ├── .gitignore ├── main-web-api │ ├── app.js │ ├── index.html │ └── demo.js ├── src │ ├── dumb │ │ ├── details │ │ │ ├── job.module.css │ │ │ ├── busy.js │ │ │ ├── dead.js │ │ │ ├── enqueued.js │ │ │ ├── job.js │ │ │ └── filter.js │ │ ├── util │ │ │ ├── format-duration.js │ │ │ ├── redirect.js │ │ │ ├── format-number.js │ │ │ ├── column-name.js │ │ │ └── breadcrumbs.js │ │ ├── dashboard │ │ │ ├── pulse.module.css │ │ │ ├── pulse.js │ │ │ ├── total.js │ │ │ ├── redis-info.js │ │ │ ├── index.js │ │ │ ├── chart.js │ │ │ └── table.js │ │ ├── routes.js │ │ ├── app.js │ │ └── bootstrap.scss │ ├── main.js │ └── web-api │ │ ├── details │ │ ├── busy.js │ │ ├── enqueued.js │ │ └── dead.js │ │ ├── index.js │ │ ├── dashboard.js │ │ └── client.js ├── main-dumb │ ├── index.html │ ├── demo.js │ └── app.js ├── package.json ├── webpack.config.js ├── webpack.config.dumb.js └── webpack.config.web-api.js ├── .gitignore ├── .rspec ├── lib ├── lowkiq │ ├── version.rb │ ├── splitters │ │ ├── default.rb │ │ └── by_node.rb │ ├── option_parser.rb │ ├── redis_info.rb │ ├── schedulers │ │ ├── seq.rb │ │ └── lag.rb │ ├── utils.rb │ ├── web │ │ ├── action.rb │ │ └── api.rb │ ├── worker.rb │ ├── web.rb │ ├── queue │ │ ├── shard_metrics.rb │ │ ├── fetch.rb │ │ ├── actions.rb │ │ ├── keys.rb │ │ ├── queue_metrics.rb │ │ ├── queries.rb │ │ └── queue.rb │ ├── script.rb │ ├── server.rb │ └── shard_handler.rb └── lowkiq.rb ├── doc └── dashboard.png ├── deploy.md ├── examples ├── benchmark │ ├── Gemfile │ ├── Readme.md │ ├── Gemfile.lock │ ├── sidekiq.rb │ └── lowkiq.rb └── dummy │ ├── Gemfile │ ├── Gemfile.lock │ └── lib │ └── app.rb ├── Rakefile ├── bin ├── setup └── console ├── Gemfile ├── spec ├── option_parser_spec.rb ├── lowkiq_spec.rb ├── redis_info_spec.rb ├── utils_spec.rb ├── spec_helper.rb ├── schedulers │ ├── lag_spec.rb │ └── seq_spec.rb ├── splitters │ ├── default_spec.rb │ └── by_node_spec.rb ├── queue │ ├── shard_metrics_spec.rb │ ├── queue_metrics_spec.rb │ ├── 
actions_spec.rb │ ├── queries_spec.rb │ └── queue_spec.rb ├── script_spec.rb ├── shard_handler_spec.rb └── web_spec.rb ├── CHANGELOG.md ├── docker-compose.yml ├── .github └── workflows │ └── rspec.yml ├── exe └── lowkiq ├── lowkiq.gemspec ├── LICENSE.md ├── README.ru.md └── README.md /frontend/.gitignore: -------------------------------------------------------------------------------- 1 | /node_modules 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .rspec_status 2 | /assets 3 | /*.gem 4 | /Gemfile.lock -------------------------------------------------------------------------------- /.rspec: -------------------------------------------------------------------------------- 1 | --format documentation 2 | --color 3 | --require spec_helper 4 | -------------------------------------------------------------------------------- /lib/lowkiq/version.rb: -------------------------------------------------------------------------------- 1 | module Lowkiq 2 | VERSION = "1.2.2" 3 | end 4 | -------------------------------------------------------------------------------- /doc/dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bia-technologies/lowkiq/HEAD/doc/dashboard.png -------------------------------------------------------------------------------- /deploy.md: -------------------------------------------------------------------------------- 1 | + bump version in `lib/lowkiq/version.rb` 2 | + build frontend: `npm run build` 3 | + build gem: `gem build lowkiq.gemspec` 4 | -------------------------------------------------------------------------------- /examples/benchmark/Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | 3 | gem "lowkiq", path: "../.." 
4 | gem 'sidekiq' 5 | 6 | gem "hiredis" 7 | -------------------------------------------------------------------------------- /frontend/main-web-api/app.js: -------------------------------------------------------------------------------- 1 | import factory from '../src/web-api'; 2 | export default factory('http://localhost:8081/api/web', ''); 3 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | require "bundler/gem_tasks" 2 | require "rspec/core/rake_task" 3 | 4 | RSpec::Core::RakeTask.new(:spec) 5 | 6 | task :default => :spec 7 | -------------------------------------------------------------------------------- /examples/dummy/Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | gem "rack" 4 | gem "lowkiq", path: "../.." 5 | gem "webrick", "~> 1.7" # for ruby 3 6 | -------------------------------------------------------------------------------- /bin/setup: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | IFS=$'\n\t' 4 | set -vx 5 | 6 | bundle install 7 | 8 | # Do any other automated setup that you need to do here 9 | -------------------------------------------------------------------------------- /frontend/src/dumb/details/job.module.css: -------------------------------------------------------------------------------- 1 | .table { 2 | table-layout: fixed; 3 | } 4 | 5 | .table .label { 6 | width: 140px; 7 | } 8 | 9 | .table .value { 10 | } 11 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | 3 | git_source(:github) {|repo_name| "https://github.com/#{repo_name}" } 4 | 5 | # Specify your gem's dependencies in lowkiq.gemspec 6 | 
gemspec 7 | -------------------------------------------------------------------------------- /frontend/src/dumb/util/format-duration.js: -------------------------------------------------------------------------------- 1 | import {fmt} from 'human-duration'; 2 | 3 | export default function formatDuration(seconds) { 4 | return fmt(seconds * 1000).segments(2); 5 | } 6 | -------------------------------------------------------------------------------- /spec/option_parser_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe Lowkiq::OptionParser do 2 | it "defaults" do 3 | args = ["-r" "./lib/app.rb"] 4 | expect( Lowkiq::OptionParser.call args ).to eq({require: "./lib/app.rb"}) 5 | end 6 | end 7 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # 1.1.0 2 | 3 | * Timestamps are float rather than int. #23 4 | * Due to problems with autoloading, you now need to manually assign a list of workers. #22 5 | 6 | ```ruby 7 | Lowkiq.workers = [ ATestWorker, ATest2Worker ] 8 | ``` 9 | -------------------------------------------------------------------------------- /spec/lowkiq_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe Lowkiq do 2 | # it "has a version number" do 3 | # expect(Lowkiq::VERSION).not_to be nil 4 | # end 5 | 6 | # it "does something useful" do 7 | # expect(false).to eq(true) 8 | # end 9 | end 10 | -------------------------------------------------------------------------------- /frontend/main-dumb/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Webpack App 6 | 7 | 8 |
9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /frontend/main-web-api/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Webpack App 6 | 7 | 8 |
9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /spec/redis_info_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe Lowkiq::RedisInfo do 2 | let(:redis_pool) { ConnectionPool.new(size: 5, timeout: 5) { Redis.new url: ENV['REDIS_URL'] } } 3 | 4 | let(:subject) { described_class.new redis_pool } 5 | 6 | it "call" do 7 | expect( subject.call ).to be 8 | end 9 | end 10 | -------------------------------------------------------------------------------- /frontend/src/main.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom'; 3 | 4 | import factory from './web-api'; 5 | 6 | const {lowkiqRoot} = window; 7 | const App = factory(`${lowkiqRoot}/api/web`, lowkiqRoot); 8 | 9 | const root = document.getElementById('root'); 10 | 11 | ReactDOM.render( , root ); 12 | -------------------------------------------------------------------------------- /examples/benchmark/Readme.md: -------------------------------------------------------------------------------- 1 | # Usage 2 | 3 | + `bundle exec ../../exe/lowkiq -r ./lowkiq.rb` 4 | + `bundle exec sidekiq -r ./sidekiq.rb` 5 | 6 | # Results 7 | 8 | 5 threads, 100_000 jobs 9 | 10 | + lowkiq default: 155 sec 11 | + lowkiq +seq: 146 sec 12 | + lowkiq +hiredis: 80 sec 13 | + lowkiq +seq +hiredis: 65 sec 14 | + sidekiq: 15 sec 15 | -------------------------------------------------------------------------------- /frontend/src/dumb/util/redirect.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Redirect } from "react-router-dom"; 3 | 4 | import { RoutesContext } from '../routes'; 5 | 6 | export function RedirectToDashboard() { 7 | return ( 8 | 9 | { 10 | routes => 11 | } 12 | 13 | ); 14 | } 15 | 
-------------------------------------------------------------------------------- /bin/console: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require "bundler/setup" 4 | require "lowkiq" 5 | 6 | # You can add fixtures and/or initialization code here to make experimenting 7 | # with your gem easier. You can also use a different console, if you like. 8 | 9 | # (If you use this, don't forget to add pry to your Gemfile!) 10 | # require "pry" 11 | # Pry.start 12 | 13 | require "irb" 14 | IRB.start(__FILE__) 15 | -------------------------------------------------------------------------------- /frontend/src/dumb/dashboard/pulse.module.css: -------------------------------------------------------------------------------- 1 | .container { 2 | width: 1rem; 3 | height: 1rem; 4 | } 5 | 6 | .container.pulse { 7 | animation: pulse 1s; 8 | } 9 | 10 | @keyframes pulse { 11 | from { 12 | box-shadow: 0 0 0 #212529; 13 | } 14 | 50% { 15 | box-shadow: 0 0 1rem #212529; 16 | } 17 | to { 18 | box-shadow: 0 0 0 #212529; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /lib/lowkiq/splitters/default.rb: -------------------------------------------------------------------------------- 1 | module Lowkiq 2 | module Splitters 3 | class Default 4 | def initialize(threads_per_node) 5 | @threads_per_node = threads_per_node 6 | end 7 | 8 | def call(shard_handlers) 9 | Utils::Array.new(shard_handlers) 10 | .in_transposed_groups(@threads_per_node) 11 | .reject(&:empty?) 
12 | end 13 | end 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /spec/utils_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe Lowkiq::Utils do 2 | describe "Array" do 3 | it "#in_transposed_groups" do 4 | groups = Lowkiq::Utils::Array.new((0...10)).in_transposed_groups(3) 5 | 6 | expect(groups).to eq([ 7 | [0,3,6,9], 8 | [1,4,7], 9 | [2,5,8], 10 | ]) 11 | end 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /frontend/src/dumb/util/format-number.js: -------------------------------------------------------------------------------- 1 | export default function formatNumber(number) { 2 | if (number < 1000) return number; 3 | 4 | const abbrev = ['', 'K', 'M', 'B', 'T']; 5 | const unrangifiedOrder = Math.floor(Math.log10(Math.abs(number)) / 3); 6 | const order = Math.max(0, Math.min(unrangifiedOrder, abbrev.length - 1)); 7 | const suffix = abbrev[order]; 8 | 9 | return (number / Math.pow(10, order * 3)).toPrecision(3) + suffix; 10 | } 11 | -------------------------------------------------------------------------------- /frontend/src/dumb/details/busy.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | import Breadcrumbs from '../util/breadcrumbs'; 4 | import Filter from './filter'; 5 | import Job from './job'; 6 | 7 | export default function Enqueued({name, items}) { 8 | return ( 9 |
10 | 11 | 13 | {items.map(item => { 14 | return ; 15 | })} 16 |
17 | ); 18 | } 19 | -------------------------------------------------------------------------------- /examples/dummy/Gemfile.lock: -------------------------------------------------------------------------------- 1 | PATH 2 | remote: ../.. 3 | specs: 4 | lowkiq (1.1.0) 5 | connection_pool (~> 2.2, >= 2.2.2) 6 | rack (>= 1.5.0) 7 | redis (>= 4.0.1, < 5) 8 | 9 | GEM 10 | remote: https://rubygems.org/ 11 | specs: 12 | connection_pool (2.2.5) 13 | rack (2.0.5) 14 | redis (4.5.1) 15 | webrick (1.7.0) 16 | 17 | PLATFORMS 18 | ruby 19 | 20 | DEPENDENCIES 21 | lowkiq! 22 | rack 23 | webrick (~> 1.7) 24 | 25 | BUNDLED WITH 26 | 2.2.22 27 | -------------------------------------------------------------------------------- /frontend/src/dumb/routes.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | export class Routes { 4 | constructor(rootUrl) { 5 | this.rootUrl = rootUrl; 6 | } 7 | 8 | dashboard() { 9 | return `${this.rootUrl}/`; 10 | } 11 | 12 | busy(queue) { 13 | return `${this.rootUrl}/${queue}/busy`; 14 | } 15 | 16 | enqueued(queue) { 17 | return `${this.rootUrl}/${queue}/enqueued`; 18 | } 19 | 20 | dead(queue) { 21 | return `${this.rootUrl}/${queue}/dead`; 22 | } 23 | } 24 | 25 | export const RoutesContext = React.createContext(); 26 | -------------------------------------------------------------------------------- /spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | require "bundler/setup" 2 | require "lowkiq" 3 | require "pry-byebug" 4 | 5 | # to test their usage 6 | Lowkiq.dump_error = -> (msg) { msg&.reverse } 7 | Lowkiq.load_error = -> (msg) { msg&.reverse } 8 | 9 | RSpec.configure do |config| 10 | # Enable flags like --only-failures and --next-failure 11 | config.example_status_persistence_file_path = ".rspec_status" 12 | 13 | # Disable RSpec exposing methods globally on `Module` and `main` 14 | config.disable_monkey_patching! 
15 | 16 | config.expect_with :rspec do |c| 17 | c.syntax = :expect 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /frontend/src/dumb/util/column-name.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | export default function ColumnName({name, order, onClick}) { 4 | const orderSymble = { 5 | desc: '▼', // по убыванию 6 | asc: '▲' // по возрастанию 7 | }; 8 | 9 | let presentedName = name; 10 | if (orderSymble[order]) { 11 | presentedName += ` ${orderSymble[order]}`; 12 | } 13 | 14 | return ( 15 | 29 | ); 30 | } 31 | 32 | function getVal(selected) { 33 | if (!selected) return ""; 34 | 35 | const {min, max, rev} = selected; 36 | if (min !== '-inf' && max === '+inf' && rev === false) 37 | return min; 38 | if (min === '-inf' && max !== '+inf' && rev === true) 39 | return max; 40 | return ""; 41 | } 42 | 43 | export default class Filter extends React.Component { 44 | constructor(props) { 45 | super(props); 46 | 47 | this.state = { 48 | value: getVal(props.selected) 49 | }; 50 | this.handleChange = this.handleChange.bind(this); 51 | } 52 | 53 | handleChange(e) { 54 | this.setState({value: e.target.value}); 55 | } 56 | 57 | render() { 58 | const {label, selected, type, onClick} = this.props; 59 | const {value} = this.state; 60 | 61 | return ( 62 |
63 | 64 | 65 | 66 |
67 |
68 |
72 |
73 |
77 |
78 | 83 | 84 |
85 |
94 |
95 |
96 |
97 | ); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /lib/lowkiq/shard_handler.rb: -------------------------------------------------------------------------------- 1 | module Lowkiq 2 | class ShardHandler 3 | def self.build_many(worker, wrapper) 4 | (0...worker.shards_count).map do |shard_index| 5 | new shard_index, worker, wrapper 6 | end 7 | end 8 | 9 | attr_reader :shard_index, :queue_name, :worker 10 | 11 | def initialize(shard_index, worker, wrapper) 12 | @shard_index = shard_index 13 | @queue_name = worker.queue_name 14 | @worker = worker 15 | @wrapper = wrapper 16 | @timestamp = Utils::Timestamp.method(:now) 17 | @queue = Queue::Queue.new Lowkiq.server_redis_pool, 18 | worker.queue_name, 19 | worker.shards_count 20 | end 21 | 22 | def process 23 | data = @queue.pop @shard_index, limit: @worker.batch_size 24 | 25 | return false if data.empty? 26 | 27 | begin 28 | batch = batch_from_data data 29 | 30 | @wrapper.call @worker, batch do 31 | @worker.perform batch 32 | end 33 | 34 | @queue.ack @shard_index, data, :success 35 | true 36 | rescue => ex 37 | fail! data, ex 38 | back, morgue = separate data 39 | 40 | @queue.push_back back 41 | @queue.push_to_morgue morgue 42 | @queue.ack @shard_index, data, :fail 43 | @worker.retries_exhausted morgue 44 | false 45 | end 46 | end 47 | 48 | def restore 49 | data = @queue.processing_data @shard_index 50 | return if data.nil? 51 | @queue.push_back data 52 | @queue.ack @shard_index, data 53 | end 54 | 55 | private 56 | 57 | def batch_from_data(data) 58 | data.each_with_object({}) do |job, h| 59 | id = job.fetch(:id) 60 | payloads = job.fetch(:payloads).map(&:first) 61 | h[id] = payloads 62 | end 63 | end 64 | 65 | def fail!(data, ex) 66 | data.map! 
do |job| 67 | job[:retry_count] += 1 68 | job[:perform_in] = @timestamp.call + @worker.retry_in(job[:retry_count]) 69 | job[:error] = Lowkiq.format_error.call(ex) 70 | job 71 | end 72 | end 73 | 74 | def separate(data) 75 | back = [] 76 | morgue = [] 77 | 78 | data.each do |job| 79 | id = job.fetch(:id) 80 | payloads = job.fetch(:payloads) 81 | retry_count = job.fetch(:retry_count) 82 | perform_in = job.fetch(:perform_in) 83 | error = job.fetch(:error, nil) 84 | 85 | morgue_payload = payloads.shift if retry_count >= @worker.max_retry_count 86 | 87 | if payloads.any? 88 | job = { 89 | id: id, 90 | payloads: payloads, 91 | retry_count: morgue_payload ? 0 : retry_count, 92 | perform_in: morgue_payload ? @timestamp.call : perform_in, 93 | error: error, 94 | }.compact 95 | back << job 96 | end 97 | 98 | if morgue_payload 99 | job = { 100 | id: id, 101 | payloads: [morgue_payload], 102 | error: error, 103 | }.compact 104 | morgue << job 105 | end 106 | end 107 | 108 | [back, morgue] 109 | end 110 | end 111 | end 112 | -------------------------------------------------------------------------------- /frontend/src/dumb/dashboard/table.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Link } from "react-router-dom"; 3 | import { OverlayTrigger, Tooltip } from 'react-bootstrap'; 4 | 5 | import formatNumber from '../util/format-number'; 6 | import formatDuration from '../util/format-duration'; 7 | import {RoutesContext} from '../routes'; 8 | 9 | function CamelCaseBreaker({val}) { 10 | return val 11 | .replace(/([a-z0-9:])([A-Z])/g, '$1 $2') 12 | .split(/\s/) 13 | .reduce((acc, word) => acc.concat(word, ), []); 14 | } 15 | 16 | function FormattedNumber({val}) { 17 | return ( 18 | {val.toLocaleString()}} > 19 | 20 | {formatNumber(val)} 21 | 22 | 23 | ); 24 | } 25 | 26 | function FormattedDuration({val}) { 27 | return ( 28 | {val.toLocaleString()}} > 29 | 30 | {formatDuration(val)} 31 | 32 | 33 | ); 34 
| } 35 | 36 | function Row({name, lag, processed, failed, busy, enqueued, fresh, retries, dead, routes}) { 37 | return ( 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | {formatNumber(busy)} 52 | 53 | 54 | 55 | 58 | {formatNumber(enqueued)} 59 | 60 | 61 | 62 | 66 | {formatNumber(fresh)} 67 | 68 | 69 | 70 | 74 | {formatNumber(retries)} 75 | 76 | 77 | 78 | 79 | {formatNumber(dead)} 80 | 81 | 82 | 83 | ); 84 | } 85 | 86 | export default function Table({queues}) { 87 | return ( 88 |
89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | { 105 | queues.map( queue => { 106 | return 107 | {routes => } 108 | ; 109 | }) 110 | } 111 | 112 |
WorkerProcessedFailedLagBusyEnqueuedFreshRetriesDead
113 |
114 | ); 115 | } 116 | -------------------------------------------------------------------------------- /spec/queue/queue_metrics_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe Lowkiq::Queue::QueueMetrics do 2 | let(:redis_pool) { ConnectionPool.new(size: 5, timeout: 5) { Redis.new url: ENV['REDIS_URL'] } } 3 | let(:shards_count) { 1 } 4 | let(:shard_index) { 0 } 5 | 6 | let(:queue_name) { 'Test' } 7 | let(:queue) { Lowkiq::Queue::Queue.new redis_pool, queue_name, shards_count } 8 | let(:queue_metrics) do 9 | Lowkiq::Queue::QueueMetrics 10 | .new(redis_pool) 11 | .call([queue_name]) 12 | .first 13 | end 14 | 15 | before(:each) { redis_pool.with(&:flushdb) } 16 | before(:each) { redis_pool.with { |r| Lowkiq::Script.load! r } } 17 | before(:each) { $now = Lowkiq::Utils::Timestamp.now } 18 | before(:each) do 19 | allow(Lowkiq::Utils::Timestamp).to receive(:now) { $now } 20 | end 21 | 22 | describe 'queue_metrics' do 23 | describe 'empty' do 24 | it "length" do 25 | expect( queue_metrics.length ).to be(0) 26 | end 27 | 28 | it "morgue_length" do 29 | expect( queue_metrics.morgue_length ).to be(0) 30 | end 31 | 32 | it 'lag' do 33 | expect( queue_metrics.lag ).to be(0.0) 34 | end 35 | 36 | it 'processed' do 37 | expect( queue_metrics.processed ).to be(0) 38 | end 39 | 40 | it 'failed' do 41 | expect( queue_metrics.failed ).to be(0) 42 | end 43 | 44 | it 'busy' do 45 | expect( queue_metrics.busy ).to be(0) 46 | end 47 | end 48 | 49 | describe 'filled' do 50 | it 'length' do 51 | queue.push( 52 | [ 53 | { id: '0', payload: "v1" }, 54 | ] 55 | ) 56 | 57 | expect( queue_metrics.length ).to be(1) 58 | end 59 | 60 | it 'mougue_length' do 61 | queue.push_to_morgue( 62 | [ 63 | { id: '1', payloads: [['v1', $now]] }, 64 | ] 65 | ) 66 | 67 | expect( queue_metrics.morgue_length ).to be(1) 68 | end 69 | 70 | it 'lag' do 71 | lag = 10.0 72 | queue.push( 73 | [ 74 | { id: '1', perform_in: $now - lag, payload: 'v1' }, 75 | 
{ id: '2', perform_in: $now, payload: 'v1' }, 76 | ] 77 | ) 78 | 79 | expect( queue_metrics.lag ).to be(lag) 80 | end 81 | 82 | it 'lag for not ready' do 83 | queue.push( 84 | [ 85 | { id: '1', perform_in: $now + 10, payload: 'v1' }, 86 | ] 87 | ) 88 | 89 | expect( queue_metrics.lag ).to be(0.0) 90 | end 91 | 92 | it 'processed' do 93 | queue.push( 94 | [ 95 | { id: '1', payload: 'v1' }, 96 | ] 97 | ) 98 | data = queue.pop shard_index, limit: 10 99 | queue.ack shard_index, data, :success 100 | 101 | expect( queue_metrics.processed ).to be(1) 102 | end 103 | 104 | it 'failed' do 105 | queue.push( 106 | [ 107 | { id: '1', payload: 'v1' }, 108 | ] 109 | ) 110 | data = queue.pop shard_index, limit: 10 111 | queue.ack shard_index, data, :fail 112 | 113 | expect( queue_metrics.failed ).to be(1) 114 | end 115 | 116 | it 'busy' do 117 | queue.push( 118 | [ 119 | { id: '1', payload: 'v1' }, 120 | ] 121 | ) 122 | queue.pop shard_index, limit: 10 123 | 124 | expect( queue_metrics.busy ).to be(1) 125 | end 126 | end 127 | end 128 | end 129 | -------------------------------------------------------------------------------- /lib/lowkiq.rb: -------------------------------------------------------------------------------- 1 | require "connection_pool" 2 | require "redis" 3 | require "zlib" 4 | require "json" 5 | require "ostruct" 6 | require "optparse" 7 | require "digest" 8 | 9 | require "lowkiq/version" 10 | require "lowkiq/utils" 11 | require "lowkiq/script" 12 | 13 | require "lowkiq/option_parser" 14 | 15 | require "lowkiq/splitters/default" 16 | require "lowkiq/splitters/by_node" 17 | 18 | require "lowkiq/schedulers/lag" 19 | require "lowkiq/schedulers/seq" 20 | 21 | require "lowkiq/server" 22 | 23 | require "lowkiq/queue/keys" 24 | require "lowkiq/queue/fetch" 25 | require "lowkiq/queue/queue" 26 | require "lowkiq/queue/queue_metrics" 27 | require "lowkiq/queue/shard_metrics" 28 | require "lowkiq/queue/queries" 29 | require "lowkiq/queue/actions" 30 | require "lowkiq/worker" 
31 | require "lowkiq/shard_handler" 32 | 33 | require "lowkiq/redis_info" 34 | 35 | require "lowkiq/web" 36 | 37 | module Lowkiq 38 | class << self 39 | attr_accessor :poll_interval, :threads_per_node, 40 | :redis, :client_pool_size, :pool_timeout, 41 | :server_middlewares, :client_middlewares, :on_server_init, 42 | :build_scheduler, :build_splitter, 43 | :last_words, 44 | :dump_payload, :load_payload, 45 | :format_error, :dump_error, :load_error, 46 | :workers 47 | 48 | def server_redis_pool 49 | @server_redis_pool ||= ConnectionPool.new(size: threads_per_node, timeout: pool_timeout, &redis) 50 | end 51 | 52 | def client_redis_pool 53 | @client_redis_pool ||= ConnectionPool.new(size: client_pool_size, timeout: pool_timeout, &redis) 54 | end 55 | 56 | def middleware_wrapper(middlewares) 57 | null = -> (worker, batch, &block) { block.call } 58 | middlewares.reduce(null) do |wrapper, m| 59 | -> (worker, batch, &block) do 60 | wrapper.call worker, batch do 61 | m.call worker, batch, &block 62 | end 63 | end 64 | end 65 | end 66 | 67 | def client_wrapper 68 | @client_wrapper ||= self.middleware_wrapper(self.client_middlewares) 69 | end 70 | 71 | def server_wrapper 72 | @server_wrapper ||= self.middleware_wrapper(self.server_middlewares) 73 | end 74 | 75 | def shard_handlers 76 | self.workers.flat_map do |w| 77 | ShardHandler.build_many w, self.server_wrapper 78 | end 79 | end 80 | 81 | def build_lag_scheduler 82 | Schedulers::Lag.new( 83 | ->() { sleep Lowkiq.poll_interval }, 84 | Queue::ShardMetrics.new(self.server_redis_pool) 85 | ) 86 | end 87 | 88 | def build_seq_scheduler 89 | Schedulers::Seq.new( 90 | ->() { sleep Lowkiq.poll_interval } 91 | ) 92 | end 93 | 94 | def build_default_splitter 95 | Lowkiq::Splitters::Default.new Lowkiq.threads_per_node 96 | end 97 | 98 | def build_by_node_splitter(number_of_nodes, node_number) 99 | Lowkiq::Splitters::ByNode.new( 100 | number_of_nodes, 101 | node_number, 102 | Lowkiq.threads_per_node, 103 | ) 104 | end 105 | end 106 | 
107 | # defaults 108 | self.poll_interval = 1 109 | self.threads_per_node = 5 110 | self.redis = ->() { Redis.new url: ENV.fetch('REDIS_URL') } 111 | self.client_pool_size = 5 112 | self.pool_timeout = 5 113 | self.server_middlewares = [] 114 | self.client_middlewares = [] 115 | self.on_server_init = ->() {} 116 | self.build_scheduler = ->() { Lowkiq.build_lag_scheduler } 117 | self.build_splitter = ->() { Lowkiq.build_default_splitter } 118 | self.last_words = ->(ex) {} 119 | self.dump_payload = ::Marshal.method :dump 120 | self.load_payload = ::Marshal.method :load 121 | self.format_error = -> (error) { error.message } 122 | self.dump_error = -> (msg) { msg } 123 | self.load_error = -> (msg) { msg } 124 | self.workers = [] 125 | end 126 | -------------------------------------------------------------------------------- /lib/lowkiq/queue/queries.rb: -------------------------------------------------------------------------------- 1 | module Lowkiq 2 | module Queue 3 | class Queries 4 | def initialize(redis_pool, name) 5 | @pool = redis_pool 6 | @keys = Keys.new name 7 | @fetch = Fetch.new name 8 | end 9 | 10 | def range_by_id(min, max, limit: 10) 11 | @pool.with do |redis| 12 | ids = redis.zrangebylex( 13 | @keys.all_ids_lex_zset, 14 | min, max, 15 | limit: [0, limit] 16 | ) 17 | _fetch redis, ids 18 | end 19 | end 20 | 21 | def rev_range_by_id(max, min, limit: 10) 22 | @pool.with do |redis| 23 | ids = redis.zrevrangebylex( 24 | @keys.all_ids_lex_zset, 25 | max, min, 26 | limit: [0, limit] 27 | ) 28 | _fetch redis, ids 29 | end 30 | end 31 | 32 | def range_by_perform_in(min, max, limit: 10) 33 | @pool.with do |redis| 34 | ids = redis.zrangebyscore( 35 | @keys.all_ids_scored_by_perform_in_zset, 36 | min, max, 37 | limit: [0, limit] 38 | ) 39 | _fetch redis, ids 40 | end 41 | end 42 | 43 | def rev_range_by_perform_in(max, min, limit: 10) 44 | @pool.with do |redis| 45 | ids = redis.zrevrangebyscore( 46 | @keys.all_ids_scored_by_perform_in_zset, 47 | max, min, 48 | 
limit: [0, limit] 49 | ) 50 | _fetch redis, ids 51 | end 52 | end 53 | 54 | def range_by_retry_count(min, max, limit: 10) 55 | @pool.with do |redis| 56 | ids = redis.zrangebyscore( 57 | @keys.all_ids_scored_by_retry_count_zset, 58 | min, max, 59 | limit: [0, limit] 60 | ) 61 | _fetch redis, ids 62 | end 63 | end 64 | 65 | def rev_range_by_retry_count(max, min, limit: 10) 66 | @pool.with do |redis| 67 | ids = redis.zrevrangebyscore( 68 | @keys.all_ids_scored_by_retry_count_zset, 69 | max, min, 70 | limit: [0, limit] 71 | ) 72 | _fetch redis, ids 73 | end 74 | end 75 | 76 | def morgue_range_by_id(min, max, limit: 10) 77 | @pool.with do |redis| 78 | ids = redis.zrangebylex( 79 | @keys.morgue_all_ids_lex_zset, 80 | min, max, 81 | limit: [0, limit] 82 | ) 83 | _morgue_fetch redis, ids 84 | end 85 | end 86 | 87 | def morgue_rev_range_by_id(max, min, limit: 10) 88 | @pool.with do |redis| 89 | ids = redis.zrevrangebylex( 90 | @keys.morgue_all_ids_lex_zset, 91 | max, min, 92 | limit: [0, limit] 93 | ) 94 | _morgue_fetch redis, ids 95 | end 96 | end 97 | 98 | def morgue_range_by_updated_at(min, max, limit: 10) 99 | @pool.with do |redis| 100 | ids = redis.zrangebyscore( 101 | @keys.morgue_all_ids_scored_by_updated_at_zset, 102 | min, max, 103 | limit: [0, limit] 104 | ) 105 | _morgue_fetch redis, ids 106 | end 107 | end 108 | 109 | def morgue_rev_range_by_updated_at(max, min, limit: 10) 110 | @pool.with do |redis| 111 | ids = redis.zrevrangebyscore( 112 | @keys.morgue_all_ids_scored_by_updated_at_zset, 113 | max, min, 114 | limit: [0, limit] 115 | ) 116 | _morgue_fetch redis, ids 117 | end 118 | end 119 | 120 | def fetch(ids) 121 | @pool.with do |redis| 122 | _fetch redis, ids 123 | end 124 | end 125 | 126 | def morgue_fetch(ids) 127 | @pool.with do |redis| 128 | _morgue_fetch redis, ids 129 | end 130 | end 131 | 132 | private 133 | 134 | def _fetch(redis, ids) 135 | @fetch.fetch(redis, :multi, ids) 136 | end 137 | 138 | def _morgue_fetch(redis, ids) 139 | 
# Integration-style spec for Lowkiq::ShardHandler: pushes jobs through a
# real Redis-backed queue and checks how #process and #restore move jobs
# between the queue, the per-shard processing set and the morgue.
RSpec.describe Lowkiq::ShardHandler do
  # Worker callbacks delegate to global doubles ($retry_in, $perform,
  # $retries_exhausted) because these module-level methods cannot see
  # RSpec lets/doubles directly.
  module ATestWorker
    extend Lowkiq::Worker

    self.shards_count = 1

    def self.retry_in(count)
      $retry_in.call(count)
    end

    def self.perform(batch)
      $perform.call(batch)
    end

    def self.retries_exhausted(batch)
      $retries_exhausted.call(batch)
    end
  end

  # Fresh Redis DB and reloaded Lua scripts for every example.
  before(:each) { Lowkiq.server_redis_pool.with(&:flushdb) }
  before(:each) { Lowkiq.server_redis_pool.with { |r| Lowkiq::Script.load! r } }

  # Freeze "now" so perform_in / score / updated_at expectations are
  # deterministic within an example.
  before(:each) { $now = Lowkiq::Utils::Timestamp.now }
  before(:each) do
    allow(Lowkiq::Utils::Timestamp).to receive(:now) { $now }
  end

  before(:each) { $id = double('id') }
  before(:each) { $retry_in = double('retry_in') }
  before(:each) { $perform = double('perform') }
  before(:each) { $retries_exhausted = double('retries_exhausted') }

  let(:worker) { ATestWorker }
  let(:queue) { worker.client_queue }
  let(:queries) { worker.client_queries }
  # Pass-through middleware wrapper: just runs the handler block.
  let(:wrapper) { -> (worker, batch, &block) { block.call } }
  let(:shards) { described_class.build_many worker, wrapper }
  let(:shard_index) { 0 }
  let(:shard) { shards[shard_index] }

  context '#process' do
    # Successful perform: the job leaves both the queue and the
    # processing set; retry_in is never consulted.
    it 'normal' do
      payload = "payload"

      expect($retry_in).to_not receive(:call)
      expect($perform).to receive(:call).with({ '1' => [payload] })

      worker.perform_async(
        [
          { id: 1, payload: payload },
        ]
      )

      expect( shard.process ).to be(true)
      expect( queries.fetch ['1'] ).to be_empty
      expect( queue.processing_data shard_index ).to be_empty
    end

    # Failing perform (StandardError): the job returns to the queue with
    # retry_count bumped to 0, perform_in pushed retry_in(0)=10 seconds
    # into the future, and both payloads preserved in score order.
    it 'error' do
      expect($retry_in).to receive(:call).with(0).and_return(10)
      expect($perform).to receive(:call).at_least(:once).and_raise(StandardError.new "error")
      expect($retries_exhausted).to receive(:call).at_most(:once).with([])

      worker.perform_async(
        [
          { id: 1, payload: "v1", score: 0 },
          { id: 1, payload: "v2", score: 1 },
        ]
      )

      expect( shard.process ).to be(false)
      expect( queue.processing_data shard_index ).to be_empty

      expected_in_queue = {
        id: '1', retry_count: 0, perform_in: $now + 10, error: "error",
        payloads: [['v1', 0],
                   ['v2', 1]],
      }

      expect( queries.fetch ['1'] ).to contain_exactly(expected_in_queue)
      expect( queries.morgue_fetch ['1'] ).to be_empty
    end

    # Retries exhausted: the payload that ran out of retries moves to the
    # morgue (and is reported to retries_exhausted), while the fresher
    # payload is re-queued with retry_count reset to 0.
    it 'morgue' do
      expect($retry_in).to receive(:call).with( worker.max_retry_count ).and_return(10)
      expect($perform).to receive(:call).at_least(:once).and_raise(StandardError.new "error")

      worker.perform_async(
        [
          { id: 1, payload: "v1", score: 0, retry_count: worker.max_retry_count - 1 },
          { id: 1, payload: "v2", score: 1, retry_count: worker.max_retry_count - 1 },
        ]
      )

      expected_in_retries_exhausted = [{
        id: "1", payloads: [["v1", 0]], error: "error"
      }]

      expect($retries_exhausted).to receive(:call).at_most(:once).with(expected_in_retries_exhausted)

      expect( shard.process ).to be(false)
      expect( queue.processing_data shard_index ).to be_empty

      expected_in_queue = {
        id: '1', retry_count: 0, perform_in: $now, error: "error",
        payloads: [['v2', 1]],
      }

      expect( queries.fetch ['1'] ).to contain_exactly(expected_in_queue)

      expected_in_morgue = {
        id: '1', payloads: [['v1', 0]], updated_at: $now, error: "error",
      }

      expect( queries.morgue_fetch ['1'] ).to contain_exactly(expected_in_morgue)
    end
  end

  # A non-StandardError escapes #process and leaves the batch stuck in
  # the processing set; #restore must push it back into the queue with a
  # fresh retry_count of -1.
  it '#restore' do
    expect($perform).to receive(:call).and_raise(Exception.new "fatal error")

    worker.perform_async(
      [
        { id: 1, payload: "v1" },
      ]
    )

    expect{ shard.process }.to raise_error(Exception, "fatal error")
    expect( queue.processing_data shard_index ).to be_any

    shard.restore

    expect( queue.processing_data shard_index ).to be_empty

    expected = {
      id: '1', retry_count: -1, perform_in: $now,
      payloads: [ ['v1', $now] ],
    }
    expect( queries.fetch ['1'] ).to contain_exactly(expected)
  end
end
"redis://asfdasfd-asdfasdf-asdf", 56 | version: "4.4.4", 57 | uptime_in_days: 10, 58 | connected_clients: 100, 59 | used_memory_human: "100M", 60 | used_memory_peak_human: "500M" 61 | }; 62 | } 63 | 64 | render() { 65 | return ; 66 | } 67 | } 68 | 69 | function BusyManager(params) { 70 | const queue = params.match.params.name; 71 | const items = [ 72 | { 73 | id: "1918f2e4-d5ee-4ffd-bbd6-6faf4020c97d", 74 | perform_in: 1541763528, 75 | retry_count: -1, 76 | error: "some error", 77 | payloads: [ 78 | ["foo bar", 1541763533], 79 | ["foo buzz", 1541763534], 80 | ] 81 | } 82 | ]; 83 | return ; 84 | } 85 | 86 | function EnqueuedManager(params) { 87 | const selectedFilter = _get( 88 | params, 89 | 'location.state.selectedFilter', 90 | {id: {min: '-inf', max: '+inf', rev: false}} 91 | ); 92 | 93 | const queue = params.match.params.name; 94 | const items = [ 95 | { 96 | id: "1918f2e4-d5ee-4ffd-bbd6-6faf4020c97d", 97 | perform_in: 1541763528, 98 | retry_count: 10, 99 | error: "some error", 100 | payloads: [ 101 | ["foo bar", 1541763533], 102 | ["foo buzz", 1541763534], 103 | ] 104 | } 105 | ]; 106 | return ( 107 | console.log('perfrom all jobs now')} 111 | onKillAllFailedJobs={() => console.log('kill all failed jobs')} 112 | onDeleteAllFailedJobs={() => console.log('delete all failed jobs')} 113 | selectedFilter={selectedFilter} 114 | onFilter={(...args) => console.log(args)} /> 115 | ); 116 | } 117 | 118 | function DeadManager(params) { 119 | const queue = params.match.params.name; 120 | const items = [ 121 | { 122 | id: "1918f2e4-d5ee-4ffd-bbd6-6faf4020c97d", 123 | updated_at: 1541763528, 124 | payloads: [ 125 | ["foo bar", 1541763528], 126 | ["Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
module Lowkiq
  module Web
    # HTTP JSON API backing the dashboard and the machine-readable stats
    # endpoint.  Each route is an Action (HTTP method + path segments +
    # handler block); Api.call tries them in order and falls through to
    # a plain 404 Rack response.
    module Api
      GET = 'GET'.freeze
      POST = 'POST'.freeze

      ACTIONS = [
        # GET /v1/stats — totals plus per-worker length/morgue_length/lag.
        Action.new(GET, ['v1', 'stats']) do |_, _|
          worker_names = Lowkiq.workers.map(&:name)
          queue_names = Lowkiq.workers.map(&:queue_name)

          metrics = Lowkiq::Queue::QueueMetrics
            .new(Lowkiq.client_redis_pool)
            .call(queue_names)
          by_worker = worker_names.zip(metrics).each_with_object({}) do |(name, m), o|
            o[name] = m.to_h.slice(:length, :morgue_length, :lag)
          end
          # With zero workers reduce(&:+)/max return nil; to_i/to_f
          # coerce that to 0 / 0.0 instead of raising.
          total = {
            length: metrics.map(&:length).reduce(&:+).to_i,
            morgue_length: metrics.map(&:morgue_length).reduce(&:+).to_i,
            lag: metrics.map(&:lag).max.to_f,
          }
          {
            total: total,
            by_worker: by_worker,
          }
        end,

        # GET /web/dashboard — one row of metrics per worker plus Redis
        # server info, matching the shape the frontend expects.
        Action.new(GET, ['web', 'dashboard']) do |_, _|
          worker_names = Lowkiq.workers.map(&:name)
          queue_names = Lowkiq.workers.map(&:queue_name)

          metrics = Lowkiq::Queue::QueueMetrics
            .new(Lowkiq.client_redis_pool)
            .call(queue_names)

          queues = worker_names.zip(metrics).map do |(name, m)|
            {
              name: name,
              lag: m.lag,
              processed: m.processed,
              failed: m.failed,
              busy: m.busy,
              enqueued: m.length, # fresh + retries
              fresh: m.fresh,
              retries: m.retries,
              dead: m.morgue_length,
            }
          end

          redis_info = Lowkiq::RedisInfo.new(Lowkiq.client_redis_pool).call

          {
            queues: queues,
            redis_info: redis_info,
          }
        end,

        # Forward range queries: min/max bounds come from query params
        # and are forwarded verbatim to the worker's Queries object.
        %w[ range_by_id range_by_perform_in range_by_retry_count
            morgue_range_by_id morgue_range_by_updated_at
        ].map do |method|
          Action.new(GET, ['web', :worker, method]) do |req, match|
            min = req.params['min']
            max = req.params['max']

            queries = match_to_worker(match).client_queries
            queries.public_send method, min, max, limit: 100
          end
        end,

        # Reverse range queries: note the Queries rev_* methods take
        # (max, min) in that order.
        %w[ rev_range_by_id rev_range_by_perform_in rev_range_by_retry_count
            morgue_rev_range_by_id morgue_rev_range_by_updated_at
        ].map do |method|
          Action.new(GET, ['web', :worker, method]) do |req, match|
            min = req.params['min']
            max = req.params['max']

            queries = match_to_worker(match).client_queries
            queries.public_send method, max, min, limit: 100
          end
        end,

        # Jobs currently being processed, flattened across every shard
        # of the worker's queue.
        Action.new(GET, ['web', :worker, 'processing_data']) do |_, match|
          queue = match_to_worker(match).client_queue

          queue.shards.flat_map do |shard|
            queue.processing_data shard
          end
        end,

        # Mutating endpoints run the operation in a fire-and-forget
        # thread and answer :ok immediately.
        # NOTE(review): exceptions raised inside these threads are never
        # reported to the HTTP caller — confirm this is intentional.
        %w[ morgue_delete ].map do |method|
          Action.new(POST, ['web', :worker, method]) do |req, match|
            ids = req.params['ids']
            Thread.new do
              queue = match_to_worker(match).client_queue
              queue.public_send method, ids
            end
            :ok
          end
        end,

        %w[ morgue_queue_up ].map do |method|
          Action.new(POST, ['web', :worker, method]) do |req, match|
            ids = req.params['ids']
            Thread.new do
              actions = match_to_worker(match).client_actions
              actions.public_send method, ids
            end
            :ok
          end
        end,

        # Bulk operations that take no id list.
        %w[ morgue_queue_up_all_jobs morgue_delete_all_jobs
            perform_all_jobs_now kill_all_failed_jobs delete_all_failed_jobs].map do |method|
          Action.new(POST, ['web', :worker, method]) do |_, match|
            Thread.new do
              actions = match_to_worker(match).client_actions
              actions.public_send method
            end
            :ok
          end
        end,

      ].flatten

      # Resolves the :worker path segment captured by Action into a
      # registered worker class.
      # NOTE(review): returns nil for an unknown worker name, so the
      # handlers above would raise NoMethodError instead of answering
      # 404 — confirm whether Action rescues that case.
      def self.match_to_worker(match)
        Lowkiq.workers.find { |w| w.name == match[:worker] }
      end

      # Rack entry point: the first action that returns a truthy
      # response wins; otherwise 404.
      def self.call(env)
        req = Rack::Request.new env

        ACTIONS.each do |action|
          resp = action.call req
          return resp if resp
        end

        [404, {}, ["not found"]]
      end
    end
  end
end
require "rack/test"

# Rack::Test coverage of every route exposed by Lowkiq::Web: the
# dashboard page data, /v1/stats, the range-query endpoints and the
# mutating "operations" endpoints.
RSpec.describe Lowkiq::Web do
  include Rack::Test::Methods

  # Minimal worker with a no-op perform, registered per example below.
  module ApiTestWorker
    extend Lowkiq::Worker

    def self.perform(batch)
    end
  end

  def app
    described_class
  end

  def json_last_response
    JSON.parse(last_response.body)
  end

  # Swap in the test worker for each example and always restore the
  # previous worker list, even on failure.
  around(:each) do |t|
    saved = Lowkiq.workers
    Lowkiq.workers = [ApiTestWorker]
    begin
      t.call
    ensure
      Lowkiq.workers = saved
    end
  end

  before(:each) { Lowkiq.server_redis_pool.with(&:flushdb) }
  before(:each) { Lowkiq.server_redis_pool.with { |r| Lowkiq::Script.load! r } }
  # Enqueue a single job so list endpoints have one item to report.
  before(:each) { ApiTestWorker.perform_async [ {id: 1, payload: "v1"} ] }

  it 'dashboard' do
    get '/api/web/dashboard'

    expect( last_response.status ).to eq(200)
  end

  context 'api' do
    it 'stats' do
      get '/api/v1/stats'

      expect( last_response.status ).to eq(200)
      expect( json_last_response['total']['length'] ).to be(1)
      expect( json_last_response['total']['morgue_length'] ).to be(0)
      expect( json_last_response['total']['lag'] ).to be_within(0.1).of(0.0)

      expect( json_last_response['by_worker']['ApiTestWorker']['length'] ).to be(1)
      expect( json_last_response['by_worker']['ApiTestWorker']['morgue_length'] ).to be(0)
      expect( json_last_response['by_worker']['ApiTestWorker']['lag'] ).to be_within(0.1).of(0.0)
    end
  end

  context 'web_api' do
    # %2B is `+`
    it 'range_by_id' do
      get "/api/web/ApiTestWorker/range_by_id", min: '-', max: '+'

      expect( last_response.status ).to eq(200)
      expect( json_last_response.length ).to be(1)
    end

    it 'rev_range_by_id' do
      get "/api/web/ApiTestWorker/rev_range_by_id", max: '+', min: '-'

      expect( last_response.status ).to eq(200)
      expect( json_last_response.length ).to be(1)
    end

    it 'range_by_perform_in' do
      get "/api/web/ApiTestWorker/range_by_perform_in", min: '-inf', max: '+inf'

      expect( last_response.status ).to eq(200)
      expect( json_last_response.length ).to be(1)
    end

    it 'rev_range_by_perform_in' do
      get "/api/web/ApiTestWorker/rev_range_by_perform_in", max: '+inf', min: '-inf'

      expect( last_response.status ).to eq(200)
      expect( json_last_response.length ).to be(1)
    end

    it 'range_by_retry_count' do
      get "/api/web/ApiTestWorker/range_by_retry_count", min: '-inf', max: '+inf'

      expect( last_response.status ).to eq(200)
      expect( json_last_response.length ).to be(1)
    end

    it 'rev_range_by_retry_count' do
      get "/api/web/ApiTestWorker/rev_range_by_retry_count", max: '+inf', min: '-inf'

      expect( last_response.status ).to eq(200)
      expect( json_last_response.length ).to be(1)
    end

    # Only checks the endpoint answers valid JSON; the enqueued job is
    # not being processed, so no content assertion is possible here.
    it 'processing_data' do
      get '/api/web/ApiTestWorker/processing_data'
      expect( last_response.status ).to eq(200)
      expect { json_last_response }.to_not raise_error
    end

    it 'morgue_range_by_id' do
      get "/api/web/ApiTestWorker/morgue_range_by_id", min: '-', max: '+'

      expect( last_response.status ).to eq(200)
      expect( json_last_response.length ).to be(0)
    end

    it 'morgue_rev_range_by_id' do
      get "/api/web/ApiTestWorker/morgue_rev_range_by_id", max: '+', min: '-'

      expect( last_response.status ).to eq(200)
      expect( json_last_response.length ).to be(0)
    end

    it 'morgue_range_by_updated_at' do
      get "/api/web/ApiTestWorker/morgue_range_by_updated_at?", min: '-inf', max: '+inf'

      expect( last_response.status ).to eq(200)
      expect( json_last_response.length ).to be(0)
    end

    it 'morgue_rev_range_by_updated_at' do
      get "/api/web/ApiTestWorker/morgue_rev_range_by_updated_at", max: '+inf', min: '-inf'

      expect( last_response.status ).to eq(200)
      expect( json_last_response.length ).to be(0)
    end

    context "operations" do
      # The API runs mutations in Thread.new; make them synchronous so
      # each example observes a deterministic result.
      before(:each) do
        allow(Thread).to receive(:new) { |&block| block.call }
      end

      it 'morgue_queue_up' do
        post '/api/web/ApiTestWorker/morgue_queue_up', ids: [1,2,3]
        expect( last_response.status ).to eq(200)
        expect( json_last_response ).to eq('ok')
      end

      it 'morgue_delete' do
        post '/api/web/ApiTestWorker/morgue_delete', ids: [1,2,3]
        expect( last_response.status ).to eq(200)
        expect( json_last_response ).to eq('ok')
      end

      it 'morgue_queue_up_all' do
        post '/api/web/ApiTestWorker/morgue_queue_up_all_jobs'
        expect( last_response.status ).to eq(200)
        expect( json_last_response ).to eq('ok')
      end

      it 'morgue_delete_all_jobs' do
        post '/api/web/ApiTestWorker/morgue_delete_all_jobs'
        expect( last_response.status ).to eq(200)
        expect( json_last_response ).to eq('ok')
      end

      it 'perform_all_jobs_now' do
        post '/api/web/ApiTestWorker/perform_all_jobs_now'
        expect( last_response.status ).to eq(200)
        expect( json_last_response ).to eq('ok')
      end

      it 'kill_all_failed_jobs' do
        post '/api/web/ApiTestWorker/kill_all_failed_jobs'
        expect( last_response.status ).to eq(200)
        expect( json_last_response ).to eq('ok')
      end

      it 'delete_all_failed_jobs' do
        post '/api/web/ApiTestWorker/delete_all_failed_jobs'
        expect( last_response.status ).to eq(200)
        expect( json_last_response ).to eq('ok')
      end
    end
  end
end
r } } 15 | before(:each) { $now = Lowkiq::Utils::Timestamp.now } 16 | before(:each) do 17 | allow(Lowkiq::Utils::Timestamp).to receive(:now) { $now } 18 | end 19 | 20 | describe '#perform_all_jobs_now' do 21 | it 'empty' do 22 | expect { actions.perform_all_jobs_now }.to_not raise_error 23 | end 24 | 25 | it 'reset' do 26 | queue.push [ { id: '1'} ] 27 | 28 | expect { 29 | actions.perform_all_jobs_now 30 | }.to change { 31 | queries.fetch(['1']).first[:perform_in] 32 | }.to(0) 33 | end 34 | end 35 | 36 | describe '#kill_all_failed_jobs' do 37 | it 'empty' do 38 | expect { actions.kill_all_failed_jobs }.to_not raise_error 39 | end 40 | 41 | it 'kill' do 42 | queue.push( 43 | [ 44 | { id: '1' }, 45 | { id: '2', retry_count: 0 }, 46 | ] 47 | ) 48 | 49 | actions.kill_all_failed_jobs 50 | 51 | expect( queries.fetch(['1']) ).to_not be_empty 52 | expect( queries.fetch(['2']) ).to be_empty 53 | 54 | expect( queries.morgue_fetch(['1']) ).to be_empty 55 | expect( queries.morgue_fetch(['2']) ).to_not be_empty 56 | end 57 | end 58 | 59 | describe '#delete_all_failed_jobs' do 60 | it 'empty' do 61 | expect { actions.delete_all_failed_jobs }.to_not raise_error 62 | end 63 | 64 | it 'delete' do 65 | queue.push( 66 | [ 67 | { id: '1' }, 68 | { id: '2', retry_count: 0 }, 69 | ] 70 | ) 71 | 72 | actions.delete_all_failed_jobs 73 | 74 | expect( queries.fetch(['1']) ).to_not be_empty 75 | expect( queries.fetch(['2']) ).to be_empty 76 | 77 | expect( queries.morgue_fetch(['1']) ).to be_empty 78 | expect( queries.morgue_fetch(['2']) ).to be_empty 79 | end 80 | end 81 | 82 | describe '#morgue_queue_up' do 83 | it 'empty queue' do 84 | queue.push_to_morgue( 85 | [ 86 | { id: '1', payloads: [['v1', $now]] }, 87 | ] 88 | ) 89 | 90 | actions.morgue_queue_up ['1'] 91 | 92 | expected = { 93 | id: '1', perform_in: $now, retry_count: -1, payloads: [['v1', $now]], 94 | } 95 | 96 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 97 | expect( queries.morgue_fetch ['1'] ).to be_empty 98 | 
end 99 | 100 | # несколько странный случай, но все же 101 | # странный, т.к. в морге не должна оказаться payload с бОльшим score 102 | it 'same payload' do 103 | queue.push( 104 | [ 105 | { id: '1', retry_count: -1, perform_in: $now, payload: "v1", score: $now }, 106 | ] 107 | ) 108 | 109 | queue.push_to_morgue( 110 | [ 111 | { id: '1', payloads: [["v1", $now + 10]] }, 112 | ] 113 | ) 114 | 115 | actions.morgue_queue_up ['1'] 116 | 117 | expected = { 118 | id: '1', retry_count: -1, perform_in: $now, payloads: [['v1', $now]], 119 | } 120 | 121 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 122 | expect( queries.morgue_fetch ['1'] ).to be_empty 123 | end 124 | 125 | it 'payload order' do 126 | queue.push( 127 | [ 128 | { id: '1', retry_count: -1, perform_in: $now, payload: "v2", score: $now + 10 }, 129 | ] 130 | ) 131 | 132 | queue.push_to_morgue( 133 | [ 134 | { id: '1', payloads: [["v1", $now]] }, 135 | ] 136 | ) 137 | 138 | actions.morgue_queue_up ['1'] 139 | 140 | expected = { 141 | id: '1', retry_count: -1, perform_in: $now, 142 | payloads: [['v1', $now], ['v2', $now + 10]] 143 | } 144 | 145 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 146 | expect( queries.morgue_fetch ['1'] ).to be_empty 147 | end 148 | 149 | it 'perform_in' do 150 | queue.push( 151 | [ 152 | { id: '1', retry_count: -1, perform_in: $now + 10, payload: "v1", score: $now }, 153 | ] 154 | ) 155 | 156 | queue.push_to_morgue( 157 | [ 158 | { id: '1', payloads: [["v1", $now]] }, 159 | ] 160 | ) 161 | 162 | actions.morgue_queue_up ['1'] 163 | 164 | expected = { 165 | id: '1', retry_count: -1, perform_in: $now, 166 | payloads: [['v1', $now]] 167 | } 168 | 169 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 170 | expect( queries.morgue_fetch ['1'] ).to be_empty 171 | end 172 | 173 | it 'retry_count' do 174 | queue.push( 175 | [ 176 | { id: '1', retry_count: 0, perform_in: $now, payload: "v1", score: $now }, 177 | ] 178 | ) 179 | 180 | queue.push_to_morgue( 181 
| [ 182 | { id: '1', payloads: [["v1", $now]] }, 183 | ] 184 | ) 185 | 186 | actions.morgue_queue_up ['1'] 187 | 188 | expected = { 189 | id: '1', retry_count: -1, perform_in: $now, payloads: [['v1', $now]] 190 | } 191 | 192 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 193 | expect( queries.morgue_fetch ['1'] ).to be_empty 194 | end 195 | end 196 | 197 | it '#morgue_queue_up_all_jobs' do 198 | queue.push_to_morgue( 199 | [ 200 | { id: '1', payloads: [["v1", $now]] }, 201 | ] 202 | ) 203 | 204 | actions.morgue_queue_up_all_jobs 205 | 206 | expect( queries.morgue_fetch ['1'] ).to be_empty 207 | expect( queries.fetch ['1'] ).to_not be_empty 208 | end 209 | 210 | it '#morgue_delete_all_jobs' do 211 | queue.push_to_morgue( 212 | [ 213 | { id: '1', payloads: [["v1", $now]] }, 214 | ] 215 | ) 216 | 217 | actions.morgue_delete_all_jobs 218 | 219 | expect( queries.morgue_fetch ['1'] ).to be_empty 220 | end 221 | end 222 | -------------------------------------------------------------------------------- /spec/queue/queries_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe Lowkiq::Queue::Queries do 2 | 3 | let(:redis_pool) { ConnectionPool.new(size: 5, timeout: 5) { Redis.new url: ENV['REDIS_URL'] } } 4 | let(:shards_count) { 1 } 5 | let(:shard_index) { 0 } 6 | 7 | let(:queue_name) { 'Test' } 8 | let(:queue) { Lowkiq::Queue::Queue.new redis_pool, queue_name, shards_count } 9 | let(:queries) { Lowkiq::Queue::Queries.new redis_pool, queue_name } 10 | 11 | before(:each) { redis_pool.with(&:flushdb) } 12 | before(:each) { redis_pool.with { |r| Lowkiq::Script.load! 
r } } 13 | before(:each) { $now = Lowkiq::Utils::Timestamp.now } 14 | before(:each) do 15 | allow(Lowkiq::Utils::Timestamp).to receive(:now) { $now } 16 | end 17 | 18 | describe 'queries' do 19 | describe 'empty' do 20 | it '#range_by_id' do 21 | expect( queries.range_by_id '-', '+' ).to be_empty 22 | end 23 | 24 | it '#rev_range_by_id' do 25 | expect( queries.rev_range_by_id '+', '-' ).to be_empty 26 | end 27 | 28 | it '#range_by_perfom_in' do 29 | expect( queries.range_by_perform_in '-inf', '+inf' ).to be_empty 30 | end 31 | 32 | it '#rev_range_by_perfom_in' do 33 | expect( queries.rev_range_by_perform_in '+inf', '-inf' ).to be_empty 34 | end 35 | 36 | it '#range_by_retry_count' do 37 | expect( queries.range_by_retry_count '-inf', '+inf' ).to be_empty 38 | end 39 | 40 | it '#rev_range_by_retry_count' do 41 | expect( queries.rev_range_by_retry_count '+inf', '-inf' ).to be_empty 42 | end 43 | 44 | it '#fetch' do 45 | expect( queries.fetch ['1', '2', '3'] ).to be_empty 46 | end 47 | 48 | it '#morgue_range_by_id' do 49 | expect( queries.morgue_range_by_id '-', '+' ).to be_empty 50 | end 51 | 52 | it '#morgue_rev_range_by_id' do 53 | expect( queries.morgue_rev_range_by_id '+', '-' ).to be_empty 54 | end 55 | 56 | it '#morgue_range_by_updated_at' do 57 | expect( queries.morgue_range_by_updated_at '-inf', '+inf' ).to be_empty 58 | end 59 | 60 | it '#morgue_rev_range_by_updated_at' do 61 | expect( queries.morgue_rev_range_by_updated_at '+inf', '-inf' ).to be_empty 62 | end 63 | 64 | it '#morgue_fetch' do 65 | expect( queries.morgue_fetch ['1', '2', '3'] ).to be_empty 66 | end 67 | end 68 | 69 | describe 'filled' do 70 | before(:each) do 71 | queue.push( 72 | [ 73 | { id: '1', retry_count: 0, perform_in: $now + 120, payload: "v1", score: $now }, 74 | { id: '2', retry_count: -1, perform_in: $now + 60, payload: "v1", score: $now }, 75 | ] 76 | ) 77 | end 78 | 79 | it '#fetch' do 80 | expected = 81 | [ 82 | { id: '1', retry_count: 0, perform_in: $now + 120, payloads: [['v1', 
$now]] }, 83 | { id: '2', retry_count: -1, perform_in: $now + 60, payloads: [['v1', $now]] }, 84 | ] 85 | 86 | expect( queries.fetch ['1', '2'] ).to eq(expected) 87 | end 88 | 89 | it '#range_by_id' do 90 | expected = 91 | [ 92 | { id: '1', retry_count: 0, perform_in: $now + 120, payloads: [['v1', $now]] }, 93 | { id: '2', retry_count: -1, perform_in: $now + 60, payloads: [['v1', $now]] }, 94 | ] 95 | 96 | expect( queries.range_by_id '-', '+' ).to eq(expected) 97 | end 98 | 99 | it '#rev_range_by_id' do 100 | expected = 101 | [ 102 | { id: '2', retry_count: -1, perform_in: $now + 60, payloads: [['v1', $now]] }, 103 | { id: '1', retry_count: 0, perform_in: $now + 120, payloads: [['v1', $now]] }, 104 | ] 105 | 106 | expect( queries.rev_range_by_id '+', '-' ).to eq(expected) 107 | end 108 | 109 | it '#range_by_perform_in' do 110 | expected = 111 | [ 112 | { id: '2', retry_count: -1, perform_in: $now + 60, payloads: [['v1', $now]] }, 113 | { id: '1', retry_count: 0, perform_in: $now + 120, payloads: [['v1', $now]] }, 114 | ] 115 | 116 | expect( queries.range_by_perform_in '-inf', '+inf' ).to eq(expected) 117 | end 118 | 119 | it '#rev_range_by_perform_in' do 120 | expected = 121 | [ 122 | { id: '1', retry_count: 0, perform_in: $now + 120, payloads: [['v1', $now]] }, 123 | { id: '2', retry_count: -1, perform_in: $now + 60, payloads: [['v1', $now]] }, 124 | ] 125 | 126 | expect( queries.rev_range_by_perform_in '+inf', '-inf' ).to eq(expected) 127 | end 128 | 129 | it '#range_by_retry_count' do 130 | expected = 131 | [ 132 | { id: '2', retry_count: -1, perform_in: $now + 60, payloads: [['v1', $now]] }, 133 | { id: '1', retry_count: 0, perform_in: $now + 120, payloads: [['v1', $now]] }, 134 | ] 135 | 136 | expect( queries.range_by_retry_count '-inf', '+inf' ).to eq(expected) 137 | end 138 | 139 | it '#rev_range_by_retry_count' do 140 | expected = 141 | [ 142 | { id: '1', retry_count: 0, perform_in: $now + 120, payloads: [['v1', $now]] }, 143 | { id: '2', retry_count: -1, 
perform_in: $now + 60, payloads: [['v1', $now]] }, 144 | ] 145 | 146 | expect( queries.rev_range_by_retry_count '+inf', '-inf' ).to eq(expected) 147 | end 148 | end 149 | 150 | describe 'filled_morgue' do 151 | before(:each) do 152 | queue.push_to_morgue( 153 | [ 154 | { id: '1', payloads: [['v1', $now]] }, 155 | { id: '2', payloads: [['v1', $now]] }, 156 | ] 157 | ) 158 | $now += 1 159 | queue.push_to_morgue( 160 | [ 161 | { id: '1', payloads: [['v1', $now]] }, 162 | ] 163 | ) 164 | end 165 | 166 | it '#morgue_fetch' do 167 | expected = 168 | [ 169 | { id: '1', updated_at: $now, payloads: [['v1', $now - 1]] }, 170 | { id: '2', updated_at: $now - 1, payloads: [['v1', $now - 1]] }, 171 | ] 172 | 173 | expect( queries.morgue_fetch ['1', '2'] ).to eq(expected) 174 | end 175 | 176 | it '#morgue_range_by_id' do 177 | expected = 178 | [ 179 | { id: '1', updated_at: $now, payloads: [['v1', $now - 1]] }, 180 | { id: '2', updated_at: $now - 1, payloads: [['v1', $now - 1]] }, 181 | ] 182 | 183 | expect( queries.morgue_range_by_id '-', '+' ).to eq(expected) 184 | end 185 | 186 | it '#morgue_rev_range_by_id' do 187 | expected = 188 | [ 189 | { id: '2', updated_at: $now - 1, payloads: [['v1', $now - 1]] }, 190 | { id: '1', updated_at: $now, payloads: [['v1', $now - 1]] }, 191 | ] 192 | 193 | expect( queries.morgue_rev_range_by_id '+', '-' ).to eq(expected) 194 | end 195 | 196 | it '#morgue_range_by_updated_at' do 197 | expected = 198 | [ 199 | { id: '2', updated_at: $now - 1, payloads: [['v1', $now - 1]] }, 200 | { id: '1', updated_at: $now, payloads: [['v1', $now - 1]] }, 201 | ] 202 | 203 | expect( queries.morgue_range_by_updated_at '-inf', '+inf' ).to eq(expected) 204 | end 205 | 206 | it '#morgue_rev_range_by_updated_at' do 207 | expected = 208 | [ 209 | { id: '1', updated_at: $now, payloads: [['v1', $now - 1]] }, 210 | { id: '2', updated_at: $now - 1, payloads: [['v1', $now - 1]] }, 211 | ] 212 | 213 | expect( queries.morgue_rev_range_by_updated_at '+inf', '-inf' ).to 
eq(expected) 214 | end 215 | end 216 | end 217 | end 218 | -------------------------------------------------------------------------------- /spec/queue/queue_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe Lowkiq::Queue::Queue do 2 | 3 | let(:redis_pool) { ConnectionPool.new(size: 5, timeout: 5) { Redis.new url: ENV['REDIS_URL'] } } 4 | let(:shards_count) { 1 } 5 | let(:shard_index) { 0 } 6 | 7 | let(:queue_name) { 'Test' } 8 | let(:queue) { Lowkiq::Queue::Queue.new redis_pool, queue_name, shards_count } 9 | let(:queries) { Lowkiq::Queue::Queries.new redis_pool, queue_name } 10 | 11 | before(:each) { redis_pool.with(&:flushdb) } 12 | before(:each) { redis_pool.with { |r| Lowkiq::Script.load! r } } 13 | before(:each) { $now = Lowkiq::Utils::Timestamp.now } 14 | before(:each) do 15 | allow(Lowkiq::Utils::Timestamp).to receive(:now) { $now } 16 | end 17 | 18 | describe 'queue #pop' do 19 | it 'empty' do 20 | expect( queue.pop(shard_index, limit: 10) ).to be_empty 21 | end 22 | 23 | it 'pop' do 24 | queue.push( 25 | [ 26 | { id: '1', perform_in: $now - 10, payload: 'v1' }, 27 | ] 28 | ) 29 | 30 | expected = { 31 | id: '1', perform_in: $now - 10, retry_count: -1, payloads: [['v1', $now]] 32 | } 33 | 34 | expect( queue.pop(shard_index, limit: 10) ).to contain_exactly(expected) 35 | end 36 | 37 | it 'skip future' do 38 | queue.push( 39 | [ 40 | { id: '1', perform_in: $now + 10, payload: 'v1', score: $now }, 41 | ] 42 | ) 43 | 44 | expect( queue.pop(shard_index, limit: 10) ).to be_empty 45 | end 46 | end 47 | 48 | describe 'queue #push merge' do 49 | it 'same payload' do 50 | queue.push( 51 | [ 52 | { id: '1', perform_in: $now, payload: "v1", score: $now }, 53 | { id: '1', perform_in: $now, payload: "v1", score: $now + 10 }, 54 | ] 55 | ) 56 | 57 | expected = { 58 | id: '1', retry_count: -1, perform_in: $now, 59 | payloads: [['v1', $now]], 60 | } 61 | 62 | expect( queries.fetch ['1'] ).to 
contain_exactly(expected) 63 | end 64 | 65 | it 'payload order' do 66 | queue.push( 67 | [ 68 | { id: '1', perform_in: $now, payload: "v2", score: $now + 10 }, 69 | { id: '1', perform_in: $now, payload: "v1", score: $now }, 70 | ] 71 | ) 72 | 73 | expected = { 74 | id: '1', retry_count: -1, perform_in: $now, 75 | payloads: [['v1', $now], ['v2', $now + 10]] 76 | } 77 | 78 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 79 | end 80 | 81 | it 'perform_in' do 82 | queue.push( 83 | [ 84 | { id: '1', perform_in: $now, payload: "v1", score: $now }, 85 | { id: '1', perform_in: $now + 10, payload: "v1", score: $now }, 86 | ] 87 | ) 88 | 89 | expected = { 90 | id: '1', retry_count: -1, perform_in: $now, 91 | payloads: [['v1', $now]] 92 | } 93 | 94 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 95 | end 96 | 97 | it 'retry_count' do 98 | queue.push( 99 | [ 100 | { id: '1', retry_count: 0, perform_in: $now, payload: "v1", score: $now }, 101 | { id: '1', perform_in: $now, payload: "v1", score: $now }, 102 | ] 103 | ) 104 | 105 | expected = { 106 | id: '1', retry_count: 0, perform_in: $now, 107 | payloads: [['v1', $now]] 108 | } 109 | 110 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 111 | end 112 | end 113 | 114 | describe 'queue #push_back merge' do 115 | it 'same payload' do 116 | queue.push_back( 117 | [ 118 | { id: '1', retry_count: 0, perform_in: $now, payloads: [["v1", $now]] }, 119 | { id: '1', retry_count: 0, perform_in: $now, payloads: [["v1", $now + 10]] }, 120 | ] 121 | ) 122 | 123 | expected = { 124 | id: '1', retry_count: 0, perform_in: $now, 125 | payloads: [['v1', $now]], 126 | } 127 | 128 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 129 | end 130 | 131 | it 'payload order' do 132 | queue.push_back( 133 | [ 134 | { id: '1', retry_count: 0, perform_in: $now, payloads: [["v2", $now + 10]] }, 135 | { id: '1', retry_count: 0, perform_in: $now, payloads: [["v1", $now]] }, 136 | ] 137 | ) 138 | 139 | expected = 
{ 140 | id: '1', retry_count: 0, perform_in: $now, 141 | payloads: [['v1', $now], ['v2', $now + 10]] 142 | } 143 | 144 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 145 | end 146 | 147 | it 'perform_in' do 148 | queue.push_back( 149 | [ 150 | { id: '1', perform_in: $now, payloads: [["v1", $now]] }, 151 | { id: '1', perform_in: $now + 10, payloads: [["v1", $now]] }, 152 | ] 153 | ) 154 | 155 | expected = { 156 | id: '1', retry_count: -1, perform_in: $now + 10, 157 | payloads: [['v1', $now]] 158 | } 159 | 160 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 161 | end 162 | 163 | it 'retry_count' do 164 | queue.push_back( 165 | [ 166 | { id: '1', retry_count: 0, perform_in: $now, payloads: [["v1", $now]] }, 167 | { id: '1', retry_count: 1, perform_in: $now, payloads: [["v1", $now]] }, 168 | ] 169 | ) 170 | 171 | expected = { 172 | id: '1', retry_count: 1, perform_in: $now, 173 | payloads: [['v1', $now]] 174 | } 175 | 176 | expect( queries.fetch ['1'] ).to contain_exactly(expected) 177 | end 178 | 179 | it 'error' do 180 | error = "some error" 181 | queue.push_back( 182 | [ 183 | { id: '1', payloads: [['v1', $now]], error: error }, 184 | ] 185 | ) 186 | 187 | expect( queries.fetch(['1']).first[:error] ).to eq(error) 188 | end 189 | end 190 | 191 | it '#processing_data' do 192 | queue.push( 193 | [ 194 | { id: '1', perform_in: $now - 10, payload: 'v1', score: $now }, 195 | ] 196 | ) 197 | 198 | queue.pop(shard_index, limit: 10) 199 | 200 | expected = { 201 | id: '1', perform_in: $now - 10, retry_count: -1, payloads: [['v1', $now]] 202 | } 203 | 204 | expect( queue.processing_data(shard_index) ).to contain_exactly(expected) 205 | end 206 | 207 | describe '#ack' do 208 | before(:each) do 209 | queue.push( 210 | [ 211 | { id: '1', perform_in: $now - 10, payload: 'v1', score: $now }, 212 | ] 213 | ) 214 | end 215 | let!(:data) { queue.pop shard_index, limit: 10 } 216 | 217 | it '#processing_data' do 218 | expect { queue.ack shard_index, data }.to 
change { queue.processing_data(shard_index) }.to([]) 219 | end 220 | end 221 | 222 | describe '#push_to_morgue' do 223 | it 'error' do 224 | error = "some error" 225 | queue.push_to_morgue( 226 | [ 227 | { id: '1', payloads: [['v1', $now]], error: error }, 228 | ] 229 | ) 230 | 231 | expect( queries.morgue_fetch(['1']).first[:error] ).to eq(error) 232 | end 233 | end 234 | 235 | describe 'serialization' do 236 | let(:payload) { { num: 1, str: 'str', time: Time.now, arr: [1, 2, 3] } } 237 | before(:each) do 238 | queue.push( 239 | [ 240 | { id: '1', payload: payload }, 241 | ] 242 | ) 243 | end 244 | 245 | it '#fetch' do 246 | expect( queries.fetch(['1']).first[:payloads].first.first ).to eq(payload) 247 | end 248 | end 249 | 250 | it '#morgue_delete' do 251 | queue.push_to_morgue( 252 | [ 253 | { id: '1', payloads: [["v1", $now]] }, 254 | ] 255 | ) 256 | 257 | queue.morgue_delete(['1']) 258 | 259 | expect( queries.morgue_fetch ['1'] ).to be_empty 260 | end 261 | 262 | it '#delete' do 263 | queue.push( 264 | [ 265 | { id: '1' }, 266 | ] 267 | ) 268 | 269 | queue.delete(['1']) 270 | 271 | expect( queries.fetch ['1'] ).to be_empty 272 | end 273 | end 274 | -------------------------------------------------------------------------------- /lib/lowkiq/queue/queue.rb: -------------------------------------------------------------------------------- 1 | module Lowkiq 2 | module Queue 3 | class Queue 4 | attr_reader :name, :pool 5 | 6 | def initialize(redis_pool, name, shards_count) 7 | @pool = redis_pool 8 | @name = name 9 | @shards_count = shards_count 10 | @timestamp = Utils::Timestamp.method(:now) 11 | @keys = Keys.new name 12 | @fetch = Fetch.new name 13 | end 14 | 15 | def push(batch) 16 | @pool.with do |redis| 17 | redis.multi do 18 | batch.each do |job| 19 | id = job.fetch(:id) 20 | perform_in = job.fetch(:perform_in, @timestamp.call) 21 | retry_count = job.fetch(:retry_count, -1) # for testing 22 | payload = job.fetch(:payload, "") 23 | score = job.fetch(:score, 
@timestamp.call) 24 | 25 | shard = id_to_shard id 26 | 27 | redis.zadd @keys.all_ids_lex_zset, 0, id 28 | redis.zadd @keys.all_ids_scored_by_perform_in_zset, perform_in, id, nx: true 29 | redis.zadd @keys.all_ids_scored_by_retry_count_zset, retry_count, id, nx: true 30 | 31 | redis.zadd @keys.ids_scored_by_perform_in_zset(shard), perform_in, id, nx: true 32 | redis.zadd @keys.payloads_zset(id), score, Lowkiq.dump_payload.call(payload), nx: true 33 | end 34 | end 35 | end 36 | end 37 | 38 | def pop(shard, limit:) 39 | @pool.with do |redis| 40 | ids = redis.zrangebyscore @keys.ids_scored_by_perform_in_zset(shard), 41 | 0, @timestamp.call, 42 | limit: [0, limit] 43 | return [] if ids.empty? 44 | 45 | res = redis.multi do |redis| 46 | redis.hset @keys.processing_length_by_shard_hash, shard, ids.length 47 | 48 | ids.each do |id| 49 | redis.zrem @keys.all_ids_lex_zset, id 50 | redis.zrem @keys.ids_scored_by_perform_in_zset(shard), id 51 | 52 | Script.zremhset redis, 53 | @keys.all_ids_scored_by_perform_in_zset, 54 | @keys.processing_ids_with_perform_in_hash(shard), 55 | id 56 | Script.zremhset redis, 57 | @keys.all_ids_scored_by_retry_count_zset, 58 | @keys.processing_ids_with_retry_count_hash(shard), 59 | id 60 | redis.rename @keys.payloads_zset(id), 61 | @keys.processing_payloads_zset(id) 62 | Script.hmove redis, 63 | @keys.errors_hash, 64 | @keys.processing_errors_hash(shard), 65 | id 66 | end 67 | processing_data_pipeline(redis, shard, ids) 68 | end 69 | 70 | res.shift 1 + ids.length * 6 71 | processing_data_build res, ids 72 | end 73 | end 74 | 75 | def push_back(batch) 76 | @pool.with do |redis| 77 | timestamp = @timestamp.call 78 | redis.multi do |redis| 79 | batch.each do |job| 80 | id = job.fetch(:id) 81 | perform_in = job.fetch(:perform_in, timestamp) 82 | retry_count = job.fetch(:retry_count, -1) 83 | payloads = job.fetch(:payloads).map do |(payload, score)| 84 | [score, Lowkiq.dump_payload.call(payload)] 85 | end 86 | error = 
Lowkiq.dump_error.call(job.fetch(:error, nil)) 87 | 88 | shard = id_to_shard id 89 | 90 | redis.zadd @keys.all_ids_lex_zset, 0, id 91 | redis.zadd @keys.all_ids_scored_by_perform_in_zset, perform_in, id 92 | redis.zadd @keys.all_ids_scored_by_retry_count_zset, retry_count, id 93 | 94 | redis.zadd @keys.ids_scored_by_perform_in_zset(shard), perform_in, id 95 | redis.zadd @keys.payloads_zset(id), payloads, nx: true 96 | 97 | redis.hset @keys.errors_hash, id, error unless error.nil? 98 | end 99 | end 100 | end 101 | end 102 | 103 | def ack(shard, data, result = nil) 104 | ids = data.map { |job| job[:id] } 105 | length = ids.length 106 | 107 | @pool.with do |redis| 108 | redis.multi do 109 | redis.del @keys.processing_ids_with_perform_in_hash(shard) 110 | redis.del @keys.processing_ids_with_retry_count_hash(shard) 111 | redis.del @keys.processing_errors_hash(shard) 112 | ids.each do |id| 113 | redis.del @keys.processing_payloads_zset(id) 114 | end 115 | redis.hdel @keys.processing_length_by_shard_hash, shard 116 | redis.incrby @keys.processed_key, length if result == :success 117 | redis.incrby @keys.failed_key, length if result == :fail 118 | end 119 | end 120 | end 121 | 122 | def processing_data(shard) 123 | @pool.with do |redis| 124 | ids = redis.hkeys @keys.processing_ids_with_perform_in_hash(shard) 125 | return [] if ids.empty? 
126 | 127 | res = redis.multi do |redis| 128 | processing_data_pipeline redis, shard, ids 129 | end 130 | 131 | processing_data_build res, ids 132 | end 133 | end 134 | 135 | def push_to_morgue(batch) 136 | @pool.with do |redis| 137 | timestamp = @timestamp.call 138 | redis.multi do 139 | batch.each do |job| 140 | id = job.fetch(:id) 141 | payloads = job.fetch(:payloads).map do |(payload, score)| 142 | [score, Lowkiq.dump_payload.call(payload)] 143 | end 144 | error = Lowkiq.dump_error.call(job.fetch(:error, nil)) 145 | 146 | 147 | redis.zadd @keys.morgue_all_ids_lex_zset, 0, id 148 | redis.zadd @keys.morgue_all_ids_scored_by_updated_at_zset, timestamp, id 149 | redis.zadd @keys.morgue_payloads_zset(id), payloads, nx: true 150 | 151 | redis.hset @keys.morgue_errors_hash, id, error unless error.nil? 152 | end 153 | end 154 | end 155 | end 156 | 157 | def morgue_delete(ids) 158 | @pool.with do |redis| 159 | redis.multi do 160 | ids.each do |id| 161 | redis.zrem @keys.morgue_all_ids_lex_zset, id 162 | redis.zrem @keys.morgue_all_ids_scored_by_updated_at_zset, id 163 | redis.del @keys.morgue_payloads_zset(id) 164 | redis.hdel @keys.morgue_errors_hash, id 165 | end 166 | end 167 | end 168 | end 169 | 170 | def delete(ids) 171 | @pool.with do |redis| 172 | redis.multi do 173 | ids.each do |id| 174 | shard = id_to_shard id 175 | redis.zrem @keys.all_ids_lex_zset, id 176 | redis.zrem @keys.all_ids_scored_by_perform_in_zset, id 177 | redis.zrem @keys.all_ids_scored_by_retry_count_zset, id 178 | redis.zrem @keys.ids_scored_by_perform_in_zset(shard), id 179 | redis.del @keys.payloads_zset(id) 180 | redis.hdel @keys.errors_hash, id 181 | end 182 | end 183 | end 184 | end 185 | 186 | def shards 187 | (0...@shards_count) 188 | end 189 | 190 | private 191 | 192 | def id_to_shard(id) 193 | Zlib.crc32(id.to_s) % @shards_count 194 | end 195 | 196 | def processing_data_pipeline(redis, shard, ids) 197 | redis.hgetall @keys.processing_ids_with_perform_in_hash(shard) 198 | redis.hgetall 
@keys.processing_ids_with_retry_count_hash(shard) 199 | redis.hgetall @keys.processing_errors_hash(shard) 200 | 201 | ids.each do |id| 202 | redis.zrange @keys.processing_payloads_zset(id), 0, -1, with_scores: true 203 | end 204 | end 205 | 206 | def processing_data_build(arr, ids) 207 | ids_with_perform_in = arr.shift 208 | ids_with_retry_count = arr.shift 209 | errors = arr.shift 210 | payloads = arr 211 | 212 | ids.zip(payloads).map do |(id, payloads)| 213 | next if payloads.empty? 214 | { 215 | id: id, 216 | perform_in: ids_with_perform_in[id].to_f, 217 | retry_count: ids_with_retry_count[id].to_f, 218 | payloads: payloads.map { |(payload, score)| [Lowkiq.load_payload.call(payload), score] }, 219 | error: Lowkiq.load_error.call(errors[id]) 220 | }.compact 221 | end.compact 222 | end 223 | end 224 | end 225 | end 226 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | This software is dual-licensed under the LGPL version 3 or under the Licence Agreement. 2 | Recipients can choose the terms under which they want to use or distribute the 3 | software. 4 | 5 | Copyright © BIA-Technologies Limited Liability Company (OOO) 6 | 7 | # The LGPL Version 3 (LGPL-3.0) 8 | 9 | https://www.gnu.org/licenses/lgpl-3.0.html 10 | 11 | # Licence Agreement 12 | On granting a non-exclusive right to use open source software 13 | 14 | **BIA-Technologies Limited Liability Company (OOO)**, registered and operating under the laws of the Russian Federation, state registration date November 6, 2014, under the main state registration number (OGRN) 1147847386906, registered in the Interdistrict FTSI (Federal Tax Service Inspectorate) of Russia No. 
23 on Saint-Petersburg (TIN (taxpayer ID number) 7810385714, RRC (registration reason code) 781001001), hereinafter referred to as the **"Licensor"**, represented by the Director General Sergey Sergeevich Barykin, acting under the Charter, guided by paragraph 1 of Article 1286.1 of the Civil Code of RF (Russian Federation), provides the user (hereinafter referred to as **"the Licensee"**) on the basis and under the terms of this licence agreement (hereinafter the “Agreement”) the non-exclusive right to use **the Lowkiq open source software**: 15 | 16 | ## 1. SUBJECT OF AGREEMENT 17 | 18 | 1.1. The Licensor provides the Licensee, in the manner and on the terms set forth in this Agreement, the right to use (license) **the Lowkiq open source software** (hereinafter - the "Software"). 19 | 20 | 1.2. The source code for the software is available on the website located in the Internet telecommunication network "Internet" at the address: https://github.com/bia-technologies/lowkiq. 21 | 22 | 1.3. Software characteristics, that individualize it as a unique result of intellectual activity: 23 | 24 | 1.3.1. Functionality: Server for the streamlined parallel processing of Ruby background tasks. 25 | 26 | 1.3.2. Requirements for working with the software: Redis >= 3.2 is required to work with the Library 27 | 28 | 1.4. The licensor guarantees, that it is the copyright holder of the exclusive right on the software, that is not alienated, not mortgaged and not challenged. 29 | 30 | 1.5. The software is not registered with the federal executive authority for intellectual property. 31 | 32 | 1.6. The right to use (license) granted to the Licensee under this Agreement is non-exclusive. The Licensor reserves the right to grant licenses to other parties. 33 | 34 | 1.7. The right to use the software (license) is issued from the moment the Licensee joins the terms of this Agreement in electronic form. 35 | 36 | 1.8. 
Under this Agreement, the use of the Software by the Licensee is allowed all over the world. 37 | 38 | ## 2. RIGHTS ASSIGNED TO THE LICENSEE ON THIS AGREEMENT 39 | 40 | 2.1. Under this Agreement, the Licensor grants the Licensee the right to use the software in the following ways: 41 | * reproduction (full or partial), that is, the production of one or more copies of the software or its part in any tangible form, including, but not limited to, recording the software on an electronic media, including writing to the memory of the Licensee’s computer, without restrictions on the number of such computers; 42 | * distribution by any means and in any ways of software copies through the Licensee’s internal LAN, on the Licensee’s computer, among the employees of the Licensee, related to it with labor and (or) civil - law relations. 43 | 44 | ## 3. PROCEDURE FOR ASSIGNMENT OF AN INSTANCE AND NON-EXCLUSIVE RIGHTS TO THE SOFTWARE 45 | 46 | 3.1. The assignment of a copy of the software to the Licensee is carried out by downloading a copy of the software by the Licensee from the website, located in the Internet telecommunication network “Internet” at the address: https://github.com/bia-technologies/lowkiq. 47 | 48 | ## 4. RIGHTS AND OBLIGATIONS OF THE PARTIES UNDER THIS AGREEMENT 49 | 50 | 4.1. The licensor undertakes: 51 | 52 | 4.1.1. assign to the Licensee the software free of the rights of third parties, in the manner prescribed by this Agreement, in a condition that allows its use on the terms of this Agreement, no later than the day the Agreement is concluded; 53 | 54 | 4.1.2. refrain from any action that could impede the Licensee from exercising the rights granted to him under this Agreement; 55 | 56 | 4.1.3. comply with confidentiality in accordance with section 8 of this Agreement. 57 | 58 | 4.2. Licensee undertakes: 59 | 60 | 4.2.1.
at the request of the Licensor, provide him with the opportunity to get acquainted with accounting and other documents containing information about the use of the software; 61 | 62 | 4.2.2. at the request of the Licensor, provide a report on the use of the software in accordance with this Agreement in the time and manner specified in such a requirement; 63 | 64 | 4.2.3. comply with confidentiality in accordance with section 8 of this Agreement. 65 | 66 | 4.3. Licensee has the right: 67 | 68 | 4.3.1. carry out actions, necessary for the functioning of the software (including during use in accordance with the purpose), including recording and storing in memory of an unlimited number of Licensee's computers; 69 | 70 | 4.3.2. study, research, test the functioning of the software for the purpose of its use in accordance with this Agreement. 71 | 72 | Application of the provisions of clause 4.3. of this Agreement shall not contradict the normal use of the software and shall not infringe in any other way the rights and legitimate interests of the Licensor. 73 | 74 | ## 5. REMUNERATION FOR THE ASSIGNMENT OF NON-EXCLUSIVE RIGHTS UNDER THIS AGREEMENT 75 | 76 | 5.1. Compensation for using the software under this Agreement is not paid by the Licensee to the Licensor on the basis of paragraph 1 of clause 3 of Article 1286.1 of the Civil Code of RF. 77 | 78 | ## 6. EXCLUSIVE SOFTWARE RIGHTS 79 | 80 | 6.1. Exclusive rights to the software, all modules compiling the software, copied and / or included in all softwares, provided to the Licensee under this Agreement, or in its part, as well as all documentation related to the software, belong to the Licensor. 81 | 82 | 6.2. The Licensee may not use the software not on its own behalf, and also may not use the software in ways not established by this Agreement. 83 | 84 | 6.3.
The Licensor confirms that at the time of signing this Agreement, he does not know anything about the rights of third parties, that could be violated by granting the Licensee a non-exclusive right to use the software under this Agreement. 85 | 86 | 6.4. The Licensor will assist the Licensee in the consideration of claims of third parties related to the use of the software, in case of receipt of information about such claims from the Licensee. In the event of receipt of information on the filing of such a claim, the Licensee shall immediately inform the Licensor of all claims made by the plaintiff and provide all information that he has regarding such a dispute. 87 | 88 | ## 7. RESPONSIBILITY OF THE PARTIES UNDER THIS AGREEMENT 89 | 90 | 7.1. For failure to fulfill or improper fulfillment of obligations under this Agreement, the Parties are liable in accordance with the current legislation of the Russian Federation. 91 | 92 | 7.2. The use by the Licensee of the software in a manner not provided for by this Agreement, or after the termination of the Agreement, or otherwise - beyond the rights, granted to the Licensee under the Agreement, shall entail liability for violation of exclusive rights to the result of intellectual activity, established by the current legislation of the Russian Federation. 93 | 94 | ## 8. EMERGENCIES 95 | 96 | 8.1. The parties are exempted from liability for full or partial failure to fulfill obligations under this Agreement,if such failure is the result of emergencies (“force majeure circumstances”), which include riots, prohibitive actions of authorities, natural disasters, fires, catastrophes and other emergencies that the Parties cannot influence. The parties will consider the document issued by the authorized body of state power and (or) the corresponding chamber of commerce to be proper evidence of the presence of the emergencies. 97 | 98 | 8.2. 
The parties are obliged to notify each other in writing about the existence of emergencies within 3 (three) days from the date of their discovery. 99 | 100 | ## 9. DISPUTE RESOLUTION PROCEDURES 101 | 102 | 9.1. All disputes and disagreements that may arise between the Parties on issues that have not been resolved in the text of this Agreement will be resolved through negotiations (in the complaint procedure) in accordance with the current legislation of the Russian Federation. The term for responding to a claim is 14 (fourteen) calendar days. 103 | 104 | 9.2. If the dispute is not settled during the negotiations, the disputes are referred to the Court at the location of the Plaintiff. 105 | 106 | ## 10. VALIDITY AND PROCEDURE OF TERMINATION OF THIS AGREEMENT 107 | 108 | 10.1. This Agreement is considered concluded and comes into force from the moment the Licensee joins its terms in electronic form (clause 2 of Article 434 of the Civil Code of the Russian Federation). The term of this Agreement may not exceed the term of the exclusive right to software. 109 | 110 | 10.2. The non-exclusive right to use the software received by the Licensee shall terminate upon the early termination or termination of this Agreement. 111 | 112 | 10.3. In case of violation by the Licensee of any obligation under this Agreement, the Licensor has the right to send a written notice to the Licensee demanding that this obligation be performed. In case of failure to fulfill or improper fulfillment of such an obligation within 3 (three) calendar days after the receipt by the Licensee of the written notice, this Agreement may be terminated by sending a notice of unilateral cancellation of this Agreement from the Licensor to the Licensee. In this case, the Agreement shall be deemed to have expired upon the expiration of 3 (three) calendar days from the date of receipt of such notification. 113 | 114 | 10.4.
After the termination of this Agreement, the Licensee shall immediately cease using the software and continue to not use it. Within 5 (five) business days from the date of termination or denouncement of the Agreement, the Licensee must destroy all copies of the software received under this Agreement. 115 | 116 | 10.5. Termination of the Agreement does not relieve the Licensee from the fulfillment of the obligation to pay fees in accordance with this Agreement 117 | 118 | ## 11. FINAL PROVISIONS 119 | 120 | 11.1. None of the Parties has the right to assign their rights and obligations under the Agreement to third parties without the written consent of the other Party. 121 | 122 | 11.2. In all other aspects, which are not provided by this Agreement, the Parties will be guided by the current legislation of the Russian Federation. 123 | 124 | 11.3. When activating and using the software, the Licensee agrees with all the terms of this Agreement, and also agrees to the transfer, receipt and processing of the personal data. 125 | 126 | ## 12. ADDRESS AND DETAILS OF THE LICENSOR: 127 | 128 | LICENSOR: 129 | 130 | OOO BIA- Technologies (Limited Liability Company) 131 | 132 | Statutory Seat and the postal address: 196084, Russian Federation,
133 | g. Saint-Petersburg, pr. Moskovskiy, d.94, lit. A, pom. 12- H 134 | 135 | OGRN 1147847386906 136 | 137 | TIN/ 7810385714 138 | 139 | RRC/ 781001001 140 | 141 | Name and email address of the representative:
142 | Sergey Staskov
143 | Sergey.Staskov@bia-tech.ru
144 | -------------------------------------------------------------------------------- /README.ru.md: -------------------------------------------------------------------------------- 1 | [![Gem Version](https://badge.fury.io/rb/lowkiq.svg)](https://badge.fury.io/rb/lowkiq) 2 | 3 | # Lowkiq 4 | 5 | Упорядоченная обработка фоновых задач. 6 | 7 | ![dashboard](doc/dashboard.png) 8 | 9 | * [Rationale](#rationale) 10 | * [Description](#description) 11 | * [Sidekiq](#sidekiq) 12 | * [Очередь](#%D0%BE%D1%87%D0%B5%D1%80%D0%B5%D0%B4%D1%8C) 13 | + [Алгоритм расчета retry_count и perform_in](#%D0%B0%D0%BB%D0%B3%D0%BE%D1%80%D0%B8%D1%82%D0%BC-%D1%80%D0%B0%D1%81%D1%87%D0%B5%D1%82%D0%B0-retry_count-%D0%B8-perform_in) 14 | + [Правило слияния задач](#%D0%BF%D1%80%D0%B0%D0%B2%D0%B8%D0%BB%D0%BE-%D1%81%D0%BB%D0%B8%D1%8F%D0%BD%D0%B8%D1%8F-%D0%B7%D0%B0%D0%B4%D0%B0%D1%87) 15 | * [Install](#install) 16 | * [Api](#api) 17 | * [Ring app](#ring-app) 18 | * [Настройка](#%D0%BD%D0%B0%D1%81%D1%82%D1%80%D0%BE%D0%B9%D0%BA%D0%B0) 19 | * [Запуск](#%D0%B7%D0%B0%D0%BF%D1%83%D1%81%D0%BA) 20 | * [Остановка](#%D0%BE%D1%81%D1%82%D0%B0%D0%BD%D0%BE%D0%B2%D0%BA%D0%B0) 21 | * [Debug](#debug) 22 | * [Development](#development) 23 | * [Исключения](#%D0%B8%D1%81%D0%BA%D0%BB%D1%8E%D1%87%D0%B5%D0%BD%D0%B8%D1%8F) 24 | * [Rails integration](#rails-integration) 25 | * [Splitter](#splitter) 26 | * [Scheduler](#scheduler) 27 | * [Рекомендации по настройке](#%D1%80%D0%B5%D0%BA%D0%BE%D0%BC%D0%B5%D0%BD%D0%B4%D0%B0%D1%86%D0%B8%D0%B8-%D0%BF%D0%BE-%D0%BD%D0%B0%D1%81%D1%82%D1%80%D0%BE%D0%B9%D0%BA%D0%B5) 28 | + [`SomeWorker.shards_count`](#someworkershards_count) 29 | + [`SomeWorker.max_retry_count`](#someworkermax_retry_count) 30 | * [Изменение количества шардов воркера](#%D0%B8%D0%B7%D0%BC%D0%B5%D0%BD%D0%B5%D0%BD%D0%B8%D0%B5-%D0%BA%D0%BE%D0%BB%D0%B8%D1%87%D0%B5%D1%81%D1%82%D0%B2%D0%B0-%D1%88%D0%B0%D1%80%D0%B4%D0%BE%D0%B2-%D0%B2%D0%BE%D1%80%D0%BA%D0%B5%D1%80%D0%B0) 31 | 32 | ## Rationale 33 | 34 | При использовании Sidekiq мы 
столкнулись с проблемами при обработке сообщений от сторонней системы. 35 | 36 | Скажем, сообщение представляет собой данные заказа в определенный момент времени. 37 | При изменении атрибутов или статуса отправляется новое сообщение сторонней системой. 38 | Заказы обновляются часто и в очереди рядом находятся сообщения, касающиеся одного и того же заказа. 39 | 40 | Sidekiq не гарантирует строгого порядка сообщений, т.к. очередь обрабатывается в несколько потоков. 41 | Например, пришло 2 сообщения: M1 и M2. 42 | Sidekiq обработчики начинают обрабатывать их параллельно, 43 | при этом M2 может обработаться раньше M1. 44 | 45 | Параллельная обработка данных одного заказа приводит к: 46 | 47 | + dead locks 48 | + затиранию новых данных старыми 49 | 50 | Lowkiq призван устранить эти проблемы, исключая параллельность обработки сообщений в рамках одной сущности. 51 | 52 | ## Description 53 | 54 | Очереди надежны. Lowkiq сохраняет данные об обрабатываемой задаче и при запуске переносит 55 | незавершенные задачи обратно в очередь. 56 | 57 | Задачи в очереди отсортированы по заданному времени исполнения, т.е. это не FIFO очереди. 58 | 59 | Каждая задача имеет идентификатор. Очереди гарантируют, что не может быть ситуации, 60 | когда несколько потоков обрабатывают задачи с одинаковыми идентификаторами. 61 | 62 | Каждая очередь разбивается на постоянный набор шардов. 63 | На основе идентификатора задачи выбирается шард, в который попадет задача. 64 | Таким образом задачи с одним идентификатором всегда попадают в один и тот же шард. 65 | Задачи шарда всегда обрабатываются одним и тем же потоком. 66 | Это гарантирует порядок обработки задач с одинаковым идентификатором и исключает возможность блокировок. 67 | 68 | Кроме идентификатора задача имеет полезную нагрузку или данные задачи (payload). 69 | Для задач с одинаковым идентификаторм происходит слияние полезных нагрузок. 70 | Таким образом одновременно в обработку попадают все накопленные полезные нагрузки задачи. 
71 | Это полезно, если нужно обработать только последнее сообщение и отбросить все предыдущие. 72 | 73 | Каждой очереди соответствует воркер, содержащий логику обработки задачи. 74 | 75 | Для обработки всех задач всех очередей используется фиксированное количество тредов. 76 | Добавление или удаление очередей или их шардов не приводит к изменению числа тредов. 77 | 78 | ## Sidekiq 79 | 80 | Если для ваших задач подходит Sidekiq - используйте его. 81 | 82 | Если вы используете плагины вроде 83 | [sidekiq-grouping](https://github.com/gzigzigzeo/sidekiq-grouping), 84 | [sidekiq-unique-jobs](https://github.com/mhenrixon/sidekiq-unique-jobs), 85 | [sidekiq-merger](https://github.com/dtaniwaki/sidekiq-merger) 86 | или реализуете собственный механизм блокировок, то стоит рассмотреть Lowkiq. 87 | 88 | Например, sidekiq-grouping предварительно накапливает пачку задач, ставит ее в очередь и начинает накапливать следующую. 89 | При таком подходе случается ситуация, когда в очереди находятся 2 пачки с данными одного заказа. 90 | Эти пачки начинают обрабатываться одновременно разными тредами, что приводит к изначальной проблеме. 91 | 92 | Lowkiq изначально проектировался так, чтобы не использовать любые блокировки. 93 | 94 | Кроме того, в Lowkiq очереди изначально надежны. Только Sidekiq Pro или плагины добавляют такую функциональность. 95 | 96 | Этот [бенчмарк](examples/benchmark) показывает накладные расходы на взаимодействие с redis. 97 | Для 5 threads, 100'000 blank jobs получились результаты: 98 | 99 | + lowkiq: 214 sec или 2,14 мс на задачу 100 | + sidekiq: 29 sec или 0,29 мс на задачу 101 | 102 | Эта разница связана с принципиально различным устройством очередей. 103 | Sidekiq использует один список для всех воркеров и извлекает задачу целиком за O(1). 104 | Lowkiq использует несколько типов данных, включая сортированные множества для хранения идентификаторов задач. 105 | Таким образом только получение идентификатора задачи занимает O(log(N)). 
106 | 107 | ## Очередь 108 | 109 | Каждая задача в очереди имеет аттрибуты: 110 | 111 | + `id` - идентификатор задачи (строка) 112 | + `payloads` - сортированное множество payload'ов (объекты) по их score (вещественное число) 113 | + `perform_in` - запланированное время начала иполнения задачи (unix timestamp, вещественное число) 114 | + `retry_count` - количество совершённых повторов задачи (вещественное число) 115 | 116 | `id` может быть, например, идентификатором реплицируемой сущности 117 | `payloads` - множество, 118 | получаемое в результате группировки полезной нагрузки задачи по `id` и отсортированное по ее `score`. 119 | `payload` может быть ruby объектом, т.к. сериализуется с помощью `Marshal.dump`. 120 | `score` может быть датой (unix timestamp) создания `payload` 121 | или ее монотонно увеличивающимся номером версии. 122 | По умолчанию - текущий unix timestamp. 123 | По умолчанию `perform_in` - текущий unix timestamp. 124 | `retry_count` для новой необработанной задачи равен `-1`, для упавшей один раз - `0`, 125 | т.е. считаются не совершённые, а запланированные повторы. 126 | 127 | Выполнение задачи может закончиться неудачей. 128 | В этом случае ее `retry_count` инкрементируется и по заданной формуле вычисляется новый `perform_in`, 129 | и она ставится обратно в очередь. 130 | 131 | В случае, когда `retry_count` становится `>=` `max_retry_count` 132 | элемент payloads с наименьшим(старейшим) score перемещается в морг, 133 | а оставшиеся элементы помещаются обратно в очередь, при этом 134 | `retry_count` и `perform_in` сбрасываются в `-1` и `now()` соответственно. 135 | 136 | ### Алгоритм расчета retry_count и perform_in 137 | 138 | 0. задача выполнилась и упала 139 | 1. `retry_count++` 140 | 2. `perform_in = now + retry_in(try_count)` 141 | 3. 
`if retry_count >= max_retry_count` задача перемещается в морг 142 | 143 | | тип | `retry_count` | `perform_in` | 144 | | --- | --- | --- | 145 | | новая не выполнялась | -1 | задан или `now()` | 146 | | новая упала | 0 | `now() + retry_in(0)` | 147 | | повтор упал | 1 | `now() + retry_in(1)` | 148 | 149 | Если `max_retry_count = 1`, то попытки прекращаются. 150 | 151 | ### Правило слияния задач 152 | 153 | Когда применяется: 154 | 155 | + если в очереди была задача и добавляется еще одна с тем же id 156 | + если при обработке возникла ошибка, а в очередь успели добавили задачу с тем же id 157 | + если задачу из морга поставили в очередь, а в очереди уже есть задача с тем же id 158 | 159 | Алгоритм: 160 | 161 | + payloads объединяются, при этом выбирается минимальный score, 162 | т.е. для одинаковых payload выигрывает самая старая 163 | + если объединяется новая и задача из очереди, 164 | то `perform_in` и `retry_count` берутся из задачи из очереди 165 | + если объединяется упавшая задача и задача из очереди, 166 | то `perform_in` и `retry_count` берутся из упавшей 167 | + если объединяется задача из морга и задача из очереди, 168 | то `perform_in = now()`, `retry_count = -1` 169 | 170 | Пример: 171 | 172 | ``` 173 | # v1 - первая версия, v2 - вторая 174 | # #{"v1": 1} - сортированное множество одного элемента, payload - "v1", score - 1 175 | 176 | # задача в очереди 177 | { id: "1", payloads: #{"v1": 1, "v2": 2}, retry_count: 0, perform_in: 1536323288 } 178 | # добавляемая задача 179 | { id: "1", payloads: #{"v2": 3, "v3": 4}, retry_count: -1, perform_in: 1536323290 } 180 | 181 | # результат 182 | { id: "1", payloads: #{"v1": 1, "v2": 3, "v3": 4}, retry_count: 0, perform_in: 1536323288 } 183 | ``` 184 | 185 | Морг - часть очереди. Задачи в морге не обрабатываются. 186 | Задача в морге имеет следующие атрибуты: 187 | 188 | + id - идентификатор задачи 189 | + payloads 190 | 191 | Задачи в морге можно отсортировать по дате изменения или id. 
192 | 193 | Задачу из морга можно переместить в очередь. При этом для нее `retry_count = 0`, `perform_in = now()`. 194 | 195 | ## Install 196 | 197 | ``` 198 | # Gemfile 199 | 200 | gem 'lowkiq' 201 | ``` 202 | 203 | Redis версии >= 3.2. 204 | 205 | ## Api 206 | 207 | ```ruby 208 | module ATestWorker 209 | extend Lowkiq::Worker 210 | 211 | self.shards_count = 24 212 | self.batch_size = 10 213 | self.max_retry_count = 5 214 | 215 | def self.retry_in(count) 216 | 10 * (count + 1) # (i.e. 10, 20, 30, 40, 50) 217 | end 218 | 219 | def self.perform(payloads_by_id) 220 | # payloads_by_id - хеш 221 | payloads_by_id.each do |id, payloads| 222 | # id - идентификатор задачи 223 | # payloads отсортированы по score, от старых к новым (от минимальных к максимальным) 224 | payloads.each do |payload| 225 | do_some_work(id, payload) 226 | end 227 | end 228 | end 229 | end 230 | ``` 231 | 232 | Значения по умолчанию: 233 | 234 | ```ruby 235 | self.shards_count = 5 236 | self.batch_size = 1 237 | self.max_retry_count = 25 238 | self.queue_name = self.name 239 | 240 | # i.e. 15, 16, 31, 96, 271, ... seconds + a random amount of time 241 | def retry_in(retry_count) 242 | (retry_count ** 4) + 15 + (rand(30) * (retry_count + 1)) 243 | end 244 | ``` 245 | 246 | ```ruby 247 | ATestWorker.perform_async [ 248 | { id: 0 }, 249 | { id: 1, payload: { attr: 'v1' } }, 250 | { id: 2, payload: { attr: 'v1' }, score: Time.now.to_f, perform_in: Time.now.to_f }, 251 | ] 252 | # payload по умолчанию равен "" 253 | # score и perform_in по умолчанию равны Time.now.to_f 254 | ``` 255 | 256 | Вы можете переопределить `perform_async` и вычислять `id`, `score` и `perform_in` в воркере: 257 | 258 | ```ruby 259 | module ATestWorker 260 | extend Lowkiq::Worker 261 | 262 | def self.perform_async(jobs) 263 | jobs.each do |job| 264 | job.merge! id: job[:payload][:id] 265 | end 266 | super 267 | end 268 | 269 | def self.perform(payloads_by_id) 270 | #... 
271 | end 272 | end 273 | 274 | ATestWorker.perform_async 1000.times.map { |id| { payload: {id: id} } } 275 | ``` 276 | 277 | ## Ring app 278 | 279 | `Lowkiq::Web` - ring app. 280 | 281 | + `/` - dashboard 282 | + `/api/v1/stats` - длина очереди, длина морга, лаг для каждого воркера и суммарно 283 | 284 | ## Настройка 285 | 286 | Опции и значения по умолчанию: 287 | 288 | + `Lowkiq.poll_interval = 1` - задержка в секундах между опросами очереди на предмет новых задач. 289 | Используется только если на предыдущей итерации очередь оказалась пуста или случилась ошибка. 290 | + `Lowkiq.threads_per_node = 5` - кол-во тредов для каждой ноды. 291 | + `Lowkiq.redis = ->() { Redis.new url: ENV.fetch('REDIS_URL') }` - настройка redis. 292 | + `Lowkiq.client_pool_size = 5` - размер пула редиса для постановки задач в очередь. 293 | + `Lowkiq.pool_timeout = 5` - таймаут клиентского и серверного пула редиса 294 | + `Lowkiq.server_middlewares = []` - список middleware, оборачивающих воркер. 295 | + `Lowkiq.on_server_init = ->() {}` - выполнения кода при инициализации сервера. 296 | + `Lowkiq.build_scheduler = ->() { Lowkiq.build_lag_scheduler }` - планировщик. 297 | + `Lowkiq.build_splitter = ->() { Lowkiq.build_default_splitter }` - сплиттер. 298 | + `Lowkiq.last_words = ->(ex) {}` - обработчик исключений, потомков `StandardError`, вызвавших остановку процесса. 299 | 300 | ```ruby 301 | $logger = Logger.new(STDOUT) 302 | 303 | Lowkiq.server_middlewares << -> (worker, batch, &block) do 304 | $logger.info "Started job for #{worker} #{batch}" 305 | block.call 306 | $logger.info "Finished job for #{worker} #{batch}" 307 | end 308 | 309 | Lowkiq.server_middlewares << -> (worker, batch, &block) do 310 | begin 311 | block.call 312 | rescue => e 313 | $logger.error "#{e.message} #{worker} #{batch}" 314 | raise e 315 | end 316 | end 317 | ``` 318 | 319 | ## Запуск 320 | 321 | `lowkiq -r ./path_to_app` 322 | 323 | `path_to_app.rb` должен загрузить приложение. 
[Пример](examples/dummy/lib/app.rb). 324 | 325 | Ленивая загрузка модулей воркеров недопустима. 326 | Используйте для предварительной загрузки модулей 327 | `require` или [`require_dependency`](https://api.rubyonrails.org/classes/ActiveSupport/Dependencies/Loadable.html#method-i-require_dependency) 328 | для Ruby on Rails. 329 | 330 | ## Остановка 331 | 332 | Послать процессу TERM или INT (Ctrl-C). 333 | Процесс будет ждать завершения всех задач. 334 | 335 | Обратите внимание, если очередь пуста, процесс спит `poll_interval` секунд. 336 | Таким образом завершится не позднее чем через `poll_interval` секунд. 337 | 338 | ## Debug 339 | 340 | Получить trace всех тредов приложения: 341 | 342 | ``` 343 | kill -TTIN 344 | cat /tmp/lowkiq_ttin.txt 345 | ``` 346 | 347 | ## Development 348 | 349 | ``` 350 | docker-compose run --rm --service-port app bash 351 | bundle 352 | rspec 353 | cd examples/dummy ; bundle exec ../../exe/lowkiq -r ./lib/app.rb 354 | ``` 355 | 356 | ## Исключения 357 | 358 | `StandardError` выброшенные воркером обрабатываются с помощью middleware. 359 | Такие исключения не приводят к остановке процесса. 360 | 361 | Все прочие исключения приводят к остановке процесса. 362 | При этом Lowkiq дожидается выполнения задач другими тредами. 363 | 364 | `StandardError` выброшенные вне воркера передаются в `Lowkiq.last_words`. 365 | Например это происходит при потере соединения к Redis или при ошибке в коде Lowkiq. 366 | 367 | ## Rails integration 368 | 369 | ```ruby 370 | # config/routes.rb 371 | 372 | Rails.application.routes.draw do 373 | # ... 374 | mount Lowkiq::Web => '/lowkiq' 375 | # ... 
376 | end 377 | ``` 378 | 379 | ```ruby 380 | # config/initializers/lowkiq.rb 381 | 382 | # загружаем все lowkiq воркеры 383 | Dir["#{Rails.root}/app/lowkiq_workers/**/*.rb"].each { |file| require_dependency file } 384 | 385 | # конфигурация: 386 | # Lowkiq.redis = -> { Redis.new url: ENV.fetch('LOWKIQ_REDIS_URL') } 387 | # Lowkiq.threads_per_node = ENV.fetch('LOWKIQ_THREADS_PER_NODE').to_i 388 | # Lowkiq.client_pool_size = ENV.fetch('LOWKIQ_CLIENT_POOL_SIZE').to_i 389 | # ... 390 | 391 | Lowkiq.server_middlewares << -> (worker, batch, &block) do 392 | logger = Rails.logger 393 | tag = "#{worker}-#{Thread.current.object_id}" 394 | 395 | logger.tagged(tag) do 396 | time_start = Time.now 397 | logger.info "#{time_start} Started job, batch: #{batch}" 398 | begin 399 | block.call 400 | rescue => e 401 | logger.error e.message 402 | raise e 403 | ensure 404 | time_end = Time.now 405 | logger.info "#{time_end} Finished job, duration: #{time_end - time_start} sec" 406 | end 407 | end 408 | end 409 | 410 | # Sentry integration 411 | Lowkiq.server_middlewares << -> (worker, batch, &block) do 412 | opts = { 413 | extra: { 414 | lowkiq: { 415 | worker: worker.name, 416 | batch: batch, 417 | } 418 | } 419 | } 420 | 421 | Raven.capture opts do 422 | block.call 423 | end 424 | end 425 | 426 | # NewRelic integration 427 | if defined? 
NewRelic 428 | class NewRelicLowkiqMiddleware 429 | include NewRelic::Agent::Instrumentation::ControllerInstrumentation 430 | 431 | def call(worker, batch, &block) 432 | opts = { 433 | category: 'OtherTransaction/LowkiqJob', 434 | class_name: worker.name, 435 | name: :perform, 436 | } 437 | 438 | perform_action_with_newrelic_trace opts do 439 | block.call 440 | end 441 | end 442 | end 443 | 444 | Lowkiq.server_middlewares << NewRelicLowkiqMiddleware.new 445 | end 446 | 447 | # Rails reloader, в том числе отвечает за высвобождение ActiveRecord коннектов 448 | Lowkiq.server_middlewares << -> (worker, batch, &block) do 449 | Rails.application.reloader.wrap do 450 | block.call 451 | end 452 | end 453 | 454 | Lowkiq.on_server_init = ->() do 455 | [[ActiveRecord::Base, ActiveRecord::Base.configurations[Rails.env]]].each do |(klass, init_config)| 456 | klass.connection_pool.disconnect! 457 | config = init_config.merge 'pool' => Lowkiq.threads_per_node 458 | klass.establish_connection(config) 459 | end 460 | end 461 | ``` 462 | 463 | Запуск: `bundle exec lowkiq -r ./config/environment.rb` 464 | 465 | ## Splitter 466 | 467 | У каждого воркера есть несколько шардов: 468 | 469 | ``` 470 | # worker: shard ids 471 | worker A: 0, 1, 2 472 | worker B: 0, 1, 2, 3 473 | worker C: 0 474 | worker D: 0, 1 475 | ``` 476 | 477 | Lowkiq использует фиксированное кол-во тредов для обработки задач, следовательно нужно распределить шарды 478 | между тредами. Этим занимается Splitter. 479 | 480 | Чтобы определить набор шардов, которые будет обрабатывать тред, поместим их в один список: 481 | 482 | ``` 483 | A0, A1, A2, B0, B1, B2, B3, C0, D0, D1 484 | ``` 485 | 486 | Рассмотрим Default splitter, который равномерно распределяет шарды по тредам единственной ноды. 
487 | 488 | Если `threads_per_node` установлено в 3, то распределение будет таким: 489 | 490 | ``` 491 | # thread id: shards 492 | t0: A0, B0, B3, D1 493 | t1: A1, B1, C0 494 | t2: A2, B2, D0 495 | ``` 496 | 497 | Помимо Default есть ByNode splitter. Он позволяет распределить нагрузку по нескольким процессам (нодам). 498 | 499 | ``` 500 | Lowkiq.build_splitter = -> () do 501 | Lowkiq.build_by_node_splitter( 502 | ENV.fetch('LOWKIQ_NUMBER_OF_NODES').to_i, 503 | ENV.fetch('LOWKIQ_NODE_NUMBER').to_i 504 | ) 505 | end 506 | ``` 507 | 508 | Таким образом, вместо одного процесса нужно запустить несколько и указать переменные окружения: 509 | 510 | ``` 511 | # process 0 512 | LOWKIQ_NUMBER_OF_NODES=2 LOWKIQ_NODE_NUMBER=0 bundle exec lowkiq -r ./lib/app.rb 513 | 514 | # process 1 515 | LOWKIQ_NUMBER_OF_NODES=2 LOWKIQ_NODE_NUMBER=1 bundle exec lowkiq -r ./lib/app.rb 516 | ``` 517 | 518 | Отмечу, что общее количество тредов будет равно произведению `ENV.fetch('LOWKIQ_NUMBER_OF_NODES')` и `Lowkiq.threads_per_node`. 519 | 520 | Вы можете написать свой сплиттер, если ваше приложение требует особого распределения шардов между тредами или нодами. 521 | 522 | ## Scheduler 523 | 524 | Каждый тред обрабатывает набор шардов. За выбор шарда для обработки отвечает планировщик. 525 | Каждый поток имеет свой собственный экземпляр планировщика. 526 | 527 | Lowkiq имеет 2 планировщика на выбор. 528 | Первый, `Seq` - последовательно перебирает шарды. 529 | Второй, `Lag` - выбирает шард с самой старой задачей, т.е. стремится минимизировать лаг. 530 | Используется по умолчанию. 
531 | 532 | Планировщик задается через настройки: 533 | 534 | ``` 535 | Lowkiq.build_scheduler = ->() { Lowkiq.build_seq_scheduler } 536 | # или 537 | Lowkiq.build_scheduler = ->() { Lowkiq.build_lag_scheduler } 538 | ``` 539 | 540 | ## Рекомендации по настройке 541 | 542 | ### `SomeWorker.shards_count` 543 | 544 | Сумма `shards_count` всех воркеров не должна быть меньше `Lowkiq.threads_per_node` 545 | иначе треды будут простаивать. 546 | 547 | Сумма `shards_count` всех воркеров может быть равна `Lowkiq.threads_per_node`. 548 | В этом случае тред обрабатывает единственный шард. Это имеет смысл только при равномерной нагрузке на очереди. 549 | 550 | Сумма `shards_count` всех воркеров может быть больше `Lowkiq.threads_per_node`. 551 | В этом случае `shards_count` можно рассматривать в качестве приоритета. 552 | Чем он выше, тем чаще задачи этой очереди будут обрабатываться. 553 | 554 | Нет смысла устанавливать `shards_count` одного воркера больше чем `Lowkiq.threads_per_node`, 555 | т.к. каждый тред будет обрабатывать более одного шарда этой очереди, что увеличит накладные расходы. 556 | 557 | ### `SomeWorker.max_retry_count` 558 | 559 | Исходя из `retry_in` и `max_retry_count`, 560 | можно вычислить примерное время, которая задача проведет в очереди. 561 | Под задачей тут понимается payload задачи. 562 | После достижения `max_retry_count` в морг переносится только payload с минимальным score. 
563 | 564 | Для `retry_in`, заданного по умолчанию получается следующая таблица: 565 | 566 | ```ruby 567 | def retry_in(retry_count) 568 | (retry_count ** 4) + 15 + (rand(30) * (retry_count + 1)) 569 | end 570 | ``` 571 | 572 | | `max_retry_count` | кол-во дней жизни задачи | 573 | | --- | --- | 574 | | 14 | 1 | 575 | | 16 | 2 | 576 | | 18 | 3 | 577 | | 19 | 5 | 578 | | 20 | 6 | 579 | | 21 | 8 | 580 | | 22 | 10 | 581 | | 23 | 13 | 582 | | 24 | 16 | 583 | | 25 | 20 | 584 | 585 | `(0...25).map{ |c| retry_in c }.sum / 60 / 60 / 24` 586 | 587 | 588 | ## Изменение количества шардов воркера 589 | 590 | Старайтесь сразу расчитать количество шардов и не именять их количество в будущем. 591 | 592 | Если вы можете отключить добавление новых заданий, 593 | то дождитесь опустошения очередей и выкатите новую версию кода с измененным количеством шардов. 594 | 595 | Если такой возможности нет, воспользуйтесь следующим сценарием. 596 | 597 | Например, есть воркер: 598 | 599 | ```ruby 600 | module ATestWorker 601 | extend Lowkiq::Worker 602 | 603 | self.shards_count = 5 604 | 605 | def self.perform(payloads_by_id) 606 | some_code 607 | end 608 | end 609 | ``` 610 | 611 | Теперь нужно указать новое кол-во шардов и задать новое имя очереди: 612 | 613 | ```ruby 614 | module ATestWorker 615 | extend Lowkiq::Worker 616 | 617 | self.shards_count = 10 618 | self.queue_name = "#{self.name}_V2" 619 | 620 | def self.perform(payloads_by_id) 621 | some_code 622 | end 623 | end 624 | ``` 625 | 626 | И добавить воркер, перекладывающий задачи из старой очереди в новую: 627 | 628 | ```ruby 629 | module ATestMigrationWorker 630 | extend Lowkiq::Worker 631 | 632 | self.shards_count = 5 633 | self.queue_name = "ATestWorker" 634 | 635 | def self.perform(payloads_by_id) 636 | jobs = payloads_by_id.each_with_object([]) do |(id, payloads), acc| 637 | payloads.each do |payload| 638 | acc << { id: id, payload: payload } 639 | end 640 | end 641 | 642 | ATestWorker.perform_async jobs 643 | end 644 | end 645 
| ``` 646 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Gem Version](https://badge.fury.io/rb/lowkiq.svg)](https://badge.fury.io/rb/lowkiq) 2 | 3 | # Lowkiq 4 | 5 | Ordered background jobs processing 6 | 7 | ![dashboard](doc/dashboard.png) 8 | 9 | * [Rationale](#rationale) 10 | * [Description](#description) 11 | * [Sidekiq comparison](#sidekiq-comparison) 12 | * [Queue](#queue) 13 | + [Calculation algorithm for `retry_count` and `perform_in`](#calculation-algorithm-for-retry_count-and-perform_in) 14 | + [Job merging rules](#job-merging-rules) 15 | * [Install](#install) 16 | * [Api](#api) 17 | * [Ring app](#ring-app) 18 | * [Configuration](#configuration) 19 | * [Performance](#performance) 20 | * [Execution](#execution) 21 | * [Shutdown](#shutdown) 22 | * [Debug](#debug) 23 | * [Development](#development) 24 | * [Exceptions](#exceptions) 25 | * [Rails integration](#rails-integration) 26 | * [Splitter](#splitter) 27 | * [Scheduler](#scheduler) 28 | * [Recommendations on configuration](#recommendations-on-configuration) 29 | + [`SomeWorker.shards_count`](#someworkershards_count) 30 | + [`SomeWorker.max_retry_count`](#someworkermax_retry_count) 31 | * [Changing of worker's shards amount](#changing-of-workers-shards-amount) 32 | * [Extended error info](#extended-error-info) 33 | 34 | ## Rationale 35 | 36 | We've faced some problems using Sidekiq while processing messages from a side system. 37 | For instance, the message is the data of an order at a particular time. 38 | The side system will send new data of an order on every change. 39 | Orders are frequently updated and a queue contains some closely located messages of the same order. 40 | 41 | Sidekiq doesn't guarantee a strict message order, because a queue is processed by multiple threads. 42 | For example, we've received 2 messages: M1 and M2. 
43 | Sidekiq handlers begin to process them parallel, 44 | so M2 can be processed before M1. 45 | 46 | Parallel processing of such kind of messages can result in: 47 | 48 | + deadlocks 49 | + overwriting new data with an old one 50 | 51 | Lowkiq has been created to eliminate such problems by avoiding parallel task processing within one entity. 52 | 53 | ## Description 54 | 55 | Lowkiq's queues are reliable i.e., 56 | Lowkiq saves information about a job being processed 57 | and returns uncompleted jobs to the queue on startup. 58 | 59 | Jobs in queues are ordered by preassigned execution time, so they are not FIFO queues. 60 | 61 | Every job has its identifier. Lowkiq guarantees that jobs with equal IDs are processed by the same thread. 62 | 63 | Every queue is divided into a permanent set of shards. 64 | A job is placed into a particular shard based on an id of the job. 65 | So jobs with the same id are always placed into the same shard. 66 | All jobs of the shard are always processed with the same thread. 67 | This guarantees the sequential processing of jobs with the same ids and excludes the possibility of locks. 68 | 69 | Besides the id, every job has a payload. 70 | Payloads are accumulated for jobs with the same id. 71 | So all accumulated payloads will be processed together. 72 | It's useful when you need to process only the last message and drop all previous ones. 73 | 74 | A worker corresponds to a queue and contains a job processing logic. 75 | 76 | The fixed number of threads is used to process all jobs of all queues. 77 | Adding or removing queues or their shards won't affect the number of threads. 78 | 79 | ## Sidekiq comparison 80 | 81 | If Sidekiq is good for your tasks you should use it. 
82 | But if you use plugins like 83 | [sidekiq-grouping](https://github.com/gzigzigzeo/sidekiq-grouping), 84 | [sidekiq-unique-jobs](https://github.com/mhenrixon/sidekiq-unique-jobs), 85 | [sidekiq-merger](https://github.com/dtaniwaki/sidekiq-merger) 86 | or implement your own lock system, you should look at Lowkiq. 87 | 88 | For example, sidekiq-grouping accumulates a batch of jobs then enqueues it and accumulates the next batch. 89 | With this approach, a queue can contain two batches with data of the same order. 90 | These batches are parallel processed with different threads, so we come back to the initial problem. 91 | 92 | Lowkiq was designed to avoid any type of locking. 93 | 94 | Furthermore, Lowkiq's queues are reliable. Only Sidekiq Pro or plugins can add such functionality. 95 | 96 | This [benchmark](examples/benchmark) shows overhead on Redis usage. 97 | These are the results for 5 threads, 100,000 blank jobs: 98 | 99 | + lowkiq: 155 sec or 1.55 ms per job 100 | + lowkiq +hiredis: 80 sec or 0.80 ms per job 101 | + sidekiq: 15 sec or 0.15 ms per job 102 | 103 | This difference is related to different queues structure. 104 | Sidekiq uses one list for all workers and fetches the job entirely for O(1). 105 | Lowkiq uses several data structures, including sorted sets for keeping ids of jobs. 106 | So fetching only an id of a job takes O(log(N)). 107 | 108 | ## Queue 109 | 110 | Please, look at [the presentation](https://docs.google.com/presentation/d/e/2PACX-1vRdwA2Ck22r26KV1DbY__XcYpj2FdlnR-2G05w1YULErnJLB_JL1itYbBC6_JbLSPOHwJ0nwvnIHH2A/pub?start=false&loop=false&delayms=3000). 111 | 112 | Every job has the following attributes: 113 | 114 | + `id` is a job identifier with string type. 115 | + `payloads` is a sorted set of payloads ordered by its score. A payload is an object. A score is a real number. 116 | + `perform_in` is planned execution time. It's a Unix timestamp with a real number type. 117 | + `retry_count` is amount of retries. It's a real number. 
118 | 119 | For example, `id` can be an identifier of a replicated entity. 120 | `payloads` is a sorted set ordered by a score of payload and resulted by grouping a payload of the job by its `id`. 121 | `payload` can be a ruby object because it is serialized by `Marshal.dump`. 122 | `score` can be `payload`'s creation date (Unix timestamp) or its monotonically increasing version number. 123 | By default, `score` and `perform_in` are current Unix timestamp. 124 | `retry_count` for a new unprocessed job equals to `-1`, 125 | for one-time failed is `0`, so the planned retries are counted, not the performed ones. 126 | 127 | Job execution can be unsuccessful. In this case, its `retry_count` is incremented, the new `perform_in` is calculated with a predefined formula, and it moves back to a queue. 128 | 129 | When `retry_count` becomes `>=` `max_retry_count`, the element of `payloads` with the lowest (oldest) score is moved to a morgue, 130 | the remaining elements are moved back to the queue, wherein `retry_count` and `perform_in` are reset to `-1` and `now()` respectively. 131 | 132 | ### Calculation algorithm for `retry_count` and `perform_in` 133 | 134 | 0. a job's been executed and failed 135 | 1. `retry_count++` 136 | 2. `perform_in = now + retry_in(retry_count)` 137 | 3. `if retry_count >= max_retry_count` the job will be moved to a morgue. 138 | 139 | | type | `retry_count` | `perform_in` | 140 | | --- | --- | --- | 141 | | new haven't been executed | -1 | set or `now()` | 142 | | new failed | 0 | `now() + retry_in(0)` | 143 | | retry failed | 1 | `now() + retry_in(1)` | 144 | 145 | If `max_retry_count = 1`, retries stop. 
146 | 147 | ### Job merging rules 148 | 149 | They are applied when: 150 | 151 | + a job has been in a queue and a new one with the same id is added 152 | + a job is failed, but a new one with the same id has been added 153 | + a job from a morgue is moved back to a queue, but the queue has had a job with the same id 154 | 155 | Algorithm: 156 | 157 | + payloads are merged, the minimal score is chosen for equal payloads 158 | + if a new job and queued job is merged, `perform_in` and `retry_count` is taken from the job from the queue 159 | + if a failed job and queued job is merged, `perform_in` and `retry_count` is taken from the failed one 160 | + if morgue job and queued job is merged, `perform_in = now()`, `retry_count = -1` 161 | 162 | Example: 163 | 164 | ``` 165 | # v1 is the first version and v2 is the second 166 | # #{"v1": 1} is a sorted set of a single element, the payload is "v1", the score is 1 167 | 168 | # a job is in a queue 169 | { id: "1", payloads: #{"v1": 1, "v2": 2}, retry_count: 0, perform_in: 1536323288 } 170 | # a job which is being added 171 | { id: "1", payloads: #{"v2": 3, "v3": 4}, retry_count: -1, perform_in: 1536323290 } 172 | 173 | # a resulted job in the queue 174 | { id: "1", payloads: #{"v1": 1, "v2": 3, "v3": 4}, retry_count: 0, perform_in: 1536323288 } 175 | ``` 176 | 177 | A morgue is a part of a queue. Jobs in a morgue are not processed. 178 | A job in a morgue has the following attributes: 179 | 180 | + id is the job identifier 181 | + payloads 182 | 183 | A job from morgue can be moved back to the queue, `retry_count` = 0 and `perform_in = now()` would be set. 184 | 185 | ## Install 186 | 187 | ``` 188 | # Gemfile 189 | 190 | gem 'lowkiq' 191 | ``` 192 | 193 | Redis >= 3.2 194 | 195 | ## Api 196 | 197 | ```ruby 198 | module ATestWorker 199 | extend Lowkiq::Worker 200 | 201 | self.shards_count = 24 202 | self.batch_size = 10 203 | self.max_retry_count = 5 204 | 205 | def self.retry_in(count) 206 | 10 * (count + 1) # (i.e. 
10, 20, 30, 40, 50) 207 | end 208 | 209 | def self.retries_exhausted(batch) 210 | batch.each do |job| 211 | Rails.logger.info "retries exhausted for #{name} with error #{job[:error]}" 212 | end 213 | end 214 | 215 | def self.perform(payloads_by_id) 216 | # payloads_by_id is a hash map 217 | payloads_by_id.each do |id, payloads| 218 | # payloads are sorted by score, from old to new (min to max) 219 | payloads.each do |payload| 220 | do_some_work(id, payload) 221 | end 222 | end 223 | end 224 | end 225 | ``` 226 | 227 | And then you have to add it to Lowkiq in your initializer file due to problems with autoloading: 228 | 229 | ```ruby 230 | Lowkiq.workers = [ ATestWorker ] 231 | ``` 232 | 233 | Default values: 234 | 235 | ```ruby 236 | self.shards_count = 5 237 | self.batch_size = 1 238 | self.max_retry_count = 25 239 | self.queue_name = self.name 240 | 241 | # i.e. 15, 16, 31, 96, 271, ... seconds + a random amount of time 242 | def retry_in(retry_count) 243 | (retry_count ** 4) + 15 + (rand(30) * (retry_count + 1)) 244 | end 245 | ``` 246 | 247 | ```ruby 248 | ATestWorker.perform_async [ 249 | { id: 0 }, 250 | { id: 1, payload: { attr: 'v1' } }, 251 | { id: 2, payload: { attr: 'v1' }, score: Time.now.to_f, perform_in: Time.now.to_f }, 252 | ] 253 | # payload by default equals to "" 254 | # score and perform_in by default equals to Time.now.to_f 255 | ``` 256 | 257 | It is possible to redefine `perform_async` and calculate `id`, `score` и `perform_in` in a worker code: 258 | 259 | ```ruby 260 | module ATestWorker 261 | extend Lowkiq::Worker 262 | 263 | def self.perform_async(jobs) 264 | jobs.each do |job| 265 | job.merge! id: job[:payload][:id] 266 | end 267 | super 268 | end 269 | 270 | def self.perform(payloads_by_id) 271 | #... 272 | end 273 | end 274 | 275 | ATestWorker.perform_async 1000.times.map { |id| { payload: {id: id} } } 276 | ``` 277 | 278 | ## Ring app 279 | 280 | `Lowkiq::Web` - a ring app. 
281 | 282 | + `/` - a dashboard 283 | + `/api/v1/stats` - queue length, morgue length, lag for each worker and total result 284 | 285 | ## Configuration 286 | 287 | Options and their default values are: 288 | 289 | + `Lowkiq.workers = []`- list of workers to use. Since 1.1.0. 290 | + `Lowkiq.poll_interval = 1` - delay in seconds between queue polling for new jobs. 291 | Used only if a queue was empty in a previous cycle or an error occurred. 292 | + `Lowkiq.threads_per_node = 5` - threads per node. 293 | + `Lowkiq.redis = ->() { Redis.new url: ENV.fetch('REDIS_URL') }` - redis connection options 294 | + `Lowkiq.client_pool_size = 5` - redis pool size for queueing jobs 295 | + `Lowkiq.pool_timeout = 5` - client and server redis pool timeout 296 | + `Lowkiq.server_middlewares = []` - a middleware list, used when job is processed 297 | + `Lowkiq.client_middlewares = []` - a middleware list, used when job is enqueued 298 | + `Lowkiq.on_server_init = ->() {}` - a lambda is being executed when server inits 299 | + `Lowkiq.build_scheduler = ->() { Lowkiq.build_lag_scheduler }` is a scheduler 300 | + `Lowkiq.build_splitter = ->() { Lowkiq.build_default_splitter }` is a splitter 301 | + `Lowkiq.last_words = ->(ex) {}` is an exception handler of descendants of `StandardError` caused the process stop 302 | + `Lowkiq.dump_payload = Marshal.method :dump` 303 | + `Lowkiq.load_payload = Marshal.method :load` 304 | 305 | + `Lowkiq.format_error = -> (error) { error.message }` can be used to add error backtrace. Please see [Extended error info](#extended-error-info) 306 | + `Lowkiq.dump_error = -> (msg) { msg }` can be used to implement a custom compression logic for errors. Recommended when using `Lowkiq.format_error`. 307 | + `Lowkiq.load_error = -> (msg) { msg }` can be used to implement a custom decompression logic for errors. 
308 | 309 | ```ruby 310 | $logger = Logger.new(STDOUT) 311 | 312 | Lowkiq.server_middlewares << -> (worker, batch, &block) do 313 | $logger.info "Started job for #{worker} #{batch}" 314 | block.call 315 | $logger.info "Finished job for #{worker} #{batch}" 316 | end 317 | 318 | Lowkiq.server_middlewares << -> (worker, batch, &block) do 319 | begin 320 | block.call 321 | rescue => e 322 | $logger.error "#{e.message} #{worker} #{batch}" 323 | raise e 324 | end 325 | end 326 | 327 | Lowkiq.client_middlewares << -> (worker, batch, &block) do 328 | $logger.info "Enqueueing job for #{worker} #{batch}" 329 | block.call 330 | $logger.info "Enqueued job for #{worker} #{batch}" 331 | end 332 | 333 | ``` 334 | 335 | ## Performance 336 | 337 | Use [hiredis](https://github.com/redis/hiredis-rb) for better performance. 338 | 339 | ```ruby 340 | # Gemfile 341 | 342 | gem "hiredis" 343 | ``` 344 | 345 | ```ruby 346 | # config 347 | 348 | Lowkiq.redis = ->() { Redis.new url: ENV.fetch('REDIS_URL'), driver: :hiredis } 349 | ``` 350 | 351 | ## Execution 352 | 353 | `lowkiq -r ./path_to_app` 354 | 355 | `path_to_app.rb` must load app. [Example](examples/dummy/lib/app.rb). 356 | 357 | The lazy loading of worker modules is unacceptable. 358 | For preliminarily loading modules use 359 | `require` 360 | or [`require_dependency`](https://api.rubyonrails.org/classes/ActiveSupport/Dependencies/Loadable.html#method-i-require_dependency) 361 | for Ruby on Rails. 362 | 363 | ## Shutdown 364 | 365 | Send TERM or INT signal to the process (Ctrl-C). 366 | The process will wait for executed jobs to finish. 367 | 368 | Note that if a queue is empty, the process sleeps `poll_interval` seconds, 369 | therefore, the process will not stop until the `poll_interval` seconds have passed. 
370 | 371 | ## Debug 372 | 373 | To get trace of all threads of an app: 374 | 375 | ``` 376 | kill -TTIN 377 | cat /tmp/lowkiq_ttin.txt 378 | ``` 379 | 380 | ## Development 381 | 382 | ``` 383 | docker-compose run --rm --service-port app bash 384 | bundle 385 | rspec 386 | cd examples/dummy ; bundle exec ../../exe/lowkiq -r ./lib/app.rb 387 | 388 | # open localhost:8080 389 | ``` 390 | 391 | ``` 392 | docker-compose run --rm --service-port frontend bash 393 | npm run dumb 394 | # open localhost:8081 395 | 396 | # npm run build 397 | # npm run web-api 398 | ``` 399 | 400 | ## Exceptions 401 | 402 | `StandardError` thrown by a worker are handled with middleware. Such exceptions don't lead to process stops. 403 | 404 | All other exceptions cause the process to stop. 405 | Lowkiq will wait for job execution by other threads. 406 | 407 | `StandardError` thrown outside of worker are passed to `Lowkiq.last_words`. 408 | For example, it can happen when Redis connection is lost or when Lowkiq's code has a bug. 409 | 410 | ## Rails integration 411 | 412 | ```ruby 413 | # config/routes.rb 414 | 415 | Rails.application.routes.draw do 416 | # ... 417 | mount Lowkiq::Web => '/lowkiq' 418 | # ... 419 | end 420 | ``` 421 | 422 | ```ruby 423 | # config/initializers/lowkiq.rb 424 | 425 | # configuration: 426 | # Lowkiq.redis = -> { Redis.new url: ENV.fetch('LOWKIQ_REDIS_URL') } 427 | # Lowkiq.threads_per_node = ENV.fetch('LOWKIQ_THREADS_PER_NODE').to_i 428 | # Lowkiq.client_pool_size = ENV.fetch('LOWKIQ_CLIENT_POOL_SIZE').to_i 429 | # ... 
430 | 431 | # since 1.1.0 432 | Lowkiq.workers = [ 433 | ATestWorker, 434 | OtherCoolWorker 435 | ] 436 | 437 | Lowkiq.server_middlewares << -> (worker, batch, &block) do 438 | logger = Rails.logger 439 | tag = "#{worker}-#{Thread.current.object_id}" 440 | 441 | logger.tagged(tag) do 442 | time_start = Time.now 443 | logger.info "#{time_start} Started job, batch: #{batch}" 444 | begin 445 | block.call 446 | rescue => e 447 | logger.error e.message 448 | raise e 449 | ensure 450 | time_end = Time.now 451 | logger.info "#{time_end} Finished job, duration: #{time_end - time_start} sec" 452 | end 453 | end 454 | end 455 | 456 | # Sentry integration 457 | Lowkiq.server_middlewares << -> (worker, batch, &block) do 458 | opts = { 459 | extra: { 460 | lowkiq: { 461 | worker: worker.name, 462 | batch: batch, 463 | } 464 | } 465 | } 466 | 467 | Raven.capture opts do 468 | block.call 469 | end 470 | end 471 | 472 | # NewRelic integration 473 | if defined? NewRelic 474 | class NewRelicLowkiqMiddleware 475 | include NewRelic::Agent::Instrumentation::ControllerInstrumentation 476 | 477 | def call(worker, batch, &block) 478 | opts = { 479 | category: 'OtherTransaction/LowkiqJob', 480 | class_name: worker.name, 481 | name: :perform, 482 | } 483 | 484 | perform_action_with_newrelic_trace opts do 485 | block.call 486 | end 487 | end 488 | end 489 | 490 | Lowkiq.server_middlewares << NewRelicLowkiqMiddleware.new 491 | end 492 | 493 | # Rails reloader, responsible for cleaning of ActiveRecord connections 494 | Lowkiq.server_middlewares << -> (worker, batch, &block) do 495 | Rails.application.reloader.wrap do 496 | block.call 497 | end 498 | end 499 | 500 | Lowkiq.on_server_init = ->() do 501 | [[ActiveRecord::Base, ActiveRecord::Base.configurations[Rails.env]]].each do |(klass, init_config)| 502 | klass.connection_pool.disconnect! 
503 | config = init_config.merge 'pool' => Lowkiq.threads_per_node 504 | klass.establish_connection(config) 505 | end 506 | end 507 | ``` 508 | 509 | Note: In Rails 7, the worker files wouldn't be loaded by default in the initializers since they are managed by the `main` autoloader. To solve this, we can wrap setting the workers around the `to_prepare` configuration. 510 | 511 | ```ruby 512 | Rails.application.config.to_prepare do 513 | Lowkiq.workers = [ 514 | ATestWorker, 515 | OtherCoolWorker 516 | ] 517 | end 518 | ``` 519 | 520 | Execution: `bundle exec lowkiq -r ./config/environment.rb` 521 | 522 | 523 | ## Splitter 524 | 525 | Each worker has several shards: 526 | 527 | ``` 528 | # worker: shard ids 529 | worker A: 0, 1, 2 530 | worker B: 0, 1, 2, 3 531 | worker C: 0 532 | worker D: 0, 1 533 | ``` 534 | 535 | Lowkiq uses a fixed number of threads for job processing, therefore it is necessary to distribute shards between threads. 536 | Splitter does it. 537 | 538 | To define a set of shards, which is being processed by a thread, let's move them to one list: 539 | 540 | ``` 541 | A0, A1, A2, B0, B1, B2, B3, C0, D0, D1 542 | ``` 543 | 544 | Default splitter evenly distributes shards by threads of a single node. 545 | 546 | If `threads_per_node` is set to 3, the distribution will be: 547 | 548 | ``` 549 | # thread id: shards 550 | t0: A0, B0, B3, D1 551 | t1: A1, B1, C0 552 | t2: A2, B2, D0 553 | ``` 554 | 555 | Besides Default Lowkiq has the ByNode splitter. It allows dividing the load by several processes (nodes). 
556 | 557 | ``` 558 | Lowkiq.build_splitter = -> () do 559 | Lowkiq.build_by_node_splitter( 560 | ENV.fetch('LOWKIQ_NUMBER_OF_NODES').to_i, 561 | ENV.fetch('LOWKIQ_NODE_NUMBER').to_i 562 | ) 563 | end 564 | ``` 565 | 566 | So, instead of a single process, you need to execute multiple ones and to set environment variables up: 567 | 568 | ``` 569 | # process 0 570 | LOWKIQ_NUMBER_OF_NODES=2 LOWKIQ_NODE_NUMBER=0 bundle exec lowkiq -r ./lib/app.rb 571 | 572 | # process 1 573 | LOWKIQ_NUMBER_OF_NODES=2 LOWKIQ_NODE_NUMBER=1 bundle exec lowkiq -r ./lib/app.rb 574 | ``` 575 | 576 | Summary amount of threads are equal product of `ENV.fetch('LOWKIQ_NUMBER_OF_NODES')` and `Lowkiq.threads_per_node`. 577 | 578 | You can also write your own splitter if your app needs an extra distribution of shards between threads or nodes. 579 | 580 | ## Scheduler 581 | 582 | Every thread processes a set of shards. The scheduler selects shard for processing. 583 | Every thread has its own instance of the scheduler. 584 | 585 | Lowkiq has 2 schedulers for your choice. 586 | `Seq` sequentially looks over shards. 587 | `Lag` chooses shard with the oldest job minimizing the lag. It's used by default. 588 | 589 | The scheduler can be set up through settings: 590 | 591 | ``` 592 | Lowkiq.build_scheduler = ->() { Lowkiq.build_seq_scheduler } 593 | # or 594 | Lowkiq.build_scheduler = ->() { Lowkiq.build_lag_scheduler } 595 | ``` 596 | 597 | ## Recommendations on configuration 598 | 599 | ### `SomeWorker.shards_count` 600 | 601 | Sum of `shards_count` of all workers shouldn't be less than `Lowkiq.threads_per_node` 602 | otherwise, threads will stay idle. 603 | 604 | Sum of `shards_count` of all workers can be equal to `Lowkiq.threads_per_node`. 605 | In this case, a thread processes a single shard. This makes sense only with a uniform queue load. 606 | 607 | Sum of `shards_count` of all workers can be more than `Lowkiq.threads_per_node`. 608 | In this case, `shards_count` can be counted as a priority. 
609 | The larger it is, the more often the tasks of this queue will be processed. 610 | 611 | There is no reason to set `shards_count` of one worker more than `Lowkiq.threads_per_node`, 612 | because every thread will handle more than one shard from this queue, so it increases the overhead. 613 | 614 | ### `SomeWorker.max_retry_count` 615 | 616 | From `retry_in` and `max_retry_count`, you can calculate the approximate time that a payload of a job will be in a queue. 617 | After `max_retry_count` is reached a payload with a minimal score will be moved to a morgue. 618 | 619 | For default `retry_in` we receive the following table. 620 | 621 | ```ruby 622 | def retry_in(retry_count) 623 | (retry_count ** 4) + 15 + (rand(30) * (retry_count + 1)) 624 | end 625 | ``` 626 | 627 | | `max_retry_count` | amount of days of job's life | 628 | | --- | --- | 629 | | 14 | 1 | 630 | | 16 | 2 | 631 | | 18 | 3 | 632 | | 19 | 5 | 633 | | 20 | 6 | 634 | | 21 | 8 | 635 | | 22 | 10 | 636 | | 23 | 13 | 637 | | 24 | 16 | 638 | | 25 | 20 | 639 | 640 | `(0...25).map{ |c| retry_in c }.sum / 60 / 60 / 24` 641 | 642 | 643 | ## Changing of worker's shards amount 644 | 645 | Try to count the number of shards right away and don't change it in the future. 646 | 647 | If you can disable adding of new jobs, wait for queues to get empty, and deploy the new version of code with a changed amount of shards. 
648 | 649 | If you can't do it, follow the next steps: 650 | 651 | A worker example: 652 | 653 | ```ruby 654 | module ATestWorker 655 | extend Lowkiq::Worker 656 | 657 | self.shards_count = 5 658 | 659 | def self.perform(payloads_by_id) 660 | some_code 661 | end 662 | end 663 | ``` 664 | 665 | Set the number of shards and the new queue name: 666 | 667 | ```ruby 668 | module ATestWorker 669 | extend Lowkiq::Worker 670 | 671 | self.shards_count = 10 672 | self.queue_name = "#{self.name}_V2" 673 | 674 | def self.perform(payloads_by_id) 675 | some_code 676 | end 677 | end 678 | ``` 679 | 680 | Add a worker moving jobs from the old queue to the new one: 681 | 682 | ```ruby 683 | module ATestMigrationWorker 684 | extend Lowkiq::Worker 685 | 686 | self.shards_count = 5 687 | self.queue_name = "ATestWorker" 688 | 689 | def self.perform(payloads_by_id) 690 | jobs = payloads_by_id.each_with_object([]) do |(id, payloads), acc| 691 | payloads.each do |payload| 692 | acc << { id: id, payload: payload } 693 | end 694 | end 695 | 696 | ATestWorker.perform_async jobs 697 | end 698 | end 699 | ``` 700 | 701 | ## Extended error info 702 | For failed jobs, lowkiq only stores `error.message` by default. This can be configured by using `Lowkiq.format_error` setting. 703 | `Lowkiq.dump` and `Lowkiq.load_error` can be used to compress and decompress the error messages respectively. 704 | Example: 705 | ```ruby 706 | Lowkiq.format_error = -> (error) { error.full_message(highlight: false) } 707 | 708 | Lowkiq.dump_error = Proc.new do |msg| 709 | compressed = Zlib::Deflate.deflate(msg.to_s) 710 | Base64.encode64(compressed) 711 | end 712 | 713 | Lowkiq.load_error = Proc.new do |input| 714 | decoded = Base64.decode64(input) 715 | Zlib::Inflate.inflate(decoded) 716 | rescue 717 | input 718 | end 719 | ``` 720 | --------------------------------------------------------------------------------