├── .editorconfig ├── .eslintrc.yml ├── .github ├── issue_template.md ├── stale.yml └── workflows │ ├── codeql-analysis.yml │ ├── node.js.yml │ └── release.yml ├── .gitignore ├── .mocharc.json ├── .npmignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE.md ├── MIGRATION.md ├── PATTERNS.md ├── README.md ├── REFERENCE.md ├── commandTransform.js ├── commitlint.config.js ├── docker-compose.yml ├── docs ├── README.md ├── _config.yml └── job-lifecycle.png ├── generateRawScripts.js ├── index.d.ts ├── index.js ├── lib ├── backoffs.js ├── commands │ ├── addJob-6.lua │ ├── addLog-2.lua │ ├── cleanJobsInSet-3.lua │ ├── extendLock-2.lua │ ├── getCountsPerPriority-4.lua │ ├── includes │ │ ├── addJobWithPriority.lua │ │ ├── batches.lua │ │ ├── collectMetrics.lua │ │ ├── debounceJob.lua │ │ ├── getTargetQueueList.lua │ │ ├── removeDebounceKey.lua │ │ ├── removeDebounceKeyIfNeeded.lua │ │ └── removeLock.lua │ ├── index.js │ ├── isFinished-2.lua │ ├── isJobInList-1.lua │ ├── moveStalledJobsToWait-7.lua │ ├── moveToActive-8.lua │ ├── moveToDelayed-4.lua │ ├── moveToFinished-9.lua │ ├── obliterate-2.lua │ ├── pause-5.lua │ ├── promote-5.lua │ ├── releaseLock-1.lua │ ├── removeJob-11.lua │ ├── removeJobs-8.lua │ ├── removeRepeatable-2.lua │ ├── reprocessJob-6.lua │ ├── retryJob-7.lua │ ├── retryJobs-5.lua │ ├── saveStacktrace-1.lua │ ├── script-loader.js │ ├── takeLock-1.lua │ ├── updateData-1.lua │ ├── updateDelaySet-6.lua │ └── updateProgress-2.lua ├── errors.js ├── getters.js ├── job.js ├── p-timeout.js ├── process │ ├── child-pool.js │ ├── master.js │ ├── sandbox.js │ └── utils.js ├── queue.js ├── repeatable.js ├── scripts.js ├── timer-manager.js ├── utils.js └── worker.js ├── package.json ├── support ├── logo.png ├── logo.sketch ├── logo.svg └── logo@2x.png ├── test ├── .eslintrc.yml ├── fixtures │ ├── fixture_processor.js │ ├── fixture_processor_bar.js │ ├── fixture_processor_broken.js │ ├── fixture_processor_callback.js │ ├── fixture_processor_callback_fail.js │ ├── fixture_processor_crash.js │ ├── fixture_processor_data.js │ ├── fixture_processor_discard.js │ ├── fixture_processor_exit.js │ ├── fixture_processor_fail.js │ ├── fixture_processor_foo.js │ ├── fixture_processor_progress.js │ └── fixture_processor_slow.js ├── test_child-pool.js ├── test_connection.js ├── test_events.js ├── test_getters.js ├── test_job.js ├── test_metrics.js ├── test_obliterate.js ├── test_pause.js ├── test_queue.js ├── test_rate_limiter.js ├── test_repeat.js ├── test_sandboxed_process.js ├── test_when_current_jobs_finished.js ├── test_worker.js └── utils.js └── yarn.lock /.editorconfig: -------------------------------------------------------------------------------- 1 | ; EditorConfig file: http://EditorConfig.org 2 | ; Install the "EditorConfig" plugin into Sublime Text to use 3 | 4 | root = true 5 | 6 | [*] 7 | charset = utf-8 8 | end_of_line = lf 9 | insert_final_newline = true 10 | indent_style = space 11 | indent_size = 2 12 | trim_trailing_whitespace = true 13 | -------------------------------------------------------------------------------- /.eslintrc.yml: -------------------------------------------------------------------------------- 1 | env: 2 | node: true 3 | 4 | parserOptions: 5 | ecmaVersion: 2018 6 | 7 | extends: 8 | - eslint:recommended 9 | - plugin:mocha/recommended 10 | - plugin:node/recommended 11 | 12 | plugins: 13 | - mocha 14 | - node 15 | 16 | rules: 17 | valid-jsdoc: 0 18 | func-style: 0 19 | no-use-before-define: 0 20 | camelcase: 1 21 | no-unused-vars: 2 22 | no-alert: 2 
23 | no-console: [2, { allow: ['warn', 'error'] }] 24 | no-underscore-dangle: 0 25 | object-shorthand: 0 26 | 27 | strict: [2, 'global'] 28 | no-var: 2 29 | prefer-arrow-callback: 2 30 | prefer-const: 2 31 | no-inner-declarations: 0 32 | newline-per-chained-call: 2 33 | 34 | mocha/no-exclusive-tests: 2 35 | mocha/no-hooks-for-single-case: 0 36 | mocha/no-mocha-arrows: 0 37 | mocha/no-setup-in-describe: 0 38 | mocha/no-sibling-hooks: 0 39 | mocha/no-skipped-tests: 0 40 | 41 | node/no-deprecated-api: 0 42 | -------------------------------------------------------------------------------- /.github/issue_template.md: -------------------------------------------------------------------------------- 1 | 11 | 12 | ## Description 13 | 14 | ## Minimal, Working Test code to reproduce the issue. 15 | #### (An easy to reproduce test case will dramatically decrease the resolution time.) 16 | 17 | ## Bull version 18 | 19 | ## Additional information 20 | 21 | -------------------------------------------------------------------------------- /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Number of days of inactivity before an issue becomes stale 2 | daysUntilStale: 60 3 | # Number of days of inactivity before a stale issue is closed 4 | daysUntilClose: 7 5 | # Issues with these labels will never be considered stale 6 | exemptLabels: 7 | - pinned 8 | - security 9 | - enhancement 10 | - BETTER DOC 11 | - bug 12 | # Label to use when marking an issue as stale 13 | staleLabel: wontfix 14 | # Comment to post when marking an issue as stale. Set to `false` to disable 15 | markComment: > 16 | This issue has been automatically marked as stale because it has not had 17 | recent activity. It will be closed if no further activity occurs. Thank you 18 | for your contributions. 19 | # Comment to post when closing a stale issue. Set to `false` to disable 20 | closeComment: false 21 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ develop ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ develop ] 20 | schedule: 21 | - cron: '24 2 * * 0' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'javascript' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] 37 | # Learn more about CodeQL language support at https://git.io/codeql-language-support 38 | 39 | steps: 40 | - name: Checkout repository 41 | uses: actions/checkout@v2 42 | 43 | # Initializes the CodeQL tools for scanning. 
44 | - name: Initialize CodeQL 45 | uses: github/codeql-action/init@v1 46 | with: 47 | languages: ${{ matrix.language }} 48 | # If you wish to specify custom queries, you can do so here or in a config file. 49 | # By default, queries listed here will override any specified in a config file. 50 | # Prefix the list here with "+" to use these queries and those in the config file. 51 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 52 | 53 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 54 | # If this step fails, then you should remove it and run the build manually (see below) 55 | - name: Autobuild 56 | uses: github/codeql-action/autobuild@v1 57 | 58 | # ℹ️ Command-line programs to run using the OS shell. 59 | # 📚 https://git.io/JvXDl 60 | 61 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 62 | # and modify them (or add more) to build your code if your project 63 | # uses a compiled language 64 | 65 | #- run: | 66 | # make bootstrap 67 | # make release 68 | 69 | - name: Perform CodeQL Analysis 70 | uses: github/codeql-action/analyze@v1 71 | -------------------------------------------------------------------------------- /.github/workflows/node.js.yml: -------------------------------------------------------------------------------- 1 | # This workflow will do a clean install of node dependencies, build the source code and run tests across different versions of node 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-nodejs-with-github-actions 3 | 4 | name: Node.js CI 5 | 6 | on: 7 | push: 8 | branches: [develop] 9 | pull_request: 10 | branches: [develop] 11 | 12 | permissions: 13 | contents: read # to fetch code (actions/checkout) 14 | 15 | jobs: 16 | build: 17 | runs-on: ubuntu-latest 18 | 19 | services: 20 | redis: 21 | image: redis 22 | ports: 23 | - 6379:6379 24 | 25 | strategy: 26 | matrix: 27 | node-version: [12.x, 14.x, 16.x] 28 | redis-version: [7.0-alpine] 29 | include: 30 | - node-version: '16.x' 31 | redis-version: 6-alpine 32 | 33 | steps: 34 | - uses: actions/checkout@v2 35 | - name: Use Node.js ${{ matrix.node-version }} 36 | uses: actions/setup-node@v1 37 | with: 38 | node-version: ${{ matrix.node-version }} 39 | - run: yarn install --frozen-lockfile --non-interactive 40 | - run: yarn prettier -- --list-different 41 | - run: yarn test 42 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | branches: 5 | - develop 6 | permissions: {} 7 | jobs: 8 | release: 9 | permissions: 10 | contents: write # for release publishing 11 | 12 | name: Release 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v2 17 | with: 18 | fetch-depth: 0 19 | - name: Setup Node.js 20 | uses: actions/setup-node@v1 21 | with: 22 | node-version: 12 23 | - name: Install dependencies 24 | run: yarn install --frozen-lockfile --non-interactive 25 | - name: Generate scripts 26 | run: yarn pretest 27 | - name: Release 28 | env: 29 | GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} 30 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }} 31 | run: npx semantic-release 32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | tmp 3 | coverage 4 | *.rdb 5 | 
.vscode 6 | package-lock.json 7 | .nyc_output 8 | rawScripts 9 | lib/scripts 10 | -------------------------------------------------------------------------------- /.mocharc.json: -------------------------------------------------------------------------------- 1 | { 2 | "timeout": 5000, 3 | "reporter": "spec", 4 | "exit": true 5 | } 6 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | tmp 3 | test 4 | bugs 5 | docs 6 | support 7 | .github 8 | .editorconfig 9 | .eslintrc.yml 10 | .travis.yml 11 | .gitignore 12 | *.md 13 | commitlint.config.js 14 | rawScripts 15 | commandTransform.js 16 | docker-compose.yml 17 | generateRawScripts.js 18 | .nyc_output 19 | .mocharc.json 20 | lib/commands/*.js 21 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at manuel@optimalbits.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 44 | 45 | [homepage]: http://contributor-covenant.org 46 | [version]: http://contributor-covenant.org/version/1/4/ 47 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Release process 2 | --------------- 3 | 4 | First, update `CHANGELOG.md` with the release number about to be released. 5 | 6 | npm outdated --depth 0 # See if you can upgrade any dependencies 7 | npm version [major|minor|patch] # Update package.json 8 | npm publish # Tag repo and publish npm package 9 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | 2 | License 3 | ------- 4 | 5 | (The MIT License) 6 | 7 | Copyright © 2013-2018 Manuel Astudillo 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 14 | -------------------------------------------------------------------------------- /MIGRATION.md: -------------------------------------------------------------------------------- 1 | # Migration from 2.x to 3.0.0 2 | 3 | Although version 3.0 is almost backwards compatible with 2.x, there are some important changes that need 4 | to be taken into consideration before upgrading to 3.0. 5 | 6 | # Complete and failed sets. 7 | 8 | In 3.x, the jobs that are completed and failed end in two ZSETS, instead of a standard SET. 9 | This gives the possibility of retrieving a subset of the jobs in a highly performant way, which 10 | is useful for graphical tools and scripts. However, an old queue will not be compatible with 3.x.
11 | You will need to either delete the complete and failed keys, or create a new queue. 12 | 13 | # Data structure changes 14 | 15 | - `job.jobId` is now `job.id`. 16 | - `toJSON()` now exposes the job's data and options as `job.data` and `job.opts`. 17 | 18 | 19 | 20 | 21 | # Queue instantiation options 22 | 23 | The options have been sanitized and cleaned up. Check the [Reference](./REFERENCE.md) to see the new structure. 24 | 25 | 26 | # Events 27 | 28 | All events are now published atomically in the scripts where they are relevant, which increases efficiency and 29 | reduces the chances of hazards. 30 | 31 | The 'ready' event has been removed; you can use ```Queue#isReady()``` instead if you want to know when the queue 32 | has been initialized. Normally you will never need to wait for readiness, since this is taken care of internally 33 | by the queue methods that require the queue to be ready. 34 | 35 | Event arguments are now the same for local and global events. This affects events such as completed and failed, 36 | where in 2.x the first argument was a job instance for local jobs. Now both local and global events pass 37 | jobId as the first argument to the event handler. If the job instance is needed, it can be easily retrieved with 38 | ```Job.fromId()```. 39 | 40 | -------------------------------------------------------------------------------- /PATTERNS.md: -------------------------------------------------------------------------------- 1 | 2 | Patterns 3 | ======== 4 | 5 | Here are a few examples of useful patterns that are often implemented with Bull: 6 | 7 | - [Message Queue](#message-queue) 8 | - [Returning Job Completions](#returning-job-completions) 9 | - [Reusing Redis Connections](#reusing-redis-connections) 10 | - [Redis Cluster](#redis-cluster) 11 | - [Debugging](#debugging) 12 | - [Custom backoff strategy](#custom-backoff-strategy) 13 | - [Manually fetching jobs](#manually-fetching-jobs) 14 | 15 | If you have any other common patterns you want to add, pull request them! 16 | 17 | 18 | Message Queue 19 | ------------- 20 | 21 | Bull can also be used for persistent message queues. This is quite a useful 22 | feature in some use cases. For example, you can have two servers that need to 23 | communicate with each other. By using a queue, the servers do not need to be online at the same time, so this creates a very robust communication channel. You can treat `add` as *send* and `process` as *receive*: 24 | 25 | Server A: 26 | 27 | ```js 28 | const Queue = require('bull'); 29 | 30 | const sendQueue = new Queue('Server B'); 31 | const receiveQueue = new Queue('Server A'); 32 | 33 | receiveQueue.process(function (job, done) { 34 | console.log('Received message', job.data.msg); 35 | done(); 36 | }); 37 | 38 | sendQueue.add({ msg: 'Hello' }); 39 | ``` 40 | 41 | Server B: 42 | 43 | ```js 44 | const Queue = require('bull'); 45 | 46 | const sendQueue = new Queue('Server A'); 47 | const receiveQueue = new Queue('Server B'); 48 | 49 | receiveQueue.process(function (job, done) { 50 | console.log('Received message', job.data.msg); 51 | done(); 52 | }); 53 | 54 | sendQueue.add({ msg: 'World' }); 55 | ``` 56 | 57 | 58 | Returning Job Completions 59 | ------------------------- 60 | 61 | A common pattern is where you have a cluster of queue processors that just process jobs as fast as they can, and some other services that need to take the results of these processors and do something with them, maybe storing results in a database.
62 | 63 | The most robust and scalable way to accomplish this is by combining the standard job queue with the message queue pattern: a service sends jobs to the cluster just by opening a job queue and adding jobs to it, and the cluster will start processing as fast as it can. Every time a job gets completed in the cluster, a message is sent to a results message queue with the result data, and this queue is listened to by some other service that stores the results in a database. 64 | 65 | 66 | Reusing Redis Connections 67 | ------------------------- 68 | 69 | A standard queue requires **3 connections** to the Redis server. In some situations you might want to re-use connections—for example on Heroku where the connection count is restricted. You can do this with the `createClient` option in the `Queue` constructor. 70 | 71 | Notes: 72 | - bclient connections [cannot be re-used](https://github.com/OptimalBits/bull/issues/880), so you should return a new connection each time this is called. 73 | - client and subscriber connections can be shared and will not be closed when the queue is closed. When you are shutting down the process, first close the queues, then the shared connections (if they are shared). 74 | - if you are not sharing connections but still using `createClient` to do some custom connection logic, you may still need to keep a list of all the connections you created so you can manually close them later when the queue shuts down, if you need a graceful shutdown for your process 75 | - do not set a `keyPrefix` on the connection you create; use bull's built-in prefix feature if you need a key prefix 76 | 77 | ```js 78 | const { REDIS_URL } = process.env; 79 | 80 | const Redis = require('ioredis'); 81 | let client; 82 | let subscriber; 83 | 84 | const opts = { 85 | // redisOpts here will contain at least a property of connectionName which will identify the queue based on its name 86 | createClient: function (type, redisOpts) { 87 | switch (type) { 88 | case 'client': 89 | if (!client) { 90 | client = new Redis(REDIS_URL, redisOpts); 91 | } 92 | return client; 93 | case 'subscriber': 94 | if (!subscriber) { 95 | subscriber = new Redis(REDIS_URL, redisOpts); 96 | } 97 | return subscriber; 98 | case 'bclient': 99 | return new Redis(REDIS_URL, redisOpts); 100 | default: 101 | throw new Error('Unexpected connection type: ' + type); 102 | } 103 | } 104 | } 105 | 106 | const queueFoo = new Queue('foobar', opts); 107 | const queueQux = new Queue('quxbaz', opts); 108 | ``` 109 | 110 | Redis cluster 111 | ------------- 112 | 113 | Bull internals require atomic operations that span different keys. This behavior breaks Redis's 114 | rules for cluster configurations. However, it is still possible to use a cluster environment 115 | by using the proper bull prefix option as a cluster "hash tag". Hash tags are used to guarantee 116 | that certain keys are placed in the same hash slot; read more about hash tags in the [redis cluster 117 | tutorial](https://redis.io/topics/cluster-tutorial). A hash tag is defined with brackets, i.e. a key that has a substring inside brackets will use that 118 | substring to determine in which hash slot the key will be placed. 119 | 120 | In summary, to make bull compatible with Redis cluster, use a queue prefix inside brackets.
For example: 122 | 123 | ```js 124 | const queue = new Queue('cluster', { 125 | prefix: '{myprefix}' 126 | }); 127 | ``` 128 | 129 | If you use several queues in the same cluster, you should use different prefixes so that the 130 | queues are evenly placed in the cluster nodes. 131 | 132 | Debugging 133 | --------- 134 | 135 | To see debug statements, set or add `bull` to the `NODE_DEBUG` environment variable: 136 | 137 | ```bash 138 | export NODE_DEBUG=bull 139 | ``` 140 | 141 | ```bash 142 | NODE_DEBUG=bull node ./your-script.js 143 | ``` 144 | 145 | Custom backoff strategy 146 | ----------------------- 147 | 148 | When the built-in backoff strategies on retries are not sufficient, a custom strategy can be defined. Custom backoff strategies are defined by a function on the queue. The number of attempts already made to process the job is passed to this function as the first parameter, and the error that the job failed with as the second parameter. 149 | The function returns either the time (in milliseconds) to delay the retry by, 0 to retry immediately, or -1 to fail the job immediately. 150 | 151 | ```js 152 | const Queue = require('bull'); 153 | 154 | const myQueue = new Queue('Server B', { 155 | settings: { 156 | backoffStrategies: { 157 | jitter: function (attemptsMade, err) { 158 | return 5000 + Math.random() * 500; 159 | } 160 | } 161 | } 162 | }); 163 | ``` 164 | 165 | The new backoff strategy can then be specified on the job, using the name defined above: 166 | 167 | ```js 168 | myQueue.add({foo: 'bar'}, { 169 | attempts: 3, 170 | backoff: { 171 | type: 'jitter' 172 | } 173 | }); 174 | ``` 175 | 176 | You may specify options for your strategy: 177 | ```js 178 | const Queue = require('bull'); 179 | 180 | const myQueue = new Queue('Server B', { 181 | settings: { 182 | backoffStrategies: { 183 | // truncated binary exponential backoff 184 | binaryExponential: function (attemptsMade, err, options) { 185 | // Options can be undefined, you need to handle that yourself 186 | if (!options) { 187 | options = {} 188 | } 189 | const delay = options.delay || 1000; 190 | const truncate = options.truncate || 1000; 191 | console.error({ attemptsMade, err, options }); 192 | return Math.round(Math.random() * (Math.pow(2, Math.min(attemptsMade, truncate)) - 1) * delay) // Math.min caps the exponent at `truncate`, which is what makes the backoff truncated 193 | } 194 | } 195 | } 196 | }); 197 | 198 | myQueue.add({ foo: 'bar' }, { 199 | attempts: 10, 200 | backoff: { 201 | type: 'binaryExponential', 202 | options: { 203 | delay: 500, 204 | truncate: 5 205 | } 206 | } 207 | }); 208 | 209 | ``` 210 | 211 | You may base your backoff strategy on the error that the job throws: 212 | ```js 213 | const Queue = require('bull'); 214 | 215 | function MySpecificError() {}; 216 | 217 | const myQueue = new Queue('Server C', { 218 | settings: { 219 | backoffStrategies: { 220 | foo: function (attemptsMade, err) { 221 | if (err instanceof MySpecificError) { 222 | return 10000; 223 | } 224 | return 1000; 225 | } 226 | } 227 | } 228 | }); 229 | 230 | myQueue.process(function (job, done) { 231 | if (job.data.msg === 'Specific Error') { 232 | throw new MySpecificError(); 233 | } else { 234 | throw new Error(); 235 | } 236 | }); 237 | 238 | myQueue.add({ msg: 'Hello' }, { 239 | attempts: 3, 240 | backoff: { 241 | type: 'foo' 242 | } 243 | }); 244 | 245 | myQueue.add({ msg: 'Specific Error' }, { 246 | attempts: 3, 247 | backoff: { 248 | type: 'foo' 249 | } 250 | }); 251 | ``` 252 | 253 | Manually fetching jobs 254 | ---------------------------------- 255 | 256 | If you want the actual job processing to be done in a 
separate repo/service from where `bull` is running, this pattern may be for you. 257 | 258 | Manually transitioning states for jobs can be done with a few simple methods. 259 | 260 | 1. Adding a job to the 'waiting' queue. Grab the queue and call `add`. 261 | 262 | ```typescript 263 | import Queue from 'bull'; 264 | 265 | const queue = new Queue('my-queue', { 266 | limiter: { 267 | max: 5, 268 | duration: 5000, 269 | bounceBack: true // important 270 | }, 271 | ...queueOptions 272 | }); 273 | queue.add({ random_attr: 'random_value' }); 274 | ``` 275 | 276 | 2. Pulling a job from 'waiting' and moving it to 'active'. 277 | 278 | ```typescript 279 | const job: Job = await queue.getNextJob(); 280 | ``` 281 | 282 | 3. Move the job to the 'failed' queue if something goes wrong. Note that `moveToFailed` resolves to a `[nextJobData, nextJobId]` tuple, so it is destructured with array brackets. 283 | 284 | ```typescript 285 | const [nextJobData, nextJobId] = await job.moveToFailed( 286 | { 287 | message: 'Call to external service failed!', 288 | }, 289 | true, 290 | ); 291 | ``` 292 | 293 | 4. Move the job to the 'completed' queue. 294 | 295 | ```typescript 296 | const [nextJobData, nextJobId] = await job.moveToCompleted('succeeded', true); 297 | ``` 298 | 299 | 5. Return the next job if one was returned. 300 | 301 | ```typescript 302 | if (nextJobData) { 303 | return Job.fromJSON(queue, nextJobData, nextJobId); 304 | } 305 | ``` 306 | 307 | **Note** 308 | 309 | By default, the lock duration for a job that has been returned by ```getNextJob``` or ```moveToCompleted``` is 30 seconds. If it takes more time than that, the job will automatically be 310 | marked as stalled and, depending on the max stalled options, be moved back to the wait state or marked as failed. To avoid this, you must use [```job.extendLock(duration)```](REFERENCE.md#jobextendlock) to give yourself some more time before the lock expires. The recommended approach is to extend the lock when half the lock time has passed.
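Putting the note into practice, here is a minimal sketch of a manual fetch-and-process loop that renews the lock halfway through its duration. It assumes the `queue` created in step 1; `doWork` is a hypothetical stand-in for your actual processing logic:

```typescript
// Sketch: manually fetch one job and keep its lock alive while working on it.
const lockDuration = 30000; // Bull's default lock duration, in milliseconds

const job = await queue.getNextJob();
if (job) {
  // Renew the lock every lockDuration / 2 so the job is never marked as stalled.
  const lockTimer = setInterval(() => job.extendLock(lockDuration), lockDuration / 2);
  try {
    const result = await doWork(job.data); // hypothetical processing function
    await job.moveToCompleted(result, true);
  } catch (err) {
    await job.moveToFailed({ message: err.message }, true);
  } finally {
    clearInterval(lockTimer);
  }
}
```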
311 | 312 | -------------------------------------------------------------------------------- /commandTransform.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const path = require('path'); 3 | const fs = require('fs'); 4 | const { argv } = require('process'); 5 | 6 | const readFile = fs.promises.readFile; 7 | const writeFile = fs.promises.writeFile; 8 | const readdir = fs.promises.readdir; 9 | 10 | const loadScripts = async (readDir, writeDir) => { 11 | const normalizedDir = path.normalize(readDir); 12 | 13 | const files = await readdir(normalizedDir); 14 | 15 | const luaFiles = files.filter(file => path.extname(file) === '.lua'); 16 | const writeFilenamePath = path.normalize(writeDir); 17 | 18 | if (!fs.existsSync(writeFilenamePath)) { 19 | fs.mkdirSync(writeFilenamePath); 20 | } 21 | 22 | let indexContent = "'use strict';\nmodule.exports = {\n"; 23 | 24 | if (luaFiles.length === 0) { 25 | /** 26 | * To prevent unclarified runtime error "updateDelayset is not a function 27 | * @see https://github.com/OptimalBits/bull/issues/920 28 | */ 29 | throw new Error('No .lua files found!'); 30 | } 31 | 32 | for (let i = 0; i < luaFiles.length; i++) { 33 | const completedFilename = path.join(normalizedDir, luaFiles[i]); 34 | const longName = path.basename(luaFiles[i], '.lua'); 35 | indexContent += ` ["${longName}"]: require('./${longName}'),\n`; 36 | 37 | await loadCommand(completedFilename, longName, writeFilenamePath); 38 | } 39 | indexContent += `}\n`; 40 | 41 | await writeFile(path.join(writeFilenamePath, 'index.js'), indexContent); 42 | }; 43 | 44 | const loadCommand = async (filename, longName, writeFilenamePath) => { 45 | const filenamePath = path.resolve(filename); 46 | 47 | const content = (await readFile(filenamePath)).toString(); 48 | 49 | const [name, num] = longName.split('-'); 50 | const numberOfKeys = num && parseInt(num, 10); 51 | 52 | const newContent = `'use strict'; 53 | const content = \`${content}\`; 54 | module.exports = { 55 | name: '${name}', 56 | content,${ 57 | numberOfKeys 58 | ? ` 59 | keys: ${numberOfKeys},` 60 | : '' 61 | } 62 | }; 63 | `; 64 | await writeFile(path.join(writeFilenamePath, longName + '.js'), newContent); 65 | }; 66 | 67 | loadScripts(argv[2], argv[3]); 68 | -------------------------------------------------------------------------------- /commitlint.config.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = { extends: ['@commitlint/config-conventional'] }; 4 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | services: 3 | redis: 4 | image: redis:6.2-alpine 5 | container_name: cache 6 | ports: 7 | - 6379:6379 8 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 |
2 |
3 | 4 |
5 |
6 | 7 | # What is Bull? 8 | 9 | Bull is a Node library that implements a fast and robust queue system based on [redis](https://redis.io). 10 | 11 | Although it is possible to implement queues directly using Redis commands, this library provides an API that takes care of all the low-level details and enriches Redis basic functionality so that more complex use-cases can be handled easily. 12 | 13 | If you are new to queues you may wonder why they are needed after all. Queues can solve many different problems in an elegant way, from smoothing out processing peaks to creating robust communication channels between microservices or offloading heavy work from one server to many smaller workers, etc. 14 | 15 | # Getting Started 16 | 17 | Bull is a public npm package and can be installed using either npm or yarn: 18 | 19 | ```bash 20 | $ npm install bull --save 21 | ``` 22 | 23 | or 24 | 25 | ```bash 26 | $ yarn add bull 27 | ``` 28 | 29 | In order to work with Bull, you also need to have a Redis server running. For local development, you can easily install 30 | it using [docker](https://hub.docker.com/_/redis/). 31 | 32 | Bull will by default try to connect to a Redis server running on `localhost:6379` 33 | 34 | # Simple Queues 35 | 36 | A queue is simply created by instantiating a Bull instance: 37 | 38 | ```js 39 | const myFirstQueue = new Bull('my-first-queue'); 40 | ``` 41 | 42 | A queue instance can normally have 3 main different roles: A job producer, a job consumer or/and an events listener. 43 | 44 | Although one given instance can be used for the 3 roles, normally the producer and consumer are divided into several instances. A given queue, always referred to by its instantiation name ( `my-first-queue` in the example above ), can have many producers, many consumers, and many listeners. An important aspect is that producers can add jobs to a queue even if there are no consumers available at that moment: queues provide asynchronous communication, which is one of the features that makes them so powerful. 45 | 46 | Conversely, you can have one or more workers consuming jobs from the queue, which will consume the jobs in a given order: FIFO (the default), LIFO or according to priorities. 47 | 48 | Talking about workers, they can run in the same or different processes, in the same machine, or in a cluster. Redis will act as a common point, and as long as a consumer or producer can connect to Redis, they will be able to co-operate in processing the jobs. 49 | 50 | ## Producers 51 | 52 | A job producer is simply some Node program that adds jobs to a queue, like this: 53 | 54 | ```js 55 | const myFirstQueue = new Bull('my-first-queue'); 56 | 57 | const job = await myFirstQueue.add({ 58 | foo: 'bar' 59 | }); 60 | ``` 61 | 62 | As you can see a job is just a javascript object. This object needs to be serializable, more concrete it should be possible to JSON stringify it since that is how it is going to be stored in Redis. 63 | 64 | It is also possible to provide an options object after the job's data, but we will cover that later on. 
65 | 66 | ## Consumers 67 | 68 | A consumer or worker (we will use these two terms interchangeably in this guide) is nothing more than a Node program 69 | that defines a process function like so: 70 | 71 | ```js 72 | const myFirstQueue = new Bull('my-first-queue'); 73 | 74 | myFirstQueue.process(async (job) => { 75 | return doSomething(job.data); 76 | }); 77 | ``` 78 | 79 | The `process` function will be called every time the worker is idling and there are jobs to process in the queue. Since 80 | the consumer does not need to be online when the jobs are added, the queue could have many jobs already waiting in it. The worker will then be kept busy processing jobs one by one until all of them are done. 81 | 82 | In the example above we define the process function as `async`, which is the highly recommended way to define them. 83 | If your Node runtime does not support async/await, then you can just return a promise at the end of the process 84 | function for a similar result. 85 | 86 | The value returned by your process function will be stored in the job object and can be accessed later on, for example 87 | in a listener for the `completed` event. 88 | 89 | Sometimes you need to provide a job's _progress_ information to an external listener; this can be easily accomplished 90 | by using the `progress` method on the job object: 91 | 92 | ```js 93 | myFirstQueue.process(async (job) => { 94 | let progress = 0; 95 | for (let i = 0; i < 10; i++) { 96 | await doSomething(job.data); 97 | progress += 10; 98 | job.progress(progress); 99 | } 100 | }); 101 | ``` 102 | 103 | ## Listeners 104 | 105 | Finally, you can just listen to events that happen in the queue. Listeners can be local, meaning that they will only 106 | receive notifications produced in the _given queue instance_, or global, meaning that they listen to _all_ the events 107 | for a given queue. So you can attach a listener to any instance, even instances that are acting as consumers or producers. But note that a local event will never fire if the queue is not a consumer or producer; you will need to use global events in that 108 | case. 109 | 110 | ```js 111 | const myFirstQueue = new Bull('my-first-queue'); 112 | 113 | // Define a local completed event 114 | myFirstQueue.on('completed', (job, result) => { 115 | console.log(`Job completed with result ${result}`); 116 | }) 117 | ``` 118 | 119 | ## A Job's Lifecycle 120 | 121 | In order to use the full potential of Bull queues, it is important to understand the lifecycle of a job. 122 | From the moment a producer calls the `add` method on a queue instance, a job enters a lifecycle where it will 123 | be in different states, until its completion or failure (although technically a failed job could be retried and get a new lifecycle). 124 | 125 | ![Diagram showing job statuses](job-lifecycle.png) 126 | 127 | When a job is added to a queue it can be in one of two states: it can either be in the "wait" status, which is, in fact, a waiting list, where all jobs must enter before they can be processed, or it can be in a "delayed" status: a delayed status implies that the job is waiting for some timeout or to be promoted for being processed. However, a delayed job will not be processed directly, instead, it will be placed at the beginning of the waiting list and processed as soon as a worker is idle. 128 | 129 | The next state for a job is the "active" state. The active state is represented by a set, and contains the jobs that are currently being 130 | processed, i.e. 
they are running in the `process` function explained in the previous chapter. A job can be in the active state for an unlimited amount of time until the process is completed or an exception is thrown, so that the job will end in 131 | either the "completed" or the "failed" status. 132 | 133 | ## Stalled jobs 134 | 135 | In Bull, we define the concept of stalled jobs. A stalled job is a job that is being processed but where Bull suspects that 136 | the process function has hung. This happens when the process function is processing a job and is keeping the CPU so busy that 137 | the worker is not able to tell the queue that it is still working on the job. 138 | 139 | When a job stalls, depending on the job settings, the job can be retried by another idle worker or it can just move to the failed status. 140 | 141 | Stalled jobs can be avoided by either making sure that the process function does not keep the Node event loop busy for too long (we are talking several seconds with Bull's default options), or by using a separate [sandboxed processor](#sandboxed-processors). 142 | 143 | # Events 144 | 145 | A Queue in Bull generates a handful of events that are useful in many use cases. 146 | Events can be local for a given queue instance (a worker): for example, if a job is completed in a given worker, a local event will be emitted just for that instance. However, it is possible to listen to all events, by prefixing ```global:``` to the local event name. Then we can listen to all the events produced by all the workers of a given queue. 147 | 148 | A local complete event: 149 | 150 | ```js 151 | queue.on('completed', job => { 152 | console.log(`Job with id ${job.id} has been completed`); 153 | }) 154 | ``` 155 | 156 | Whereas the global version of the event can be listened to with: 157 | 158 | ```js 159 | queue.on('global:completed', jobId => { 160 | console.log(`Job with id ${jobId} has been completed`); 161 | }) 162 | ``` 163 | 164 | Note that the signatures of global events are slightly different from their local counterparts; in the example above, only the job id is sent, not a complete instance of the job itself. This is done for performance reasons. 165 | 166 | The list of available events can be found in the [reference](https://github.com/OptimalBits/bull/blob/master/REFERENCE.md#events). 167 | 168 | # Queue Options 169 | 170 | A queue can be instantiated with some useful options, for instance, you can specify the location and password of your Redis server, 171 | as well as some other useful settings. All these settings are described in Bull's [reference](https://github.com/OptimalBits/bull/blob/master/REFERENCE.md#queue) and we will not repeat them here. However, we will go through some use cases. 172 | 173 | ## Rate Limiter 174 | 175 | It is possible to create queues that limit the number of jobs processed in a unit of time. The limiter is defined per queue, independently of the number of workers, so you can scale horizontally and still limit the rate of processing easily: 176 | 177 | ```js 178 | // Limit queue to max 1000 jobs per 5000 milliseconds. 179 | const myRateLimitedQueue = new Queue('rateLimited', { 180 | limiter: { 181 | max: 1000, 182 | duration: 5000 183 | } 184 | }); 185 | ``` 186 | 187 | When a queue hits the rate limit, requested jobs will join the `delayed` queue. 188 | 189 | ## Named jobs 190 | 191 | It is possible to give names to jobs. 
This does not change any of the mechanics of the queue but can be used for clearer code and 192 | better visualization in UI tools: 193 | 194 | ```js 195 | // Jobs producer 196 | const myJob = await transcoderQueue.add('image', { input: 'myimagefile' }); 197 | const myJob = await transcoderQueue.add('audio', { input: 'myaudiofile' }); 198 | const myJob = await transcoderQueue.add('video', { input: 'myvideofile' }); 199 | ``` 200 | 201 | ```js 202 | // Worker 203 | transcoderQueue.process('image', processImage); 204 | transcoderQueue.process('audio', processAudio); 205 | transcoderQueue.process('video', processVideo); 206 | ``` 207 | 208 | Just keep in mind that every queue instance is required to provide a processor for *every* named job or you will get an exception. 209 | 210 | ## Sandboxed Processors 211 | 212 | As explained above, when defining a process function, it is also possible to provide a concurrency setting. This setting allows the worker to process several 213 | jobs in parallel. The jobs are still processed in the same Node process, 214 | and if the jobs are very IO intensive they will be handled just fine. 215 | 216 | Sometimes jobs are more CPU intensive which could lock the Node event loop 217 | for too long and Bull could decide the job has been stalled. To avoid this situation, it is possible to run the process functions in separate Node processes. In this case, the concurrency parameter will decide the maximum number of concurrent processes that are allowed to run. 218 | 219 | We call these kinds of processes "sandboxed" processes, and they also have the property that if they crash they will not affect any other process, and a new 220 | process will be spawned automatically to replace it. 221 | 222 | 223 | # Job types 224 | 225 | The default job type in Bull is "FIFO" (first in first out), meaning that the jobs are processed in the same order they are coming into the 226 | queue. Sometimes it is useful to process jobs in a different order. 227 | 228 | ## LIFO 229 | 230 | Lifo (last in first out) means that jobs are added to the beginning of the queue and therefore will be processed as soon as the worker is idle. 231 | 232 | ```js 233 | const myJob = await myqueue.add({ foo: 'bar' }, { lifo: true }); 234 | ``` 235 | 236 | ## Delayed 237 | 238 | It is also possible to add jobs to the queue that are delayed a certain amount of time before they will be processed. Note that the delay parameter means the _minimum_ amount of time the job will wait before being processed. When the delay time has passed the job will be moved to the beginning of the queue and be processed as soon as a worker is idle. 239 | 240 | ```js 241 | // Delayed 5 seconds 242 | const myJob = await myqueue.add({ foo: 'bar' }, { delay: 5000 }); 243 | ``` 244 | 245 | ## Prioritized 246 | 247 | Jobs can be added to a queue with a priority value. Jobs with higher priority will be processed before jobs with lower priority. The highest priority is 1, and the larger the integer you use, the lower the priority of the job. Keep in mind that priority queues are a bit slower than a standard queue (currently insertion time O(n), n being the number of jobs currently waiting in the queue, instead of O(1) for standard queues). 
248 | 249 | ```js 250 | const myJob = await myqueue.add({ foo: 'bar' }, { priority: 3 }); 251 | ``` 252 | 253 | ## Repeatable 254 | 255 | Repeatable jobs are special jobs that repeat themselves indefinitely or until a given maximum date or the number of repetitions has been reached, according to a cron specification or a time interval. 256 | 257 | ```js 258 | // Repeat every 10 seconds for 100 times. 259 | const myJob = await myqueue.add( 260 | { foo: 'bar' }, 261 | { 262 | repeat: { 263 | every: 10000, 264 | limit: 100 265 | } 266 | } 267 | ); 268 | 269 | // Repeat payment job once every day at 3:15 (am) 270 | paymentsQueue.add(paymentsData, { repeat: { cron: '15 3 * * *' } }); 271 | ``` 272 | 273 | There are some important considerations regarding repeatable jobs: 274 | 275 | - Bull is smart enough not to add the same repeatable job if the repeat options are the same. (CAUTION: A job id is part of the repeat options since: https://github.com/OptimalBits/bull/pull/603, therefore passing job ids will allow jobs with the same cron to be inserted in the queue) 276 | - If there are no workers running, repeatable jobs will not accumulate the next time a worker is online. 277 | - Repeatable jobs can be removed using the [removeRepeatable](https://github.com/OptimalBits/bull/blob/master/REFERENCE.md#queueremoverepeatable) method. 278 | -------------------------------------------------------------------------------- /docs/_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-minimal 2 | title: Welcome to Bull's Guide 3 | -------------------------------------------------------------------------------- /docs/job-lifecycle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OptimalBits/bull/489c6ab8466c1db122f92af3ddef12eacc54179e/docs/job-lifecycle.png -------------------------------------------------------------------------------- /generateRawScripts.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const { ScriptLoader } = require('./lib/commands'); 3 | const path = require('path'); 4 | const fs = require('fs'); 5 | const { promisify } = require('util'); 6 | 7 | const writeFile = promisify(fs.writeFile); 8 | 9 | class RawScriptLoader extends ScriptLoader { 10 | /** 11 | * Transpile lua scripts in one file, specifying an specific directory to be saved 12 | * @param pathname - the path to the directory containing the scripts 13 | * @param writeDir - the path to the directory where scripts will be saved 14 | */ 15 | async transpileScripts(pathname, writeDir) { 16 | const writeFilenamePath = path.normalize(writeDir); 17 | 18 | if (!fs.existsSync(writeFilenamePath)) { 19 | fs.mkdirSync(writeFilenamePath); 20 | } 21 | 22 | const paths = new Set(); 23 | if (!paths.has(pathname)) { 24 | paths.add(pathname); 25 | const scripts = await this.loadScripts(pathname); 26 | for (const command of scripts) { 27 | const { 28 | name, 29 | options: { numberOfKeys, lua } 30 | } = command; 31 | await writeFile( 32 | path.join(writeFilenamePath, `${name}-${numberOfKeys}.lua`), 33 | lua 34 | ); 35 | } 36 | } 37 | } 38 | } 39 | 40 | const scriptLoader = new RawScriptLoader(); 41 | 42 | scriptLoader.transpileScripts( 43 | path.join(__dirname, './lib/commands'), 44 | path.join(__dirname, './rawScripts') 45 | ); 46 | -------------------------------------------------------------------------------- /index.js: 
-------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = require('./lib/queue'); 4 | module.exports.Job = require('./lib/job'); 5 | module.exports.utils = require('./lib/utils'); 6 | -------------------------------------------------------------------------------- /lib/backoffs.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const _ = require('lodash'); 4 | 5 | const builtinStrategies = { 6 | fixed(delay) { 7 | return function() { 8 | return delay; 9 | }; 10 | }, 11 | 12 | exponential(delay) { 13 | return function(attemptsMade) { 14 | return Math.round((Math.pow(2, attemptsMade) - 1) * delay); 15 | }; 16 | } 17 | }; 18 | 19 | function lookupStrategy(backoff, customStrategies) { 20 | if (backoff.type in customStrategies) { 21 | return customStrategies[backoff.type]; 22 | } else if (backoff.type in builtinStrategies) { 23 | return builtinStrategies[backoff.type](backoff.delay); 24 | } else { 25 | throw new Error( 26 | 'Unknown backoff strategy ' + 27 | backoff.type + 28 | '. If a custom backoff strategy is used, specify it when the queue is created.' 29 | ); 30 | } 31 | } 32 | 33 | module.exports = { 34 | normalize(backoff) { 35 | if (_.isFinite(backoff)) { 36 | return { 37 | type: 'fixed', 38 | delay: backoff 39 | }; 40 | } else if (backoff) { 41 | return backoff; 42 | } 43 | }, 44 | 45 | calculate(backoff, attemptsMade, customStrategies, err, strategyOptions) { 46 | if (backoff) { 47 | const strategy = lookupStrategy( 48 | backoff, 49 | customStrategies, 50 | strategyOptions 51 | ); 52 | 53 | return strategy(attemptsMade, err, strategyOptions); 54 | } 55 | } 56 | }; 57 | -------------------------------------------------------------------------------- /lib/commands/addJob-6.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Adds a job to the queue by doing the following: 3 | - Increases the job counter if needed. 4 | - Creates a new job key with the job data. 5 | 6 | - if delayed: 7 | - computes timestamp. 8 | - adds to delayed zset. 9 | - Emits a global event 'delayed' if the job is delayed. 10 | - if not delayed 11 | - Adds the jobId to the wait/paused list in one of three ways: 12 | - LIFO 13 | - FIFO 14 | - prioritized. 15 | - Adds the job to the "added" list so that workers gets notified. 16 | 17 | Input: 18 | KEYS[1] 'wait', 19 | KEYS[2] 'paused' 20 | KEYS[3] 'meta-paused' 21 | KEYS[4] 'id' 22 | KEYS[5] 'delayed' 23 | KEYS[6] 'priority' 24 | 25 | ARGV[1] key prefix, 26 | ARGV[2] custom id (will not generate one automatically) 27 | ARGV[3] name 28 | ARGV[4] data (json stringified job data) 29 | ARGV[5] opts (json stringified job opts) 30 | ARGV[6] timestamp 31 | ARGV[7] delay 32 | ARGV[8] delayedTimestamp 33 | ARGV[9] priority 34 | ARGV[10] LIFO 35 | ARGV[11] token 36 | ARGV[12] debounce key 37 | ARGV[13] debounceId 38 | ARGV[14] debounceTtl 39 | ]] 40 | local jobId 41 | local jobIdKey 42 | local rcall = redis.call 43 | 44 | -- Includes 45 | --- @include "includes/addJobWithPriority" 46 | --- @include "includes/debounceJob" 47 | --- @include "includes/getTargetQueueList" 48 | 49 | local jobCounter = rcall("INCR", KEYS[4]) 50 | 51 | if ARGV[2] == "" then 52 | jobId = jobCounter 53 | jobIdKey = ARGV[1] .. jobId 54 | else 55 | jobId = ARGV[2] 56 | jobIdKey = ARGV[1] .. jobId 57 | if rcall("EXISTS", jobIdKey) == 1 then 58 | rcall("PUBLISH", ARGV[1] .. "duplicated@" .. ARGV[11], jobId) 59 | return jobId .. 
"" -- convert to string 60 | end 61 | end 62 | 63 | local debounceKey = ARGV[12] 64 | 65 | local opts = cmsgpack.unpack(ARGV[5]) 66 | 67 | local debouncedJobId = debounceJob(ARGV[1], ARGV[13], ARGV[14], 68 | jobId, debounceKey, ARGV[11]) 69 | if debouncedJobId then 70 | return debouncedJobId 71 | end 72 | 73 | local debounceId = ARGV[13] 74 | 75 | local optionalValues = {} 76 | 77 | if debounceId ~= "" then 78 | table.insert(optionalValues, "deid") 79 | table.insert(optionalValues, debounceId) 80 | end 81 | 82 | -- Store the job. 83 | rcall("HMSET", jobIdKey, "name", ARGV[3], "data", ARGV[4], "opts", opts, "timestamp", 84 | ARGV[6], "delay", ARGV[7], "priority", ARGV[9], unpack(optionalValues)) 85 | 86 | -- Check if job is delayed 87 | local delayedTimestamp = tonumber(ARGV[8]) 88 | if(delayedTimestamp ~= 0) then 89 | local timestamp = delayedTimestamp * 0x1000 + bit.band(jobCounter, 0xfff) 90 | rcall("ZADD", KEYS[5], timestamp, jobId) 91 | rcall("PUBLISH", KEYS[5], delayedTimestamp) 92 | else 93 | local target 94 | 95 | -- Whe check for the meta-paused key to decide if we are paused or not 96 | -- (since an empty list and !EXISTS are not really the same) 97 | local target, paused = getTargetQueueList(KEYS[3], KEYS[1], KEYS[2]) 98 | 99 | -- Standard or priority add 100 | local priority = tonumber(ARGV[9]) 101 | if priority == 0 then 102 | -- LIFO or FIFO 103 | rcall(ARGV[10], target, jobId) 104 | else 105 | addJobWithPriority(KEYS[6], priority, jobId, target) 106 | end 107 | 108 | -- Emit waiting event (wait..ing@token) 109 | rcall("PUBLISH", KEYS[1] .. "ing@" .. ARGV[11], jobId) 110 | end 111 | 112 | return jobId .. "" -- convert to string 113 | -------------------------------------------------------------------------------- /lib/commands/addLog-2.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Add job log 3 | 4 | Input: 5 | KEYS[1] job id key 6 | KEYS[2] job logs key 7 | 8 | ARGV[1] id 9 | ARGV[2] log 10 | ARGV[3] keepLogs 11 | 12 | Output: 13 | -1 - Missing job. 14 | ]] 15 | local rcall = redis.call 16 | 17 | if rcall("EXISTS", KEYS[1]) == 1 then -- // Make sure job exists 18 | local logCount = rcall("RPUSH", KEYS[2], ARGV[2]) 19 | 20 | if ARGV[3] ~= '' then 21 | local keepLogs = tonumber(ARGV[3]) 22 | rcall("LTRIM", KEYS[2], -keepLogs, -1) 23 | 24 | return math.min(keepLogs, logCount) 25 | end 26 | 27 | return logCount 28 | else 29 | return -1 30 | end 31 | -------------------------------------------------------------------------------- /lib/commands/cleanJobsInSet-3.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Remove jobs from the specific set. 3 | 4 | Input: 5 | KEYS[1] set key, 6 | KEYS[2] priority key 7 | KEYS[3] rate limiter key 8 | 9 | ARGV[1] prefix key 10 | ARGV[2] maxTimestamp 11 | ARGV[3] limit the number of jobs to be removed. 
0 is unlimited 12 | ARGV[4] set name, can be any of 'wait', 'active', 'paused', 'delayed', 'completed', or 'failed' 13 | ]] 14 | 15 | local setKey = KEYS[1] 16 | local priorityKey = KEYS[2] 17 | local rateLimiterKey = KEYS[3] 18 | 19 | local prefixKey = ARGV[1] 20 | local maxTimestamp = ARGV[2] 21 | local limitStr = ARGV[3] 22 | local setName = ARGV[4] 23 | 24 | local isList = false 25 | local rcall = redis.call 26 | 27 | -- Includes 28 | --- @include "includes/removeDebounceKey" 29 | 30 | if setName == "wait" or setName == "active" or setName == "paused" then 31 | isList = true 32 | end 33 | 34 | -- We use ZRANGEBYSCORE to make the case where we're deleting a limited number 35 | -- of items in a sorted set only run a single iteration. If we simply used 36 | -- ZRANGE, we may take a long time traversing through jobs that are within the 37 | -- grace period. 38 | local function shouldUseZRangeByScore(isList, limit) 39 | return not isList and limit > 0 40 | end 41 | 42 | local function getJobs(setKey, isList, rangeStart, rangeEnd, maxTimestamp, limit) 43 | if isList then 44 | return rcall("LRANGE", setKey, rangeStart, rangeEnd) 45 | elseif shouldUseZRangeByScore(isList, limit) then 46 | return rcall("ZRANGEBYSCORE", setKey, 0, maxTimestamp, "LIMIT", 0, limit) 47 | else 48 | return rcall("ZRANGE", setKey, rangeStart, rangeEnd) 49 | end 50 | end 51 | 52 | local limit = tonumber(limitStr) 53 | local rangeStart = 0 54 | local rangeEnd = -1 55 | 56 | -- If we're only deleting _n_ items, avoid retrieving all items 57 | -- for faster performance 58 | -- 59 | -- Start from the tail of the list, since that's where oldest elements 60 | -- are generally added for FIFO lists 61 | if limit > 0 then 62 | rangeStart = -1 - limit + 1 63 | rangeEnd = -1 64 | end 65 | 66 | local jobIds = getJobs(setKey, isList, rangeStart, rangeEnd, maxTimestamp, limit) 67 | local deleted = {} 68 | local deletedCount = 0 69 | local jobTS 70 | 71 | -- Run this loop: 72 | -- - Once, if limit is -1 or 0 73 | -- - As many times as needed if limit is positive 74 | while ((limit <= 0 or deletedCount < limit) and next(jobIds, nil) ~= nil) do 75 | local jobIdsLen = #jobIds 76 | for i, jobId in ipairs(jobIds) do 77 | if limit > 0 and deletedCount >= limit then 78 | break 79 | end 80 | 81 | local jobKey = prefixKey .. jobId 82 | if (rcall("EXISTS", jobKey .. ":lock") == 0) then 83 | -- Find the right timestamp of the job to compare to maxTimestamp: 84 | -- * finishedOn says when the job was completed, but it isn't set unless the job has actually completed 85 | -- * processedOn represents when the job was last attempted, but it doesn't get populated until the job is first tried 86 | -- * timestamp is the original job submission time 87 | -- Fetch all three of these (in that order) and use the first one that is set so that we'll leave jobs that have been active within the grace period: 88 | for _, ts in ipairs(rcall("HMGET", jobKey, "finishedOn", "processedOn", "timestamp")) do 89 | if (ts) then 90 | jobTS = ts 91 | break 92 | end 93 | end 94 | if (not jobTS or jobTS < maxTimestamp) then 95 | if isList then 96 | -- Job ids can't be the empty string. Use the empty string as a 97 | -- deletion marker. The actual deletion will occur at the end of the 98 | -- script. 
99 | rcall("LSET", setKey, rangeEnd - jobIdsLen + i, "") 100 | else 101 | rcall("ZREM", setKey, jobId) 102 | end 103 | rcall("ZREM", priorityKey, jobId) 104 | 105 | if setName ~= "completed" and setName ~= "failed" then 106 | removeDebounceKey(prefixKey, jobKey) 107 | end 108 | 109 | rcall("DEL", jobKey) 110 | rcall("DEL", jobKey .. ":logs") 111 | 112 | -- delete keys related to rate limiter 113 | -- NOTE: this code is unncessary for other sets than wait, paused and delayed. 114 | local limiterIndexTable = rateLimiterKey .. ":index" 115 | local limitedSetKey = rcall("HGET", limiterIndexTable, jobId) 116 | 117 | if limitedSetKey then 118 | rcall("SREM", limitedSetKey, jobId) 119 | rcall("HDEL", limiterIndexTable, jobId) 120 | end 121 | 122 | deletedCount = deletedCount + 1 123 | table.insert(deleted, jobId) 124 | end 125 | end 126 | end 127 | 128 | -- If we didn't have a limit or used the single-iteration ZRANGEBYSCORE 129 | -- function, return immediately. We should have deleted all the jobs we can 130 | if limit <= 0 or shouldUseZRangeByScore(isList, limit) then 131 | break 132 | end 133 | 134 | if deletedCount < limit then 135 | -- We didn't delete enough. Look for more to delete 136 | rangeStart = rangeStart - limit 137 | rangeEnd = rangeEnd - limit 138 | jobIds = getJobs(setKey, isList, rangeStart, rangeEnd, maxTimestamp, limit) 139 | end 140 | end 141 | 142 | if isList then 143 | rcall("LREM", setKey, 0, "") 144 | end 145 | 146 | return deleted 147 | -------------------------------------------------------------------------------- /lib/commands/extendLock-2.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Extend lock and removes the job from the stalled set. 3 | 4 | Input: 5 | KEYS[1] 'lock', 6 | KEYS[2] 'stalled' 7 | 8 | ARGV[1] token 9 | ARGV[2] lock duration in milliseconds 10 | ARGV[3] jobid 11 | 12 | Output: 13 | "1" if lock extended succesfully. 14 | ]] 15 | local rcall = redis.call 16 | if rcall("GET", KEYS[1]) == ARGV[1] then 17 | if rcall("SET", KEYS[1], ARGV[1], "PX", ARGV[2]) then 18 | rcall("SREM", KEYS[2], ARGV[3]) 19 | return 1 20 | end 21 | end 22 | return 0 23 | -------------------------------------------------------------------------------- /lib/commands/getCountsPerPriority-4.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Get counts per provided states 3 | 4 | Input: 5 | KEYS[1] wait key 6 | KEYS[2] paused key 7 | KEYS[3] meta-paused key 8 | KEYS[4] priority key 9 | 10 | ARGV[1...] 
priorities 11 | ]] 12 | local rcall = redis.call 13 | local results = {} 14 | local prioritizedKey = KEYS[4] 15 | 16 | -- Includes 17 | --- @include "includes/getTargetQueueList" 18 | 19 | for i = 1, #ARGV do 20 | local priority = tonumber(ARGV[i]) 21 | if priority == 0 then 22 | local target = getTargetQueueList(KEYS[3], KEYS[1], KEYS[2]) 23 | local count = rcall("LLEN", target) - rcall("ZCARD", prioritizedKey) 24 | if count < 0 then 25 | -- considering when last waiting job is moved to active before 26 | -- removing priority reference 27 | results[#results+1] = 0 28 | else 29 | results[#results+1] = count 30 | end 31 | else 32 | results[#results+1] = rcall("ZCOUNT", prioritizedKey, 33 | priority, priority) 34 | end 35 | end 36 | 37 | return results 38 | -------------------------------------------------------------------------------- /lib/commands/includes/addJobWithPriority.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to add job considering priority. 3 | ]] 4 | 5 | local function addJobWithPriority(priorityKey, priority, jobId, targetKey) 6 | rcall("ZADD", priorityKey, priority, jobId) 7 | local count = rcall("ZCOUNT", priorityKey, 0, priority) 8 | 9 | local len = rcall("LLEN", targetKey) 10 | local id = rcall("LINDEX", targetKey, len - (count - 1)) 11 | if id then 12 | rcall("LINSERT", targetKey, "BEFORE", id, jobId) 13 | else 14 | rcall("RPUSH", targetKey, jobId) 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /lib/commands/includes/batches.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to loop in batches. 3 | Just a bit of warning, some commands as ZREM 4 | could receive a maximum of 7000 parameters per call. 5 | ]] 6 | 7 | local function batches(n, batchSize) 8 | local i = 0 9 | 10 | return function() 11 | local from = i * batchSize + 1 12 | i = i + 1 13 | if (from <= n) then 14 | local to = math.min(from + batchSize - 1, n) 15 | return from, to 16 | end 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /lib/commands/includes/collectMetrics.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Functions to collect metrics based on a current and previous count of jobs. 3 | Granualarity is fixed at 1 minute. 4 | ]] 5 | 6 | -- Includes 7 | --- @include "batches" 8 | 9 | local function collectMetrics(metaKey, dataPointsList, maxDataPoints, timestamp) 10 | -- Increment current count 11 | local count = rcall("HINCRBY", metaKey, "count", 1) - 1 12 | 13 | -- Compute how many data points we need to add to the list, N. 
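-- Worked example (1-minute granularity): if prevTS fell in minute 10 and
-- timestamp falls in minute 14, then N = 4 (capped at maxDataPoints), so we
-- LPUSH the delta first, followed by N - 1 zero points for the idle minutes.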
14 | local prevTS = rcall("HGET", metaKey, "prevTS") 15 | 16 | if not prevTS then 17 | -- If prevTS is nil, set it to the current timestamp 18 | rcall("HSET", metaKey, "prevTS", timestamp, "prevCount", 0) 19 | return 20 | end 21 | 22 | local N = math.min(math.floor(timestamp / 60000) - math.floor(prevTS / 60000), tonumber(maxDataPoints)) 23 | 24 | if N > 0 then 25 | local delta = count - rcall("HGET", metaKey, "prevCount") 26 | -- If N > 1, add N-1 zeros to the list 27 | if N > 1 then 28 | local points = {} 29 | points[1] = delta 30 | for i = 2, N do points[i] = 0 end 31 | 32 | for from, to in batches(#points, 7000) do 33 | rcall("LPUSH", dataPointsList, unpack(points, from, to)) 34 | end 35 | else 36 | -- LPUSH delta to the list 37 | rcall("LPUSH", dataPointsList, delta) 38 | end 39 | 40 | -- LTRIM to keep list to its max size 41 | rcall("LTRIM", dataPointsList, 0, maxDataPoints - 1) 42 | 43 | -- update prev count with current count 44 | rcall("HSET", metaKey, "prevCount", count, "prevTS", timestamp) 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /lib/commands/includes/debounceJob.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to debounce a job. 3 | ]] 4 | 5 | local function debounceJob(prefixKey, debounceId, ttl, jobId, debounceKey, token) 6 | if debounceId ~= "" then 7 | local debounceKeyExists 8 | if ttl ~= "" then 9 | debounceKeyExists = not rcall('SET', debounceKey, jobId, 'PX', ttl, 'NX') 10 | else 11 | debounceKeyExists = not rcall('SET', debounceKey, jobId, 'NX') 12 | end 13 | if debounceKeyExists then 14 | local currentDebounceJobId = rcall('GET', debounceKey) 15 | rcall("PUBLISH", prefixKey .. "debounced@" .. token, currentDebounceJobId) 16 | 17 | return currentDebounceJobId 18 | end 19 | end 20 | end -------------------------------------------------------------------------------- /lib/commands/includes/getTargetQueueList.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to check for the meta.paused key to decide if we are paused or not 3 | (since an empty list and !EXISTS are not really the same). 4 | ]] 5 | 6 | local function getTargetQueueList(queueMetaKey, waitKey, pausedKey) 7 | if rcall("EXISTS", queueMetaKey) ~= 1 then 8 | return waitKey, false 9 | else 10 | return pausedKey, true 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /lib/commands/includes/removeDebounceKey.lua: -------------------------------------------------------------------------------- 1 | 2 | --[[ 3 | Function to remove debounce key. 4 | ]] 5 | 6 | local function removeDebounceKey(prefixKey, jobKey) 7 | local debounceId = rcall("HGET", jobKey, "deid") 8 | if debounceId then 9 | local debounceKey = prefixKey .. "de:" .. debounceId 10 | rcall("DEL", debounceKey) 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /lib/commands/includes/removeDebounceKeyIfNeeded.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to remove debounce key if needed. 3 | ]] 4 | 5 | local function removeDebounceKeyIfNeeded(prefixKey, debounceId) 6 | if debounceId then 7 | local debounceKey = prefixKey .. "de:" .. 
debounceId 8 | local pttl = rcall("PTTL", debounceKey) 9 | 10 | if pttl == 0 or pttl == -1 then 11 | rcall("DEL", debounceKey) 12 | end 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /lib/commands/includes/removeLock.lua: -------------------------------------------------------------------------------- 1 | local function removeLock(jobKey, stalledKey, token, jobId) 2 | if token ~= "0" then 3 | local lockKey = jobKey .. ':lock' 4 | local lockToken = rcall("GET", lockKey) 5 | if lockToken == token then 6 | rcall("DEL", lockKey) 7 | rcall("SREM", stalledKey, jobId) 8 | else 9 | if lockToken then 10 | -- Lock exists but token does not match 11 | return -6 12 | else 13 | -- Lock is missing completely 14 | return -2 15 | end 16 | end 17 | end 18 | return 0 19 | end 20 | -------------------------------------------------------------------------------- /lib/commands/index.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const { ScriptLoader } = require('./script-loader'); 3 | 4 | const scriptLoader = new ScriptLoader(); 5 | 6 | module.exports = { 7 | ScriptLoader, 8 | scriptLoader 9 | }; 10 | -------------------------------------------------------------------------------- /lib/commands/isFinished-2.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Checks if a job is finished (.i.e. is in the completed or failed set) 3 | 4 | Input: 5 | KEYS[1] completed key 6 | KEYS[2] failed key 7 | 8 | ARGV[1] job id 9 | Output: 10 | 0 - not finished. 11 | 1 - completed. 12 | 2 - failed. 13 | ]] 14 | if redis.call("ZSCORE", KEYS[1], ARGV[1]) ~= false then 15 | return 1 16 | end 17 | 18 | if redis.call("ZSCORE", KEYS[2], ARGV[1]) ~= false then 19 | return 2 20 | end 21 | 22 | return redis.call("ZSCORE", KEYS[2], ARGV[1]) 23 | -------------------------------------------------------------------------------- /lib/commands/isJobInList-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Checks if job is in a given list. 3 | 4 | Input: 5 | KEYS[1] 6 | ARGV[1] 7 | 8 | Output: 9 | 1 if element found in the list. 10 | ]] 11 | local function item_in_list (list, item) 12 | for _, v in pairs(list) do 13 | if v == item then 14 | return 1 15 | end 16 | end 17 | return nil 18 | end 19 | local items = redis.call("LRANGE", KEYS[1] , 0, -1) 20 | return item_in_list(items, ARGV[1]) 21 | -------------------------------------------------------------------------------- /lib/commands/moveStalledJobsToWait-7.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Move stalled jobs to wait. 3 | 4 | Input: 5 | KEYS[1] 'stalled' (SET) 6 | KEYS[2] 'wait', (LIST) 7 | KEYS[3] 'active', (LIST) 8 | KEYS[4] 'failed', (ZSET) 9 | KEYS[5] 'stalled-check', (KEY) 10 | 11 | KEYS[6] 'meta-paused', (KEY) 12 | KEYS[7] 'paused', (LIST) 13 | 14 | ARGV[1] Max stalled job count 15 | ARGV[2] queue.toKey('') 16 | ARGV[3] timestamp 17 | ARGV[4] max check time 18 | 19 | Events: 20 | 'stalled' with stalled job id. 21 | ]] 22 | 23 | local rcall = redis.call 24 | 25 | -- Includes 26 | --- @include "includes/batches" 27 | --- @include "includes/getTargetQueueList" 28 | --- @include "includes/removeDebounceKeyIfNeeded" 29 | 30 | local function removeJob(jobId, baseKey) 31 | local jobKey = baseKey .. jobId 32 | rcall("DEL", jobKey, jobKey .. 
':logs') 33 | end 34 | 35 | local function removeJobsByMaxAge(timestamp, maxAge, targetSet, prefix) 36 | local start = timestamp - maxAge * 1000 37 | local jobIds = rcall("ZREVRANGEBYSCORE", targetSet, start, "-inf") 38 | for i, jobId in ipairs(jobIds) do 39 | removeJob(jobId, prefix) 40 | end 41 | rcall("ZREMRANGEBYSCORE", targetSet, "-inf", start) 42 | end 43 | 44 | local function removeJobsByMaxCount(maxCount, targetSet, prefix) 45 | local start = maxCount 46 | local jobIds = rcall("ZREVRANGE", targetSet, start, -1) 47 | for i, jobId in ipairs(jobIds) do 48 | removeJob(jobId, prefix) 49 | end 50 | rcall("ZREMRANGEBYRANK", targetSet, 0, -(maxCount + 1)) 51 | end 52 | 53 | -- Check if we need to check for stalled jobs now. 54 | if rcall("EXISTS", KEYS[5]) == 1 then 55 | return {{}, {}} 56 | end 57 | 58 | rcall("SET", KEYS[5], ARGV[3], "PX", ARGV[4]) 59 | 60 | -- Move all stalled jobs to wait 61 | local stalling = rcall('SMEMBERS', KEYS[1]) 62 | local stalled = {} 63 | local failed = {} 64 | if(#stalling > 0) then 65 | rcall('DEL', KEYS[1]) 66 | 67 | local MAX_STALLED_JOB_COUNT = tonumber(ARGV[1]) 68 | 69 | -- Remove from active list 70 | for i, jobId in ipairs(stalling) do 71 | local jobKey = ARGV[2] .. jobId 72 | 73 | -- Check that the lock is also missing, then we can handle this job as really stalled. 74 | if(rcall("EXISTS", jobKey .. ":lock") == 0) then 75 | -- Remove from the active queue. 76 | local removed = rcall("LREM", KEYS[3], 1, jobId) 77 | 78 | if(removed > 0) then 79 | -- If this job has been stalled too many times, such as if it crashes the worker, then fail it. 80 | local stalledCount = rcall("HINCRBY", jobKey, "stalledCounter", 1) 81 | if(stalledCount > MAX_STALLED_JOB_COUNT) then 82 | local jobAttributes = rcall("HMGET", jobKey, "opts", "deid") 83 | local opts = cjson.decode(jobAttributes[1]) 84 | local removeOnFailType = type(opts["removeOnFail"]) 85 | rcall("ZADD", KEYS[4], ARGV[3], jobId) 86 | rcall("HMSET", jobKey, "failedReason", "job stalled more than allowable limit", 87 | "finishedOn", ARGV[3]) 88 | removeDebounceKeyIfNeeded(ARGV[2], jobAttributes[2]) 89 | rcall("PUBLISH", KEYS[4], '{"jobId":"' .. jobId .. '", "val": "job stalled more than maxStalledCount"}') 90 | 91 | if removeOnFailType == "number" then 92 | removeJobsByMaxCount(opts["removeOnFail"], 93 | KEYS[4], ARGV[2]) 94 | elseif removeOnFailType == "boolean" then 95 | if opts["removeOnFail"] then 96 | removeJob(jobId, ARGV[2]) 97 | rcall("ZREM", KEYS[4], jobId) 98 | end 99 | elseif removeOnFailType ~= "nil" then 100 | local maxAge = opts["removeOnFail"]["age"] 101 | local maxCount = opts["removeOnFail"]["count"] 102 | 103 | if maxAge ~= nil then 104 | removeJobsByMaxAge(ARGV[3], maxAge, 105 | KEYS[4], ARGV[2]) 106 | end 107 | 108 | if maxCount ~= nil and maxCount > 0 then 109 | removeJobsByMaxCount(maxCount, KEYS[4], 110 | ARGV[2]) 111 | end 112 | end 113 | 114 | table.insert(failed, jobId) 115 | else 116 | local target = getTargetQueueList(KEYS[6], KEYS[2], KEYS[7]) 117 | 118 | -- Move the job back to the wait queue, to immediately be picked up by a waiting worker. 119 | rcall("RPUSH", target, jobId) 120 | rcall('PUBLISH', KEYS[1] .. 
'@', jobId) 121 | table.insert(stalled, jobId) 122 | end 123 | end 124 | end 125 | end 126 | end 127 | 128 | -- Mark potentially stalled jobs 129 | local active = rcall('LRANGE', KEYS[3], 0, -1) 130 | 131 | if (#active > 0) then 132 | for from, to in batches(#active, 7000) do 133 | rcall('SADD', KEYS[1], unpack(active, from, to)) 134 | end 135 | end 136 | 137 | return {failed, stalled} 138 | -------------------------------------------------------------------------------- /lib/commands/moveToActive-8.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Move next job to be processed to active, lock it and fetch its data. The job 3 | may be delayed, in that case we need to move it to the delayed set instead. 4 | 5 | This operation guarantees that the worker owns the job during the locks 6 | expiration time. The worker is responsible of keeping the lock fresh 7 | so that no other worker picks this job again. 8 | 9 | Input: 10 | KEYS[1] wait key 11 | KEYS[2] active key 12 | KEYS[3] priority key 13 | KEYS[4] active event key 14 | KEYS[5] stalled key 15 | 16 | -- Rate limiting 17 | KEYS[6] rate limiter key 18 | KEYS[7] delayed key 19 | 20 | -- 21 | KEYS[8] drained key 22 | 23 | ARGV[1] key prefix 24 | ARGV[2] lock token 25 | ARGV[3] lock duration in milliseconds 26 | ARGV[4] timestamp 27 | ARGV[5] optional jobid 28 | 29 | ARGV[6] optional jobs per time unit (rate limiter) 30 | ARGV[7] optional time unit (rate limiter) 31 | ARGV[8] optional do not do anything with job if rate limit hit 32 | ARGV[9] optional rate limit by key 33 | ]] 34 | 35 | local rcall = redis.call 36 | 37 | local rateLimit = function(jobId, maxJobs) 38 | local rateLimiterKey = KEYS[6]; 39 | local limiterIndexTable = rateLimiterKey .. ":index" 40 | 41 | -- Rate limit by group? 42 | if(ARGV[9]) then 43 | local group = string.match(jobId, "[^:]+$") 44 | if group ~= nil then 45 | rateLimiterKey = rateLimiterKey .. ":" .. group 46 | end 47 | end 48 | 49 | -- -- key for storing rate limited jobs 50 | -- When a job has been previously rate limited it should be part of this set 51 | -- if the job is back here means that the delay time for this job has passed and now we should 52 | -- be able to process it again. 53 | local limitedSetKey = rateLimiterKey .. ":limited" 54 | local delay = 0 55 | 56 | -- -- Check if job was already limited 57 | local isLimited = rcall("SISMEMBER", limitedSetKey, jobId); 58 | 59 | if isLimited == 1 then 60 | -- Remove from limited zset since we are going to try to process it 61 | rcall("SREM", limitedSetKey, jobId) 62 | rcall("HDEL", limiterIndexTable, jobId) 63 | else 64 | -- If not, check if there are any limited jobs 65 | -- If the job has not been rate limited, we should check if there are any other rate limited jobs, because if that 66 | -- is the case we do not want to process this job, just calculate a delay for it and put it to "sleep". 67 | local numLimitedJobs = rcall("SCARD", limitedSetKey) 68 | 69 | if numLimitedJobs > 0 then 70 | -- Note, add some slack to compensate for drift. 
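-- Worked example with hypothetical numbers: 5 already-limited jobs, a window
-- of ARGV[7] = 1000 ms and maxJobs = 10 yield (5 * 1000 * 1.1) / 10 = 550 ms,
-- plus whatever remains of the current window (the PTTL term below).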
71 | delay = ((numLimitedJobs * ARGV[7] * 1.1) / maxJobs) + tonumber(rcall("PTTL", rateLimiterKey)) 72 | end 73 | end 74 | 75 | local jobCounter = tonumber(rcall("GET", rateLimiterKey)) 76 | if(jobCounter == nil) then 77 | jobCounter = 0 78 | end 79 | -- check if rate limit hit 80 | if (delay == 0) and (jobCounter >= maxJobs) then 81 | -- Seems like there are no current rated limited jobs, but the jobCounter has exceeded the number of jobs for this unit of time so we need to rate limit this job. 82 | local exceedingJobs = jobCounter - maxJobs 83 | delay = tonumber(rcall("PTTL", rateLimiterKey)) + ((exceedingJobs) * ARGV[7]) / maxJobs 84 | end 85 | 86 | if delay > 0 then 87 | local bounceBack = ARGV[8] 88 | if bounceBack == 'false' then 89 | local timestamp = delay + tonumber(ARGV[4]) 90 | -- put job into delayed queue 91 | rcall("ZADD", KEYS[7], timestamp * 0x1000 + bit.band(jobCounter, 0xfff), jobId) 92 | rcall("PUBLISH", KEYS[7], timestamp) 93 | rcall("SADD", limitedSetKey, jobId) 94 | 95 | -- store index so that we can delete rate limited data 96 | rcall("HSET", limiterIndexTable, jobId, limitedSetKey) 97 | 98 | end 99 | 100 | -- remove from active queue 101 | rcall("LREM", KEYS[2], 1, jobId) 102 | return true 103 | else 104 | -- false indicates not rate limited 105 | -- increment jobCounter only when a job is not rate limited 106 | if (jobCounter == 0) then 107 | rcall("PSETEX", rateLimiterKey, ARGV[7], 1) 108 | else 109 | rcall("INCR", rateLimiterKey) 110 | end 111 | return false 112 | end 113 | end 114 | 115 | local jobId = ARGV[5] 116 | 117 | if jobId ~= '' then 118 | -- clean stalled key 119 | rcall("SREM", KEYS[5], jobId) 120 | else 121 | -- move from wait to active 122 | jobId = rcall("RPOPLPUSH", KEYS[1], KEYS[2]) 123 | end 124 | 125 | if jobId then 126 | -- Check if we need to perform rate limiting. 127 | local maxJobs = tonumber(ARGV[6]) 128 | 129 | if maxJobs then 130 | if rateLimit(jobId, maxJobs) then 131 | return 132 | end 133 | end 134 | 135 | -- get a lock 136 | local jobKey = ARGV[1] .. jobId 137 | local lockKey = jobKey .. ':lock' 138 | rcall("SET", lockKey, ARGV[2], "PX", ARGV[3]) 139 | 140 | -- remove from priority 141 | rcall("ZREM", KEYS[3], jobId) 142 | rcall("PUBLISH", KEYS[4], jobId) 143 | rcall("HSET", jobKey, "processedOn", ARGV[4]) 144 | 145 | return {rcall("HGETALL", jobKey), jobId} -- get job data 146 | else 147 | rcall("PUBLISH", KEYS[8], "") 148 | end 149 | 150 | -------------------------------------------------------------------------------- /lib/commands/moveToDelayed-4.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Moves job from active to delayed set. 3 | 4 | Input: 5 | KEYS[1] active key 6 | KEYS[2] delayed key 7 | KEYS[3] job key 8 | KEYS[4] stalled key 9 | 10 | ARGV[1] delayedTimestamp 11 | ARGV[2] the id of the job 12 | ARGV[3] queue token 13 | 14 | Output: 15 | 0 - OK 16 | -1 - Missing job. 17 | -2 - Job is locked. 18 | 19 | Events: 20 | - delayed key. 
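    Note: ARGV[1] appears to arrive already encoded as timestamp * 0x1000 plus
    a 12-bit counter (the same encoding addJob uses), which is why the PUBLISH
    below divides the score by 0x1000 to recover the plain timestamp.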
21 | ]] 22 | local rcall = redis.call 23 | 24 | -- Includes 25 | --- @include "includes/removeLock" 26 | 27 | if rcall("EXISTS", KEYS[3]) == 1 then 28 | local errorCode = removeLock(KEYS[3], KEYS[4], ARGV[3], ARGV[2]) 29 | if errorCode < 0 then 30 | return errorCode 31 | end 32 | 33 | local numRemovedElements = rcall("LREM", KEYS[1], -1, ARGV[2]) 34 | if numRemovedElements < 1 then return -3 end 35 | 36 | local score = tonumber(ARGV[1]) 37 | rcall("ZADD", KEYS[2], score, ARGV[2]) 38 | rcall("PUBLISH", KEYS[2], (score / 0x1000)) 39 | 40 | return 0 41 | else 42 | return -1 43 | end 44 | -------------------------------------------------------------------------------- /lib/commands/moveToFinished-9.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Move job from active to a finished status (completed or failed) 3 | A job can only be moved to completed if it was active. 4 | The job must be locked before it can be moved to a finished status, 5 | and the lock must be released in this script. 6 | 7 | Input: 8 | KEYS[1] active key 9 | KEYS[2] completed/failed key 10 | KEYS[3] jobId key 11 | 12 | KEYS[4] wait key 13 | KEYS[5] priority key 14 | KEYS[6] active event key 15 | 16 | KEYS[7] delayed key 17 | KEYS[8] stalled key 18 | 19 | KEYS[9] metrics key 20 | 21 | ARGV[1] jobId 22 | ARGV[2] timestamp 23 | ARGV[3] msg property 24 | ARGV[4] return value / failed reason 25 | ARGV[5] token 26 | ARGV[6] shouldRemove 27 | ARGV[7] event data (? maybe just send jobid). 28 | ARGV[8] should fetch next job 29 | ARGV[9] base key 30 | ARGV[10] lock token 31 | ARGV[11] lock duration in milliseconds 32 | ARGV[12] maxMetricsSize 33 | 34 | Output: 35 | 0 OK 36 | -1 Missing key. 37 | -2 Missing lock. 38 | -3 - Job not in active set. 39 | 40 | Events: 41 | 'completed/failed' 42 | ]] 43 | local rcall = redis.call 44 | 45 | -- Includes 46 | --- @include "includes/collectMetrics" 47 | --- @include "includes/removeLock" 48 | --- @include "includes/removeDebounceKeyIfNeeded" 49 | 50 | if rcall("EXISTS", KEYS[3]) == 1 then -- // Make sure job exists 51 | local errorCode = removeLock(KEYS[3], KEYS[8], ARGV[5], ARGV[1]) 52 | if errorCode < 0 then 53 | return errorCode 54 | end 55 | 56 | -- Remove from active list (if not active we shall return error) 57 | local numRemovedElements = rcall("LREM", KEYS[1], -1, ARGV[1]) 58 | 59 | if numRemovedElements < 1 then return -3 end 60 | 61 | local debounceId = rcall("HGET", KEYS[3], "deid") 62 | removeDebounceKeyIfNeeded(ARGV[9], debounceId) 63 | 64 | -- Remove job? 65 | local keepJobs = cmsgpack.unpack(ARGV[6]) 66 | local maxCount = keepJobs['count'] 67 | local maxAge = keepJobs['age'] 68 | local targetSet = KEYS[2] 69 | local timestamp = ARGV[2] 70 | 71 | if maxCount ~= 0 then 72 | 73 | -- Add to complete/failed set 74 | rcall("ZADD", targetSet, timestamp, ARGV[1]) 75 | rcall("HMSET", KEYS[3], ARGV[3], ARGV[4], "finishedOn", timestamp) -- "returnvalue" / "failedReason" and "finishedOn" 76 | 77 | local function removeJobs(jobIds) 78 | for i, jobId in ipairs(jobIds) do 79 | local jobKey = ARGV[9] .. jobId 80 | local jobLogKey = jobKey .. ':logs' 81 | rcall("DEL", jobKey, jobLogKey) 82 | end 83 | end 84 | 85 | -- Remove old jobs? 
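    -- e.g. removeOnComplete = { age: 3600 } (removeOnFail for the failed set)
    -- drops every job that finished more than an hour before `timestamp`.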
86 | if maxAge ~= nil then 87 | local start = timestamp - maxAge * 1000 88 | local jobIds = rcall("ZREVRANGEBYSCORE", targetSet, start, "-inf") 89 | removeJobs(jobIds) 90 | rcall("ZREMRANGEBYSCORE", targetSet, "-inf", start) 91 | end 92 | 93 | if maxCount ~= nil and maxCount > 0 then 94 | local start = maxCount 95 | local jobIds = rcall("ZREVRANGE", targetSet, start, -1) 96 | removeJobs(jobIds) 97 | rcall("ZREMRANGEBYRANK", targetSet, 0, -(maxCount + 1)); 98 | end 99 | else 100 | local jobLogKey = KEYS[3] .. ':logs' 101 | rcall("DEL", KEYS[3], jobLogKey) 102 | end 103 | 104 | -- Collect metrics 105 | if ARGV[12] ~= "" then 106 | collectMetrics(KEYS[9], KEYS[9]..':data', ARGV[12], timestamp) 107 | end 108 | 109 | rcall("PUBLISH", targetSet, ARGV[7]) 110 | 111 | -- Try to get next job to avoid an extra roundtrip if the queue is not closing, 112 | -- and not rate limited. 113 | if (ARGV[8] == "1") then 114 | -- move from wait to active 115 | local jobId = rcall("RPOPLPUSH", KEYS[4], KEYS[1]) 116 | if jobId then 117 | local jobKey = ARGV[9] .. jobId 118 | local lockKey = jobKey .. ':lock' 119 | 120 | -- get a lock 121 | rcall("SET", lockKey, ARGV[11], "PX", ARGV[10]) 122 | 123 | rcall("ZREM", KEYS[5], jobId) -- remove from priority 124 | rcall("PUBLISH", KEYS[6], jobId) 125 | rcall("HSET", jobKey, "processedOn", ARGV[2]) 126 | 127 | return {rcall("HGETALL", jobKey), jobId} -- get job data 128 | end 129 | end 130 | 131 | return 0 132 | else 133 | return -1 134 | end 135 | -------------------------------------------------------------------------------- /lib/commands/obliterate-2.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Completely obliterates a queue and all of its contents 3 | Input: 4 | 5 | KEYS[1] meta-paused 6 | KEYS[2] base 7 | 8 | ARGV[1] count 9 | ARGV[2] force 10 | ]] 11 | -- This command completely destroys a queue including all of its jobs, current or past 12 | -- leaving no trace of its existence. Since this script needs to iterate to find all the job 13 | -- keys, consider that this call may be slow for very large queues. 14 | -- The queue needs to be "paused" or it will return an error 15 | -- If the queue has currently active jobs then the script by default will return error, 16 | -- however this behaviour can be overrided using the 'force' option. 17 | local maxCount = tonumber(ARGV[1]) 18 | local baseKey = KEYS[2] 19 | 20 | local rcall = redis.call 21 | 22 | -- Includes 23 | --- @include "includes/removeDebounceKey" 24 | 25 | local function getListItems(keyName, max) 26 | return rcall('LRANGE', keyName, 0, max - 1) 27 | end 28 | 29 | local function getZSetItems(keyName, max) 30 | return rcall('ZRANGE', keyName, 0, max - 1) 31 | end 32 | 33 | local function removeJobs(baseKey, keys) 34 | for i, key in ipairs(keys) do 35 | local jobKey = baseKey .. key 36 | rcall("DEL", jobKey, jobKey .. ':logs') 37 | removeDebounceKey(baseKey, jobKey) 38 | end 39 | maxCount = maxCount - #keys 40 | end 41 | 42 | local function removeListJobs(keyName, max) 43 | local jobs = getListItems(keyName, max) 44 | removeJobs(baseKey, jobs) 45 | rcall("LTRIM", keyName, #jobs, -1) 46 | end 47 | 48 | local function removeZSetJobs(keyName, max) 49 | local jobs = getZSetItems(keyName, max) 50 | removeJobs(baseKey, jobs) 51 | if (#jobs > 0) then rcall("ZREM", keyName, unpack(jobs)) end 52 | end 53 | 54 | local function removeLockKeys(keys) 55 | for i, key in ipairs(keys) do rcall("DEL", baseKey .. key .. 
':lock') end
56 | end
57 | 
58 | -- 1) Check if paused; if not, return with an error.
59 | if rcall("EXISTS", KEYS[1]) ~= 1 then
60 |   return -1 -- Error, NotPaused
61 | end
62 | 
63 | -- 2) Check if there are active jobs; if there are and "force" is not set, return an error.
64 | local activeKey = baseKey .. 'active'
65 | local activeJobs = getListItems(activeKey, maxCount)
66 | if (#activeJobs > 0) then
67 |   if (ARGV[2] == "") then
68 |     return -2 -- Error, ExistsActiveJobs
69 |   end
70 | end
71 | 
72 | removeLockKeys(activeJobs)
73 | removeJobs(baseKey, activeJobs)
74 | rcall("LTRIM", activeKey, #activeJobs, -1)
75 | if (maxCount <= 0) then return 1 end
76 | 
77 | local waitKey = baseKey .. 'paused'
78 | removeListJobs(waitKey, maxCount)
79 | if (maxCount <= 0) then return 1 end
80 | 
81 | local delayedKey = baseKey .. 'delayed'
82 | removeZSetJobs(delayedKey, maxCount)
83 | if (maxCount <= 0) then return 1 end
84 | 
85 | local completedKey = baseKey .. 'completed'
86 | removeZSetJobs(completedKey, maxCount)
87 | if (maxCount <= 0) then return 1 end
88 | 
89 | local failedKey = baseKey .. 'failed'
90 | removeZSetJobs(failedKey, maxCount)
91 | if (maxCount <= 0) then return 1 end
92 | 
93 | if (maxCount > 0) then
94 |   rcall("DEL", baseKey .. 'priority')
95 |   rcall("DEL", baseKey .. 'stalled-check')
96 |   rcall("DEL", baseKey .. 'stalled')
97 |   rcall("DEL", baseKey .. 'meta-paused')
98 |   rcall("DEL", baseKey .. 'meta')
99 |   rcall("DEL", baseKey .. 'id')
100 |   rcall("DEL", baseKey .. 'repeat')
101 |   rcall("DEL", baseKey .. 'metrics:completed')
102 |   rcall("DEL", baseKey .. 'metrics:completed:data')
103 |   rcall("DEL", baseKey .. 'metrics:failed')
104 |   rcall("DEL", baseKey .. 'metrics:failed:data')
105 |   return 0
106 | else
107 |   return 1
108 | end
109 | 
-------------------------------------------------------------------------------- /lib/commands/pause-5.lua: --------------------------------------------------------------------------------
1 | --[[
2 |   Pauses or resumes a queue globally.
3 | 
4 |   Input:
5 |     KEYS[1] 'wait' or 'paused'
6 |     KEYS[2] 'paused' or 'wait'
7 |     KEYS[3] 'meta-paused'
8 |     KEYS[4] 'paused' or 'resumed' event.
9 |     KEYS[5] 'meta' this key is only used in BullMQ and above.
10 | 
11 |    ARGV[1] 'paused' or 'resumed'
12 | 
13 |   Event:
14 |    publish a 'paused' or 'resumed' event.
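   Note: the RENAME below swaps the wait and paused lists atomically, so no
   pending jobs are lost; meta-paused is the flag the other scripts consult
   via getTargetQueueList. In the Node API this script is reached through
   queue.pause() and queue.resume().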
15 | ]] 16 | local rcall = redis.call 17 | 18 | if rcall("EXISTS", KEYS[1]) == 1 then 19 | rcall("RENAME", KEYS[1], KEYS[2]) 20 | end 21 | 22 | if ARGV[1] == "paused" then 23 | rcall("SET", KEYS[3], 1) 24 | 25 | -- for forwards compatibility 26 | rcall("HSET", KEYS[5], "paused", 1) 27 | else 28 | rcall("DEL", KEYS[3]) 29 | 30 | -- for forwards compatibility 31 | rcall("HDEL", KEYS[5], "paused") 32 | 33 | end 34 | 35 | rcall("PUBLISH", KEYS[4], ARGV[1]) 36 | -------------------------------------------------------------------------------- /lib/commands/promote-5.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Promotes a job that is currently "delayed" to the "waiting" state 3 | 4 | Input: 5 | KEYS[1] 'delayed' 6 | KEYS[2] 'wait' 7 | KEYS[3] 'paused' 8 | KEYS[4] 'meta-paused' 9 | KEYS[5] 'priority' 10 | 11 | ARGV[1] queue.toKey('') 12 | ARGV[2] jobId 13 | ARGV[3] queue token 14 | 15 | Events: 16 | 'waiting' 17 | ]] 18 | local rcall = redis.call; 19 | local jobId = ARGV[2] 20 | 21 | -- Includes 22 | --- @include "includes/addJobWithPriority" 23 | --- @include "includes/getTargetQueueList" 24 | 25 | if rcall("ZREM", KEYS[1], jobId) == 1 then 26 | local priority = tonumber(rcall("HGET", ARGV[1] .. jobId, "priority")) or 0 27 | 28 | local target = getTargetQueueList(KEYS[4], KEYS[2], KEYS[3]) 29 | 30 | if priority == 0 then 31 | -- LIFO or FIFO 32 | rcall("LPUSH", target, jobId) 33 | else 34 | addJobWithPriority(KEYS[5], priority, jobId, target) 35 | end 36 | 37 | -- Emit waiting event (wait..ing@token) 38 | rcall("PUBLISH", KEYS[2] .. "ing@" .. ARGV[3], jobId) 39 | 40 | rcall("HSET", ARGV[1] .. jobId, "delay", 0) 41 | 42 | return 0 43 | else 44 | return -1 45 | end 46 | -------------------------------------------------------------------------------- /lib/commands/releaseLock-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Release lock 3 | 4 | Input: 5 | KEYS[1] 'lock', 6 | 7 | ARGV[1] token 8 | ARGV[2] lock duration in milliseconds 9 | 10 | Output: 11 | "OK" if lock extented succesfully. 12 | ]] 13 | local rcall = redis.call 14 | 15 | if rcall("GET", KEYS[1]) == ARGV[1] then 16 | return rcall("DEL", KEYS[1]) 17 | else 18 | return 0 19 | end 20 | -------------------------------------------------------------------------------- /lib/commands/removeJob-11.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Remove a job from all the queues it may be in as well as all its data. 3 | In order to be able to remove a job, it must be unlocked. 4 | 5 | Input: 6 | KEYS[1] 'active', 7 | KEYS[2] 'wait', 8 | KEYS[3] 'delayed', 9 | KEYS[4] 'paused', 10 | KEYS[5] 'completed', 11 | KEYS[6] 'failed', 12 | KEYS[7] 'priority', 13 | KEYS[8] jobId key 14 | KEYS[9] job logs 15 | KEYS[10] rate limiter index table 16 | KEYS[11] prefix key 17 | 18 | ARGV[1] jobId 19 | ARGV[2] lock token 20 | 21 | Events: 22 | 'removed' 23 | ]] 24 | 25 | -- TODO PUBLISH global event 'removed' 26 | 27 | local rcall = redis.call 28 | 29 | -- Includes 30 | --- @include "includes/removeDebounceKey" 31 | 32 | local lockKey = KEYS[8] .. 
':lock' 33 | local lock = rcall("GET", lockKey) 34 | if not lock then -- or (lock == ARGV[2])) then 35 | local jobId = ARGV[1] 36 | rcall("LREM", KEYS[1], 0, jobId) 37 | rcall("LREM", KEYS[2], 0, jobId) 38 | rcall("ZREM", KEYS[3], jobId) 39 | rcall("LREM", KEYS[4], 0, jobId) 40 | rcall("ZREM", KEYS[5], jobId) 41 | rcall("ZREM", KEYS[6], jobId) 42 | rcall("ZREM", KEYS[7], jobId) 43 | 44 | removeDebounceKey(KEYS[11], KEYS[8]) 45 | 46 | rcall("DEL", KEYS[8]) 47 | rcall("DEL", KEYS[9]) 48 | 49 | -- delete keys related to rate limiter 50 | local limiterIndexTable = KEYS[10] .. ":index" 51 | local limitedSetKey = rcall("HGET", limiterIndexTable, jobId) 52 | if limitedSetKey then 53 | rcall("SREM", limitedSetKey, jobId) 54 | rcall("HDEL", limiterIndexTable, jobId) 55 | end 56 | return 1 57 | else 58 | return 0 59 | end 60 | -------------------------------------------------------------------------------- /lib/commands/removeJobs-8.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Remove all jobs matching a given pattern from all the queues they may be in as well as all its data. 3 | In order to be able to remove any job, they must be unlocked. 4 | 5 | Input: 6 | KEYS[1] 'active', 7 | KEYS[2] 'wait', 8 | KEYS[3] 'delayed', 9 | KEYS[4] 'paused', 10 | KEYS[5] 'completed', 11 | KEYS[6] 'failed', 12 | KEYS[7] 'priority', 13 | KEYS[8] 'rate-limiter' 14 | 15 | ARGV[1] prefix 16 | ARGV[2] pattern 17 | ARGV[3] cursor 18 | 19 | Events: 20 | 'removed' 21 | ]] 22 | 23 | -- TODO PUBLISH global events 'removed' 24 | 25 | local rcall = redis.call 26 | local result = rcall("SCAN", ARGV[3], "MATCH", ARGV[1] .. ARGV[2]) 27 | local cursor = result[1]; 28 | local jobKeys = result[2]; 29 | local removed = {} 30 | 31 | local prefixLen = string.len(ARGV[1]) + 1 32 | for i, jobKey in ipairs(jobKeys) do 33 | local keyTypeResp = rcall("TYPE", jobKey) 34 | if keyTypeResp["ok"] == "hash" then 35 | local jobId = string.sub(jobKey, prefixLen) 36 | local lockKey = jobKey .. ':lock' 37 | local lock = redis.call("GET", lockKey) 38 | if not lock then 39 | rcall("LREM", KEYS[1], 0, jobId) 40 | rcall("LREM", KEYS[2], 0, jobId) 41 | rcall("ZREM", KEYS[3], jobId) 42 | rcall("LREM", KEYS[4], 0, jobId) 43 | rcall("ZREM", KEYS[5], jobId) 44 | rcall("ZREM", KEYS[6], jobId) 45 | rcall("ZREM", KEYS[7], jobId) 46 | rcall("DEL", jobKey) 47 | rcall("DEL", jobKey .. ':logs') 48 | 49 | -- delete keys related to rate limiter 50 | local limiterIndexTable = KEYS[8] .. ":index" 51 | local limitedSetKey = rcall("HGET", limiterIndexTable, jobId) 52 | 53 | if limitedSetKey then 54 | rcall("SREM", limitedSetKey, jobId) 55 | rcall("HDEL", limiterIndexTable, jobId) 56 | end 57 | table.insert(removed, jobId) 58 | end 59 | end 60 | end 61 | return {cursor, removed} 62 | -------------------------------------------------------------------------------- /lib/commands/removeRepeatable-2.lua: -------------------------------------------------------------------------------- 1 | 2 | --[[ 3 | Removes a repeatable job 4 | Input: 5 | KEYS[1] repeat jobs key 6 | KEYS[2] delayed jobs key 7 | 8 | ARGV[1] repeat job id 9 | ARGV[2] repeat job key 10 | ARGV[3] queue key 11 | ]] 12 | local millis = redis.call("ZSCORE", KEYS[1], ARGV[2]) 13 | 14 | if(millis) then 15 | -- Delete next programmed job. 16 | local repeatJobId = ARGV[1] .. millis 17 | if(redis.call("ZREM", KEYS[2], repeatJobId) == 1) then 18 | redis.call("DEL", ARGV[3] .. 
repeatJobId) 19 | end 20 | end 21 | 22 | redis.call("ZREM", KEYS[1], ARGV[2]); 23 | -------------------------------------------------------------------------------- /lib/commands/reprocessJob-6.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Attempts to reprocess a job 3 | 4 | Input: 5 | KEYS[1] job key 6 | KEYS[2] job lock key 7 | KEYS[3] job state 8 | KEYS[4] wait key 9 | KEYS[5] meta-pause 10 | KEYS[6] paused key 11 | 12 | ARGV[1] job.id, 13 | ARGV[2] (job.opts.lifo ? 'R' : 'L') + 'PUSH' 14 | ARGV[3] token 15 | ARGV[4] timestamp 16 | 17 | Output: 18 | 1 means the operation was a success 19 | 0 means the job does not exist 20 | -1 means the job is currently locked and can't be retried. 21 | -2 means the job was not found in the expected set. 22 | 23 | 24 | ]] 25 | local rcall = redis.call; 26 | if (rcall("EXISTS", KEYS[1]) == 1) then 27 | if (rcall("EXISTS", KEYS[2]) == 0) then 28 | rcall("HDEL", KEYS[1], "finishedOn", "processedOn", "failedReason") 29 | rcall("HSET", KEYS[1], "retriedOn", ARGV[4]) 30 | 31 | if (rcall("ZREM", KEYS[3], ARGV[1]) == 1) then 32 | local target 33 | if rcall("EXISTS", KEYS[5]) ~= 1 then 34 | target = KEYS[4] 35 | else 36 | target = KEYS[6] 37 | end 38 | 39 | rcall(ARGV[2], target, ARGV[1]) 40 | 41 | -- Emit waiting event (wait..ing@token) 42 | rcall("PUBLISH", KEYS[4] .. "ing@" .. ARGV[3], ARGV[1]) 43 | return 1 44 | else 45 | return -2 46 | end 47 | else 48 | return -1 49 | end 50 | else 51 | return 0 52 | end 53 | -------------------------------------------------------------------------------- /lib/commands/retryJob-7.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Retries a failed job by moving it back to the wait queue. 
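  The job must still be in the active list and must either hold no lock or
  hold one matching the supplied token (a token of "0" skips the lock check);
  otherwise one of the error codes below is returned.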
3 | 4 | Input: 5 | KEYS[1] 'active', 6 | KEYS[2] 'wait' 7 | KEYS[3] jobId key 8 | KEYS[4] 'meta-paused' 9 | KEYS[5] 'paused' 10 | KEYS[6] stalled key 11 | KEYS[7] 'priority' 12 | 13 | ARGV[1] pushCmd 14 | ARGV[2] jobId 15 | ARGV[3] token 16 | 17 | Events: 18 | 'prefix:added' 19 | 20 | Output: 21 | 0 - OK 22 | -1 - Missing key 23 | -2 - Job Not locked 24 | -3 - Job not in active set 25 | ]] 26 | local rcall = redis.call 27 | 28 | -- Includes 29 | --- @include "includes/addJobWithPriority" 30 | --- @include "includes/getTargetQueueList" 31 | --- @include "includes/removeLock" 32 | 33 | if rcall("EXISTS", KEYS[3]) == 1 then 34 | local errorCode = removeLock(KEYS[3], KEYS[6], ARGV[3], ARGV[2]) 35 | if errorCode < 0 then 36 | return errorCode 37 | end 38 | 39 | local numRemovedElements = rcall("LREM", KEYS[1], -1, ARGV[2]) 40 | if numRemovedElements < 1 then return -3 end 41 | 42 | local target = getTargetQueueList(KEYS[4], KEYS[2], KEYS[5]) 43 | 44 | local priority = tonumber(rcall("HGET", KEYS[3], "priority")) or 0 45 | 46 | if priority == 0 then 47 | -- LIFO or FIFO 48 | rcall(ARGV[1], target, ARGV[2]) 49 | else 50 | addJobWithPriority(KEYS[7], priority, ARGV[2], target) 51 | end 52 | 53 | return 0 54 | else 55 | return -1 56 | end 57 | -------------------------------------------------------------------------------- /lib/commands/retryJobs-5.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Attempts to retry all failed jobs 3 | 4 | Input: 5 | KEYS[1] base key 6 | KEYS[2] failed state key 7 | KEYS[3] wait state key 8 | KEYS[4] 'meta-paused' 9 | KEYS[5] 'paused' 10 | 11 | ARGV[1] count 12 | 13 | Output: 14 | 1 means the operation is not completed 15 | 0 means the operation is completed 16 | ]] 17 | local baseKey = KEYS[1] 18 | local maxCount = tonumber(ARGV[1]) 19 | 20 | local rcall = redis.call; 21 | 22 | -- Includes 23 | --- @include "includes/batches" 24 | 25 | local function getZSetItems(keyName, max) 26 | return rcall('ZRANGE', keyName, 0, max - 1) 27 | end 28 | 29 | local jobs = getZSetItems(KEYS[2], maxCount) 30 | 31 | if (#jobs > 0) then 32 | for i, key in ipairs(jobs) do 33 | local jobKey = baseKey .. key 34 | rcall("HDEL", jobKey, "finishedOn", "processedOn", "failedReason") 35 | end 36 | 37 | local target 38 | if rcall("EXISTS", KEYS[4]) ~= 1 then 39 | target = KEYS[3] 40 | else 41 | target = KEYS[5] 42 | end 43 | 44 | for from, to in batches(#jobs, 7000) do 45 | rcall("ZREM", KEYS[2], unpack(jobs, from, to)) 46 | rcall("LPUSH", target, unpack(jobs, from, to)) 47 | end 48 | end 49 | 50 | maxCount = maxCount - #jobs 51 | 52 | if (maxCount <= 0) then return 1 end 53 | 54 | return 0 55 | -------------------------------------------------------------------------------- /lib/commands/saveStacktrace-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Save stacktrace and failedReason. 
3 | 4 | Input: 5 | KEYS[1] job key 6 | 7 | ARGV[1] stacktrace 8 | ARGV[2] failedReason 9 | ARGV[3] attemptsMade 10 | 11 | Output: 12 | 0 - OK 13 | -1 - Missing key 14 | ]] 15 | local rcall = redis.call 16 | 17 | if rcall("EXISTS", KEYS[1]) == 1 then 18 | rcall("HMSET", KEYS[1], "stacktrace", ARGV[1], "failedReason", ARGV[2], 19 | "attemptsMade", ARGV[3]) 20 | 21 | return 0 22 | else 23 | return -1 24 | end 25 | -------------------------------------------------------------------------------- /lib/commands/takeLock-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Takes a lock 3 | 4 | Input: 5 | KEYS[1] 'lock', 6 | 7 | ARGV[1] token 8 | ARGV[2] lock duration in milliseconds 9 | 10 | Output: 11 | "OK" if lock taken successfully. 12 | ]] 13 | if redis.call("SET", KEYS[1], ARGV[1], "NX", "PX", ARGV[2]) then 14 | return 1 15 | else 16 | return 0 17 | end 18 | -------------------------------------------------------------------------------- /lib/commands/updateData-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Update job data 3 | 4 | Input: 5 | KEYS[1] Job id key 6 | 7 | ARGV[1] data 8 | 9 | Output: 10 | 0 - OK 11 | -1 - Missing job. 12 | ]] 13 | local rcall = redis.call 14 | 15 | if rcall("EXISTS",KEYS[1]) == 1 then -- // Make sure job exists 16 | rcall("HSET", KEYS[1], "data", ARGV[1]) 17 | return 0 18 | else 19 | return -1 20 | end 21 | -------------------------------------------------------------------------------- /lib/commands/updateDelaySet-6.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Updates the delay set, by picking a delayed job that should 3 | be processed now. 4 | 5 | Input: 6 | KEYS[1] 'delayed' 7 | KEYS[2] 'active' 8 | KEYS[3] 'wait' 9 | KEYS[4] 'priority' 10 | 11 | KEYS[5] 'paused' 12 | KEYS[6] 'meta-paused' 13 | 14 | ARGV[1] queue.toKey('') 15 | ARGV[2] delayed timestamp 16 | ARGV[3] queue token 17 | 18 | Events: 19 | 'removed' 20 | ]] 21 | local rcall = redis.call; 22 | 23 | -- Includes 24 | --- @include "includes/addJobWithPriority" 25 | --- @include "includes/getTargetQueueList" 26 | 27 | -- Try to get as much as 1000 jobs at once 28 | local jobs = rcall("ZRANGEBYSCORE", KEYS[1], 0, tonumber(ARGV[2]) * 0x1000, "LIMIT", 0, 1000) 29 | 30 | if(#jobs > 0) then 31 | rcall("ZREM", KEYS[1], unpack(jobs)) 32 | 33 | -- check if we need to use push in paused instead of waiting 34 | local target = getTargetQueueList(KEYS[6], KEYS[3], KEYS[5]) 35 | 36 | for _, jobId in ipairs(jobs) do 37 | -- Is this really needed? 38 | rcall("LREM", KEYS[2], 0, jobId) 39 | 40 | local priority = tonumber(rcall("HGET", ARGV[1] .. jobId, "priority")) or 0 41 | 42 | if priority == 0 then 43 | -- LIFO or FIFO 44 | rcall("LPUSH", target, jobId) 45 | else 46 | addJobWithPriority(KEYS[4], priority, jobId, target) 47 | end 48 | 49 | -- Emit waiting event (wait..ing@token) 50 | rcall("PUBLISH", KEYS[3] .. "ing@" .. ARGV[3], jobId) 51 | rcall("HSET", ARGV[1] .. 
jobId, "delay", 0) 52 | end 53 | end 54 | 55 | local nextTimestamp = rcall("ZRANGE", KEYS[1], 0, 0, "WITHSCORES")[2] 56 | if(nextTimestamp ~= nil) then 57 | rcall("PUBLISH", KEYS[1], nextTimestamp / 0x1000) 58 | end 59 | return nextTimestamp 60 | -------------------------------------------------------------------------------- /lib/commands/updateProgress-2.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Update job progress 3 | 4 | Input: 5 | KEYS[1] Job id key 6 | KEYS[2] progress event key 7 | 8 | ARGV[1] progress 9 | ARGV[2] event data 10 | 11 | Event: 12 | progress(jobId, progress) 13 | ]] 14 | local rcall = redis.call 15 | if rcall("EXISTS", KEYS[1]) == 1 then -- // Make sure job exists 16 | rcall("HSET", KEYS[1], "progress", ARGV[1]) 17 | rcall("PUBLISH", KEYS[2], ARGV[2]) 18 | return 0 19 | else 20 | return -1 21 | end 22 | -------------------------------------------------------------------------------- /lib/errors.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports.Messages = { 4 | RETRY_JOB_NOT_EXIST: "Couldn't retry job: The job doesn't exist", 5 | RETRY_JOB_IS_LOCKED: "Couldn't retry job: The job is locked", 6 | RETRY_JOB_NOT_FAILED: 7 | "Couldn't retry job: The job has been already retried or has not failed", 8 | MISSING_REDIS_OPTS: `Using a redis instance with enableReadyCheck or maxRetriesPerRequest for bclient/subscriber is not permitted. 9 | see https://github.com/OptimalBits/bull/issues/1873 10 | ` 11 | }; 12 | -------------------------------------------------------------------------------- /lib/getters.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const _ = require('lodash'); 4 | const Job = require('./job'); 5 | const scripts = require('./scripts'); 6 | 7 | module.exports = function(Queue) { 8 | Queue.prototype.getJob = async function(jobId) { 9 | await this.isReady(); 10 | return Job.fromId(this, jobId); 11 | }; 12 | 13 | Queue.prototype.getCountsPerPriority = async function(priorities) { 14 | const uniquePriorities = [...new Set(priorities)]; 15 | const responses = await scripts.getCountsPerPriority( 16 | this, 17 | uniquePriorities 18 | ); 19 | 20 | const counts = {}; 21 | responses.forEach((res, index) => { 22 | counts[`${uniquePriorities[index]}`] = res || 0; 23 | }); 24 | 25 | return counts; 26 | }; 27 | 28 | Queue.prototype._commandByType = function(types, count, callback) { 29 | return _.map(types, type => { 30 | type = type === 'waiting' ? 'wait' : type; // alias 31 | 32 | const key = this.toKey(type); 33 | 34 | switch (type) { 35 | case 'completed': 36 | case 'failed': 37 | case 'delayed': 38 | case 'repeat': 39 | return callback(key, count ? 'zcard' : 'zrange'); 40 | case 'active': 41 | case 'wait': 42 | case 'paused': 43 | return callback(key, count ? 'llen' : 'lrange'); 44 | } 45 | }); 46 | }; 47 | 48 | /** 49 | Returns the number of jobs waiting to be processed. 
50 | */ 51 | Queue.prototype.count = function() { 52 | return this.getJobCountByTypes('wait', 'paused', 'delayed'); 53 | }; 54 | 55 | // Job counts by type 56 | // Queue#getJobCountByTypes('completed') => completed count 57 | // Queue#getJobCountByTypes('completed,failed') => completed + failed count 58 | // Queue#getJobCountByTypes('completed', 'failed') => completed + failed count 59 | // Queue#getJobCountByTypes('completed,waiting', 'failed') => completed + waiting + failed count 60 | Queue.prototype.getJobCountByTypes = function() { 61 | return this.getJobCounts.apply(this, arguments).then(result => { 62 | return _.chain(result) 63 | .values() 64 | .sum() 65 | .value(); 66 | }); 67 | }; 68 | 69 | /** 70 | * Returns the job counts for each type specified or every list/set in the queue by default. 71 | * 72 | */ 73 | Queue.prototype.getJobCounts = function() { 74 | const types = parseTypeArg(arguments); 75 | const multi = this.multi(); 76 | 77 | this._commandByType(types, true, (key, command) => { 78 | multi[command](key); 79 | }); 80 | 81 | return multi.exec().then(res => { 82 | const counts = {}; 83 | res.forEach((res, index) => { 84 | counts[types[index]] = res[1] || 0; 85 | }); 86 | return counts; 87 | }); 88 | }; 89 | 90 | Queue.prototype.getCompletedCount = function() { 91 | return this.getJobCountByTypes('completed'); 92 | }; 93 | 94 | Queue.prototype.getFailedCount = function() { 95 | return this.getJobCountByTypes('failed'); 96 | }; 97 | 98 | Queue.prototype.getDelayedCount = function() { 99 | return this.getJobCountByTypes('delayed'); 100 | }; 101 | 102 | Queue.prototype.getActiveCount = function() { 103 | return this.getJobCountByTypes('active'); 104 | }; 105 | 106 | Queue.prototype.getWaitingCount = function() { 107 | return this.getJobCountByTypes('wait', 'paused'); 108 | }; 109 | 110 | /** 111 | * 112 | * @returns the potential stalled jobs. Only useful for tests. 113 | */ 114 | Queue.prototype.getStalledCount = function() { 115 | const key = this.toKey('stalled'); 116 | return this.client.scard(key); 117 | }; 118 | 119 | // TO BE DEPRECATED ---> 120 | Queue.prototype.getPausedCount = function() { 121 | return this.getJobCountByTypes('paused'); 122 | }; 123 | // <----- 124 | 125 | Queue.prototype.getWaiting = function(start, end, opts) { 126 | return this.getJobs(['wait', 'paused'], start, end, true, opts); 127 | }; 128 | 129 | Queue.prototype.getActive = function(start, end, opts) { 130 | return this.getJobs('active', start, end, true, opts); 131 | }; 132 | 133 | Queue.prototype.getDelayed = function(start, end, opts) { 134 | return this.getJobs('delayed', start, end, true, opts); 135 | }; 136 | 137 | Queue.prototype.getCompleted = function(start, end, opts) { 138 | return this.getJobs('completed', start, end, false, opts); 139 | }; 140 | 141 | Queue.prototype.getFailed = function(start, end, opts) { 142 | return this.getJobs('failed', start, end, false, opts); 143 | }; 144 | 145 | Queue.prototype.getRanges = function(types, start, end, asc) { 146 | start = _.isUndefined(start) ? 0 : start; 147 | end = _.isUndefined(end) ? 
-1 : end; 148 | 149 | const multi = this.multi(); 150 | const multiCommands = []; 151 | 152 | this._commandByType(parseTypeArg(types), false, (key, command) => { 153 | switch (command) { 154 | case 'lrange': 155 | if (asc) { 156 | multiCommands.push('lrange'); 157 | multi.lrange(key, -(end + 1), -(start + 1)); 158 | } else { 159 | multi.lrange(key, start, end); 160 | } 161 | break; 162 | case 'zrange': 163 | multiCommands.push('zrange'); 164 | if (asc) { 165 | multi.zrange(key, start, end); 166 | } else { 167 | multi.zrevrange(key, start, end); 168 | } 169 | break; 170 | } 171 | }); 172 | 173 | return multi.exec().then(responses => { 174 | let results = []; 175 | 176 | responses.forEach((response, index) => { 177 | const result = response[1] || []; 178 | 179 | if (asc && multiCommands[index] === 'lrange') { 180 | results = results.concat(result.reverse()); 181 | } else { 182 | results = results.concat(result); 183 | } 184 | }); 185 | return results; 186 | }); 187 | }; 188 | 189 | Queue.prototype.getJobs = function(types, start, end, asc, opts) { 190 | return this.getRanges(types, start, end, asc).then(jobIds => { 191 | return Promise.all(jobIds.map(jobId => this.getJobFromId(jobId, opts))); 192 | }); 193 | }; 194 | 195 | Queue.prototype.getJobLogs = function(jobId, start, end, asc = true) { 196 | start = _.isUndefined(start) ? 0 : start; 197 | end = _.isUndefined(end) ? -1 : end; 198 | 199 | const multi = this.multi(); 200 | 201 | const logsKey = this.toKey(jobId + ':logs'); 202 | if (asc) { 203 | multi.lrange(logsKey, start, end); 204 | } else { 205 | multi.lrange(logsKey, -(end + 1), -(start + 1)); 206 | } 207 | multi.llen(logsKey); 208 | return multi.exec().then(result => { 209 | if (!asc) { 210 | result[0][1].reverse(); 211 | } 212 | return { 213 | logs: result[0][1], 214 | count: result[1][1] 215 | }; 216 | }); 217 | }; 218 | 219 | /** 220 | * Get queue metrics related to the queue. 221 | * 222 | * This method returns the gathered metrics for the queue. 223 | * The metrics are represented as an array of job counts 224 | * per unit of time (1 minute). 225 | * 226 | * @param start - Start point of the metrics, where 0 227 | * is the newest point to be returned. 228 | * @param end - End poinf of the metrics, where -1 is the 229 | * oldest point to be returned. 230 | * 231 | * @returns - Returns an object with queue metrics. 232 | */ 233 | Queue.prototype.getMetrics = async function(type, start = 0, end = -1) { 234 | const metricsKey = this.toKey(`metrics:${type}`); 235 | const dataKey = `${metricsKey}:data`; 236 | 237 | const multi = this.multi(); 238 | multi.hmget(metricsKey, 'count', 'prevTS', 'prevCount'); 239 | multi.lrange(dataKey, start, end); 240 | multi.llen(dataKey); 241 | 242 | const [hmget, range, len] = await multi.exec(); 243 | const [err, [count, prevTS, prevCount]] = hmget; 244 | const [err2, data] = range; 245 | const [err3, numPoints] = len; 246 | if (err || err2) { 247 | throw err || err2 || err3; 248 | } 249 | 250 | return { 251 | meta: { 252 | count: parseInt(count || '0', 10), 253 | prevTS: parseInt(prevTS || '0', 10), 254 | prevCount: parseInt(prevCount || '0', 10) 255 | }, 256 | data, 257 | count: numPoints 258 | }; 259 | }; 260 | }; 261 | 262 | function parseTypeArg(args) { 263 | const types = _.chain([]) 264 | .concat(args) 265 | .join(',') 266 | .split(/\s*,\s*/g) 267 | .compact() 268 | .value(); 269 | 270 | return types.length 271 | ? 
types 272 | : ['waiting', 'active', 'completed', 'failed', 'delayed', 'paused']; 273 | } 274 | -------------------------------------------------------------------------------- /lib/p-timeout.js: -------------------------------------------------------------------------------- 1 | // Extracted from p-timeout https://github.com/sindresorhus/p-timeout 2 | // as it is not commonjs compatible. This is version 5.0.2 3 | 'use strict'; 4 | 5 | class TimeoutError extends Error { 6 | constructor(message) { 7 | super(message); 8 | this.name = 'TimeoutError'; 9 | } 10 | } 11 | 12 | module.exports.TimeoutError = TimeoutError; 13 | 14 | module.exports.pTimeout = function pTimeout( 15 | promise, 16 | milliseconds, 17 | fallback, 18 | options 19 | ) { 20 | let timer; 21 | const cancelablePromise = new Promise((resolve, reject) => { 22 | if (typeof milliseconds !== 'number' || Math.sign(milliseconds) !== 1) { 23 | throw new TypeError( 24 | `Expected \`milliseconds\` to be a positive number, got \`${milliseconds}\`` 25 | ); 26 | } 27 | 28 | if (milliseconds === Number.POSITIVE_INFINITY) { 29 | resolve(promise); 30 | return; 31 | } 32 | 33 | options = { 34 | customTimers: { setTimeout, clearTimeout }, 35 | ...options 36 | }; 37 | 38 | timer = options.customTimers.setTimeout.call( 39 | undefined, 40 | () => { 41 | if (typeof fallback === 'function') { 42 | try { 43 | resolve(fallback()); 44 | } catch (error) { 45 | reject(error); 46 | } 47 | 48 | return; 49 | } 50 | 51 | const message = 52 | typeof fallback === 'string' 53 | ? fallback 54 | : `Promise timed out after ${milliseconds} milliseconds`; 55 | const timeoutError = 56 | fallback instanceof Error ? fallback : new TimeoutError(message); 57 | 58 | if (typeof promise.cancel === 'function') { 59 | promise.cancel(); 60 | } 61 | 62 | reject(timeoutError); 63 | }, 64 | milliseconds 65 | ); 66 | 67 | (async () => { 68 | try { 69 | resolve(await promise); 70 | } catch (error) { 71 | reject(error); 72 | } finally { 73 | options.customTimers.clearTimeout.call(undefined, timer); 74 | } 75 | })(); 76 | }); 77 | 78 | cancelablePromise['clear'] = () => { 79 | clearTimeout(timer); 80 | timer = undefined; 81 | }; 82 | 83 | return cancelablePromise; 84 | }; 85 | -------------------------------------------------------------------------------- /lib/process/child-pool.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const fork = require('child_process').fork; 4 | const path = require('path'); 5 | const _ = require('lodash'); 6 | const getPort = require('get-port'); 7 | const { killAsync } = require('./utils'); 8 | 9 | const CHILD_KILL_TIMEOUT = 30000; 10 | 11 | const ChildPool = function ChildPool() { 12 | if (!(this instanceof ChildPool)) { 13 | return new ChildPool(); 14 | } 15 | 16 | this.retained = {}; 17 | this.free = {}; 18 | }; 19 | 20 | const convertExecArgv = function(execArgv) { 21 | const standard = []; 22 | const promises = []; 23 | 24 | _.forEach(execArgv, arg => { 25 | if (arg.indexOf('--inspect') === -1) { 26 | standard.push(arg); 27 | } else { 28 | const argName = arg.split('=')[0]; 29 | promises.push( 30 | getPort().then(port => { 31 | return `${argName}=${port}`; 32 | }) 33 | ); 34 | } 35 | }); 36 | 37 | return Promise.all(promises).then(convertedArgs => { 38 | return standard.concat(convertedArgs); 39 | }); 40 | }; 41 | 42 | ChildPool.prototype.retain = function(processFile) { 43 | const _this = this; 44 | let child = _this.getFree(processFile).pop(); 45 | 46 | if (child) { 47 | 
_this.retained[child.pid] = child; 48 | return Promise.resolve(child); 49 | } 50 | 51 | return convertExecArgv(process.execArgv).then(execArgv => { 52 | child = fork(path.join(__dirname, './master.js'), { 53 | execArgv 54 | }); 55 | child.processFile = processFile; 56 | 57 | _this.retained[child.pid] = child; 58 | 59 | child.on('exit', _this.remove.bind(_this, child)); 60 | 61 | return initChild(child, child.processFile) 62 | .then(() => { 63 | return child; 64 | }) 65 | .catch(err => { 66 | this.remove(child); 67 | throw err; 68 | }); 69 | }); 70 | }; 71 | 72 | ChildPool.prototype.release = function(child) { 73 | delete this.retained[child.pid]; 74 | this.getFree(child.processFile).push(child); 75 | }; 76 | 77 | ChildPool.prototype.remove = function(child) { 78 | delete this.retained[child.pid]; 79 | 80 | const free = this.getFree(child.processFile); 81 | 82 | const childIndex = free.indexOf(child); 83 | if (childIndex > -1) { 84 | free.splice(childIndex, 1); 85 | } 86 | }; 87 | 88 | ChildPool.prototype.kill = function(child, signal) { 89 | this.remove(child); 90 | return killAsync(child, signal || 'SIGKILL', CHILD_KILL_TIMEOUT); 91 | }; 92 | 93 | ChildPool.prototype.clean = function() { 94 | const children = _.values(this.retained).concat(this.getAllFree()); 95 | this.retained = {}; 96 | this.free = {}; 97 | 98 | const allKillPromises = []; 99 | children.forEach(child => { 100 | allKillPromises.push(this.kill(child, 'SIGTERM')); 101 | }); 102 | return Promise.all(allKillPromises).then(() => {}); 103 | }; 104 | 105 | ChildPool.prototype.getFree = function(id) { 106 | return (this.free[id] = this.free[id] || []); 107 | }; 108 | 109 | ChildPool.prototype.getAllFree = function() { 110 | return _.flatten(_.values(this.free)); 111 | }; 112 | 113 | async function initChild(child, processFile) { 114 | const onComplete = new Promise((resolve, reject) => { 115 | const onMessageHandler = msg => { 116 | if (msg.cmd === 'init-complete') { 117 | resolve(); 118 | } else if (msg.cmd === 'error') { 119 | reject(msg.error); 120 | } 121 | child.off('message', onMessageHandler); 122 | }; 123 | child.on('message', onMessageHandler); 124 | }); 125 | 126 | await new Promise(resolve => 127 | child.send({ cmd: 'init', value: processFile }, resolve) 128 | ); 129 | await onComplete; 130 | } 131 | function ChildPoolSingleton(isSharedChildPool = false) { 132 | if (isSharedChildPool === false) { 133 | return new ChildPool(); 134 | } else if ( 135 | !(this instanceof ChildPool) && 136 | ChildPoolSingleton.instance === undefined 137 | ) { 138 | ChildPoolSingleton.instance = new ChildPool(); 139 | } 140 | 141 | return ChildPoolSingleton.instance; 142 | } 143 | 144 | module.exports = ChildPoolSingleton; 145 | -------------------------------------------------------------------------------- /lib/process/master.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Master of child processes. Handles communication between the 3 | * processor and the main process. 
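 *
 * A rough sketch of the message protocol, as inferred from the handlers
 * below (illustrative summary):
 *   parent -> child: { cmd: 'init' | 'start' | 'stop', ... }
 *   child -> parent: { cmd: 'init-complete' | 'error' | 'completed' |
 *                            'failed' | 'progress' | 'update' |
 *                            'discard' | 'log', ... }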
4 | *
5 | */
6 | 'use strict';
7 | 
8 | let status;
9 | let processor;
10 | let currentJobPromise;
11 | 
12 | const { promisify } = require('util');
13 | const { asyncSend } = require('./utils');
14 | 
15 | // https://stackoverflow.com/questions/18391212/is-it-not-possible-to-stringify-an-error-using-json-stringify
16 | if (!('toJSON' in Error.prototype)) {
17 |   Object.defineProperty(Error.prototype, 'toJSON', {
18 |     value: function() {
19 |       const alt = {};
20 | 
21 |       Object.getOwnPropertyNames(this).forEach(function(key) {
22 |         alt[key] = this[key];
23 |       }, this);
24 | 
25 |       return alt;
26 |     },
27 |     configurable: true,
28 |     writable: true
29 |   });
30 | }
31 | 
32 | async function waitForCurrentJobAndExit() {
33 |   status = 'TERMINATING';
34 |   try {
35 |     await currentJobPromise;
36 |   } finally {
37 |     // it's an exit handler
38 |     // eslint-disable-next-line no-process-exit
39 |     process.exit(process.exitCode || 0);
40 |   }
41 | }
42 | 
43 | process.on('SIGTERM', waitForCurrentJobAndExit);
44 | process.on('SIGINT', waitForCurrentJobAndExit);
45 | 
46 | process.on('message', msg => {
47 |   switch (msg.cmd) {
48 |     case 'init':
49 |       try {
50 |         processor = require(msg.value);
51 |       } catch (err) {
52 |         status = 'ERRORED';
53 |         err.message = `Error loading process file ${msg.value}. ${err.message}`;
54 |         return process.send({
55 |           cmd: 'error',
56 |           error: err
57 |         });
58 |       }
59 | 
60 |       if (processor.default) {
61 |         // support es2015 module.
62 |         processor = processor.default;
63 |       }
64 |       if (processor.length > 1) {
65 |         processor = promisify(processor);
66 |       } else {
67 |         const origProcessor = processor;
68 |         processor = function() {
69 |           try {
70 |             return Promise.resolve(origProcessor.apply(null, arguments));
71 |           } catch (err) {
72 |             return Promise.reject(err);
73 |           }
74 |         };
75 |       }
76 |       status = 'IDLE';
77 |       process.send({
78 |         cmd: 'init-complete'
79 |       });
80 |       break;
81 | 
82 |     case 'start':
83 |       if (status !== 'IDLE') {
84 |         return process.send({
85 |           cmd: 'error',
86 |           value: new Error('cannot start a child process that is not idling')
87 |         });
88 |       }
89 |       status = 'STARTED';
90 |       currentJobPromise = (async () => {
91 |         try {
92 |           const result = (await processor(wrapJob(msg.job))) || {};
93 |           await asyncSend(process, {
94 |             cmd: 'completed',
95 |             value: result
96 |           });
97 |         } catch (err) {
98 |           if (!err.message) {
99 |             // eslint-disable-next-line no-ex-assign
100 |             err = new Error(err);
101 |           }
102 |           await asyncSend(process, {
103 |             cmd: 'failed',
104 |             value: err
105 |           });
106 |         } finally {
107 |           status = 'IDLE';
108 |           currentJobPromise = null;
109 |         }
110 |       })();
111 |       break;
112 |     case 'stop':
113 |       break;
114 |   }
115 | });
116 | 
117 | /*eslint no-process-exit: "off"*/
118 | process.on('uncaughtException', err => {
119 |   if (!err.message) {
120 |     err = new Error(err);
121 |   }
122 |   process.send({
123 |     cmd: 'failed',
124 |     value: err
125 |   });
126 | 
127 |   // An uncaughtException leaves this process in a potentially undetermined state,
128 |   // so we must exit
129 |   process.exit(-1);
130 | });
131 | 
132 | /**
133 |  * Enhance the given job argument with some functions
134 |  * that can be called from the sandboxed job processor.
135 |  *
136 |  * Note, the `job` argument is a JSON deserialized message
137 |  * from the main node process to this forked child process;
138 |  * the functions on the original job object are not intact.
139 |  * The wrapped job adds back some of those original functions.
140 |  */
141 | function wrapJob(job) {
142 |   /*
143 |    * Emulate the real job `progress` function. 
144 |    * If no argument is given, it behaves as a sync getter.
145 |    * If an argument is given, it behaves as an async setter.
146 |    */
147 |   let progressValue = job.progress;
148 |   job.progress = function(progress) {
149 |     if (progress) {
150 |       // Locally store reference to new progress value
151 |       // so that we can return it from this process synchronously.
152 |       progressValue = progress;
153 |       // Send message to update job progress.
154 |       return asyncSend(process, {
155 |         cmd: 'progress',
156 |         value: progress
157 |       });
158 |     } else {
159 |       // Return the last known progress value.
160 |       return progressValue;
161 |     }
162 |   };
163 |   /*
164 |    * Emulate the real job `log` function.
165 |    */
166 |   job.log = function(row) {
167 |     return asyncSend(process, {
168 |       cmd: 'log',
169 |       value: row
170 |     });
171 |   };
172 |   /*
173 |    * Emulate the real job `update` function.
174 |    */
175 |   job.update = function(data) {
176 |     process.send({
177 |       cmd: 'update',
178 |       value: data
179 |     });
180 |     job.data = data;
181 |   };
182 |   /*
183 |    * Emulate the real job `discard` function.
184 |    */
185 |   job.discard = function() {
186 |     process.send({
187 |       cmd: 'discard'
188 |     });
189 |   };
190 |   return job;
191 | }
192 | --------------------------------------------------------------------------------
/lib/process/sandbox.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 | 
3 | const { asyncSend } = require('./utils');
4 | 
5 | module.exports = function(processFile, childPool) {
6 |   return function process(job) {
7 |     return childPool.retain(processFile).then(async child => {
8 |       let msgHandler;
9 |       let exitHandler;
10 | 
11 |       await asyncSend(child, {
12 |         cmd: 'start',
13 |         job: job
14 |       });
15 | 
16 |       const done = new Promise((resolve, reject) => {
17 |         msgHandler = function(msg) {
18 |           switch (msg.cmd) {
19 |             case 'completed':
20 |               resolve(msg.value);
21 |               break;
22 |             case 'failed':
23 |             case 'error': {
24 |               const err = new Error();
25 |               Object.assign(err, msg.value);
26 |               reject(err);
27 |               break;
28 |             }
29 |             case 'progress':
30 |               job.progress(msg.value);
31 |               break;
32 |             case 'update':
33 |               job.update(msg.value);
34 |               break;
35 |             case 'discard':
36 |               job.discard();
37 |               break;
38 |             case 'log':
39 |               job.log(msg.value);
40 |               break;
41 |           }
42 |         };
43 | 
44 |         exitHandler = (exitCode, signal) => {
45 |           reject(
46 |             new Error(
47 |               'Unexpected exit code: ' + exitCode + ' signal: ' + signal
48 |             )
49 |           );
50 |         };
51 | 
52 |         child.on('message', msgHandler);
53 |         child.on('exit', exitHandler);
54 |       });
55 | 
56 |       return done.finally(() => {
57 |         child.removeListener('message', msgHandler);
58 |         child.removeListener('exit', exitHandler);
59 | 
60 |         if (child.exitCode !== null || /SIG.*/.test(child.signalCode)) {
61 |           childPool.remove(child);
62 |         } else {
63 |           childPool.release(child);
64 |         }
65 |       });
66 |     });
67 |   };
68 | };
69 | --------------------------------------------------------------------------------
/lib/process/utils.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 | 
3 | function hasProcessExited(child) {
4 |   return !!(child.exitCode !== null || child.signalCode);
5 | }
6 | 
7 | function onExitOnce(child) {
8 |   return new Promise(resolve => {
9 |     child.once('exit', () => resolve());
10 |   });
11 | }
12 | 
13 | /**
14 |  * Sends a kill signal to a child, resolving when the child has exited,
15 |  * resorting to SIGKILL if the given timeout is reached
16 |  *
17 |  * @param {ChildProcess} child
18 |  * @param {'SIGTERM' | 'SIGKILL'} [signal] initial signal to use
19 |  * @param {number} [timeoutMs] time to wait until sending SIGKILL
20 |  *
21 |  * @returns {Promise} resolves once the child has exited
22 |  */
23 | function killAsync(child, signal, timeoutMs) {
24 |   if (hasProcessExited(child)) {
25 |     return Promise.resolve(child);
26 |   }
27 | 
28 |   // subscribe to the exit event before killing, so that it cannot be missed
29 |   let onExit = onExitOnce(child);
30 | 
31 |   child.kill(signal || 'SIGKILL');
32 | 
33 |   if (timeoutMs === 0 || isFinite(timeoutMs)) {
34 |     const timeout = setTimeout(() => {
35 |       if (!hasProcessExited(child)) {
36 |         child.kill('SIGKILL');
37 |       }
38 |     }, timeoutMs);
39 | 
40 |     onExit = onExit.then(() => {
41 |       clearTimeout(timeout);
42 |     });
43 |   }
44 |   return onExit;
45 | }
46 | 
47 | /*
48 |   asyncSend
49 |   Same as process.send, but waits until the send has completed.
50 |   The async version is used so that the termination handler
51 |   does not exit before the parent process has received
52 |   the messages it requires.
53 | */
54 | 
55 | const asyncSend = (proc, msg) => {
56 |   return new Promise((resolve, reject) => {
57 |     proc.send(msg, err => {
58 |       if (err) {
59 |         reject(err);
60 |       } else {
61 |         resolve();
62 |       }
63 |     });
64 |   });
65 | };
66 | 
67 | module.exports = {
68 |   killAsync,
69 |   asyncSend
70 | };
71 | --------------------------------------------------------------------------------
/lib/repeatable.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 | 
3 | const _ = require('lodash');
4 | const parser = require('cron-parser');
5 | const crypto = require('crypto');
6 | 
7 | const Job = require('./job');
8 | 
9 | module.exports = function(Queue) {
10 |   Queue.prototype.nextRepeatableJob = function(
11 |     name,
12 |     data,
13 |     opts,
14 |     skipCheckExists
15 |   ) {
16 |     const client = this.client;
17 |     const repeat = opts.repeat;
18 |     const prevMillis = opts.prevMillis || 0;
19 | 
20 |     if (!prevMillis && opts.jobId) {
21 |       repeat.jobId = opts.jobId;
22 |     }
23 | 
24 |     const currentCount = repeat.count ? repeat.count + 1 : 1;
25 | 
26 |     if (!_.isUndefined(repeat.limit) && currentCount > repeat.limit) {
27 |       return Promise.resolve();
28 |     }
29 | 
30 |     let now = Date.now();
31 | 
32 |     if (!_.isUndefined(repeat.endDate) && now > new Date(repeat.endDate)) {
33 |       return Promise.resolve();
34 |     }
35 | 
36 |     now = prevMillis < now ? now : prevMillis;
37 | 
38 |     const nextMillis = getNextMillis(now, repeat);
39 |     if (nextMillis) {
40 |       const jobId = repeat.jobId ? repeat.jobId + ':' : ':';
41 |       const repeatKey = getRepeatKey(name, repeat, jobId);
42 | 
43 |       const createNextJob = () => {
44 |         return client.zadd(this.keys.repeat, nextMillis, repeatKey).then(() => {
45 |           //
46 |           // Generate unique job id for this iteration.
47 |           //
48 |           const customId = getRepeatJobId(
49 |             name,
50 |             jobId,
51 |             nextMillis,
52 |             md5(repeatKey)
53 |           );
54 |           now = Date.now();
55 |           const delay = nextMillis - now;
56 | 
57 |           return Job.create(
58 |             this,
59 |             name,
60 |             data,
61 |             _.defaultsDeep(
62 |               {
63 |                 repeat: {
64 |                   count: currentCount,
65 |                   key: repeatKey
66 |                 },
67 |                 jobId: customId,
68 |                 delay: delay < 0 ? 
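                /* If the computed run time is already in the past
                   (nextMillis <= now), clamp the delay to zero so that
                   the job is executed immediately. */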
0 : delay, 69 | timestamp: now, 70 | prevMillis: nextMillis 71 | }, 72 | opts 73 | ) 74 | ); 75 | }); 76 | }; 77 | 78 | if (skipCheckExists) { 79 | return createNextJob(); 80 | } 81 | 82 | // Check that the repeatable job hasn't been removed 83 | // TODO: a lua script would be better here 84 | return client 85 | .zscore(this.keys.repeat, repeatKey) 86 | .then(repeatableExists => { 87 | // The job could have been deleted since this check 88 | if (repeatableExists) { 89 | return createNextJob(); 90 | } 91 | return Promise.resolve(); 92 | }); 93 | } else { 94 | return Promise.resolve(); 95 | } 96 | }; 97 | 98 | Queue.prototype.removeRepeatable = function(name, repeat) { 99 | if (typeof name !== 'string') { 100 | repeat = name; 101 | name = Job.DEFAULT_JOB_NAME; 102 | } 103 | 104 | return this.isReady().then(() => { 105 | const jobId = repeat.jobId ? repeat.jobId + ':' : ':'; 106 | const repeatJobKey = getRepeatKey(name, repeat, jobId); 107 | const repeatJobId = getRepeatJobId(name, jobId, '', md5(repeatJobKey)); 108 | const queueKey = this.keys['']; 109 | return this.client.removeRepeatable( 110 | this.keys.repeat, 111 | this.keys.delayed, 112 | repeatJobId, 113 | repeatJobKey, 114 | queueKey 115 | ); 116 | }); 117 | }; 118 | 119 | Queue.prototype.removeRepeatableByKey = function(repeatJobKey) { 120 | const repeatMeta = this._keyToData(repeatJobKey); 121 | const queueKey = this.keys['']; 122 | 123 | const jobId = repeatMeta.id ? repeatMeta.id + ':' : ':'; 124 | const repeatJobId = getRepeatJobId( 125 | repeatMeta.name || Job.DEFAULT_JOB_NAME, 126 | jobId, 127 | '', 128 | md5(repeatJobKey) 129 | ); 130 | 131 | return this.isReady().then(() => { 132 | return this.client.removeRepeatable( 133 | this.keys.repeat, 134 | this.keys.delayed, 135 | repeatJobId, 136 | repeatJobKey, 137 | queueKey 138 | ); 139 | }); 140 | }; 141 | 142 | Queue.prototype._keyToData = function(key) { 143 | const data = key.split(':'); 144 | 145 | return { 146 | key: key, 147 | name: data[0], 148 | id: data[1] || null, 149 | endDate: parseInt(data[2]) || null, 150 | tz: data[3] || null, 151 | cron: data[4] 152 | }; 153 | }; 154 | 155 | Queue.prototype.getRepeatableJobs = function(start, end, asc) { 156 | const key = this.keys.repeat; 157 | start = start || 0; 158 | end = end || -1; 159 | return (asc 160 | ? this.client.zrange(key, start, end, 'WITHSCORES') 161 | : this.client.zrevrange(key, start, end, 'WITHSCORES') 162 | ).then(result => { 163 | const jobs = []; 164 | for (let i = 0; i < result.length; i += 2) { 165 | const data = this._keyToData(result[i]); 166 | jobs.push({ 167 | key: data.key, 168 | name: data.name, 169 | id: data.id, 170 | endDate: data.endDate, 171 | tz: data.cron ? data.tz : null, 172 | cron: data.cron || null, 173 | every: !data.cron ? parseInt(data.tz) : null, 174 | next: parseInt(result[i + 1]) 175 | }); 176 | } 177 | return jobs; 178 | }); 179 | }; 180 | 181 | Queue.prototype.getRepeatableCount = function() { 182 | return this.client.zcard(this.toKey('repeat')); 183 | }; 184 | 185 | function getRepeatJobId(name, jobId, nextMillis, namespace) { 186 | return 'repeat:' + md5(name + jobId + namespace) + ':' + nextMillis; 187 | } 188 | 189 | function getRepeatKey(name, repeat, jobId) { 190 | const endDate = repeat.endDate 191 | ? new Date(repeat.endDate).getTime() + ':' 192 | : ':'; 193 | const tz = repeat.tz ? repeat.tz + ':' : ':'; 194 | const suffix = repeat.cron ? 
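    /* The assembled key has the shape
         <name>:<jobId>:<endDate>:<tz>:<cron>   for cron-based repeats, or
         <name>:<jobId>:<endDate>:<every>       for interval-based repeats,
       which is what _keyToData() above splits back apart (for interval
       repeats the `every` value lands in the tz slot).
       Illustrative example with hypothetical values:
         'email:123:1700000000000:Europe/Stockholm:0 * * * *'
    */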
tz + repeat.cron : String(repeat.every);
195 | 
196 |     return name + ':' + jobId + endDate + suffix;
197 |   }
198 | 
199 |   function getNextMillis(millis, opts) {
200 |     if (opts.cron && opts.every) {
201 |       throw new Error(
202 |         'Both .cron and .every options are defined for this repeatable job'
203 |       );
204 |     }
205 | 
206 |     if (opts.every) {
207 |       return Math.floor(millis / opts.every) * opts.every + opts.every;
208 |     }
209 | 
210 |     const currentDate =
211 |       opts.startDate && new Date(opts.startDate) > new Date(millis)
212 |         ? new Date(opts.startDate)
213 |         : new Date(millis);
214 |     const interval = parser.parseExpression(
215 |       opts.cron,
216 |       _.defaults(
217 |         {
218 |           currentDate
219 |         },
220 |         opts
221 |       )
222 |     );
223 | 
224 |     try {
225 |       return interval.next().getTime();
226 |     } catch (e) {
227 |       // The iterator is exhausted (e.g. the endDate was reached): return undefined.
228 |     }
229 |   }
230 | 
231 |   function md5(str) {
232 |     return crypto
233 |       .createHash('md5')
234 |       .update(str)
235 |       .digest('hex');
236 |   }
237 | };
238 | --------------------------------------------------------------------------------
/lib/timer-manager.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 | 
3 | const _ = require('lodash');
4 | const uuid = require('uuid');
5 | 
6 | /**
7 |   Timer Manager
8 | 
9 |   Keep track of timers to ensure that disconnect() is
10 |   only called (via close()) at a time when it's safe
11 |   to do so.
12 | 
13 |   Queues currently use two timers:
14 | 
15 |   - The first one is used for delayed jobs and is
16 |     preemptible, i.e. it is possible to close a queue
17 |     while delayed jobs are still pending (they will
18 |     be processed when the queue is resumed). This timer
19 |     is cleared by close() and is not managed here.
20 | 
21 |   - The second one is used to renew job locks in Redis
22 |     while jobs are being processed. These timers are short-lived,
23 |     and there can be more than one active at a
24 |     time.
25 | 
26 |   The lock timer executes Redis commands, which
27 |   means we can't close queues while it's active, i.e.
28 |   this won't work:
29 | 
30 |     queue.process(function (job, jobDone) {
31 |       handle(job);
32 |       queue.disconnect().then(jobDone);
33 |     })
34 | 
35 |   The disconnect() call closes the Redis connections; then, when
36 |   a queue tries to perform the scheduled Redis commands,
37 |   those commands block until a Redis connection becomes available...
38 | 
39 |   The solution is to close the Redis connections when there are no
40 |   active timers, i.e. when the queue is idle. This helper class keeps
41 |   track of the active timers and executes any queued listeners
42 |   whenever that count goes to zero.
43 | 
44 |   Since disconnect() simply can't work if there are active handles,
45 |   its close() wrapper postpones closing the Redis connections
46 |   until the next idle state. This means that close() can safely
47 |   be called from anywhere at any time, even from within a job
48 |   handler:
49 | 
50 |     queue.process(function (job, jobDone) {
51 |       handle(job);
52 |       queue.close();
53 |       jobDone();
54 |     })
55 | */
56 | 
57 | function TimerManager() {
58 |   this.idle = true;
59 |   this.listeners = [];
60 |   this.timers = {};
61 | }
62 | 
63 | /**
64 |   Create a new timer (setTimeout).
65 | 
66 |   Expired timers are automatically cleared.
67 | 
68 |   @param {String} name - Name of a timer key. Used only for debugging.
69 |   @param {Number} delay - Delay of the timeout in milliseconds.
70 |   @param {Function} fn - Function to execute after the delay.
71 |   @returns {String} id - The timer id. 
Used to clear the timer 72 | */ 73 | TimerManager.prototype.set = function(name, delay, fn) { 74 | const id = uuid.v4(); 75 | const timer = setTimeout( 76 | (timerInstance, timeoutId) => { 77 | timerInstance.clear(timeoutId); 78 | try { 79 | fn(); 80 | } catch (err) { 81 | console.error(err); 82 | } 83 | }, 84 | delay, 85 | this, 86 | id 87 | ); 88 | 89 | // XXX only the timer is used, but the 90 | // other fields are useful for 91 | // troubleshooting/debugging 92 | this.timers[id] = { 93 | name, 94 | timer 95 | }; 96 | 97 | this.idle = false; 98 | return id; 99 | }; 100 | 101 | /** 102 | Clear a timer (clearTimeout). 103 | 104 | Queued listeners are executed if there are no 105 | remaining timers 106 | */ 107 | TimerManager.prototype.clear = function(id) { 108 | const timers = this.timers; 109 | const timer = timers[id]; 110 | if (!timer) { 111 | return; 112 | } 113 | clearTimeout(timer.timer); 114 | delete timers[id]; 115 | if (!this.idle && _.size(timers) === 0) { 116 | while (this.listeners.length) { 117 | this.listeners.pop()(); 118 | } 119 | this.idle = true; 120 | } 121 | }; 122 | 123 | TimerManager.prototype.clearAll = function() { 124 | _.each(this.timers, (timer, id) => { 125 | this.clear(id); 126 | }); 127 | }; 128 | 129 | /** 130 | * Returns a promise that resolves when there are no active timers. 131 | */ 132 | TimerManager.prototype.whenIdle = function() { 133 | return new Promise(resolve => { 134 | if (this.idle) { 135 | resolve(); 136 | } else { 137 | this.listeners.unshift(resolve); 138 | } 139 | }); 140 | }; 141 | 142 | module.exports = TimerManager; 143 | -------------------------------------------------------------------------------- /lib/utils.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const errorObject = { value: null }; 3 | function tryCatch(fn, ctx, args) { 4 | try { 5 | return fn.apply(ctx, args); 6 | } catch (e) { 7 | errorObject.value = e; 8 | return errorObject; 9 | } 10 | } 11 | 12 | /** 13 | * Waits for a redis client to be ready. 14 | * @param {Redis} redis client 15 | */ 16 | function isRedisReady(client) { 17 | return new Promise((resolve, reject) => { 18 | if (client.status === 'ready') { 19 | resolve(); 20 | } else { 21 | function handleReady() { 22 | client.removeListener('end', handleEnd); 23 | client.removeListener('error', handleError); 24 | resolve(); 25 | } 26 | 27 | let lastError; 28 | function handleError(err) { 29 | lastError = err; 30 | } 31 | 32 | function handleEnd() { 33 | client.removeListener('ready', handleReady); 34 | client.removeListener('error', handleError); 35 | reject(lastError); 36 | } 37 | 38 | client.once('ready', handleReady); 39 | client.on('error', handleError); 40 | client.once('end', handleEnd); 41 | } 42 | }); 43 | } 44 | 45 | module.exports.errorObject = errorObject; 46 | module.exports.tryCatch = tryCatch; 47 | module.exports.isRedisReady = isRedisReady; 48 | module.exports.emitSafe = function(emitter, event, ...args) { 49 | try { 50 | return emitter.emit(event, ...args); 51 | } catch (err) { 52 | try { 53 | return emitter.emit('error', err); 54 | } catch (err) { 55 | // We give up if the error event also throws an exception. 
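      // Swallowing it here guarantees that emitSafe itself never throws
      // into its caller; logging below is the only remaining side effect.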
56 | console.error(err); 57 | } 58 | } 59 | }; 60 | 61 | module.exports.MetricsTime = { 62 | ONE_MINUTE: 1, 63 | FIVE_MINUTES: 5, 64 | FIFTEEN_MINUTES: 15, 65 | THIRTY_MINUTES: 30, 66 | ONE_HOUR: 60, 67 | ONE_WEEK: 60 * 24 * 7, 68 | TWO_WEEKS: 60 * 24 * 7 * 2, 69 | ONE_MONTH: 60 * 24 * 7 * 2 * 4 70 | }; 71 | -------------------------------------------------------------------------------- /lib/worker.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const utils = require('./utils'); 4 | const clientCommandMessageReg = /ERR unknown command ['`]\s*client\s*['`]/; 5 | 6 | module.exports = function(Queue) { 7 | // IDEA, How to store metadata associated to a worker. 8 | // create a key from the worker ID associated to the given name. 9 | // We keep a hash table bull:myqueue:workers where every worker is a hash key workername:workerId with json holding 10 | // metadata of the worker. The worker key gets expired every 30 seconds or so, we renew the worker metadata. 11 | // 12 | Queue.prototype.setWorkerName = function() { 13 | return utils 14 | .isRedisReady(this.client) 15 | .then(() => { 16 | const connectionName = this.clientName(); 17 | this.bclient.options.connectionName = connectionName; 18 | return this.bclient.client('setname', connectionName); 19 | }) 20 | .catch(err => { 21 | if (!clientCommandMessageReg.test(err.message)) throw err; 22 | }); 23 | }; 24 | 25 | Queue.prototype.getWorkers = function() { 26 | return utils 27 | .isRedisReady(this.client) 28 | .then(() => { 29 | return this.client.client('list'); 30 | }) 31 | .then(clients => { 32 | return this.parseClientList(clients); 33 | }) 34 | .catch(err => { 35 | if (!clientCommandMessageReg.test(err.message)) throw err; 36 | }); 37 | }; 38 | 39 | Queue.prototype.base64Name = function() { 40 | return Buffer.from(this.name).toString('base64'); 41 | }; 42 | 43 | Queue.prototype.clientName = function() { 44 | return this.keyPrefix + ':' + this.base64Name(); 45 | }; 46 | 47 | Queue.prototype.parseClientList = function(list) { 48 | const lines = list.split('\n'); 49 | const clients = []; 50 | 51 | lines.forEach(line => { 52 | const client = {}; 53 | const keyValues = line.split(' '); 54 | keyValues.forEach(keyValue => { 55 | const index = keyValue.indexOf('='); 56 | const key = keyValue.substring(0, index); 57 | const value = keyValue.substring(index + 1); 58 | client[key] = value; 59 | }); 60 | const name = client['name']; 61 | if (name && name.startsWith(this.clientName())) { 62 | client['name'] = this.name; 63 | clients.push(client); 64 | } 65 | }); 66 | return clients; 67 | }; 68 | }; 69 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bull", 3 | "version": "4.16.5", 4 | "description": "Job manager", 5 | "engines": { 6 | "node": ">=12" 7 | }, 8 | "main": "./index.js", 9 | "types": "./index.d.ts", 10 | "repository": { 11 | "type": "git", 12 | "url": "git://github.com/OptimalBits/bull.git" 13 | }, 14 | "keywords": [ 15 | "job", 16 | "queue", 17 | "task", 18 | "parallel" 19 | ], 20 | "author": "OptimalBits", 21 | "license": "MIT", 22 | "readmeFilename": "README.md", 23 | "dependencies": { 24 | "cron-parser": "^4.9.0", 25 | "get-port": "^5.1.1", 26 | "ioredis": "^5.3.2", 27 | "lodash": "^4.17.21", 28 | "msgpackr": "^1.11.2", 29 | "semver": "^7.5.2", 30 | "uuid": "^8.3.0" 31 | }, 32 | "devDependencies": { 33 | "@commitlint/cli": 
"^7.6.1", 34 | "@commitlint/config-conventional": "^7.6.0", 35 | "@semantic-release/changelog": "^5.0.1", 36 | "@semantic-release/commit-analyzer": "^8.0.1", 37 | "@semantic-release/git": "^9.0.0", 38 | "@semantic-release/github": "^7.2.1", 39 | "@semantic-release/npm": "^7.1.1", 40 | "@semantic-release/release-notes-generator": "^9.0.2", 41 | "chai": "^4.2.0", 42 | "coveralls": "^3.1.0", 43 | "delay": "^4.3.0", 44 | "eslint": "^7.4.0", 45 | "eslint-plugin-mocha": "^7.0.1", 46 | "eslint-plugin-node": "^8.0.1", 47 | "expect.js": "^0.3.1", 48 | "fast-glob": "^3.3.2", 49 | "husky": "^4.2.5", 50 | "istanbul": "^0.4.5", 51 | "lint-staged": "^8.2.1", 52 | "minimatch": "^7.4.4", 53 | "mocha": "^8.1.1", 54 | "mocha-lcov-reporter": "^1.3.0", 55 | "moment": "^2.24.0", 56 | "npm-run-all": "^4.1.5", 57 | "nyc": "^15.1.0", 58 | "p-reflect": "^1.0.0", 59 | "prettier": "^1.19.1", 60 | "rimraf": "^3.0.2", 61 | "semantic-release": "^17.4.2", 62 | "sinon": "^7.5.0" 63 | }, 64 | "scripts": { 65 | "clean:scripts": "rimraf rawScripts lib/scripts", 66 | "dc:up": "docker-compose -f docker-compose.yml up -d", 67 | "dc:down": "docker-compose -f docker-compose.yml down", 68 | "dry-run": "npm publish --dry-run", 69 | "generate:raw:scripts": "node generateRawScripts.js", 70 | "pretest": "npm-run-all clean:scripts generate:raw:scripts transform:commands lint", 71 | "lint": "eslint lib test *.js", 72 | "test": "NODE_ENV=test nyc mocha -- 'test/test_*' --recursive --exit", 73 | "test:nolint": "NODE_ENV=test mocha 'test/test_*' --recursive --exit", 74 | "coverage": "nyc report --reporter=text-lcov | coveralls", 75 | "postpublish": "git push && git push --tags", 76 | "prettier": "prettier --config package.json --write '**/*.js'", 77 | "precommit": "lint-staged", 78 | "build": "tsc", 79 | "transform:commands": "node ./commandTransform.js ./rawScripts ./lib/scripts" 80 | }, 81 | "lint-staged": { 82 | "*.{js,json}": [ 83 | "prettier --write", 84 | "git add" 85 | ] 86 | }, 87 | "prettier": { 88 | "singleQuote": true 89 | }, 90 | "husky": { 91 | "hooks": { 92 | "commit-msg": "commitlint -E HUSKY_GIT_PARAMS" 93 | } 94 | }, 95 | "release": { 96 | "branches": [ 97 | "develop" 98 | ], 99 | "plugins": [ 100 | "@semantic-release/commit-analyzer", 101 | "@semantic-release/release-notes-generator", 102 | [ 103 | "@semantic-release/changelog", 104 | { 105 | "changelogFile": "CHANGELOG.md" 106 | } 107 | ], 108 | [ 109 | "@semantic-release/npm", 110 | { 111 | "npmPublish": true 112 | } 113 | ], 114 | "@semantic-release/github", 115 | [ 116 | "@semantic-release/git", 117 | { 118 | "assets": [ 119 | "package.json", 120 | "yarn.lock", 121 | "CHANGELOG.md" 122 | ], 123 | "message": "chore(release): ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}" 124 | } 125 | ] 126 | ] 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /support/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OptimalBits/bull/489c6ab8466c1db122f92af3ddef12eacc54179e/support/logo.png -------------------------------------------------------------------------------- /support/logo.sketch: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OptimalBits/bull/489c6ab8466c1db122f92af3ddef12eacc54179e/support/logo.sketch -------------------------------------------------------------------------------- /support/logo.svg: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | logo 5 | Created with Sketch. 6 | 7 | 8 | 16 | 17 | -------------------------------------------------------------------------------- /support/logo@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OptimalBits/bull/489c6ab8466c1db122f92af3ddef12eacc54179e/support/logo@2x.png -------------------------------------------------------------------------------- /test/.eslintrc.yml: -------------------------------------------------------------------------------- 1 | extends: 2 | - ../.eslintrc.yml 3 | 4 | env: 5 | mocha: true 6 | 7 | rules: 8 | no-console: 2 9 | no-process-exit: 0 10 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('delay'); 8 | 9 | module.exports = function(/*job*/) { 10 | return delay(500).then(() => { 11 | return 42; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_bar.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('delay'); 8 | 9 | module.exports = function(/*job*/) { 10 | return delay(500).then(() => { 11 | return 'bar'; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_broken.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | throw new Error('Broken file processor'); 3 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_callback.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('delay'); 8 | 9 | module.exports = function(job, done) { 10 | delay(500).then(() => { 11 | done(null, 42); 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_callback_fail.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('delay'); 8 | 9 | module.exports = function(job, done) { 10 | delay(500).then(() => { 11 | done(new Error('Manually failed processor')); 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_crash.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 
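 * Deliberately crashes the sandboxed child: it throws unless
 * job.data.exitCode is a number, otherwise it exits with that code.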
3 | * 4 | */ 5 | 'use strict'; 6 | 7 | module.exports = function(job) { 8 | setTimeout(() => { 9 | if (typeof job.data.exitCode !== 'number') { 10 | throw new Error('boom!'); 11 | } 12 | process.exit(job.data.exitCode); 13 | }, 100); 14 | 15 | return new Promise(() => {}); 16 | }; 17 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_data.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('delay'); 8 | 9 | module.exports = function(job) { 10 | return delay(50).then(() => { 11 | job.update({ baz: 'qux' }); 12 | return job.data; 13 | }); 14 | }; 15 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_discard.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('delay'); 8 | 9 | module.exports = function(job) { 10 | return delay(500).then(() => { 11 | job.discard(); 12 | throw new Error('Manually discarded processor'); 13 | }); 14 | }; 15 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_exit.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('delay'); 8 | 9 | module.exports = function(/*job*/) { 10 | return delay(500).then(() => { 11 | delay(100).then(() => { 12 | process.exit(0); 13 | }); 14 | }); 15 | }; 16 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_fail.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('delay'); 8 | 9 | module.exports = function(/*job*/) { 10 | return delay(500).then(() => { 11 | throw new Error('Manually failed processor'); 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_foo.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('delay'); 8 | 9 | module.exports = function(/*job*/) { 10 | return delay(500).then(() => { 11 | return 'foo'; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_progress.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 
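 * Reports progress in steps (10, 27, 78, 100), logging each value,
 * and finally resolves with 37.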
3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('delay'); 8 | 9 | module.exports = function(job) { 10 | return delay(50) 11 | .then(() => { 12 | job.progress(10); 13 | job.log(job.progress()); 14 | return delay(100); 15 | }) 16 | .then(() => { 17 | job.progress(27); 18 | job.log(job.progress()); 19 | return delay(150); 20 | }) 21 | .then(() => { 22 | job.progress(78); 23 | job.log(job.progress()); 24 | return delay(100); 25 | }) 26 | .then(() => { 27 | job.progress(100); 28 | job.log(job.progress()); 29 | }) 30 | .then(() => { 31 | return 37; 32 | }); 33 | }; 34 | -------------------------------------------------------------------------------- /test/fixtures/fixture_processor_slow.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('delay'); 8 | 9 | module.exports = function(/*job*/) { 10 | return delay(1000).then(() => { 11 | return 42; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /test/test_child-pool.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const expect = require('chai').expect; 4 | const childPool = require('../lib/process/child-pool'); 5 | 6 | describe('Child pool', () => { 7 | let pool; 8 | 9 | beforeEach(() => { 10 | pool = new childPool(); 11 | }); 12 | 13 | afterEach(() => { 14 | pool.clean(); 15 | }); 16 | 17 | it('should return same child if free', () => { 18 | const processor = __dirname + '/fixtures/fixture_processor_bar.js'; 19 | let child; 20 | return pool 21 | .retain(processor) 22 | .then(_child => { 23 | expect(_child).to.be.ok; 24 | child = _child; 25 | pool.release(child); 26 | 27 | expect(pool.retained).to.be.empty; 28 | 29 | return pool.retain(processor); 30 | }) 31 | .then(newChild => { 32 | expect(child).to.be.eql(newChild); 33 | }); 34 | }); 35 | 36 | it('should return a new child if reused the last free one', () => { 37 | const processor = __dirname + '/fixtures/fixture_processor_bar.js'; 38 | let child; 39 | return pool 40 | .retain(processor) 41 | .then(_child => { 42 | expect(_child).to.be.ok; 43 | child = _child; 44 | pool.release(child); 45 | 46 | expect(pool.retained).to.be.empty; 47 | 48 | return pool.retain(processor); 49 | }) 50 | .then(newChild => { 51 | expect(child).to.be.eql(newChild); 52 | child = newChild; 53 | return pool.retain(processor); 54 | }) 55 | .then(newChild => { 56 | expect(child).not.to.be.eql(newChild); 57 | }); 58 | }); 59 | 60 | it('should return a new child if none free', () => { 61 | const processor = __dirname + '/fixtures/fixture_processor_bar.js'; 62 | let child; 63 | return pool 64 | .retain(processor) 65 | .then(_child => { 66 | expect(_child).to.be.ok; 67 | child = _child; 68 | 69 | expect(pool.retained).not.to.be.empty; 70 | 71 | return pool.retain(processor); 72 | }) 73 | .then(newChild => { 74 | expect(child).to.not.be.eql(newChild); 75 | }); 76 | }); 77 | 78 | it('should return a new child if killed', () => { 79 | const processor = __dirname + '/fixtures/fixture_processor_bar.js'; 80 | let child; 81 | return pool 82 | .retain(processor) 83 | .then(_child => { 84 | expect(_child).to.be.ok; 85 | child = _child; 86 | 87 | pool.kill(child); 88 | 89 | expect(pool.retained).to.be.empty; 90 | 91 | return pool.retain(processor); 92 | }) 93 | .then(newChild => { 94 | expect(child).to.not.be.eql(newChild); 95 | }); 96 | }); 97 | 98 | it('should 
return a new child if many retained and none free', () => {
99 |     const processor = __dirname + '/fixtures/fixture_processor_bar.js';
100 |     let children;
101 | 
102 |     return Promise.all([
103 |       pool.retain(processor),
104 |       pool.retain(processor),
105 |       pool.retain(processor),
106 |       pool.retain(processor),
107 |       pool.retain(processor),
108 |       pool.retain(processor)
109 |     ])
110 |       .then(_children => {
111 |         children = _children;
112 |         expect(children).to.have.length(6);
113 |         return pool.retain(processor);
114 |       })
115 |       .then(child => {
116 |         expect(children).not.to.include(child);
117 |       });
118 |   });
119 | 
120 |   it('should return an old child if many retained and one free', () => {
121 |     const processor = __dirname + '/fixtures/fixture_processor_bar.js';
122 |     let children;
123 | 
124 |     return Promise.all([
125 |       pool.retain(processor),
126 |       pool.retain(processor),
127 |       pool.retain(processor),
128 |       pool.retain(processor),
129 |       pool.retain(processor),
130 |       pool.retain(processor)
131 |     ])
132 |       .then(_children => {
133 |         children = _children;
134 |         expect(children).to.have.length(6);
135 | 
136 |         pool.release(_children[0]);
137 | 
138 |         return pool.retain(processor);
139 |       })
140 |       .then(child => {
141 |         expect(children).to.include(child);
142 |       });
143 |   });
144 | 
145 |   it('should return a shared child pool if isSharedChildPool is true', () => {
146 |     expect(childPool(true)).to.be.equal(new childPool(true));
147 |   });
148 | 
149 |   it('should return a different childPool if isSharedChildPool is false', () => {
150 |     expect(childPool()).to.not.be.equal(childPool());
151 |   });
152 | 
153 |   it('should not overwrite the childPool singleton when isSharedChildPool is false', () => {
154 |     const childPoolA = new childPool(true);
155 |     const childPoolB = new childPool(false);
156 |     const childPoolC = new childPool(true);
157 | 
158 |     expect(childPoolA).to.be.equal(childPoolC);
159 |     expect(childPoolB).to.not.be.equal(childPoolA);
160 |     expect(childPoolB).to.not.be.equal(childPoolC);
161 |   });
162 | });
163 | --------------------------------------------------------------------------------
/test/test_connection.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 | 
3 | const expect = require('expect.js');
4 | const utils = require('./utils');
5 | const { isRedisReady } = require('../lib/utils');
6 | const Redis = require('ioredis');
7 | const Queue = require('../lib/queue');
8 | 
9 | describe('connection', () => {
10 |   let client;
11 | 
12 |   beforeEach(() => {
13 |     client = new Redis();
14 |     return client.flushdb();
15 |   });
16 | 
17 |   afterEach(() => {
18 |     return client.quit();
19 |   });
20 | 
21 |   it('should fail if reusing connections with invalid options', () => {
22 |     const errMsg = Queue.ErrorMessages.MISSING_REDIS_OPTS;
23 | 
24 |     const client = new Redis();
25 | 
26 |     const opts = {
27 |       createClient(type) {
28 |         switch (type) {
29 |           case 'client':
30 |             return client;
31 |           default:
32 |             return new Redis();
33 |         }
34 |       }
35 |     };
36 |     const queue = utils.buildQueue('external connections', opts);
37 |     expect(queue).to.be.ok();
38 | 
39 |     try {
40 |       // eslint-disable-next-line no-unused-vars
41 |       const _ = queue.bclient;
42 |       throw new Error('should fail with invalid redis options');
43 |     } catch (err) {
44 |       expect(err.message).to.be.equal(errMsg);
45 |     }
46 | 
47 |     try {
48 |       // eslint-disable-next-line no-unused-vars
49 |       const _ = queue.eclient;
50 |       throw new Error('should fail with invalid redis options');
51 |     } catch (err) {
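      // The lazy eclient getter is expected to throw the same
      // missing-redis-options error as the bclient getter above.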
52 |       expect(err.message).to.be.equal(errMsg);
53 |     }
54 |   });
55 | 
56 |   it('should recover from a connection loss', async () => {
57 |     const queue = utils.buildQueue();
58 |     queue.on('error', () => {
59 |       // error event has to be observed or the exception will bubble up
60 |     });
61 | 
62 |     const done = new Promise((resolve, reject) => {
63 |       queue
64 |         .process((job, jobDone) => {
65 |           expect(job.data.foo).to.be.equal('bar');
66 |           jobDone();
67 |           queue.close();
68 |         })
69 |         .then(() => {
70 |           resolve();
71 |         })
72 |         .catch(reject);
73 |     });
74 | 
75 |     // Simulate disconnect
76 |     await queue.isReady();
77 |     await isRedisReady(queue.client);
78 |     queue.client.stream.end();
79 |     queue.client.emit('error', new Error('ECONNRESET'));
80 | 
81 |     // add something to the queue
82 |     await queue.add({ foo: 'bar' });
83 | 
84 |     await done;
85 |   });
86 | 
87 |   it('should handle jobs added before and after a redis disconnect', done => {
88 |     let count = 0;
89 |     const queue = utils.buildQueue();
90 | 
91 |     queue
92 |       .process((job, jobDone) => {
93 |         if (count === 0) {
94 |           expect(job.data.foo).to.be.equal('bar');
95 |           jobDone();
96 |         } else {
97 |           jobDone();
98 |           queue.close().then(done, done);
99 |         }
100 |         count++;
101 |       })
102 |       .catch(done);
103 | 
104 |     queue.on('completed', () => {
105 |       if (count === 1) {
106 |         queue.client.stream.end();
107 |         queue.client.emit('error', new Error('ECONNRESET'));
108 |       }
109 |     });
110 | 
111 |     queue.isReady().then(() => {
112 |       queue.add({ foo: 'bar' });
113 |     });
114 | 
115 |     queue.on('error', (/*err*/) => {
116 |       if (count === 1) {
117 |         queue.add({ foo: 'bar' });
118 |       }
119 |     });
120 |   });
121 | 
122 |   it('should not close external connections', () => {
123 |     const redisOpts = {
124 |       maxRetriesPerRequest: null,
125 |       enableReadyCheck: false
126 |     };
127 | 
128 |     const client = new Redis(redisOpts);
129 |     const subscriber = new Redis(redisOpts);
130 | 
131 |     const opts = {
132 |       createClient(type) {
133 |         switch (type) {
134 |           case 'client':
135 |             return client;
136 |           case 'subscriber':
137 |             return subscriber;
138 |           default:
139 |             return new Redis();
140 |         }
141 |       }
142 |     };
143 | 
144 |     const testQueue = utils.buildQueue('external connections', opts);
145 | 
146 |     return new Promise(resolve => {
147 |       if (subscriber.status === 'ready') {
148 |         return resolve();
149 |       }
150 |       subscriber.once('ready', resolve);
151 |     })
152 |       .then(() => {
153 |         return testQueue.isReady();
154 |       })
155 |       .then(() => {
156 |         return testQueue.add({ foo: 'bar' });
157 |       })
158 |       .then(() => {
159 |         expect(testQueue.client).to.be.eql(client);
160 |         expect(testQueue.eclient).to.be.eql(subscriber);
161 | 
162 |         return testQueue.close();
163 |       })
164 |       .then(() => {
165 |         expect(client.status).to.be.eql('ready');
166 |         expect(subscriber.status).to.be.eql('ready');
167 |         return Promise.all([client.quit(), subscriber.quit()]);
168 |       });
169 |   });
170 | 
171 |   it('should fail if redis connection fails and does not reconnect', async () => {
172 |     const queue = utils.buildQueue('connection fail 123', {
173 |       redis: {
174 |         host: 'localhost',
175 |         port: 1234,
176 |         retryStrategy: () => false
177 |       }
178 |     });
179 |     try {
180 |       await isRedisReady(queue.client);
181 |       throw new Error('Did not fail connecting to invalid redis instance');
182 |     } catch (err) {
183 |       expect(err.code).to.be.eql('ECONNREFUSED');
184 |       await queue.close();
185 |     }
186 |   });
187 | 
188 |   it('should close cleanly if redis connection fails', async () => {
189 |     const queue = new Queue('connection fail', {
190 |       redis: {
191 |         host: 'localhost', 
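        // port 1235 is assumed to have nothing listening on it, so the
        // connection fails and retryStrategy() suppresses any reconnects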
192 | port: 1235, 193 | retryStrategy: () => false 194 | } 195 | }); 196 | 197 | await queue.close(); 198 | }); 199 | 200 | it('should accept ioredis options on the query string', async () => { 201 | const queue = new Queue( 202 | 'connection query string', 203 | 'redis://localhost?tls=RedisCloudFixed' 204 | ); 205 | 206 | expect(queue.clients[0].options).to.have.property('tls'); 207 | expect(queue.clients[0].options.tls).to.have.property('ca'); 208 | 209 | await queue.close(); 210 | }); 211 | }); 212 | -------------------------------------------------------------------------------- /test/test_events.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const utils = require('./utils'); 4 | const redis = require('ioredis'); 5 | const delay = require('delay'); 6 | const Queue = require('../'); 7 | const Job = require('../lib/job'); 8 | const expect = require('chai').expect; 9 | 10 | const _ = require('lodash'); 11 | 12 | describe('events', () => { 13 | let queue; 14 | 15 | beforeEach(() => { 16 | const client = new redis(); 17 | return client.flushdb().then(() => { 18 | queue = utils.buildQueue('test events', { 19 | settings: { 20 | stalledInterval: 100, 21 | lockDuration: 50 22 | } 23 | }); 24 | return queue; 25 | }); 26 | }); 27 | 28 | afterEach(() => { 29 | return queue.close(); 30 | }); 31 | 32 | it('should emit waiting when a job has been added', done => { 33 | queue.on('waiting', () => { 34 | done(); 35 | }); 36 | 37 | queue.on('registered:waiting', () => { 38 | queue.add({ foo: 'bar' }); 39 | }); 40 | }); 41 | 42 | it('should emit global:waiting when a job has been added', done => { 43 | queue.on('global:waiting', () => { 44 | done(); 45 | }); 46 | 47 | queue.on('registered:global:waiting', () => { 48 | queue.add({ foo: 'bar' }); 49 | }); 50 | }); 51 | 52 | it('should emit stalled when a job has been stalled', done => { 53 | queue.on('completed', (/*job*/) => { 54 | done(new Error('should not have completed')); 55 | }); 56 | 57 | queue.process((/*job*/) => { 58 | return delay(250); 59 | }); 60 | 61 | queue.add({ foo: 'bar' }); 62 | 63 | const queue2 = utils.buildQueue('test events', { 64 | settings: { 65 | stalledInterval: 100 66 | } 67 | }); 68 | 69 | queue2.on('stalled', (/*job*/) => { 70 | queue2.close().then(done); 71 | }); 72 | 73 | queue.on('active', () => { 74 | queue2.startMoveUnlockedJobsToWait(); 75 | queue.close(true); 76 | }); 77 | }); 78 | 79 | it('should emit global:stalled when a job has been stalled', done => { 80 | queue.on('completed', (/*job*/) => { 81 | done(new Error('should not have completed')); 82 | }); 83 | 84 | queue.process((/*job*/) => { 85 | return delay(250); 86 | }); 87 | 88 | queue.add({ foo: 'bar' }); 89 | 90 | const queue2 = utils.buildQueue('test events', { 91 | settings: { 92 | stalledInterval: 100 93 | } 94 | }); 95 | 96 | queue2.on('global:stalled', (/*job*/) => { 97 | queue2.close().then(done); 98 | }); 99 | 100 | queue.on('active', () => { 101 | queue2.startMoveUnlockedJobsToWait(); 102 | queue.close(true); 103 | }); 104 | }); 105 | 106 | it('should emit global:failed when a job has stalled more than allowable times', done => { 107 | queue.on('completed', (/*job*/) => { 108 | done(new Error('should not have completed')); 109 | }); 110 | 111 | queue.process((/*job*/) => { 112 | return delay(250); 113 | }); 114 | 115 | queue.add({ foo: 'bar' }); 116 | 117 | const queue2 = utils.buildQueue('test events', { 118 | settings: { 119 | stalledInterval: 100, 120 | maxStalledCount: 0 121 | } 122 | 
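    // With maxStalledCount set to 0, the very first stall is enough to
    // fail the job instead of re-queueing it.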
}); 123 | 124 | queue2.on('global:failed', (jobId, error) => { 125 | expect(error).equal('job stalled more than maxStalledCount'); 126 | expect(jobId).not.to.be.undefined; 127 | queue2.close().then(done); 128 | }); 129 | 130 | queue.on('active', () => { 131 | queue2.startMoveUnlockedJobsToWait(); 132 | queue.close(true); 133 | }); 134 | }); 135 | 136 | it('emits waiting event when a job is added', done => { 137 | const queue = utils.buildQueue(); 138 | 139 | queue.once('waiting', jobId => { 140 | Job.fromId(queue, jobId).then(job => { 141 | expect(job.data.foo).to.be.equal('bar'); 142 | queue.close().then(done); 143 | }); 144 | }); 145 | queue.once('registered:waiting', () => { 146 | queue.add({ foo: 'bar' }); 147 | }); 148 | }); 149 | 150 | it('emits drained and global:drained event when all jobs have been processed', done => { 151 | const queue = utils.buildQueue('event drained', { 152 | settings: { drainDelay: 1 } 153 | }); 154 | 155 | queue.process((job, done) => { 156 | done(); 157 | }); 158 | 159 | const drainedCallback = _.after(2, () => { 160 | queue.getJobCountByTypes('completed').then(completed => { 161 | expect(completed).to.be.equal(2); 162 | return queue.close().then(done); 163 | }); 164 | }); 165 | 166 | queue.once('drained', drainedCallback); 167 | queue.once('global:drained', drainedCallback); 168 | 169 | queue.add({ foo: 'bar' }); 170 | queue.add({ foo: 'baz' }); 171 | }); 172 | 173 | it('should emit an event when a new message is added to the queue', done => { 174 | const client = new redis(6379, '127.0.0.1', {}); 175 | client.select(0); 176 | const queue = new Queue('test pub sub'); 177 | queue.on('waiting', jobId => { 178 | expect(parseInt(jobId, 10)).to.be.eql(1); 179 | client.quit(); 180 | done(); 181 | }); 182 | queue.once('registered:waiting', () => { 183 | queue.add({ test: 'stuff' }); 184 | }); 185 | }); 186 | 187 | it('should emit an event when a new priority message is added to the queue', done => { 188 | const client = new redis(6379, '127.0.0.1', {}); 189 | client.select(0); 190 | const queue = new Queue('test pub sub'); 191 | queue.on('waiting', jobId => { 192 | expect(parseInt(jobId, 10)).to.be.eql(1); 193 | client.quit(); 194 | done(); 195 | }); 196 | queue.once('registered:waiting', () => { 197 | queue.add({ test: 'stuff' }, { priority: 1 }); 198 | }); 199 | }); 200 | 201 | it('should emit an event when a job becomes active', done => { 202 | const queue = utils.buildQueue(); 203 | queue.process((job, jobDone) => { 204 | jobDone(); 205 | }); 206 | queue.add({}); 207 | queue.once('active', () => { 208 | queue.once('completed', () => { 209 | queue.close().then(done); 210 | }); 211 | }); 212 | }); 213 | 214 | it('should emit an event if a job fails to extend lock', done => { 215 | const LOCK_RENEW_TIME = 1; 216 | queue = utils.buildQueue('queue fails to extend lock', { 217 | settings: { 218 | lockRenewTime: LOCK_RENEW_TIME 219 | } 220 | }); 221 | queue.once('lock-extension-failed', (lockingFailedJob, error) => { 222 | expect(lockingFailedJob.data.foo).to.be.equal('lockingFailedJobFoo'); 223 | expect(error.message).to.be.equal('Connection is closed.'); 224 | queue.close().then(done); 225 | }); 226 | queue.isReady().then(() => { 227 | queue.process(() => { 228 | utils.simulateDisconnect(queue); 229 | return delay(LOCK_RENEW_TIME + 0.25); 230 | }); 231 | queue.add({ foo: 'lockingFailedJobFoo' }); 232 | }); 233 | }); 234 | 235 | it('should listen to global events', done => { 236 | const queue1 = utils.buildQueue(); 237 | const queue2 = utils.buildQueue(); 238 | 
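    // queue1 does the actual processing; queue2 only subscribes to the
    // corresponding global events and asserts their ordering.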
queue1.process((job, jobDone) => { 239 | jobDone(); 240 | }); 241 | 242 | let state; 243 | queue2.on('global:waiting', () => { 244 | expect(state).to.be.undefined; 245 | state = 'waiting'; 246 | }); 247 | queue2.once('registered:global:waiting', () => { 248 | queue2.once('global:active', () => { 249 | expect(state).to.be.equal('waiting'); 250 | state = 'active'; 251 | }); 252 | }); 253 | queue2.once('registered:global:active', () => { 254 | queue2.once('global:completed', () => { 255 | expect(state).to.be.equal('active'); 256 | queue1.close().then(() => { 257 | queue2.close().then(done); 258 | }); 259 | }); 260 | }); 261 | 262 | queue2.once('registered:global:completed', () => { 263 | queue1.add({}); 264 | }); 265 | }); 266 | }); 267 | -------------------------------------------------------------------------------- /test/test_getters.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const redis = require('ioredis'); 4 | 5 | const utils = require('./utils'); 6 | const expect = require('chai').expect; 7 | 8 | const _ = require('lodash'); 9 | 10 | describe('Jobs getters', function() { 11 | this.timeout(12000); 12 | let queue; 13 | let client; 14 | 15 | beforeEach(() => { 16 | client = new redis(); 17 | return client.flushdb(); 18 | }); 19 | 20 | beforeEach(() => { 21 | queue = utils.buildQueue(); 22 | }); 23 | 24 | afterEach(function() { 25 | this.timeout( 26 | queue.settings.stalledInterval * (1 + queue.settings.maxStalledCount) 27 | ); 28 | return queue 29 | .clean(1000) 30 | .then(() => { 31 | return queue.close(); 32 | }) 33 | .then(() => { 34 | return client.quit(); 35 | }); 36 | }); 37 | 38 | it('should get waiting jobs', () => { 39 | return Promise.all([ 40 | queue.add({ foo: 'bar' }), 41 | queue.add({ baz: 'qux' }) 42 | ]).then(() => { 43 | return queue.getWaiting().then(jobs => { 44 | expect(jobs).to.be.a('array'); 45 | expect(jobs.length).to.be.equal(2); 46 | expect(jobs[0].data.foo).to.be.equal('bar'); 47 | expect(jobs[1].data.baz).to.be.equal('qux'); 48 | }); 49 | }); 50 | }); 51 | 52 | it('should get paused jobs', () => { 53 | return queue.pause().then(() => { 54 | return Promise.all([ 55 | queue.add({ foo: 'bar' }), 56 | queue.add({ baz: 'qux' }) 57 | ]).then(() => { 58 | return queue.getWaiting().then(jobs => { 59 | expect(jobs).to.be.a('array'); 60 | expect(jobs.length).to.be.equal(2); 61 | expect(jobs[0].data.foo).to.be.equal('bar'); 62 | expect(jobs[1].data.baz).to.be.equal('qux'); 63 | }); 64 | }); 65 | }); 66 | }); 67 | 68 | it('should get active jobs', done => { 69 | queue.process((job, jobDone) => { 70 | queue.getActive().then(jobs => { 71 | expect(jobs).to.be.a('array'); 72 | expect(jobs.length).to.be.equal(1); 73 | expect(jobs[0].data.foo).to.be.equal('bar'); 74 | done(); 75 | }); 76 | jobDone(); 77 | }); 78 | 79 | queue.add({ foo: 'bar' }); 80 | }); 81 | 82 | it('should get a specific job', done => { 83 | const data = { foo: 'sup!' }; 84 | queue.add(data).then(job => { 85 | queue.getJob(job.id).then(returnedJob => { 86 | expect(returnedJob.data).to.eql(data); 87 | expect(returnedJob.id).to.be.eql(job.id); 88 | done(); 89 | }); 90 | }); 91 | }); 92 | 93 | it('should get completed jobs', done => { 94 | let counter = 2; 95 | 96 | queue.process((job, jobDone) => { 97 | jobDone(); 98 | }); 99 | 100 | queue.on('completed', () => { 101 | counter--; 102 | 103 | if (counter === 0) { 104 | queue.getCompleted().then(jobs => { 105 | expect(jobs).to.be.a('array'); 106 | // We need a "empty completed" kind of function. 
107 | //expect(jobs.length).to.be.equal(2); 108 | done(); 109 | }); 110 | } 111 | }); 112 | 113 | queue.add({ foo: 'bar' }); 114 | queue.add({ baz: 'qux' }); 115 | }); 116 | 117 | it('should get completed jobs excluding their data', done => { 118 | let counter = 2; 119 | const timestamp = Date.now(); 120 | 121 | queue.process((job, jobDone) => { 122 | jobDone(); 123 | }); 124 | 125 | queue.on('completed', () => { 126 | counter--; 127 | 128 | if (counter === 0) { 129 | queue.getCompleted(0, -1, { excludeData: true }).then(jobs => { 130 | expect(jobs).to.be.a('array'); 131 | expect(jobs).to.have.length(2); 132 | 133 | for (let i = 0; i < jobs.length; i++) { 134 | expect(jobs[i]).to.have.property('data'); 135 | expect(jobs[i].data).to.be.empty; 136 | 137 | expect(jobs[i]).to.have.property('timestamp'); 138 | expect(jobs[i].timestamp).to.be.gte(timestamp); 139 | expect(jobs[i]).to.have.property('processedOn'); 140 | expect(jobs[i].processedOn).to.be.gte(timestamp); 141 | } 142 | 143 | done(); 144 | }); 145 | } 146 | }); 147 | 148 | queue.add({ foo: 'bar' }); 149 | queue.add({ baz: 'qux' }); 150 | }); 151 | 152 | it('should get failed jobs', done => { 153 | let counter = 2; 154 | 155 | queue.process((job, jobDone) => { 156 | jobDone(new Error('Forced error')); 157 | }); 158 | 159 | queue.on('failed', () => { 160 | counter--; 161 | 162 | if (counter === 0) { 163 | queue.getFailed().then(jobs => { 164 | expect(jobs).to.be.a('array'); 165 | done(); 166 | }); 167 | } 168 | }); 169 | 170 | queue.add({ foo: 'bar' }); 171 | queue.add({ baz: 'qux' }); 172 | }); 173 | 174 | describe('.getCountsPerPriority', () => { 175 | it('returns job counts per priority', done => { 176 | const jobsArray = Array.from(Array(42).keys()).map(index => ({ 177 | name: 'test', 178 | data: {}, 179 | opts: { 180 | priority: index % 4 181 | } 182 | })); 183 | queue.addBulk(jobsArray).then(() => { 184 | queue.getCountsPerPriority([0, 1, 2, 3]).then(counts => { 185 | expect(counts).to.be.eql({ 186 | '0': 11, 187 | '1': 11, 188 | '2': 10, 189 | '3': 10 190 | }); 191 | done(); 192 | }); 193 | }); 194 | }); 195 | 196 | describe('when queue is paused', () => { 197 | it('returns job counts per priority', async () => { 198 | await queue.pause(); 199 | const jobsArray = Array.from(Array(42).keys()).map(index => ({ 200 | name: 'test', 201 | data: {}, 202 | opts: { 203 | priority: index % 4 204 | } 205 | })); 206 | await queue.addBulk(jobsArray); 207 | const counts = await queue.getCountsPerPriority([0, 1, 2, 3]); 208 | 209 | expect(counts).to.be.eql({ 210 | '0': 11, 211 | '1': 11, 212 | '2': 10, 213 | '3': 10 214 | }); 215 | }); 216 | }); 217 | }); 218 | 219 | it('fails jobs that exceed their specified timeout', done => { 220 | queue.process((job, jobDone) => { 221 | setTimeout(jobDone, 200); 222 | }); 223 | 224 | queue.on('failed', (job, error) => { 225 | expect(error.message).to.contain('timed out'); 226 | done(); 227 | }); 228 | 229 | queue.on('completed', () => { 230 | const error = new Error('The job should have timed out'); 231 | done(error); 232 | }); 233 | 234 | queue.add( 235 | { some: 'data' }, 236 | { 237 | timeout: 100 238 | } 239 | ); 240 | }); 241 | 242 | it('should return all completed jobs when not setting start/end', done => { 243 | queue.process((job, completed) => { 244 | completed(); 245 | }); 246 | 247 | queue.on( 248 | 'completed', 249 | _.after(3, () => { 250 | queue 251 | .getJobs('completed') 252 | .then(jobs => { 253 | expect(jobs) 254 | .to.be.an('array') 255 | .that.have.length(3); 256 | 
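              // Every finished job carries its bookkeeping timestamps: processedOn is
              // set when a worker picks the job up, finishedOn when it completes or fails.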
expect(jobs[0]).to.have.property('finishedOn'); 257 | expect(jobs[1]).to.have.property('finishedOn'); 258 | expect(jobs[2]).to.have.property('finishedOn'); 259 | 260 | expect(jobs[0]).to.have.property('processedOn'); 261 | expect(jobs[1]).to.have.property('processedOn'); 262 | expect(jobs[2]).to.have.property('processedOn'); 263 | done(); 264 | }) 265 | .catch(done); 266 | }) 267 | ); 268 | 269 | queue.add({ foo: 1 }); 270 | queue.add({ foo: 2 }); 271 | queue.add({ foo: 3 }); 272 | }); 273 | 274 | it('should return all failed jobs when not setting start/end', done => { 275 | queue.process((job, completed) => { 276 | completed(new Error('error')); 277 | }); 278 | 279 | queue.on( 280 | 'failed', 281 | _.after(3, () => { 282 | queue 283 | .getJobs('failed') 284 | .then(jobs => { 285 | expect(jobs) 286 | .to.be.an('array') 287 | .that.has.length(3); 288 | expect(jobs[0]).to.have.property('finishedOn'); 289 | expect(jobs[1]).to.have.property('finishedOn'); 290 | expect(jobs[2]).to.have.property('finishedOn'); 291 | 292 | expect(jobs[0]).to.have.property('processedOn'); 293 | expect(jobs[1]).to.have.property('processedOn'); 294 | expect(jobs[2]).to.have.property('processedOn'); 295 | done(); 296 | }) 297 | .catch(done); 298 | }) 299 | ); 300 | 301 | queue.add({ foo: 1 }); 302 | queue.add({ foo: 2 }); 303 | queue.add({ foo: 3 }); 304 | }); 305 | 306 | it('should return subset of jobs when setting positive range', done => { 307 | queue.process((job, completed) => { 308 | completed(); 309 | }); 310 | 311 | queue.on( 312 | 'completed', 313 | _.after(3, () => { 314 | queue 315 | .getJobs('completed', 1, 2, true) 316 | .then(jobs => { 317 | expect(jobs) 318 | .to.be.an('array') 319 | .that.has.length(2); 320 | expect(jobs[0].data.foo).to.be.eql(2); 321 | expect(jobs[1].data.foo).to.be.eql(3); 322 | expect(jobs[0]).to.have.property('finishedOn'); 323 | expect(jobs[1]).to.have.property('finishedOn'); 324 | expect(jobs[0]).to.have.property('processedOn'); 325 | expect(jobs[1]).to.have.property('processedOn'); 326 | done(); 327 | }) 328 | .catch(done); 329 | }) 330 | ); 331 | 332 | queue 333 | .add({ foo: 1 }) 334 | .then(() => { 335 | return queue.add({ foo: 2 }); 336 | }) 337 | .then(() => { 338 | return queue.add({ foo: 3 }); 339 | }); 340 | }); 341 | 342 | it('should return subset of jobs when setting a negative range', done => { 343 | queue.process((job, completed) => { 344 | completed(); 345 | }); 346 | 347 | queue.on( 348 | 'completed', 349 | _.after(3, () => { 350 | queue 351 | .getJobs('completed', -3, -1, true) 352 | .then(jobs => { 353 | expect(jobs) 354 | .to.be.an('array') 355 | .that.has.length(3); 356 | expect(jobs[0].data.foo).to.be.equal(1); 357 | expect(jobs[1].data.foo).to.be.eql(2); 358 | expect(jobs[2].data.foo).to.be.eql(3); 359 | done(); 360 | }) 361 | .catch(done); 362 | }) 363 | ); 364 | 365 | queue.add({ foo: 1 }); 366 | queue.add({ foo: 2 }); 367 | queue.add({ foo: 3 }); 368 | }); 369 | 370 | it('should return subset of jobs when range overflows', done => { 371 | queue.process((job, completed) => { 372 | completed(); 373 | }); 374 | 375 | queue.on( 376 | 'completed', 377 | _.after(3, () => { 378 | queue 379 | .getJobs('completed', -300, 99999, true) 380 | .then(jobs => { 381 | expect(jobs) 382 | .to.be.an('array') 383 | .that.has.length(3); 384 | expect(jobs[0].data.foo).to.be.equal(1); 385 | expect(jobs[1].data.foo).to.be.eql(2); 386 | expect(jobs[2].data.foo).to.be.eql(3); 387 | done(); 388 | }) 389 | .catch(done); 390 | }) 391 | ); 392 | 393 | queue.add({ foo: 1 }); 394 | 
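    // The overflowing -300..99999 range above is effectively clamped (Redis range
    // commands tolerate out-of-range indices), so the three jobs added around this
    // point are exactly what the assertion expects back.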
queue.add({ foo: 2 }); 395 | queue.add({ foo: 3 }); 396 | }); 397 | 398 | it('should return jobs for multiple types', done => { 399 | let counter = 0; 400 | queue.process((/*job*/) => { 401 | counter++; 402 | if (counter == 2) { 403 | return queue.pause(); 404 | } 405 | }); 406 | 407 | queue.on( 408 | 'completed', 409 | _.after(2, () => { 410 | queue 411 | .getJobs(['completed', 'paused']) 412 | .then(jobs => { 413 | expect(jobs).to.be.an('array'); 414 | expect(jobs).to.have.length(3); 415 | done(); 416 | }) 417 | .catch(done); 418 | }) 419 | ); 420 | 421 | queue.add({ foo: 1 }); 422 | queue.add({ foo: 2 }); 423 | queue.add({ foo: 3 }); 424 | }); 425 | }); 426 | -------------------------------------------------------------------------------- /test/test_metrics.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const expect = require('chai').expect; 4 | const utils = require('./utils'); 5 | const sinon = require('sinon'); 6 | const redis = require('ioredis'); 7 | 8 | const ONE_SECOND = 1000; 9 | const ONE_MINUTE = 60 * ONE_SECOND; 10 | const ONE_HOUR = 60 * ONE_MINUTE; 11 | 12 | const { MetricsTime } = require('../lib/utils'); 13 | 14 | describe('metrics', () => { 15 | beforeEach(async function() { 16 | this.clock = sinon.useFakeTimers(); 17 | const client = new redis(); 18 | await client.flushdb(); 19 | return client.quit(); 20 | }); 21 | 22 | it('should gather metrics for completed jobs', async function() { 23 | const date = new Date('2017-02-07 9:24:00'); 24 | this.clock.setSystemTime(date); 25 | this.clock.tick(0); 26 | 27 | const timmings = [ 28 | 0, 29 | 0, // For the fixtures to work we need to use 0 as first timing 30 | ONE_MINUTE / 2, 31 | ONE_MINUTE / 2, 32 | 0, 33 | 0, 34 | ONE_MINUTE, 35 | ONE_MINUTE, 36 | ONE_MINUTE * 3, 37 | ONE_SECOND * 70, 38 | ONE_SECOND * 50, 39 | ONE_HOUR, 40 | ONE_MINUTE 41 | ]; 42 | 43 | const fixture = [ 44 | '1', 45 | '0', 46 | '0', 47 | '0', 48 | '0', 49 | '0', 50 | '0', 51 | '0', 52 | '0', 53 | '0', 54 | '0', 55 | '0', 56 | '0', 57 | '0', 58 | '0', 59 | '0', 60 | '0', 61 | '0', 62 | '0', 63 | '0', 64 | '0', 65 | '0', 66 | '0', 67 | '0', 68 | '0', 69 | '0', 70 | '0', 71 | '0', 72 | '0', 73 | '0', 74 | '0', 75 | '0', 76 | '0', 77 | '0', 78 | '0', 79 | '0', 80 | '0', 81 | '0', 82 | '0', 83 | '0', 84 | '0', 85 | '0', 86 | '0', 87 | '0', 88 | '0', 89 | '0', 90 | '0', 91 | '0', 92 | '0', 93 | '0', 94 | '0', 95 | '0', 96 | '0', 97 | '0', 98 | '0', 99 | '0', 100 | '0', 101 | '0', 102 | '0', 103 | '0', 104 | '1', 105 | '1', 106 | '1', 107 | '0', 108 | '0', 109 | '1', 110 | '1', 111 | '3', 112 | '3' 113 | ]; 114 | 115 | const numJobs = timmings.length; 116 | 117 | const queue = utils.buildQueue('metrics', { 118 | metrics: { 119 | maxDataPoints: MetricsTime.ONE_HOUR * 2 120 | } 121 | }); 122 | 123 | queue.process(job => { 124 | this.clock.tick(timmings[job.data.index]); 125 | }); 126 | 127 | let processed = 0; 128 | const completing = new Promise(resolve => { 129 | queue.on('completed', async () => { 130 | processed++; 131 | if (processed === numJobs) { 132 | resolve(); 133 | } 134 | }); 135 | }); 136 | 137 | for (let i = 0; i < numJobs; i++) { 138 | await queue.add({ index: i }); 139 | } 140 | 141 | await completing; 142 | 143 | const metrics = await queue.getMetrics('completed'); 144 | 145 | const numPoints = Math.floor( 146 | timmings.reduce((sum, timing) => sum + timing, 0) / ONE_MINUTE 147 | ); 148 | 149 | expect(metrics.meta.count).to.be.equal(numJobs); 150 | 
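    // Metrics are bucketed into one-minute data points, so the expected number of
    // points (numPoints above) is simply the total simulated time divided by one minute.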
expect(metrics.data.length).to.be.equal(numPoints); 151 | expect(metrics.count).to.be.equal(metrics.data.length); 152 | expect(processed).to.be.equal(numJobs); 153 | expect(metrics.data).to.be.deep.equal(fixture); 154 | 155 | this.clock.restore(); 156 | await queue.close(); 157 | }); 158 | 159 | it('should only keep metrics for "maxDataPoints"', async function() { 160 | const date = new Date('2017-02-07 9:24:00'); 161 | this.clock.setSystemTime(date); 162 | this.clock.tick(0); 163 | 164 | const timmings = [ 165 | 0, // For the fixtures to work we need to use 0 as first timing 166 | 0, 167 | ONE_MINUTE / 2, 168 | ONE_MINUTE / 2, 169 | 0, 170 | 0, 171 | ONE_MINUTE, 172 | ONE_MINUTE, 173 | ONE_MINUTE * 3, 174 | ONE_HOUR, 175 | 0, 176 | 0, 177 | ONE_MINUTE, 178 | ONE_MINUTE 179 | ]; 180 | 181 | const fixture = [ 182 | '1', 183 | '3', 184 | '0', 185 | '0', 186 | '0', 187 | '0', 188 | '0', 189 | '0', 190 | '0', 191 | '0', 192 | '0', 193 | '0', 194 | '0', 195 | '0', 196 | '0' 197 | ]; 198 | 199 | const numJobs = timmings.length; 200 | 201 | const queue = utils.buildQueue('metrics', { 202 | metrics: { 203 | maxDataPoints: MetricsTime.FIFTEEN_MINUTES 204 | } 205 | }); 206 | 207 | queue.process(job => { 208 | this.clock.tick(timmings[job.data.index]); 209 | }); 210 | 211 | let processed = 0; 212 | const completing = new Promise(resolve => { 213 | queue.on('completed', async () => { 214 | processed++; 215 | if (processed === numJobs) { 216 | resolve(); 217 | } 218 | }); 219 | }); 220 | 221 | for (let i = 0; i < numJobs; i++) { 222 | await queue.add({ index: i }); 223 | } 224 | 225 | await completing; 226 | 227 | const metrics = await queue.getMetrics('completed'); 228 | 229 | expect(metrics.meta.count).to.be.equal(numJobs); 230 | expect(metrics.data.length).to.be.equal(MetricsTime.FIFTEEN_MINUTES); 231 | expect(metrics.count).to.be.equal(metrics.data.length); 232 | expect(processed).to.be.equal(numJobs); 233 | expect(metrics.data).to.be.deep.equal(fixture); 234 | 235 | this.clock.restore(); 236 | await queue.close(); 237 | }); 238 | 239 | it('should gather metrics for failed jobs', async function() { 240 | const date = new Date('2017-02-07 9:24:00'); 241 | this.clock.setSystemTime(date); 242 | this.clock.tick(0); 243 | 244 | const timmings = [ 245 | 0, // For the fixtures to work we need to use 0 as first timing 246 | ONE_MINUTE, 247 | ONE_MINUTE / 5, 248 | ONE_MINUTE / 2, 249 | 0, 250 | ONE_MINUTE, 251 | ONE_MINUTE * 3, 252 | 0 253 | ]; 254 | 255 | const fixture = ['0', '0', '1', '4', '1']; 256 | 257 | const numJobs = timmings.length; 258 | 259 | const queue = utils.buildQueue('metrics', { 260 | metrics: { 261 | maxDataPoints: MetricsTime.ONE_HOUR * 2 262 | } 263 | }); 264 | 265 | queue.process(async job => { 266 | this.clock.tick(timmings[job.data.index]); 267 | throw new Error('test'); 268 | }); 269 | 270 | let processed = 0; 271 | const completing = new Promise(resolve => { 272 | queue.on('failed', async () => { 273 | processed++; 274 | if (processed === numJobs) { 275 | resolve(); 276 | } 277 | }); 278 | }); 279 | 280 | for (let i = 0; i < numJobs; i++) { 281 | await queue.add({ index: i }); 282 | } 283 | 284 | await completing; 285 | 286 | const metrics = await queue.getMetrics('failed'); 287 | 288 | const numPoints = Math.floor( 289 | timmings.reduce((sum, timing) => sum + timing, 0) / ONE_MINUTE 290 | ); 291 | 292 | expect(metrics.meta.count).to.be.equal(numJobs); 293 | expect(metrics.data.length).to.be.equal(numPoints); 294 | expect(metrics.count).to.be.equal(metrics.data.length); 295 | 
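    // Failed jobs feed their own metrics series: getMetrics('failed') is expected to
    // follow the same per-minute bucketing as the completed series above.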
expect(processed).to.be.equal(numJobs); 296 | expect(metrics.data).to.be.deep.equal(fixture); 297 | 298 | this.clock.restore(); 299 | await queue.close(); 300 | }); 301 | 302 | it('should get metrics with pagination', async function() { 303 | const date = new Date('2017-02-07 9:24:00'); 304 | this.clock.setSystemTime(date); 305 | this.clock.tick(0); 306 | 307 | const timmings = [ 308 | 0, 309 | 0, // For the fixtures to work we need to use 0 as first timing 310 | ONE_MINUTE / 2, 311 | ONE_MINUTE / 2, 312 | 0, 313 | 0, 314 | ONE_MINUTE, 315 | ONE_MINUTE, 316 | ONE_MINUTE * 3, 317 | ONE_HOUR, 318 | ONE_MINUTE 319 | ]; 320 | 321 | const numJobs = timmings.length; 322 | 323 | const queue = utils.buildQueue('metrics', { 324 | metrics: { 325 | maxDataPoints: MetricsTime.ONE_HOUR * 2 326 | } 327 | }); 328 | 329 | queue.process(async job => { 330 | this.clock.tick(timmings[job.data.index]); 331 | }); 332 | 333 | let processed = 0; 334 | const completing = new Promise(resolve => { 335 | queue.on('completed', async () => { 336 | processed++; 337 | if (processed === numJobs) { 338 | resolve(); 339 | } 340 | }); 341 | }); 342 | 343 | for (let i = 0; i < numJobs; i++) { 344 | await queue.add({ index: i }); 345 | } 346 | 347 | await completing; 348 | 349 | expect(processed).to.be.equal(numJobs); 350 | 351 | const numPoints = Math.floor( 352 | timmings.reduce((sum, timing) => sum + timing, 0) / ONE_MINUTE 353 | ); 354 | 355 | const pageSize = 10; 356 | const data = []; 357 | let skip = 0; 358 | 359 | while (skip < numPoints) { 360 | const metrics = await queue.getMetrics( 361 | 'completed', 362 | skip, 363 | skip + pageSize - 1 364 | ); 365 | expect(metrics.meta.count).to.be.equal(numJobs); 366 | expect(metrics.data.length).to.be.equal( 367 | Math.min(numPoints - skip, pageSize) 368 | ); 369 | 370 | data.push(...metrics.data); 371 | skip += pageSize; 372 | } 373 | 374 | const metrics = await queue.getMetrics('completed'); 375 | expect(data).to.be.deep.equal(metrics.data); 376 | 377 | this.clock.restore(); 378 | await queue.close(); 379 | }); 380 | }); 381 | -------------------------------------------------------------------------------- /test/test_obliterate.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const expect = require('chai').expect; 4 | const uuid = require('uuid'); 5 | const utils = require('./utils'); 6 | const delay = require('delay'); 7 | 8 | describe('Obliterate', () => { 9 | let queue; 10 | 11 | beforeEach(() => { 12 | queue = utils.buildQueue('cleaner' + uuid.v4()); 13 | }); 14 | 15 | afterEach(function() { 16 | this.timeout( 17 | queue.settings.stalledInterval * (1 + queue.settings.maxStalledCount) 18 | ); 19 | return queue.close(); 20 | }); 21 | 22 | it('should obliterate an empty queue', async () => { 23 | await queue.obliterate(); 24 | 25 | const client = await queue.client; 26 | const keys = await client.keys(`bull:${queue.name}*`); 27 | 28 | expect(keys.length).to.be.eql(0); 29 | }); 30 | 31 | it('should obliterate a queue with jobs in different statuses', async () => { 32 | await queue.add({ foo: 'bar' }); 33 | await queue.add({ foo: 'bar2' }); 34 | await queue.add({ foo: 'bar3' }, { delay: 5000 }); 35 | const job = await queue.add({ qux: 'baz' }); 36 | 37 | let first = true; 38 | queue.process(async () => { 39 | if (first) { 40 | first = false; 41 | throw new Error('failed first'); 42 | } 43 | return delay(250); 44 | }); 45 | 46 | await job.finished(); 47 | 48 | await queue.obliterate(); 49 | const client = await 
queue.client; 50 | const keys = await client.keys(`bull:${queue.name}*`); 51 | expect(keys.length).to.be.eql(0); 52 | }); 53 | 54 | it('should raise exception if queue has active jobs', async () => { 55 | await queue.add({ foo: 'bar' }); 56 | const job = await queue.add({ qux: 'baz' }); 57 | 58 | await queue.add({ foo: 'bar2' }); 59 | await queue.add({ foo: 'bar3' }, { delay: 5000 }); 60 | 61 | let first = true; 62 | queue.process(async () => { 63 | if (first) { 64 | first = false; 65 | throw new Error('failed first'); 66 | } 67 | return delay(250); 68 | }); 69 | 70 | await job.finished(); 71 | 72 | try { 73 | await queue.obliterate(); 74 | } catch (err) { 75 | const client = await queue.client; 76 | const keys = await client.keys(`bull:${queue.name}*`); 77 | expect(keys.length).to.be.not.eql(0); 78 | return; 79 | } 80 | 81 | throw new Error('Should raise an exception if there are active jobs'); 82 | }); 83 | 84 | it('should obliterate if queue has active jobs using "force"', async () => { 85 | await queue.add({ foo: 'bar' }); 86 | const job = await queue.add({ qux: 'baz' }); 87 | 88 | await queue.add({ foo: 'bar2' }); 89 | await queue.add({ foo: 'bar3' }, { delay: 5000 }); 90 | 91 | let first = true; 92 | queue.process(async () => { 93 | if (first) { 94 | first = false; 95 | throw new Error('failed first'); 96 | } 97 | return delay(250); 98 | }); 99 | await job.finished(); 100 | 101 | await queue.obliterate({ force: true }); 102 | const client = await queue.client; 103 | const keys = await client.keys(`bull:${queue.name}*`); 104 | expect(keys.length).to.be.eql(0); 105 | }); 106 | 107 | it('should remove repeatable jobs', async () => { 108 | await queue.add( 109 | 'test', 110 | { foo: 'bar' }, 111 | { 112 | repeat: { 113 | every: 1000 114 | } 115 | } 116 | ); 117 | 118 | const repeatableJobs = await queue.getRepeatableJobs(); 119 | expect(repeatableJobs).to.have.length(1); 120 | 121 | await queue.obliterate(); 122 | const client = await queue.client; 123 | const keys = await client.keys(`bull:${queue.name}:*`); 124 | expect(keys.length).to.be.eql(0); 125 | }); 126 | 127 | it('should remove job logs', async () => { 128 | const job = await queue.add({}); 129 | 130 | queue.process(job => { 131 | return job.log('Lorem Ipsum Dolor Sit Amet'); 132 | }); 133 | 134 | await job.finished(); 135 | 136 | await queue.obliterate({ force: true }); 137 | 138 | const { logs } = await queue.getJobLogs(job.id); 139 | expect(logs).to.have.length(0); 140 | }); 141 | 142 | it('should obliterate a queue with high number of jobs in different statuses', async () => { 143 | const arr1 = []; 144 | for (let i = 0; i < 300; i++) { 145 | arr1.push(queue.add({ foo: `barLoop${i}` })); 146 | } 147 | 148 | const [lastCompletedJob] = (await Promise.all(arr1)).splice(-1); 149 | 150 | let fail = false; 151 | queue.process(async () => { 152 | if (fail) { 153 | throw new Error('failed job'); 154 | } 155 | }); 156 | 157 | await lastCompletedJob.finished(); 158 | 159 | fail = true; 160 | 161 | const arr2 = []; 162 | for (let i = 0; i < 300; i++) { 163 | arr2.push(queue.add({ foo: `barLoop${i}` })); 164 | } 165 | 166 | const [lastFailedJob] = (await Promise.all(arr2)).splice(-1); 167 | 168 | try { 169 | await lastFailedJob.finished(); 170 | expect(true).to.be.equal(false); 171 | } catch (err) { 172 | expect(true).to.be.equal(true); 173 | } 174 | 175 | const arr3 = []; 176 | for (let i = 0; i < 1623; i++) { 177 | arr3.push(queue.add({ foo: `barLoop${i}` }, { delay: 10000 })); 178 | } 179 | await Promise.all(arr3); 180 | 181 | await 
queue.obliterate(); 182 | const client = await queue.client; 183 | const keys = await client.keys(`bull:${queue.name}*`); 184 | expect(keys.length).to.be.eql(0); 185 | }).timeout(20000); 186 | }); 187 | -------------------------------------------------------------------------------- /test/test_rate_limiter.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const expect = require('chai').expect; 4 | const utils = require('./utils'); 5 | const redis = require('ioredis'); 6 | const _ = require('lodash'); 7 | const assert = require('assert'); 8 | 9 | describe('Rate limiter', () => { 10 | let queue; 11 | let client; 12 | 13 | beforeEach(() => { 14 | client = new redis(); 15 | return client.flushdb().then(() => { 16 | queue = utils.buildQueue('test rate limiter', { 17 | limiter: { 18 | max: 1, 19 | duration: 1000 20 | } 21 | }); 22 | return queue; 23 | }); 24 | }); 25 | 26 | afterEach(() => { 27 | return queue.close().then(() => { 28 | return client.quit(); 29 | }); 30 | }); 31 | 32 | it('should throw exception if missing duration option', done => { 33 | try { 34 | utils.buildQueue('rate limiter fail', { 35 | limiter: { 36 | max: 5 37 | } 38 | }); 39 | } catch (err) { 40 | return done(); 41 | } 42 | done(new Error('Should not allow missing `duration` option')); 43 | }); 44 | 45 | it('should throw exception if missing max option', done => { 46 | try { 47 | utils.buildQueue('rate limiter fail', { 48 | limiter: { 49 | duration: 5000 50 | } 51 | }); 52 | } catch (err) { 53 | return done(); 54 | } 55 | done(new Error('Should not allow missing `max` option')); 56 | }); 57 | 58 | it('should obey the rate limit', done => { 59 | const startTime = new Date().getTime(); 60 | const numJobs = 4; 61 | 62 | queue.process(() => { 63 | return Promise.resolve(); 64 | }); 65 | 66 | for (let i = 0; i < numJobs; i++) { 67 | queue.add({}); 68 | } 69 | 70 | queue.on( 71 | 'completed', 72 | // runs once after every job has completed 73 | _.after(numJobs, () => { 74 | try { 75 | const timeDiff = new Date().getTime() - startTime; 76 | expect(timeDiff).to.be.above((numJobs - 1) * 1000); 77 | done(); 78 | } catch (err) { 79 | done(err); 80 | } 81 | }) 82 | ); 83 | 84 | queue.on('failed', err => { 85 | done(err); 86 | }); 87 | }).timeout(5000); 88 | 89 | // Skipped because job priority is currently maintained on a best-effort basis and cannot 90 | // be guaranteed for rate-limited jobs. 91 | it.skip('should obey job priority', async () => { 92 | const newQueue = utils.buildQueue('test rate limiter', { 93 | limiter: { 94 | max: 1, 95 | duration: 150 96 | } 97 | }); 98 | const numJobs = 20; 99 | const priorityBuckets = { 100 | 1: 0, 101 | 2: 0, 102 | 3: 0, 103 | 4: 0 104 | }; 105 | 106 | const numPriorities = Object.keys(priorityBuckets).length; 107 | 108 | newQueue.process(job => { 109 | const priority = job.opts.priority; 110 | 111 | priorityBuckets[priority] = priorityBuckets[priority] - 1; 112 | 113 | for (let p = 1; p < priority; p++) { 114 | if (priorityBuckets[p] > 0) { 115 | const before = JSON.stringify(priorityBucketsBefore); 116 | const after = JSON.stringify(priorityBuckets); 117 | throw new Error( 118 | `Priority was not enforced, job with priority ${priority} was processed before all jobs with priority ${p} were processed.
Bucket counts before: ${before} / after: ${after}` 119 | ); 120 | } 121 | } 122 | }); 123 | 124 | const result = new Promise((resolve, reject) => { 125 | newQueue.on('failed', (job, err) => { 126 | reject(err); 127 | }); 128 | 129 | const afterNumJobs = _.after(numJobs, () => { 130 | try { 131 | expect(_.every(priorityBuckets, value => value === 0)).to.eq(true); 132 | resolve(); 133 | } catch (err) { 134 | reject(err); 135 | } 136 | }); 137 | 138 | newQueue.on('completed', () => { 139 | afterNumJobs(); 140 | }); 141 | }); 142 | 143 | await newQueue.pause(); 144 | const promises = []; 145 | 146 | for (let i = 0; i < numJobs; i++) { 147 | const opts = { priority: (i % numPriorities) + 1 }; 148 | priorityBuckets[opts.priority] = priorityBuckets[opts.priority] + 1; 149 | promises.push(newQueue.add({ id: i }, opts)); 150 | } 151 | 152 | const priorityBucketsBefore = _.reduce( 153 | priorityBuckets, 154 | (acc, value, key) => { 155 | acc[key] = value; 156 | return acc; 157 | }, 158 | {} 159 | ); 160 | 161 | await Promise.all(promises); 162 | 163 | await newQueue.resume(); 164 | 165 | return result; 166 | }).timeout(60000); 167 | 168 | it('should put a job into the delayed queue when limit is hit', () => { 169 | const newQueue = utils.buildQueue('test rate limiter', { 170 | limiter: { 171 | max: 1, 172 | duration: 1000 173 | } 174 | }); 175 | 176 | newQueue.on('failed', e => { 177 | assert.fail(e); 178 | }); 179 | 180 | return Promise.all([ 181 | newQueue.add({}), 182 | newQueue.add({}), 183 | newQueue.add({}), 184 | newQueue.add({}) 185 | ]).then(() => { 186 | return Promise.all([ 187 | newQueue.getNextJob({}), 188 | newQueue.getNextJob({}), 189 | newQueue.getNextJob({}), 190 | newQueue.getNextJob({}) 191 | ]).then(() => { 192 | return newQueue.getDelayedCount().then( 193 | delayedCount => { 194 | expect(delayedCount).to.eq(3); 195 | }, 196 | () => { 197 | /*ignore error*/ 198 | } 199 | ); 200 | }); 201 | }); 202 | }); 203 | 204 | it('should not put a job into the delayed queue when bounceBack is true', () => { 205 | const newQueue = utils.buildQueue('test rate limiter', { 206 | limiter: { 207 | max: 1, 208 | duration: 1000, 209 | bounceBack: true 210 | } 211 | }); 212 | 213 | newQueue.on('failed', e => { 214 | assert.fail(e); 215 | }); 216 | return Promise.all([ 217 | newQueue.add({}), 218 | newQueue.add({}), 219 | newQueue.add({}), 220 | newQueue.add({}) 221 | ]).then(() => { 222 | return Promise.all([ 223 | newQueue.getNextJob({}), 224 | newQueue.getNextJob({}), 225 | newQueue.getNextJob({}), 226 | newQueue.getNextJob({}) 227 | ]).then(() => { 228 | return newQueue.getDelayedCount().then(delayedCount => { 229 | expect(delayedCount).to.eq(0); 230 | return newQueue.getActiveCount().then(activeCount => { 231 | expect(activeCount).to.eq(1); 232 | }); 233 | }); 234 | }); 235 | }); 236 | }); 237 | 238 | it('should rate limit by grouping', async function() { 239 | this.timeout(20000); 240 | const numGroups = 4; 241 | const numJobs = 20; 242 | const startTime = Date.now(); 243 | 244 | const rateLimitedQueue = utils.buildQueue('test rate limiter with group', { 245 | limiter: { 246 | max: 1, 247 | duration: 1000, 248 | groupKey: 'accountId' 249 | } 250 | }); 251 | 252 | rateLimitedQueue.process(() => { 253 | return Promise.resolve(); 254 | }); 255 | 256 | const completed = {}; 257 | 258 | const running = new Promise((resolve, reject) => { 259 | const afterJobs = _.after(numJobs, () => { 260 | try { 261 | const timeDiff = Date.now() - startTime; 262 | expect(timeDiff).to.be.gte(numGroups * 1000); 263 |
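          // With max: 1 job per second per group and numJobs / numGroups = 5 jobs in
          // each of the 4 groups, draining a group needs at least 4 full one-second
          // windows; the upper bound below just allows some scheduling slack.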
expect(timeDiff).to.be.below((numGroups + 1) * 1500); 264 | 265 | for (const group in completed) { 266 | let prevTime = completed[group][0]; 267 | for (let i = 1; i < completed[group].length; i++) { 268 | const diff = completed[group][i] - prevTime; 269 | expect(diff).to.be.below(2100); 270 | expect(diff).to.be.gte(900); 271 | prevTime = completed[group][i]; 272 | } 273 | } 274 | resolve(); 275 | } catch (err) { 276 | reject(err); 277 | } 278 | }); 279 | 280 | rateLimitedQueue.on('completed', ({ id }) => { 281 | const group = _.last(id.split(':')); 282 | completed[group] = completed[group] || []; 283 | completed[group].push(Date.now()); 284 | 285 | afterJobs(); 286 | }); 287 | 288 | rateLimitedQueue.on('failed', async err => { 289 | await queue.close(); 290 | reject(err); 291 | }); 292 | }); 293 | 294 | for (let i = 0; i < numJobs; i++) { 295 | rateLimitedQueue.add({ accountId: i % numGroups }); 296 | } 297 | 298 | await running; 299 | await rateLimitedQueue.close(); 300 | }); 301 | }); 302 | -------------------------------------------------------------------------------- /test/test_when_current_jobs_finished.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const expect = require('chai').expect; 4 | const redis = require('ioredis'); 5 | const utils = require('./utils'); 6 | const delay = require('delay'); 7 | const sinon = require('sinon'); 8 | 9 | describe('.whenCurrentJobsFinished', () => { 10 | let client; 11 | beforeEach(() => { 12 | client = new redis(); 13 | return client.flushdb(); 14 | }); 15 | 16 | afterEach(async () => { 17 | sinon.restore(); 18 | await utils.cleanupQueues(); 19 | await client.flushdb(); 20 | return client.quit(); 21 | }); 22 | 23 | it('should handle queue with no processor', async () => { 24 | const queue = await utils.newQueue(); 25 | expect(await queue.whenCurrentJobsFinished()).to.equal(undefined); 26 | }); 27 | 28 | it('should handle queue with no jobs', async () => { 29 | const queue = await utils.newQueue(); 30 | queue.process(() => Promise.resolve()); 31 | expect(await queue.whenCurrentJobsFinished()).to.equal(undefined); 32 | }); 33 | 34 | it('should wait for job to complete', async () => { 35 | const queue = await utils.newQueue(); 36 | await queue.add({}); 37 | 38 | let finishJob; 39 | 40 | // wait for job to be active 41 | await new Promise(resolve => { 42 | queue.process(() => { 43 | resolve(); 44 | 45 | return new Promise(resolve => { 46 | finishJob = resolve; 47 | }); 48 | }); 49 | }); 50 | 51 | let isFulfilled = false; 52 | const finished = queue.whenCurrentJobsFinished().then(() => { 53 | isFulfilled = true; 54 | }); 55 | 56 | await delay(100); 57 | expect(isFulfilled).to.equal(false); 58 | 59 | finishJob(); 60 | expect(await finished).to.equal( 61 | undefined, 62 | 'whenCurrentJobsFinished should resolve once jobs are finished' 63 | ); 64 | }); 65 | 66 | it('should wait for all jobs to complete', async () => { 67 | const queue = await utils.newQueue(); 68 | 69 | // add multiple jobs to queue 70 | await queue.add({}); 71 | await queue.add({}); 72 | 73 | let finishJob1; 74 | let finishJob2; 75 | 76 | // wait for all jobs to be active 77 | await new Promise(resolve => { 78 | let callCount = 0; 79 | queue.process(2, () => { 80 | callCount++; 81 | if (callCount === 1) { 82 | return new Promise(resolve => { 83 | finishJob1 = resolve; 84 | }); 85 | } 86 | 87 | resolve(); 88 | return new Promise(resolve => { 89 | finishJob2 = resolve; 90 | }); 91 | }); 92 | }); 93 | 94 | let isFulfilled = 
false; 95 | const finished = queue.whenCurrentJobsFinished().then(() => { 96 | isFulfilled = true; 97 | }); 98 | 99 | finishJob2(); 100 | await delay(100); 101 | 102 | expect(isFulfilled).to.equal( 103 | false, 104 | 'should not fulfill until all jobs are finished' 105 | ); 106 | 107 | finishJob1(); 108 | await delay(100); 109 | expect(await finished).to.equal( 110 | undefined, 111 | 'whenCurrentJobsFinished should resolve once all jobs are finished' 112 | ); 113 | }); 114 | 115 | it('should wait for job to fail', async () => { 116 | const queue = await utils.newQueue(); 117 | await queue.add({}); 118 | 119 | let rejectJob; 120 | 121 | // wait for job to be active 122 | await new Promise(resolve => { 123 | queue.process(() => { 124 | resolve(); 125 | 126 | return new Promise((resolve, reject) => { 127 | rejectJob = reject; 128 | }); 129 | }); 130 | }); 131 | 132 | let isFulfilled = false; 133 | const finished = queue.whenCurrentJobsFinished().then(() => { 134 | isFulfilled = true; 135 | }); 136 | 137 | await delay(100); 138 | expect(isFulfilled).to.equal(false); 139 | 140 | rejectJob(); 141 | expect(await finished).to.equal( 142 | undefined, 143 | 'whenCurrentJobsFinished should resolve once jobs are finished' 144 | ); 145 | }); 146 | }); 147 | -------------------------------------------------------------------------------- /test/test_worker.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const expect = require('chai').expect; 4 | const utils = require('./utils'); 5 | const redis = require('ioredis'); 6 | 7 | describe('workers', () => { 8 | let queue; 9 | let client; 10 | 11 | beforeEach(() => { 12 | client = new redis(); 13 | return client.flushdb().then(() => { 14 | queue = utils.buildQueue('test workers', { 15 | settings: { 16 | guardInterval: 300000, 17 | stalledInterval: 300000 18 | } 19 | }); 20 | return queue; 21 | }); 22 | }); 23 | 24 | afterEach(() => { 25 | return queue.close().then(() => { 26 | return client.quit(); 27 | }); 28 | }); 29 | 30 | it('should get all workers for this queue', async () => { 31 | queue.process(() => {}); 32 | 33 | await queue.bclient.ping(); 34 | 35 | const workers = await queue.getWorkers(); 36 | expect(workers).to.have.length(1); 37 | }); 38 | }); 39 | -------------------------------------------------------------------------------- /test/utils.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const Queue = require('../'); 4 | const STD_QUEUE_NAME = 'test queue'; 5 | const _ = require('lodash'); 6 | 7 | let queues = []; 8 | 9 | const originalSetTimeout = setTimeout; 10 | 11 | function simulateDisconnect(queue) { 12 | queue.client.disconnect(); 13 | queue.eclient.disconnect(); 14 | } 15 | 16 | function buildQueue(name, options) { 17 | options = _.extend({ redis: { port: 6379, host: '127.0.0.1' } }, options); 18 | const queue = new Queue(name || STD_QUEUE_NAME, options); 19 | queues.push(queue); 20 | return queue; 21 | } 22 | 23 | function newQueue(name, opts) { 24 | const queue = buildQueue(name, opts); 25 | return queue.isReady(); 26 | } 27 | 28 | function cleanupQueue(queue) { 29 | return queue.empty().then(queue.close.bind(queue)); 30 | } 31 | 32 | function cleanupQueues() { 33 | return Promise.all( 34 | queues.map(queue => { 35 | const errHandler = function() {}; 36 | queue.on('error', errHandler); 37 | return queue.close().catch(errHandler); 38 | }) 39 | ).then(() => { 40 | queues = []; 41 | }); 42 | } 43 | 44 | function 
sleep(ms) { 45 | return new Promise(resolve => { 46 | originalSetTimeout(() => { 47 | resolve(); 48 | }, ms); 49 | }); 50 | } 51 | 52 | module.exports = { 53 | simulateDisconnect, 54 | buildQueue, 55 | cleanupQueue, 56 | newQueue, 57 | cleanupQueues, 58 | sleep 59 | }; 60 | --------------------------------------------------------------------------------