├── .gitbook.yaml ├── .github ├── ISSUE_TEMPLATE │ ├── bug.yaml │ └── feature_request.md ├── pull_request_template.md └── workflows │ ├── api.yml │ ├── codeql-analysis.yml │ ├── codeql.yml │ ├── osv-scanner.yml │ ├── release.yml │ └── test.yml ├── .gitignore ├── .husky ├── commit-msg └── pre-commit ├── .madgerc ├── .mocharc.js ├── .npmignore ├── .prettierrc.js ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── commitlint.config.js ├── config └── api-extractor.json ├── contributing.md ├── docker-compose.yml ├── docs └── gitbook │ ├── .gitbook │ └── assets │ │ ├── 1200x627 - Light.png │ │ ├── Atlassian-horizontal-blue-rgb (1).webp │ │ ├── Atlassian-horizontal-blue-rgb.webp │ │ ├── Screenshot 2022-02-15 at 11.32.39 (1).png │ │ ├── Screenshot 2022-02-15 at 11.32.39.png │ │ ├── architecture (1) (1).png │ │ ├── architecture (1).png │ │ ├── architecture.png │ │ ├── autodesk-logo-white.png │ │ ├── clipart1565701.png │ │ ├── complete-architecture.png │ │ ├── curri-small (1).png │ │ ├── curri-small.png │ │ ├── curri.png │ │ ├── datawrapper-logo.png │ │ ├── entethalliance-logo (1).png │ │ ├── entethalliance-logo.png │ │ ├── flow-architecture (1).png │ │ ├── flow-architecture.png │ │ ├── image (1) (1) (1).png │ │ ├── image (1) (1).png │ │ ├── image (1).png │ │ ├── image (10).png │ │ ├── image (11).png │ │ ├── image (2) (1) (1).png │ │ ├── image (2) (1).png │ │ ├── image (2).png │ │ ├── image (3) (1).png │ │ ├── image (3).png │ │ ├── image (4) (1).png │ │ ├── image (4).png │ │ ├── image (5).png │ │ ├── image (6).png │ │ ├── image (7).png │ │ ├── image (8).png │ │ ├── image (9).png │ │ ├── image.png │ │ ├── kisspng-logo-retail-target-corporation-advertising-5ae5ef435bed87.5542932915250184353765 (1).jpg │ │ ├── kisspng-logo-retail-target-corporation-advertising-5ae5ef435bed87.5542932915250184353765.jpg │ │ ├── kisspng-logo-retail-target-corporation-advertising-5ae5ef43944c89.3404142515250184356074.png │ │ ├── medusa-logo.svg │ │ ├── 
mermaid-diagram-2023-06-22-093213 (1).png │ │ ├── mermaid-diagram-2023-06-22-093213.png │ │ ├── mermaid-diagram-2023-06-22-093303.png │ │ ├── mermaid-diagram-2023-06-22-095138.png │ │ ├── midwayjs-logo.png │ │ ├── mozilla-logo-bw-rgb (1).png │ │ ├── mozilla-logo-bw-rgb (2).png │ │ ├── mozilla-logo-bw-rgb.png │ │ ├── remix-logo (1).pdf │ │ ├── remix-logo.pdf │ │ ├── remix-logo.png │ │ ├── salesforce-logo (1).png │ │ ├── salesforce-logo.png │ │ └── wordmark-logo.png │ ├── README (1).md │ ├── README.md │ ├── SUMMARY.md │ ├── bull │ ├── important-notes.md │ ├── install.md │ ├── introduction.md │ ├── patterns │ │ ├── README.md │ │ ├── custom-backoff-strategy.md │ │ ├── debugging.md │ │ ├── manually-fetching-jobs.md │ │ ├── message-queue.md │ │ ├── persistent-connections.md │ │ ├── redis-cluster.md │ │ ├── returning-job-completions.md │ │ └── reusing-redis-connections.md │ └── quick-guide.md │ ├── bullmq-pro │ ├── batches.md │ ├── changelog.md │ ├── groups │ │ ├── README.md │ │ ├── concurrency.md │ │ ├── getters.md │ │ ├── local-group-concurrency.md │ │ ├── local-group-rate-limit.md │ │ ├── max-group-size.md │ │ ├── pausing-groups.md │ │ ├── prioritized.md │ │ ├── rate-limiting.md │ │ └── sandboxes-for-groups.md │ ├── install.md │ ├── introduction.md │ ├── nestjs │ │ ├── README.md │ │ ├── changelog.md │ │ ├── producers.md │ │ └── queue-events-listeners.md │ ├── new-releases.md │ ├── observables │ │ ├── README.md │ │ └── cancelation.md │ ├── support.md │ └── telemetry.md │ ├── changelog.md │ ├── changelogs │ ├── changelog-v1.md │ ├── changelog-v2.md │ ├── changelog-v3.md │ └── changelog-v4.md │ ├── guide │ ├── architecture.md │ ├── connections.md │ ├── events │ │ ├── README.md │ │ └── create-custom-events.md │ ├── flows │ │ ├── README.md │ │ ├── adding-bulks.md │ │ ├── continue-parent.md │ │ ├── fail-parent.md │ │ ├── get-flow-tree.md │ │ ├── ignore-dependency.md │ │ ├── remove-child-dependency.md │ │ └── remove-dependency.md │ ├── going-to-production.md │ ├── 
introduction.md │ ├── job-schedulers │ │ ├── README.md │ │ ├── manage-job-schedulers.md │ │ ├── repeat-options.md │ │ └── repeat-strategies.md │ ├── jobs │ │ ├── README.md │ │ ├── deduplication.md │ │ ├── delayed.md │ │ ├── fifo.md │ │ ├── getters.md │ │ ├── job-data.md │ │ ├── job-ids.md │ │ ├── lifo.md │ │ ├── prioritized.md │ │ ├── removing-job.md │ │ ├── repeatable.md │ │ └── stalled.md │ ├── metrics │ │ ├── README.md │ │ └── prometheus.md │ ├── migration-to-newer-versions.md │ ├── nestjs │ │ ├── README.md │ │ ├── producers.md │ │ └── queue-events-listeners.md │ ├── parallelism-and-concurrency.md │ ├── queues │ │ ├── README.md │ │ ├── adding-bulks.md │ │ ├── auto-removal-of-jobs.md │ │ ├── global-concurrency.md │ │ └── removing-jobs.md │ ├── queuescheduler.md │ ├── rate-limiting.md │ ├── redis-tm-compatibility │ │ ├── README.md │ │ └── dragonfly.md │ ├── redis-tm-hosting │ │ ├── README.md │ │ ├── aws-elasticache.md │ │ └── aws-memorydb.md │ ├── retrying-failing-jobs.md │ ├── returning-job-data.md │ ├── telemetry │ │ ├── README.md │ │ ├── getting-started.md │ │ ├── running-a-simple-example.md │ │ └── running-jaeger.md │ ├── troubleshooting.md │ ├── workers.md │ └── workers │ │ ├── README.md │ │ ├── auto-removal-of-jobs.md │ │ ├── concurrency.md │ │ ├── graceful-shutdown.md │ │ ├── pausing-queues.md │ │ ├── sandboxed-processors.md │ │ └── stalled-jobs.md │ ├── index.md │ ├── patterns │ ├── adding-bulks.md │ ├── failing-fast-when-redis-is-down.md │ ├── flows.md │ ├── idempotent-jobs.md │ ├── manually-fetching-jobs.md │ ├── named-processor.md │ ├── process-step-jobs.md │ ├── redis-cluster.md │ ├── stop-retrying-jobs.md │ ├── throttle-jobs.md │ ├── timeout-for-sandboxed-processors.md │ └── timeout-jobs.md │ └── python │ ├── changelog.md │ └── introduction.md ├── eslint.config.mjs ├── mocha.setup.ts ├── package.json ├── python ├── .gitignore ├── CHANGELOG.md ├── README.md ├── bullmq │ ├── __init__.py │ ├── backoffs.py │ ├── custom_errors │ │ ├── __init__.py │ │ ├── 
unrecoverable_error.py │ │ └── waiting_children_error.py │ ├── error_code.py │ ├── event_emitter.py │ ├── flow_producer.py │ ├── job.py │ ├── queue.py │ ├── queue_keys.py │ ├── redis_connection.py │ ├── scripts.py │ ├── timer.py │ ├── types │ │ ├── __init__.py │ │ ├── backoff_options.py │ │ ├── job_options.py │ │ ├── keep_jobs.py │ │ ├── promote_jobs_options.py │ │ ├── queue_options.py │ │ ├── retry_jobs_options.py │ │ └── worker_options.py │ ├── utils.py │ └── worker.py ├── pyproject.toml ├── pytest.ini ├── release.sh ├── requirements.txt ├── run_tests.sh ├── run_tests_dragonfly.sh ├── setup.py └── tests │ ├── __init__.py │ ├── bulk_test.py │ ├── delay_test.py │ ├── flow_test.py │ ├── job_test.py │ ├── queue_test.py │ └── worker_test.py ├── scripts ├── commandTransform.js ├── generateRawScripts.ts └── updateVersion.js ├── src ├── classes │ ├── async-fifo-queue.ts │ ├── backoffs.ts │ ├── child-pool.ts │ ├── child-processor.ts │ ├── child.ts │ ├── errors │ │ ├── delayed-error.ts │ │ ├── index.ts │ │ ├── rate-limit-error.ts │ │ ├── unrecoverable-error.ts │ │ └── waiting-children-error.ts │ ├── flow-producer.ts │ ├── index.ts │ ├── job-scheduler.ts │ ├── job.ts │ ├── main-base.ts │ ├── main-worker.ts │ ├── main.ts │ ├── queue-base.ts │ ├── queue-events-producer.ts │ ├── queue-events.ts │ ├── queue-getters.ts │ ├── queue-keys.ts │ ├── queue.ts │ ├── redis-connection.ts │ ├── repeat.ts │ ├── sandbox.ts │ ├── scripts.ts │ └── worker.ts ├── commands │ ├── addDelayedJob-6.lua │ ├── addJobScheduler-11.lua │ ├── addLog-2.lua │ ├── addParentJob-4.lua │ ├── addPrioritizedJob-8.lua │ ├── addRepeatableJob-2.lua │ ├── addStandardJob-8.lua │ ├── changeDelay-4.lua │ ├── changePriority-7.lua │ ├── cleanJobsInSet-3.lua │ ├── drain-5.lua │ ├── extendLock-2.lua │ ├── extendLocks-1.lua │ ├── getCounts-1.lua │ ├── getCountsPerPriority-4.lua │ ├── getDependencyCounts-4.lua │ ├── getJobScheduler-1.lua │ ├── getRanges-1.lua │ ├── getRateLimitTtl-1.lua │ ├── getState-8.lua │ ├── 
getStateV2-8.lua │ ├── includes │ │ ├── addBaseMarkerIfNeeded.lua │ │ ├── addDelayMarkerIfNeeded.lua │ │ ├── addDelayedJob.lua │ │ ├── addJobFromScheduler.lua │ │ ├── addJobInTargetList.lua │ │ ├── addJobWithPriority.lua │ │ ├── batches.lua │ │ ├── checkItemInList.lua │ │ ├── cleanList.lua │ │ ├── cleanSet.lua │ │ ├── collectMetrics.lua │ │ ├── deduplicateJob.lua │ │ ├── destructureJobKey.lua │ │ ├── filterOutJobsToIgnore.lua │ │ ├── findPage.lua │ │ ├── getDelayedScore.lua │ │ ├── getJobsInZset.lua │ │ ├── getNextDelayedTimestamp.lua │ │ ├── getOrSetMaxEvents.lua │ │ ├── getPriorityScore.lua │ │ ├── getRateLimitTTL.lua │ │ ├── getTargetQueueList.lua │ │ ├── getTimestamp.lua │ │ ├── getZSetItems.lua │ │ ├── handleDuplicatedJob.lua │ │ ├── isJobSchedulerJob.lua │ │ ├── isLocked.lua │ │ ├── isQueueMaxed.lua │ │ ├── isQueuePaused.lua │ │ ├── isQueuePausedOrMaxed.lua │ │ ├── moveChildFromDependenciesIfNeeded.lua │ │ ├── moveJobFromPrioritizedToActive.lua │ │ ├── moveParentToWait.lua │ │ ├── moveParentToWaitIfNeeded.lua │ │ ├── moveParentToWaitIfNoPendingDependencies.lua │ │ ├── prepareJobForProcessing.lua │ │ ├── promoteDelayedJobs.lua │ │ ├── pushBackJobWithPriority.lua │ │ ├── removeDeduplicationKeyIfNeededOnFinalization.lua │ │ ├── removeDeduplicationKeyIfNeededOnRemoval.lua │ │ ├── removeJob.lua │ │ ├── removeJobFromAnyState.lua │ │ ├── removeJobKeys.lua │ │ ├── removeJobWithChildren.lua │ │ ├── removeJobs.lua │ │ ├── removeJobsByMaxAge.lua │ │ ├── removeJobsByMaxCount.lua │ │ ├── removeJobsOnFail.lua │ │ ├── removeListJobs.lua │ │ ├── removeLock.lua │ │ ├── removeParentDependencyKey.lua │ │ ├── removeZSetJobs.lua │ │ ├── storeJob.lua │ │ ├── storeJobScheduler.lua │ │ ├── trimEvents.lua │ │ ├── updateExistingJobsParent.lua │ │ ├── updateJobFields.lua │ │ └── updateParentDepsIfNeeded.lua │ ├── index.ts │ ├── isFinished-3.lua │ ├── isJobInList-1.lua │ ├── isMaxed-2.lua │ ├── moveJobFromActiveToWait-9.lua │ ├── moveJobsToWait-8.lua │ ├── moveStalledJobsToWait-9.lua │ 
├── moveToActive-11.lua │ ├── moveToDelayed-8.lua │ ├── moveToFinished-14.lua │ ├── moveToWaitingChildren-8.lua │ ├── obliterate-2.lua │ ├── paginate-1.lua │ ├── pause-7.lua │ ├── promote-9.lua │ ├── releaseLock-1.lua │ ├── removeChildDependency-1.lua │ ├── removeJob-2.lua │ ├── removeJobScheduler-3.lua │ ├── removeRepeatable-3.lua │ ├── removeUnprocessedChildren-2.lua │ ├── reprocessJob-8.lua │ ├── retryJob-11.lua │ ├── saveStacktrace-1.lua │ ├── script-loader.ts │ ├── updateData-1.lua │ ├── updateJobScheduler-12.lua │ ├── updateProgress-3.lua │ └── updateRepeatableJobMillis-1.lua ├── enums │ ├── child-command.ts │ ├── error-code.ts │ ├── index.ts │ ├── metrics-time.ts │ ├── parent-command.ts │ └── telemetry-attributes.ts ├── index.ts ├── interfaces │ ├── advanced-options.ts │ ├── backoff-options.ts │ ├── base-job-options.ts │ ├── child-message.ts │ ├── connection.ts │ ├── debounce-options.ts │ ├── flow-job.ts │ ├── index.ts │ ├── ioredis-events.ts │ ├── job-json.ts │ ├── job-scheduler-json.ts │ ├── keep-jobs.ts │ ├── metrics-options.ts │ ├── metrics.ts │ ├── minimal-job.ts │ ├── parent-message.ts │ ├── parent-options.ts │ ├── parent.ts │ ├── queue-options.ts │ ├── rate-limiter-options.ts │ ├── receiver.ts │ ├── redis-options.ts │ ├── redis-streams.ts │ ├── repeat-options.ts │ ├── repeatable-job.ts │ ├── repeatable-options.ts │ ├── sandboxed-job-processor.ts │ ├── sandboxed-job.ts │ ├── sandboxed-options.ts │ ├── telemetry.ts │ └── worker-options.ts ├── types │ ├── backoff-strategy.ts │ ├── finished-status.ts │ ├── index.ts │ ├── job-json-sandbox.ts │ ├── job-options.ts │ ├── job-progress.ts │ ├── job-scheduler-template-options.ts │ ├── job-type.ts │ ├── minimal-queue.ts │ ├── net.d.ts │ └── repeat-strategy.ts ├── utils.ts └── version.ts ├── tests ├── fixtures │ ├── delay.js │ ├── fixture_processor.cjs │ ├── fixture_processor.js │ ├── fixture_processor_bar.js │ ├── fixture_processor_broken.js │ ├── fixture_processor_crash.js │ ├── fixture_processor_env.js │ ├── 
fixture_processor_esbuild.js │ ├── fixture_processor_exit.js │ ├── fixture_processor_fail.js │ ├── fixture_processor_fail_with_circular_reference.js │ ├── fixture_processor_foo.js │ ├── fixture_processor_get_children_failures.js │ ├── fixture_processor_get_children_failures_child.js │ ├── fixture_processor_get_children_values.js │ ├── fixture_processor_get_children_values_child.js │ ├── fixture_processor_missing_function.js │ ├── fixture_processor_move_to_delayed.js │ ├── fixture_processor_parent.js │ ├── fixture_processor_queueName.js │ ├── fixture_processor_slow.js │ ├── fixture_processor_stderr.js │ ├── fixture_processor_stdout.js │ ├── fixture_processor_steps.js │ ├── fixture_processor_ttl.js │ ├── fixture_processor_unrecoverable.js │ ├── fixture_processor_update_data.js │ ├── fixture_processor_update_progress.js │ ├── fixture_processor_with_extra_param.js │ └── scripts │ │ ├── dir-test │ │ ├── empty │ │ │ └── test.js │ │ ├── non-lua │ │ │ ├── fixture_non_lua_file.txt │ │ │ └── test.lua │ │ ├── one-0.lua │ │ ├── three-2.lua │ │ └── two-1.lua │ │ ├── fixture_circular_dependency.lua │ │ ├── fixture_circular_dependency_child.lua │ │ ├── fixture_duplicate_elimination.lua │ │ ├── fixture_duplicate_include.lua │ │ ├── fixture_glob_includes.lua │ │ ├── fixture_missing_include.lua │ │ ├── fixture_path_mapped.lua │ │ ├── fixture_path_mapped_glob.lua │ │ ├── fixture_recursive_parent.lua │ │ ├── fixture_simple_include.lua │ │ ├── fixture_simple_include_child.lua │ │ ├── includes │ │ ├── fixture_glob_include_1.lua │ │ ├── fixture_glob_include_2.lua │ │ ├── fixture_recursive_child.lua │ │ ├── fixture_recursive_grandchild.lua │ │ ├── fixture_recursive_great_grandchild.lua │ │ ├── math.lua │ │ ├── strings.lua │ │ └── utils.lua │ │ ├── load │ │ └── broadcastEvent-1.lua │ │ └── mapped │ │ ├── fixture_mapped_include_1.lua │ │ └── fixture_mapped_include_2.lua ├── test_async_fifo_queue.ts ├── test_bulk.ts ├── test_child-pool.ts ├── test_clean.ts ├── test_concurrency.ts ├── 
test_connection.ts ├── test_delay.ts ├── test_events.ts ├── test_flow.ts ├── test_getters.ts ├── test_job.ts ├── test_job_scheduler.ts ├── test_job_scheduler_stress.ts ├── test_metrics.ts ├── test_obliterate.ts ├── test_pause.ts ├── test_queue.ts ├── test_rate_limiter.ts ├── test_repeat.ts ├── test_sandboxed_process.ts ├── test_script_loader.ts ├── test_scripts.ts ├── test_stalled_jobs.ts ├── test_telemetry_interface.ts ├── test_worker.ts └── utils │ └── repeat_utils.ts ├── tsconfig-cjs.json ├── tsconfig.json └── yarn.lock /.gitbook.yaml: -------------------------------------------------------------------------------- 1 | root: ./docs/gitbook/ 2 | 3 | structure: 4 | readme: README.md 5 | summary: SUMMARY.md 6 | 7 | redirects: 8 | changelog: ./CHANGELOG.md 9 | api-reference: ./api/index.md 10 | 11 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 5 | 6 | ### Why 7 | 12 | _Enter your explanation here._ 13 | 14 | ### How 15 | 20 | _Enter the implementation details here._ 21 | 22 | ### Additional Notes (Optional) 23 | 30 | _Any extra info here._ 31 | -------------------------------------------------------------------------------- /.github/workflows/api.yml: -------------------------------------------------------------------------------- 1 | name: Api build 2 | on: 3 | push: 4 | branches: [master] 5 | 6 | jobs: 7 | build: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | contents: write # Required for merging branches, committing changes, and deploying to GitHub Pages 11 | 12 | strategy: 13 | matrix: 14 | node-version: [lts/*] 15 | 16 | steps: 17 | - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3 18 | with: 19 | # check out all branches 20 | fetch-depth: 0 21 | - name: Use Node.js ${{ matrix.node-version }} 22 | uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v3 23 | with: 24 | node-version: ${{ matrix.node-version }} 25 | cache: 'yarn' 26 | - name: yarn install 27 | run: | 28 | yarn install 29 | - name: build 30 | run: | 31 | git config --global user.email "manast@taskforce.sh" 32 | git config --global user.name "manast" 33 | git checkout docs-api 34 | git merge master --no-edit -m "chore: merge master branch on $(date +%F)" 35 | yarn pretest 36 | yarn docs:json 37 | git add . 
38 | yarn docs:merge 39 | env: 40 | CI: true 41 | - name: deploy 42 | uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3 43 | with: 44 | github_token: ${{ secrets.GITHUB_TOKEN }} 45 | publish_dir: ./docs 46 | cname: 'api.docs.bullmq.io' 47 | -------------------------------------------------------------------------------- /.github/workflows/osv-scanner.yml: -------------------------------------------------------------------------------- 1 | name: OSV-Scanner Scheduled Scan 2 | 3 | on: 4 | schedule: 5 | - cron: '30 12 * * 1' 6 | pull_request: 7 | branches: [master] 8 | merge_group: 9 | branches: [master] 10 | 11 | permissions: 12 | # Required to upload SARIF file to CodeQL. See: https://github.com/github/codeql-action/issues/2117 13 | actions: read 14 | # Require writing security events to upload SARIF file to security tab 15 | security-events: write 16 | # Only need to read contents 17 | contents: read 18 | 19 | jobs: 20 | scan-scheduled: 21 | uses: 'google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@v2.0.0' 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | /dist 4 | /rawScripts 5 | /src/scripts 6 | # Log files 7 | npm-debug.log* 8 | yarn-debug.log* 9 | yarn-error.log* 10 | 11 | # Coverage files 12 | .nyc_output 13 | coverage 14 | coverage/* 15 | 16 | # Editor directories and files 17 | .idea 18 | .vscode 19 | *.suo 20 | *.ntvs* 21 | *.njsproj 22 | *.sln 23 | *.sw* 24 | temp 25 | docs/gitbook/api 26 | package-lock.json 27 | -------------------------------------------------------------------------------- /.husky/commit-msg: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . 
"$(dirname "$0")/_/husky.sh" 3 | 4 | npx --no -- commitlint --edit "${1}" 5 | -------------------------------------------------------------------------------- /.husky/pre-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | . "$(dirname -- "$0")/_/husky.sh" 3 | 4 | yarn npm-run-all pretty:quick lint:staged 5 | -------------------------------------------------------------------------------- /.madgerc: -------------------------------------------------------------------------------- 1 | { 2 | "detectiveOptions": { 3 | "ts": { 4 | "skipTypeImports": true 5 | } 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /.mocharc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | exit: true, 3 | file: ['./mocha.setup.ts'], 4 | spec: ['./tests/test_*.ts'], 5 | timeout: 4000, 6 | 'trace-warnings': true, 7 | }; 8 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | /dist 4 | /rawScripts 5 | /src/scripts 6 | # Log files 7 | npm-debug.log* 8 | yarn-debug.log* 9 | yarn-error.log* 10 | 11 | # Coverage files 12 | .nyc_output 13 | coverage 14 | coverage/* 15 | 16 | # Editor directories and files 17 | .idea 18 | .vscode 19 | *.suo 20 | *.ntvs* 21 | *.njsproj 22 | *.sln 23 | *.sw* 24 | temp 25 | docs/gitbook/api 26 | package-lock.json 27 | 28 | # Ignore python code from npm 29 | python 30 | -------------------------------------------------------------------------------- /.prettierrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | singleQuote: true, 3 | trailingComma: 'all', 4 | tabWidth: 2, 5 | printWidth: 80, 6 | semi: true, 7 | arrowParens: 'avoid', 8 | }; 9 | 
-------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ./docs/gitbook/changelog.md -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 BullForce Labs AB. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /commitlint.config.js: -------------------------------------------------------------------------------- 1 | /* istanbul ignore file */ 2 | module.exports = {extends: ['@commitlint/config-conventional']}; 3 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | services: 3 | redis: 4 | image: redis:7-alpine 5 | container_name: cache 6 | ports: 7 | - 6379:6379 8 | -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/1200x627 - Light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/1200x627 - Light.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/Atlassian-horizontal-blue-rgb (1).webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/Atlassian-horizontal-blue-rgb (1).webp -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/Atlassian-horizontal-blue-rgb.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/Atlassian-horizontal-blue-rgb.webp -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/Screenshot 2022-02-15 at 11.32.39 (1).png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/Screenshot 2022-02-15 at 11.32.39 (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/Screenshot 2022-02-15 at 11.32.39.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/Screenshot 2022-02-15 at 11.32.39.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/architecture (1) (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/architecture (1) (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/architecture (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/architecture (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/architecture.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/autodesk-logo-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/autodesk-logo-white.png 
-------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/clipart1565701.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/clipart1565701.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/complete-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/complete-architecture.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/curri-small (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/curri-small (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/curri-small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/curri-small.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/curri.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/curri.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/datawrapper-logo.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/datawrapper-logo.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/entethalliance-logo (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/entethalliance-logo (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/entethalliance-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/entethalliance-logo.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/flow-architecture (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/flow-architecture (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/flow-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/flow-architecture.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (1) (1) (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (1) (1) (1).png 
-------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (1) (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (1) (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (10).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (10).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (11).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (11).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (2) (1) (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (2) (1) (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (2) (1).png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (2) (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (2).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (2).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (3) (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (3) (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (3).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (3).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (4) (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (4) (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (4).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (4).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image 
(5).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (5).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (6).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (6).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (7).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (7).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (8).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (8).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image (9).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image (9).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/image.png 
-------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/kisspng-logo-retail-target-corporation-advertising-5ae5ef435bed87.5542932915250184353765 (1).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/kisspng-logo-retail-target-corporation-advertising-5ae5ef435bed87.5542932915250184353765 (1).jpg -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/kisspng-logo-retail-target-corporation-advertising-5ae5ef435bed87.5542932915250184353765.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/kisspng-logo-retail-target-corporation-advertising-5ae5ef435bed87.5542932915250184353765.jpg -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/kisspng-logo-retail-target-corporation-advertising-5ae5ef43944c89.3404142515250184356074.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/kisspng-logo-retail-target-corporation-advertising-5ae5ef43944c89.3404142515250184356074.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/mermaid-diagram-2023-06-22-093213 (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/mermaid-diagram-2023-06-22-093213 (1).png -------------------------------------------------------------------------------- 
/docs/gitbook/.gitbook/assets/mermaid-diagram-2023-06-22-093213.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/mermaid-diagram-2023-06-22-093213.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/mermaid-diagram-2023-06-22-093303.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/mermaid-diagram-2023-06-22-093303.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/mermaid-diagram-2023-06-22-095138.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/mermaid-diagram-2023-06-22-095138.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/midwayjs-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/midwayjs-logo.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/mozilla-logo-bw-rgb (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/mozilla-logo-bw-rgb (1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/mozilla-logo-bw-rgb (2).png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/mozilla-logo-bw-rgb (2).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/mozilla-logo-bw-rgb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/mozilla-logo-bw-rgb.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/remix-logo (1).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/remix-logo (1).pdf -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/remix-logo.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/remix-logo.pdf -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/remix-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/remix-logo.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/salesforce-logo (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/salesforce-logo 
(1).png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/salesforce-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/salesforce-logo.png -------------------------------------------------------------------------------- /docs/gitbook/.gitbook/assets/wordmark-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/docs/gitbook/.gitbook/assets/wordmark-logo.png -------------------------------------------------------------------------------- /docs/gitbook/bull/install.md: -------------------------------------------------------------------------------- 1 | # Install 2 | 3 | Install with **Npm**: 4 | 5 | ```bash 6 | npm install bull --save 7 | ``` 8 | 9 | or Yarn: 10 | 11 | ```bash 12 | yarn add bull 13 | ``` 14 | 15 | In order to work with Bull, you also need to have a Redis server running. For local development you can easily install it using [docker](https://hub.docker.com/\_/redis/). 16 | 17 | Bull will by default try to connect to a Redis server running on `localhost:6379` 18 | 19 | {% hint style="info" %} 20 | _Bull requires a Redis version greater than or equal to `2.8.18`._ 21 | {% endhint %} 22 | 23 | ### Typescript Definitions 24 | 25 | ```bash 26 | npm install @types/bull --save-dev 27 | ``` 28 | 29 | ```bash 30 | yarn add --dev @types/bull 31 | ``` 32 | 33 | Definitions are currently maintained in the [DefinitelyTyped](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/bull) repo. 
34 | -------------------------------------------------------------------------------- /docs/gitbook/bull/introduction.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | Bull is the legacy version of BullMQ. As it is still heavily used today, it is also maintained for bugs but not for new major features. If you want to use a battle-tested queue library and you do not need a better typescript integration or the latest features you are good to go using this library for years to come. 4 | 5 | ### Used by 6 | 7 | Bull has been a part of the NodeJS ecosystem for a long time and is used by many organizations both in commercial and open-source projects. A few special mentions: 8 | 9 | ![](<../.gitbook/assets/Screenshot 2022-02-15 at 11.32.39 (1).png>) ![](../.gitbook/assets/mozilla-logo-bw-rgb.png) ![](../.gitbook/assets/autodesk-logo-white.png) ![](../.gitbook/assets/Atlassian-horizontal-blue-rgb.webp) 10 | 11 | ![](../.gitbook/assets/midwayjs-logo.png) ![](<../.gitbook/assets/salesforce-logo (1).png>) 12 | 13 | ![](<../.gitbook/assets/entethalliance-logo (1).png>) ![](../.gitbook/assets/kisspng-logo-retail-target-corporation-advertising-5ae5ef43944c89.3404142515250184356074.png) 14 | -------------------------------------------------------------------------------- /docs/gitbook/bull/patterns/README.md: -------------------------------------------------------------------------------- 1 | # Patterns 2 | 3 | Here are a few examples of useful patterns that are often implemented with Bull: 4 | 5 | * [Persistent connections](persistent-connections.md) 6 | * [Message Queue](message-queue.md) 7 | * [Returning Job Completions](returning-job-completions.md) 8 | * [Reusing Redis Connections](reusing-redis-connections.md) 9 | * [Redis Cluster](redis-cluster.md) 10 | * [Debugging](debugging.md) 11 | * [Custom backoff strategy](custom-backoff-strategy.md) 12 | * [Manually fetching 
jobs](../../patterns/manually-fetching-jobs.md) 13 | 14 | If you have any other common patterns you want to add, pull request them! 15 | -------------------------------------------------------------------------------- /docs/gitbook/bull/patterns/debugging.md: -------------------------------------------------------------------------------- 1 | # Debugging 2 | 3 | To see debug statements set or add `bull` to the `NODE_DEBUG` environment variable: 4 | 5 | ``` 6 | export NODE_DEBUG=bull 7 | ``` 8 | 9 | or: 10 | 11 | ``` 12 | NODE_DEBUG=bull node ./your-script.js 13 | ``` 14 | -------------------------------------------------------------------------------- /docs/gitbook/bull/patterns/message-queue.md: -------------------------------------------------------------------------------- 1 | # Message queue 2 | 3 | Bull can also be used for persistent message queues. This is a quite useful feature in some use cases. For example, you can have two servers that need to communicate with each other. By using a queue, the servers do not need to be online at the same time, so this creates a very robust communication channel. 
You can treat `add` as _send_ and `process` as _receive_: 4 | 5 | Server A: 6 | 7 | ```typescript 8 | const Queue = require('bull'); 9 | 10 | const sendQueue = new Queue('Server B'); 11 | const receiveQueue = new Queue('Server A'); 12 | 13 | receiveQueue.process(function (job, done) { 14 | console.log('Received message', job.data.msg); 15 | done(); 16 | }); 17 | 18 | sendQueue.add({ msg: 'Hello' }); 19 | ``` 20 | 21 | Server B: 22 | 23 | ```typescript 24 | const Queue = require('bull'); 25 | 26 | const sendQueue = new Queue('Server A'); 27 | const receiveQueue = new Queue('Server B'); 28 | 29 | receiveQueue.process(function (job, done) { 30 | console.log('Received message', job.data.msg); 31 | done(); 32 | }); 33 | 34 | sendQueue.add({ msg: 'World' }); 35 | ``` 36 | -------------------------------------------------------------------------------- /docs/gitbook/bull/patterns/redis-cluster.md: -------------------------------------------------------------------------------- 1 | # Redis cluster 2 | 3 | Bull internals require atomic operations that span different keys. This behavior breaks Redis's rules for cluster configurations. However, it is still possible to use a cluster environment by using the proper bull prefix option as a cluster "hash tag". Hash tags are used to guarantee that certain keys are placed in the same hash slot, read more about hash tags in the [redis cluster tutorial](https://redis.io/topics/cluster-tutorial). A hash tag is defined with brackets. I.e. a key that has a substring inside brackets will use that substring to determine in which hash slot the key will be placed. 4 | 5 | In summary, to make bull compatible with Redis cluster, use a queue prefix inside brackets. For example: 6 | 7 | You can use two approaches in order to make the Queues compatible with Cluster. 
Either define a queue prefix: 8 | 9 | ```typescript 10 | const queue = new Queue('cluster', { 11 | prefix: '{myprefix}' 12 | }); 13 | ``` 14 | 15 | or wrap the queue name itself: 16 | 17 | ```typescript 18 | const queue = new Queue('{cluster}'); 19 | ``` 20 | 21 | Note that if you use several queues in the same cluster, you should use different prefixes so that the queues are evenly placed in the cluster nodes, potentially increasing performance and memory usage. 22 | -------------------------------------------------------------------------------- /docs/gitbook/bull/patterns/returning-job-completions.md: -------------------------------------------------------------------------------- 1 | # Returning Job Completions 2 | 3 | A common pattern is where you have a cluster of queue processors that just process jobs as fast as they can, and some other services that need to take the result of these processors and do something with it, maybe storing results in a database. 4 | 5 | The most robust and scalable way to accomplish this is by combining the standard job queue with the message queue pattern: a service sends jobs to the cluster just by opening a job queue and adding jobs to it, and the cluster will start processing as fast as it can. Every time a job gets completed in the cluster a message is sent to a results message queue with the result data, and this queue is listened to by some other service that stores the results in a database. 6 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/groups/concurrency.md: -------------------------------------------------------------------------------- 1 | # Concurrency 2 | 3 | By default, there is no limit on the number of jobs that workers can run in parallel for every group. Even using a rate limit, that would only limit the processing speed, but still you could have an unbounded number of jobs processed simultaneously in every group.
4 | 5 | It is possible to constrain how many jobs are allowed to be processed concurrently per group. For example, if you choose 3 as max concurrency factor, the workers will never work on more than 3 jobs at the same time for any given group. This limits only the group; you could have any number of concurrent jobs as long as they are not from the same group. 6 | 7 | The concurrency factor is configured as follows: 8 | 9 | ```typescript 10 | import { WorkerPro } from '@taskforcesh/bullmq-pro'; 11 | 12 | const worker = new WorkerPro('myQueue', processFn, { 13 | group: { 14 | concurrency: 3, // Limit to max 3 parallel jobs per group 15 | }, 16 | concurrency: 100, 17 | connection, 18 | }); 19 | ``` 20 | 21 | The concurrency factor is global, so in the example above, independently of the concurrency factor per worker or the number of workers that you instantiate in your application, it will never process more than 3 jobs per group at any given time. 22 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/groups/getters.md: -------------------------------------------------------------------------------- 1 | # Getters 2 | 3 | #### Job Counts 4 | 5 | It is often necessary to know how many jobs are in a given group: 6 | 7 | ```typescript 8 | import { QueuePro } from '@taskforcesh/bullmq-pro'; 9 | 10 | const queue = new QueuePro('myQueue', { connection }); 11 | const groupId = 'my group'; 12 | const count = await queue.getGroupsJobsCount(1000); // 1000 groups in each iteration 13 | ``` 14 | 15 | {% hint style="info" %} 16 | This count value includes prioritized and non-prioritized jobs included in groups. 17 | {% endhint %} 18 | 19 | Or if you want to get the active jobs count for a specific group: 20 | 21 | ```typescript 22 | const activeCount = await queue.getGroupActiveCount(groupId); 23 | ``` 24 | 25 | #### Get Jobs 26 | 27 | It is also possible to retrieve the jobs with pagination style semantics in a given group.
For example: 28 | 29 | ```typescript 30 | const jobs = await queue.getGroupJobs(groupId, 0, 100); 31 | ``` 32 | 33 | ## Read more: 34 | 35 | * 💡 [Get Groups Jobs Count API Reference](https://api.bullmq.pro/classes/v7.Queue.html#getGroupsJobsCount) 36 | * 💡 [Get Group Active Count API Reference](https://api.bullmq.pro/classes/v7.Queue.html#getGroupActiveCount) 37 | * 💡 [Get Group Jobs API Reference](https://api.bullmq.pro/classes/v7.Queue.html#getGroupJobs) 38 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/groups/local-group-concurrency.md: -------------------------------------------------------------------------------- 1 | # Local group concurrency 2 | 3 | It is also possible to set a specific concurrency value for a given group. This is useful if you require that different groups should run with different concurrency factors. 4 | 5 | Please keep in mind that when specifying a group's concurrency factor, you are storing this value in Redis, so it is your responsibility to remove it if you are not using it anymore. 6 | 7 | You can use the `setGroupConcurrency` method like this: 8 | 9 | ```typescript 10 | import { QueuePro } from '@taskforcesh/bullmq-pro'; 11 | 12 | const queue = new QueuePro('myQueue', { connection }); 13 | const groupId = 'my group'; 14 | await queue.setGroupConcurrency(groupId, 4); 15 | ``` 16 | 17 | {% hint style="warning" %} 18 | Make sure to also set the [Group Concurrency](concurrency.md) at the worker instance level alongside the desired local concurrency, as it is required for this feature to function properly and also will act as a default concurrency value for the groups that have no local concurrency defined.
19 | {% endhint %} 20 | 21 | And you can use the `getGroupConcurrency` method like this: 22 | 23 | ```typescript 24 | const concurrency = await queue.getGroupConcurrency(groupId); 25 | ``` 26 | 27 | ## Read more: 28 | 29 | - 💡 [Set Group Concurrency API Reference](https://api.bullmq.pro/classes/v7.Queue.html#setGroupConcurrency) 30 | - 💡 [Get Group Concurrency API Reference](https://api.bullmq.pro/classes/v7.Queue.html#getGroupConcurrency) 31 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/groups/local-group-rate-limit.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: How to rate-limit each group with a different limit per group. 3 | --- 4 | 5 | # Local group rate limit 6 | 7 | Sometimes it is required that different groups have different rate limits; this could be the case for example if a group represents a given user in the system, and depending on the user's quota or other factors we would like to have a different rate-limit for it. 8 | 9 | You can use a local group rate limit, which would be used only for the specific group that has the rate-limit set up. For example: 10 | 11 | ```typescript 12 | import { QueuePro } from '@taskforcesh/bullmq-pro'; 13 | 14 | const queue = new QueuePro('myQueue', { connection }); 15 | const groupId = 'my group'; 16 | const maxJobsPerDuration = 100; 17 | 18 | const duration = 1000; // duration in ms. 19 | await queue.setGroupRateLimit(groupId, maxJobsPerDuration, duration); 20 | 21 | ``` 22 | 23 | This code would set a specific rate limit on the group "my group" of max 100 jobs per second. Note that you can still have a ["default" rate-limit](rate-limiting.md) specified for the rest of the groups; the call to `setGroupRateLimit` will therefore allow you to override that rate-limit.
24 | 25 | ### Read more 26 | 27 | * [Local Rate Limit Group API Reference](https://api.bullmq.pro/classes/v7.QueuePro.html#setGroupRateLimit) 28 | 29 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/groups/max-group-size.md: -------------------------------------------------------------------------------- 1 | # Max group size 2 | 3 | It is possible to set a maximum group size. This can be useful if you want to keep the number of jobs within some limits and you can afford to discard new jobs. 4 | 5 | When a group has reached the defined max size, adding new jobs to that group will result in an exception being thrown that you can catch and ignore if you do not care about it. 6 | 7 | You can use the `maxSize` option when adding jobs to a group like this: 8 | 9 | ```typescript 10 | import { QueuePro, GroupMaxSizeExceededError } from '@taskforcesh/bullmq-pro'; 11 | 12 | const queue = new QueuePro('myQueue', { connection }); 13 | const groupId = 'my group'; 14 | try { 15 | await queue.add('paint', { foo: 'bar' }, { 16 | group: { 17 | id: groupId, 18 | maxSize: 7, 19 | }, 20 | }); 21 | } catch (err) { 22 | if (err instanceof GroupMaxSizeExceededError){ 23 | console.log(`Job discarded for group ${groupId}`) 24 | } else { 25 | throw err; 26 | } 27 | } 28 | ``` 29 | 30 | {% hint style="info" %} 31 | The `maxSize` option is not yet available for `addBulk`. 32 | {% endhint %} 33 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/groups/pausing-groups.md: -------------------------------------------------------------------------------- 1 | # Pausing groups 2 | 3 | BullMQ Pro supports pausing groups globally. A group is paused when no workers will pick up any jobs that belong to the paused group.
When you pause a group, the workers that are currently busy processing a job from that group will continue working on that job until it completes (or fails), and then will just keep idling until the group has been resumed. 4 | 5 | Pausing a group is performed by calling the `pauseGroup` method on a [`Queue`](https://api.bullmq.pro/classes/v6.Queue.html#pauseGroup) instance: 6 | 7 | ```typescript 8 | await myQueue.pauseGroup('groupId'); 9 | ``` 10 | 11 | {% hint style="info" %} 12 | Even if the `groupId` does not exist at that time, the `groupId` will be added to our paused list as a group could be ephemeral 13 | {% endhint %} 14 | 15 | {% hint style="warning" %} 16 | `pauseGroup` will return `false` if the group is already paused. 17 | {% endhint %} 18 | 19 | Resuming a group is performed by calling the `resumeGroup` method on a [`Queue`](https://api.bullmq.pro/classes/v6.Queue.html#resumeGroup) instance: 20 | 21 | ```typescript 22 | await myQueue.resumeGroup('groupId'); 23 | ``` 24 | 25 | {% hint style="warning" %} 26 | `resumeGroup` will return `false` if the group does not exist or when the group is already resumed. 27 | {% endhint %} 28 | 29 | ## Read more: 30 | 31 | - 💡 [Pause Group API Reference](https://api.bullmq.pro/classes/v7.Queue.html#pauseGroup) 32 | - 💡 [Resume Group API Reference](https://api.bullmq.pro/classes/v7.Queue.html#resumeGroup) 33 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/groups/prioritized.md: -------------------------------------------------------------------------------- 1 | # Prioritized intra-groups 2 | 3 | BullMQ Pro supports priorities per group. A job is prioritized in a group when group and priority options are provided _together_.
4 | 5 | ```typescript 6 | await myQueue.add( 7 | 'paint', 8 | { foo: 'bar' }, 9 | { 10 | group: { 11 | id: 'groupId', 12 | priority: 10, 13 | }, 14 | }, 15 | ); 16 | ``` 17 | 18 | {% hint style="info" %} 19 | The priorities go from 0 to 2097151, where a higher number means lower priority (as in Unix [processes](https://en.wikipedia.org/wiki/Nice\_\(Unix\))). Thus, jobs without any explicit priority will have the highest priority. 20 | {% endhint %} 21 | 22 | ## Get Counts per Priority for Group 23 | 24 | If you want to get the `count` of jobs in `prioritized` status (priorities higher than 0) or in `waiting` status (priority 0) for specific group, use the **`getCountsPerPriorityForGroup`** method. For example, let's say that you want to get counts for `priority` `1` and `0`: 25 | 26 | ```typescript 27 | const counts = await queue.getCountsPerPriorityForGroup('groupId', [1, 0]); 28 | /* 29 | { 30 | '1': 11, 31 | '0': 10 32 | } 33 | */ 34 | ``` 35 | 36 | ## Read more: 37 | 38 | * 💡 [Add Job API Reference](https://api.bullmq.pro/classes/v7.Queue.html#add) 39 | * 💡 [Get Counts per Priority for Group API Reference](https://api.bullmq.pro/classes/v7.Queue.html#getCountsPerPriorityForGroup) 40 | 41 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/groups/sandboxes-for-groups.md: -------------------------------------------------------------------------------- 1 | # Sandboxes for groups 2 | 3 | It is also possible to use [Sandboxes](../../guide/workers/sandboxed-processors.md) for processing groups. 
It works essentially the same as in standard BullMQ, but you gain access to the "gid" property in the job object sent to your processor, for example: 4 | 5 | ```typescript 6 | import { SandboxedJobPro } from '@taskforcesh/bullmq-pro'; 7 | 8 | module.exports = function (job: SandboxedJobPro) { 9 | expect(job).to.have.property('gid'); 10 | expect(job.opts).to.have.property('group'); 11 | expect(job.opts.group).to.have.property('id'); 12 | expect(job.opts.group.id).to.be.a('string'); 13 | expect(job.opts.group.id).to.equal(job.gid); 14 | }; 15 | ``` 16 | 17 | 18 | 19 | {% hint style="danger" %} 20 | Groups are the only Pro feature supported by Sandboxed processors for now. 21 | {% endhint %} 22 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/install.md: -------------------------------------------------------------------------------- 1 | # Install 2 | 3 | In order to install BullMQ Pro you need to use an NPM token from [taskforce.sh](https://taskforce.sh). 4 | 5 | With the token at hand just update or create a `.npmrc` file in your app repository with the following contents: 6 | 7 | ``` 8 | @taskforcesh:registry=https://npm.taskforce.sh/ 9 | //npm.taskforce.sh/:_authToken=${NPM_TASKFORCESH_TOKEN} 10 | always-auth=true 11 | ``` 12 | 13 | where `NPM_TASKFORCESH_TOKEN` is an environment variable pointing to your token. 14 | 15 | Then just install the `@taskforcesh/bullmq-pro` package as you would install any other package, with `npm`, `yarn` or `pnpm`: 16 | 17 | ``` 18 | yarn add @taskforcesh/bullmq-pro 19 | ``` 20 | 21 | In order to use BullMQ Pro just import the _Pro_ versions of the classes.
These classes are subclasses of the open source BullMQ library with new functionality: 22 | 23 | ```typescript 24 | import { QueuePro, WorkerPro } from '@taskforcesh/bullmq-pro'; 25 | 26 | const queue = new QueuePro('myQueue'); 27 | 28 | const worker = new WorkerPro('myQueue', async job => { 29 | // Process job 30 | }); 31 | ``` 32 | 33 | ### Using Docker 34 | 35 | If you use docker you must make sure that you also add the `.npmrc` file above in your `Dockerfile`: 36 | 37 | ```docker 38 | WORKDIR /app 39 | 40 | ADD .npmrc /app/.npmrc 41 | ``` 42 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/introduction.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: The commercial supported version of BullMQ 3 | --- 4 | 5 | # Introduction 6 | 7 | BullMQ Pro, the commercial version of BullMQ, boasts advanced features and offers dedicated support from the library's authors. You can easily install this package as a drop-in replacement for the standard BullMQ to access these new features. 8 | 9 | The BullMQ Pro version continues to evolve, with more features being added regularly. You can check the [roadmap](https://github.com/taskforcesh/bullmq-pro-support/projects/1) section to gain insight into what to expect in the forthcoming months. 10 | 11 | 12 | 13 | ![](<../.gitbook/assets/image (4) (1).png>) 14 | 15 | ### Commercial License 16 | 17 | BullMQ Pro uses a per-organization licensing model, allowing unlimited use across all your projects. You can request a free trial token on [this page](https://taskforce.sh/account/bullmqpro), allowing you to evaluate its value. If you find it beneficial, consider subscribing under the '[Subscriptions](https://taskforce.sh/account/subscriptions)' tab. 
18 | 19 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/nestjs/queue-events-listeners.md: -------------------------------------------------------------------------------- 1 | # Queue Events Listeners 2 | 3 | To register a `QueueEvents` instance, you need to use `QueueEventsListener` decorator: 4 | 5 | ```typescript 6 | import { 7 | QueueEventsListener, 8 | QueueEventsHost, 9 | OnQueueEvent, 10 | } from '@taskforcesh/nestjs-bullmq-pro'; 11 | 12 | @QueueEventsListener('queueName') 13 | export class TestQueueEvents extends QueueEventsHost { 14 | @OnQueueEvent('completed') 15 | onCompleted({ 16 | jobId, 17 | }: { 18 | jobId: string; 19 | returnvalue: string; 20 | prev?: string; 21 | }) { 22 | // do some stuff 23 | } 24 | } 25 | ``` 26 | 27 | And then register it as a provider: 28 | 29 | ```typescript 30 | @Module({ 31 | imports: [ 32 | BullModule.registerQueue({ 33 | name: 'queueName', 34 | connection: { 35 | host: '0.0.0.0', 36 | port: 6380, 37 | }, 38 | }), 39 | ], 40 | providers: [TestQueueEvents], 41 | }) 42 | export class AppModule {} 43 | ``` 44 | 45 | ## Read more: 46 | 47 | - 💡 [Queues Technique](https://docs.nestjs.com/techniques/queues) 48 | - 💡 [Register Queue API Reference](https://nestjs.bullmq.pro/classes/BullModule.html#registerQueue) 49 | - 💡 [Queue Events Pro Listener API Reference](https://api.bullmq.pro/interfaces/v6.QueueEventsListenerPro.html) 50 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/new-releases.md: -------------------------------------------------------------------------------- 1 | # New Releases 2 | 3 | If you want to get notifications when we do a new release of BullMQ Pro, please enable notifications on this Github issue where we automatically create a new comment for every new release: 4 | 5 | [https://github.com/taskforcesh/bullmq-pro-support/issues/86](https://github.com/taskforcesh/bullmq-pro-support/issues/86) 
6 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/observables/cancelation.md: -------------------------------------------------------------------------------- 1 | # Cancellation 2 | 3 | As mentioned, `Observables` allow for clean cancellation. Currently we support a TTL value that defines the maximum processing time before the job is finally cancelled: 4 | 5 | ```typescript 6 | import { WorkerPro } from '@taskforcesh/bullmq-pro'; 7 | 8 | const worker = new WorkerPro(queueName, processor, { 9 | ttl: 100, 10 | connection, 11 | }); 12 | ``` 13 | 14 | This parameter allows to provide `ttl` values per job name too: 15 | 16 | ```typescript 17 | const worker = new WorkerPro(queueName, processor, { 18 | ttl: { test1: 100, test2: 200 }, 19 | connection, 20 | }); 21 | ``` 22 | -------------------------------------------------------------------------------- /docs/gitbook/bullmq-pro/support.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Commercial support 3 | --- 4 | 5 | # Support 6 | 7 | With your BullMQ Pro subscription, you receive commercial support. For the most recent and detailed terms, please refer to the "[BullMQ Pro](https://taskforce.sh/account/bullmqpro)" tab in your account. Here's a broad overview of what's included: 8 | 9 | * Email-based support: Customers may submit issues or queries via email to our dedicated support email address (support@taskforce.sh). 10 | * Response time: We aim to respond to support emails within one business day (business days are defined in section 4). However, response times may vary, and quicker responses may occur on a best-effort basis. 11 | * Resolution time: The time it takes to resolve an issue depends on the nature of the issue. Simple queries or requests for advice may be resolved quickly. More complex issues, such as bugs, may take several days or more to resolve. 
In some cases, we may not be able to fully resolve an issue but will provide a workaround. 12 | -------------------------------------------------------------------------------- /docs/gitbook/guide/events/create-custom-events.md: -------------------------------------------------------------------------------- 1 | # Create Custom Events 2 | 3 | In BullMQ, creating a generic distributed realtime event emitter is possible by using our **QueueEventsProducer** class. 4 | 5 | Consumers must use **QueueEvents** class to subscribe to those events that they are interested in. 6 | 7 | ```typescript 8 | const queueName = 'customQueue'; 9 | const queueEventsProducer = new QueueEventsProducer(queueName, { 10 | connection, 11 | }); 12 | const queueEvents = new QueueEvents(queueName, { 13 | connection, 14 | }); 15 | 16 | interface CustomListener extends QueueEventsListener { 17 | example: (args: { custom: string }, id: string) => void; 18 | } 19 | queueEvents.on('example', async ({ custom }) => { 20 | // custom logic 21 | }); 22 | 23 | interface CustomEventPayload { 24 | eventName: string; 25 | custom: string; 26 | } 27 | 28 | await queueEventsProducer.publishEvent({ 29 | eventName: 'example', 30 | custom: 'value', 31 | }); 32 | ``` 33 | 34 | Only eventName attribute is required. 35 | 36 | {% hint style="warning" %} 37 | Some event names are reserved from [Queue Listener API Reference](https://api.docs.bullmq.io/interfaces/v5.QueueListener.html). 
38 | {% endhint %} 39 | 40 | ## Read more: 41 | 42 | * 💡 [Queue Events API Reference](https://api.docs.bullmq.io/classes/v5.QueueEvents.html) 43 | * 💡 [Queue Events Listener API Reference](https://api.docs.bullmq.io/interfaces/v5.QueueEventsListener.html) 44 | * 💡 [Queue Events Producer API Reference](https://api.docs.bullmq.io/classes/v5.QueueEventsProducer.html) 45 | -------------------------------------------------------------------------------- /docs/gitbook/guide/flows/adding-bulks.md: -------------------------------------------------------------------------------- 1 | # Adding flows in bulk 2 | 3 | Sometimes it is necessary to atomically add flows in bulk. For example, there could be a requirement that all the flows must be created or none of them. Also, adding flows in bulk can be faster since it reduces the number of roundtrips to Redis: 4 | 5 | ```typescript 6 | import { FlowProducer } from 'bullmq'; 7 | 8 | const flow = new FlowProducer({ connection }); 9 | 10 | const trees = await flow.addBulk([ 11 | { 12 | name: 'root-job-1', 13 | queueName: 'rootQueueName-1', 14 | data: {}, 15 | children: [ 16 | { 17 | name, 18 | data: { idx: 0, foo: 'bar' }, 19 | queueName: 'childrenQueueName-1', 20 | }, 21 | ], 22 | }, 23 | { 24 | name: 'root-job-2', 25 | queueName: 'rootQueueName-2', 26 | data: {}, 27 | children: [ 28 | { 29 | name, 30 | data: { idx: 1, foo: 'baz' }, 31 | queueName: 'childrenQueueName-2', 32 | }, 33 | ], 34 | }, 35 | ]); 36 | ``` 37 | 38 | This call can only succeed or fail, and all or none of the jobs will be added. 
39 | 40 | ## Read more: 41 | 42 | - 💡 [Add Bulk API Reference](https://api.docs.bullmq.io/classes/v5.FlowProducer.html#addBulk) 43 | -------------------------------------------------------------------------------- /docs/gitbook/guide/flows/remove-child-dependency.md: -------------------------------------------------------------------------------- 1 | # Remove Child Dependency 2 | 3 | In some situations, you may have a parent job and need to remove the dependency of one of its children. 4 | 5 | The pattern to solve this requirement consists of using the **removeChildDependency** method. It will make sure that, if the job is the last pending child, its parent is moved to _waiting_, and the job won't be listed in the unprocessed list of the parent. 6 | 7 | ```typescript 8 | const flow = new FlowProducer({ connection }); 9 | 10 | const originalTree = await flow.add({ 11 | name: 'root-job', 12 | queueName: 'topQueueName', 13 | data: {}, 14 | children: [ 15 | { 16 | name, 17 | data: { idx: 0, foo: 'bar' }, 18 | queueName: 'childrenQueueName', 19 | opts: {}, 20 | }, 21 | ], 22 | }); 23 | 24 | await originalTree.children[0].job.removeChildDependency(); 25 | ``` 26 | 27 | {% hint style="warning" %} 28 | As soon as a **child** calls this method, it will verify if it has an existing parent; if not, it'll throw an error. 29 | {% endhint %} 30 | 31 | Failed or completed children using this option won't generate any removal as they won't be part of the unprocessed list: 32 | -------------------------------------------------------------------------------- /docs/gitbook/guide/flows/remove-dependency.md: -------------------------------------------------------------------------------- 1 | # Remove Dependency 2 | 3 | In some situations, you may have a parent job and need to remove the relationship when one of its children fails. 4 | 5 | The pattern to solve this requirement consists of using the **removeDependencyOnFailure** option.
This option will make sure that when a job fails, the dependency is removed from the parent, so the parent will complete without waiting for the failed children. 6 | 7 | ```typescript 8 | const flow = new FlowProducer({ connection }); 9 | 10 | const originalTree = await flow.add({ 11 | name: 'root-job', 12 | queueName: 'topQueueName', 13 | data: {}, 14 | children: [ 15 | { 16 | name, 17 | data: { idx: 0, foo: 'bar' }, 18 | queueName: 'childrenQueueName', 19 | opts: { removeDependencyOnFailure: true }, 20 | children: [ 21 | { 22 | name, 23 | data: { idx: 1, foo: 'bah' }, 24 | queueName: 'grandChildrenQueueName', 25 | }, 26 | { 27 | name, 28 | data: { idx: 2, foo: 'baz' }, 29 | queueName: 'grandChildrenQueueName', 30 | }, 31 | ], 32 | }, 33 | { 34 | name, 35 | data: { idx: 3, foo: 'foo' }, 36 | queueName: 'childrenQueueName', 37 | }, 38 | ], 39 | }); 40 | ``` 41 | 42 | {% hint style="info" %} 43 | As soon as a **child** with this option fails, the parent job will be moved to a waiting state only if there are no more pending children. 44 | {% endhint %} 45 | 46 | ## Read more: 47 | 48 | - 💡 [Add Flow API Reference](https://api.docs.bullmq.io/classes/v5.FlowProducer.html#add) 49 | -------------------------------------------------------------------------------- /docs/gitbook/guide/introduction.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | BullMQ is based on 4 classes that together can be used to resolve many different problems. These classes are [_**Queue**_](https://api.docs.bullmq.io/classes/v5.Queue.html), [_**Worker**_](https://api.docs.bullmq.io/classes/v5.Worker.html), [_**QueueEvents**_](https://api.docs.bullmq.io/classes/v5.QueueEvents.html) and [_**FlowProducer**_](https://api.docs.bullmq.io/classes/v5.FlowProducer.html). 4 | 5 | The first class you should know about is the _Queue_ class.
This class represents a queue and can be used for adding _**jobs**_ to the queue as well as some other basic manipulation such as pausing, cleaning or getting data from the queue. 6 | 7 | Jobs in BullMQ are basically a user created data structure that can be stored in the queue. Jobs are processed by _**workers**_. A _Worker_ is the second class you should be aware about. Workers are instances capable of processing jobs. You can have many workers, either running in the same Node.js process, or in separate processes as well as in different machines. They will all consume jobs from the queue and mark the jobs as completed or failed. 8 | -------------------------------------------------------------------------------- /docs/gitbook/guide/jobs/README.md: -------------------------------------------------------------------------------- 1 | # Jobs 2 | 3 | Queues can hold different types of jobs, which determine how and when they are processed. In this section, we will describe them in detail. 4 | 5 | An important thing to consider is that you can mix the different job types in the same queue, so you can add FIFO jobs, and at any moment add a LIFO or a delayed job. 6 | 7 | ## Read more: 8 | 9 | - 💡 [Job API Reference](https://api.docs.bullmq.io/classes/v5.Job.html) 10 | -------------------------------------------------------------------------------- /docs/gitbook/guide/jobs/job-data.md: -------------------------------------------------------------------------------- 1 | # Job Data 2 | 3 | Every job can have its own custom data. 
The data is stored in the **`data`** attribute of the job: 4 | 5 | {% tabs %} 6 | {% tab title="TypeScript" %} 7 | ```typescript 8 | import { Queue } from 'bullmq'; 9 | 10 | const myQueue = new Queue('paint'); 11 | 12 | const job = await myQueue.add('wall', { color: 'red' }); 13 | 14 | job.data; // { color: 'red' } 15 | ``` 16 | {% endtab %} 17 | 18 | {% tab title="Python" %} 19 | ```python 20 | from bullmq import Queue 21 | 22 | queue = Queue('paint') 23 | 24 | job = await queue.add('wall', {'color': 'red'}) 25 | 26 | job.data # { color: 'red' } 27 | ``` 28 | {% endtab %} 29 | {% endtabs %} 30 | 31 | ## Update data 32 | 33 | If you want to change the data after inserting a job, just use the **`updateData`** method. For example: 34 | 35 | {% tabs %} 36 | {% tab title="TypeScript" %} 37 | ```typescript 38 | const job = await Job.create(queue, 'wall', { color: 'red' }); 39 | 40 | await job.updateData({ 41 | color: 'blue', 42 | }); 43 | 44 | job.data; // { color: 'blue' } 45 | ``` 46 | {% endtab %} 47 | 48 | {% tab title="Python" %} 49 | ```python 50 | from bullmq import Queue 51 | 52 | queue = Queue('paint') 53 | 54 | job = await queue.add('wall', {'color': 'red'}) 55 | 56 | await job.updateData({'color': 'blue'}) 57 | job.data # { color: 'blue' } 58 | ``` 59 | {% endtab %} 60 | {% endtabs %} 61 | 62 | ## Read more: 63 | 64 | * 💡 [Update Data API Reference](https://api.docs.bullmq.io/classes/v5.Job.html#updateData) 65 | -------------------------------------------------------------------------------- /docs/gitbook/guide/jobs/lifo.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: 'Last-in, First Out' 3 | --- 4 | 5 | # LIFO 6 | 7 | In some cases, it is useful to process jobs in a LIFO \(_Last-in, First-Out_\) fashion. This means that the newest jobs added to the queue will be processed **before** the older ones. 
8 | 9 | ```typescript 10 | import { Queue } from 'bullmq'; 11 | 12 | const myQueue = new Queue('Paint'); 13 | 14 | // Add a job that will be processed before all others 15 | await myQueue.add('wall', { color: 'pink' }, { lifo: true }); 16 | ``` 17 | -------------------------------------------------------------------------------- /docs/gitbook/guide/nestjs/queue-events-listeners.md: -------------------------------------------------------------------------------- 1 | # Queue Events Listeners 2 | 3 | To register a QueueEvents instance, you need to use the **`QueueEventsListener`** decorator: 4 | 5 | ```typescript 6 | import { 7 | QueueEventsListener, 8 | QueueEventsHost, 9 | OnQueueEvent, 10 | } from '@nestjs/bullmq'; 11 | 12 | @QueueEventsListener('queueName') 13 | export class TestQueueEvents extends QueueEventsHost { 14 | @OnQueueEvent('completed') 15 | onCompleted({ 16 | jobId, 17 | }: { 18 | jobId: string; 19 | returnvalue: string; 20 | prev?: string; 21 | }) { 22 | // do some stuff 23 | } 24 | } 25 | ``` 26 | 27 | And then register it as a provider: 28 | 29 | ```typescript 30 | @Module({ 31 | imports: [ 32 | BullModule.registerQueue({ 33 | name: 'queueName', 34 | connection: { 35 | host: '0.0.0.0', 36 | port: 6380, 37 | }, 38 | }), 39 | ], 40 | providers: [TestQueueEvents], 41 | }) 42 | export class AppModule {} 43 | ``` 44 | 45 | ## Read more: 46 | 47 | - 💡 [Queues Technique](https://docs.nestjs.com/techniques/queues) 48 | - 💡 [Register Queue API Reference](https://nestjs.bullmq.pro/classes/BullModule.html#registerQueue) 49 | - 💡 [Queue Events Listener API Reference](https://api.docs.bullmq.io/interfaces/v5.QueueEventsListener.html) 50 | -------------------------------------------------------------------------------- /docs/gitbook/guide/queues/adding-bulks.md: -------------------------------------------------------------------------------- 1 | # Adding jobs in bulk 2 | 3 | Sometimes it is necessary to add many jobs atomically. 
For example, there could be a requirement that all the jobs must be placed in the queue or none of them. Also, adding jobs in bulk can be faster since it reduces the number of roundtrips to Redis: 4 | 5 | {% tabs %} 6 | {% tab title="TypeScript" %} 7 | 8 | ```typescript 9 | import { Queue } from 'bullmq'; 10 | 11 | const queue = new Queue('paint'); 12 | 13 | const name = 'jobName'; 14 | const jobs = await queue.addBulk([ 15 | { name, data: { paint: 'car' } }, 16 | { name, data: { paint: 'house' } }, 17 | { name, data: { paint: 'boat' } }, 18 | ]); 19 | ``` 20 | 21 | {% endtab %} 22 | 23 | {% tab title="Python" %} 24 | 25 | ```python 26 | from bullmq import Queue 27 | 28 | queue = Queue("paint") 29 | 30 | jobs = await queue.addBulk([ 31 | { "name": "jobName", "data": { "paint": "car" } }, 32 | { "name": "jobName", "data": { "paint": "house" } }, 33 | { "name": "jobName", "data": { "paint": "boat" } } 34 | ]) 35 | ``` 36 | 37 | {% endtab %} 38 | {% endtabs %} 39 | 40 | This call can only succeed or fail, and all or none of the jobs will be added. 41 | 42 | ## Read more: 43 | 44 | - 💡 [Add Bulk API Reference](https://api.docs.bullmq.io/classes/v5.Queue.html#addBulk) 45 | -------------------------------------------------------------------------------- /docs/gitbook/guide/queues/global-concurrency.md: -------------------------------------------------------------------------------- 1 | # Global Concurrency 2 | 3 | The global concurrency factor is a queue option that determines how many jobs are allowed to be processed in parallel across all your worker instances. 
4 | 5 | ```typescript 6 | import { Queue } from 'bullmq'; 7 | 8 | await queue.setGlobalConcurrency(4); 9 | ``` 10 | 11 | And in order to get this value: 12 | 13 | ```typescript 14 | const globalConcurrency = await queue.getGlobalConcurrency(); 15 | ``` 16 | 17 | {% hint style="info" %} 18 | Note that if you choose a concurrency level in your workers, it will not override the global one, it will just be the maximum jobs a given worker can process in parallel but never more than the global one. 19 | {% endhint %} 20 | 21 | ## Read more: 22 | 23 | - 💡 [Set Global Concurrency API Reference](https://api.docs.bullmq.io/classes/v5.Queue.html#setGlobalConcurrency) 24 | - 💡 [Get Global Concurrency API Reference](https://api.docs.bullmq.io/classes/v5.Queue.html#getGlobalConcurrency) 25 | -------------------------------------------------------------------------------- /docs/gitbook/guide/redis-tm-compatibility/README.md: -------------------------------------------------------------------------------- 1 | # Redis™ Compatibility 2 | 3 | There are several alternatives for Redis and even though BullMQ is full Redis™ compliant with version 6.2.0 or newer, not all the alternatives are going to work properly. In this section we present the vendors that officially support BullMQ and that we regularly test to verify they keep staying compatible. 4 | 5 | -------------------------------------------------------------------------------- /docs/gitbook/guide/redis-tm-compatibility/dragonfly.md: -------------------------------------------------------------------------------- 1 | # Dragonfly 2 | 3 | [Dragonfly](https://www.dragonflydb.io/) offers a drop-in replacement for Redis™, boasting a much faster and more memory-efficient implementation of several data structures used by BullMQ. It also enables the utilization of all available cores in your CPUs. Check [this article](https://bullmq.io/news/101023/dragonfly-compatibility/) for some performance results. 
4 | 5 | To fully leverage Dragonfly's capabilities, specific steps are necessary. Primarily, you should name your queues using curly braces. This naming convention allows Dragonfly to assign a thread to each queue. For instance, if your queue is named `myqueue,`rename it to `{myqueue}`. 6 | 7 | If you manage multiple queues, this approach enables you to allocate different CPU cores to each queue, significantly enhancing performance. Even with a single queue, you can still exploit multi-core advantages in some cases. Consider splitting your queue into multiple ones, like`{myqueue-1}`, `{myqueue-2}`, etc., and distribute jobs randomly or using a round-robin method. 8 | 9 | {% hint style="info" %} 10 | Be aware that certain features like priorities and rate-limiting might not function across multiple queues. Your specific requirements will determine whether you can divide a single queue in this manner. 11 | {% endhint %} 12 | 13 | For comprehensive instructions and the necessary flags to optimize your Dragonfly instance for BullMQ, please consult the [official integration guide](https://www.dragonflydb.io/docs/integrations/bullmq). 14 | -------------------------------------------------------------------------------- /docs/gitbook/guide/redis-tm-hosting/README.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | For BullMQ you are going to need a proper Redis™ hosting solution. In this 4 | section we provide instructions on how to use some of the most popular ones. 5 | --- 6 | 7 | # Redis™ hosting 8 | -------------------------------------------------------------------------------- /docs/gitbook/guide/redis-tm-hosting/aws-memorydb.md: -------------------------------------------------------------------------------- 1 | # AWS MemoryDB 2 | 3 | AWS provides a Redis™ 7 compatible managed database that is easy to use and is fully compatible with BullMQ. 
4 | 5 | There are some considerations to take into account when using MemoryDB though. 6 | 7 | - MemoryDB only works in Cluster mode. So you need to use "hash tags" so that the queues get attached to a given cluster node ([read more here](../../bull/patterns/redis-cluster.md)). 8 | - MemoryDB can only be accessed within an AWS VPC, so you cannot access the Redis™ cluster outside of AWS. 9 | 10 | The easiest way to use MemoryDB with BullMQ is to first instantiate an IORedis Cluster instance, and then use that connection as an option to your workers or queue instances, for example: 11 | 12 | ```typescript 13 | import { Cluster } from 'ioredis'; 14 | import { Worker } from 'bullmq'; 15 | 16 | const connection = new Cluster( 17 | [ 18 | { 19 | host: 'clustercfg.xxx.amazonaws.com', 20 | port: 6379, 21 | }, 22 | ], 23 | { 24 | tls: {}, 25 | }, 26 | ); 27 | 28 | const worker = new Worker( 29 | 'myqueue', 30 | async (job: Job) => { 31 | // Do some useful stuff 32 | }, 33 | { connection }, 34 | ); 35 | 36 | // ... 37 | 38 | // Do not forget to close the connection as well as the worker when shutting down 39 | await worker.close(); 40 | await connection.quit(); 41 | ``` 42 | -------------------------------------------------------------------------------- /docs/gitbook/guide/telemetry/README.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Enabling Telemetry for your BullMQ based applications 3 | --- 4 | 5 | # Telemetry 6 | 7 | BullMQ provides a Telemetry interface that can be used to integrate it with any external telemetry backends. Currently we support the [OpenTelemetry](https://opentelemetry.io) specification, which is the new de-facto standard for telemetry purposes, however the interface is flexible enough to support any other backends in the future. 8 | 9 | Telemetry is very useful for large applications where you want to get a detailed and general overview of the system.
For BullMQ it helps to gain insight into the different statuses a job may be in during its complete lifecycle. In a large application it helps to track the source of the jobs and all the interactions the jobs or messages may perform with other parts of the system. 10 | 11 | -------------------------------------------------------------------------------- /docs/gitbook/guide/telemetry/running-jaeger.md: -------------------------------------------------------------------------------- 1 | # Running Jaeger 2 | 3 | The easiest way to run Jaeger is by using Docker compose. If you have docker installed, it is a matter of running this docker-compose.yaml file: 4 | 5 | ```yaml 6 | services: 7 | jaeger: 8 | image: jaegertracing/all-in-one:latest 9 | container_name: BullMQ_with_opentelemetry_jaeger 10 | ports: 11 | - '4318:4318' 12 | - '16686:16686' 13 | 14 | ``` 15 | 16 | Note that we need to expose 2 ports here: the first one (4318) is the endpoint to export our traces and the second one (16686) is our UI. 17 | 18 | You can now just run this service with: 19 | 20 | ``` 21 | docker-compose up 22 | ``` 23 | 24 | In a few seconds the image will be up and running. You can verify that it is working by opening a browser window and pointing it to [http://localhost:16686](http://localhost:16686/search) 25 | 26 | As no traces have been created yet, you will get a quite empty dashboard: 27 | 28 |
29 | 30 | -------------------------------------------------------------------------------- /docs/gitbook/guide/workers/graceful-shutdown.md: -------------------------------------------------------------------------------- 1 | # Graceful shutdown 2 | 3 | BullMQ supports graceful shutdowns of workers. This is important so that we can minimize stalled jobs when a worker for some reason must be shutdown. But note that even in the event of a "ungraceful shutdown", the stalled mechanism in BullMQ allows for new workers to pick up stalled jobs and continue working on them. 4 | 5 | {% hint style="danger" %} 6 | Prior to BullMQ 2.0, in order for stalled jobs to be picked up by other workers you need to have a [`QueueScheduler`](../queuescheduler.md) class running in the system. 7 | 8 | From BullMQ 2.0 and onwards, the `QueueScheduler` is not needed anymore, so the information above is only valid for older versions. 9 | {% endhint %} 10 | 11 | In order to perform a shutdown just call the _**`close`**_ method: 12 | 13 | ```typescript 14 | await worker.close(); 15 | ``` 16 | 17 | The above call will mark the worker as _closing_ so it will not pick up new jobs, and at the same time it will wait for all the current jobs to be processed (or failed). This call will not timeout by itself, so you should make sure that your jobs finalize in a timely manner. If this call fails for some reason or it is not able to complete, the pending jobs will be marked as stalled and processed by other workers (if correct stalled options are configured on the [`QueueScheduler`](https://api.docs.bullmq.io/interfaces/v1.QueueSchedulerOptions.html)). 18 | -------------------------------------------------------------------------------- /docs/gitbook/guide/workers/pausing-queues.md: -------------------------------------------------------------------------------- 1 | # Pausing queues 2 | 3 | BullMQ supports pausing queues _globally_ or _locally_. 
When a queue is paused _globally_ no workers will pick up any jobs from the queue. When you pause a queue, the workers that are currently busy processing a job will continue working on that job until it completes (or fails), and then will keep idling until the queue is unpaused. 4 | 5 | Pausing a queue is performed by calling the _**`pause`**_ method on a [queue](https://api.docs.bullmq.io/classes/v5.Queue.html) instance: 6 | 7 | ```typescript 8 | await myQueue.pause(); 9 | ``` 10 | 11 | It is also possible to _locally_ pause a given worker instance. This pause works in a similar way as the global pause in the sense that the worker will conclude processing the jobs it has already started but will not process any new ones: 12 | 13 | ```typescript 14 | await myWorker.pause(); 15 | ``` 16 | 17 | The call above will wait for all the jobs currently being processed by this worker to complete (or fail). If you do not want to wait for current jobs to complete before the call completes you can pass `true` to pause the worker **ignoring any running jobs**: 18 | 19 | ```typescript 20 | await myWorker.pause(true); 21 | ``` 22 | 23 | ## Read more: 24 | 25 | - 💡 [Pause Queue API Reference](https://api.docs.bullmq.io/classes/v5.Queue.html#pause) 26 | - 💡 [Pause Worker API Reference](https://api.docs.bullmq.io/classes/v5.Worker.html#pause) 27 | -------------------------------------------------------------------------------- /docs/gitbook/guide/workers/stalled-jobs.md: -------------------------------------------------------------------------------- 1 | # Stalled Jobs 2 | 3 | Due to the nature of NodeJS, which is \(in general\) single threaded and consists of an event loop to handle the asynchronous operations, the process function needs to be written carefully so that the CPU is not occupied for a long time. 4 | 5 | When a job reaches a worker and starts to be processed, BullMQ will place a lock on this job to protect the job from being modified by any other client or worker. 
At the same time, the worker needs to periodically notify BullMQ that it is still working on the job. 6 | 7 | {% hint style="info" %} 8 | This period is configured with the `stalledInterval` setting, which normally you should not need to modify. 9 | {% endhint %} 10 | 11 | However, if the CPU is very busy (due to the process being very CPU intensive), the worker may not have time to renew the lock and tell the queue that it is still working on the job, which is likely to result in the job being marked as _stalled_. 12 | 13 | A stalled job is moved back to the waiting status and will be processed again by another worker, or if it has reached its maximum number of stalls, it will be moved to the _failed_ set. 14 | 15 | Therefore, it is very important to make sure the workers return control to the NodeJS event loop often enough to avoid this kind of problem. 16 | 17 | -------------------------------------------------------------------------------- /docs/gitbook/index.md: -------------------------------------------------------------------------------- 1 | # API Reference 2 | 3 | [Home](index.md) 4 | 5 | ## API Reference 6 | 7 | ## Packages 8 | 9 | | Package | Description | 10 | | :--- | :--- | 11 | | [bullmq](https://github.com/taskforcesh/bullmq/blob/master/docs/gitbook/api/bullmq.md) | | 12 | 13 | -------------------------------------------------------------------------------- /docs/gitbook/patterns/adding-bulks.md: -------------------------------------------------------------------------------- 1 | # Adding jobs in bulk across different queues 2 | 3 | Sometimes it is necessary to atomically add jobs to different queues in bulk. For example, there could be a requirement that all the jobs must be created or none of them. Also, adding jobs in bulk can be faster, since it reduces the number of roundtrips to Redis: 4 | 5 | You may be thinking of [`queue.addBulk`](https://api.docs.bullmq.io/classes/v5.Queue.html#addBulk), but this method only adds jobs to a single queue.
Another option is [`flowProducer.addBulk`](https://api.docs.bullmq.io/classes/v5.FlowProducer.html#addBulk), so let's see an example: 6 | 7 | ```typescript 8 | import { FlowProducer } from 'bullmq'; 9 | 10 | const flow = new FlowProducer({ connection }); 11 | 12 | const trees = await flow.addBulk([ 13 | { 14 | name: 'job-1', 15 | queueName: 'queueName-1', 16 | data: {} 17 | }, 18 | { 19 | name: 'job-2', 20 | queueName: 'queueName-2', 21 | data: {} 22 | }, 23 | ]); 24 | ``` 25 | 26 | It is possible to add individual jobs without children. 27 | 28 | This call can only succeed or fail, and all or none of the jobs will be added. 29 | 30 | ## Read more: 31 | 32 | - 💡 [Add Bulk API Reference](https://api.docs.bullmq.io/classes/v5.FlowProducer.html#addBulk) 33 | -------------------------------------------------------------------------------- /docs/gitbook/patterns/failing-fast-when-redis-is-down.md: -------------------------------------------------------------------------------- 1 | # Failing fast when Redis is down 2 | 3 | By design, BullMQ reconnects to Redis automatically. If jobs are added to a queue while the queue instance is disconnected from Redis, the `add` command will not fail; instead, the call will keep waiting for a reconnection to occur until it can complete. 4 | 5 | This behavior is not always desirable; for example, if you have implemented a REST API that results in a call to `add`, you do not want to keep the HTTP call busy while `add` is waiting for the queue to reconnect to Redis. 
In this case, you can pass the option `enableOfflineQueue: false`, so that `ioredis` do not queue the commands and instead throws an exception: 6 | 7 | ```typescript 8 | const myQueue = new Queue("transcoding", { 9 | connection: { 10 | enableOfflineQueue: false, 11 | }, 12 | }); 13 | 14 | app.post("/jobs", async (req, res) => { 15 | try { 16 | const job = await myQueue.add("myjob", { req.body }); 17 | res.status(201).json(job.id); 18 | }catch(err){ 19 | res.status(503).send(err); 20 | } 21 | }) 22 | ``` 23 | 24 | Using this approach, the caller can catch the exception and act upon it depending on its requirements (for example, retrying the call or giving up). 25 | 26 | {% hint style="danger" %} 27 | Currently, there is a limitation in that the Redis instance must at least be online while the queue is being instantiated. 28 | {% endhint %} 29 | -------------------------------------------------------------------------------- /docs/gitbook/patterns/flows.md: -------------------------------------------------------------------------------- 1 | # Flows 2 | 3 | {% hint style="warning" %} 4 | The following pattern, although still useful, has been mostly super-seeded by the new [Flows](../guide/flows/) functionality 5 | {% endhint %} 6 | 7 | In some situations, you may need to execute a flow of several actions, any of which could fail. For example, you may need to update a database, make calls to external services, or any other kind of asynchronous call. 8 | 9 | Sometimes it may not be possible to create an [idempotent job](idempotent-jobs.md) that can execute all these actions again in the case one of them failed for any reason. Instead, we may want to be able to only re-execute the action that failed and continue executing the rest of the actions that have not yet been executed. 10 | 11 | The pattern to solve this issue consists of dividing the flow of actions into one queue for every action. 
When the first action completes, it places the next action as a job in its corresponding queue. 12 | -------------------------------------------------------------------------------- /docs/gitbook/patterns/idempotent-jobs.md: -------------------------------------------------------------------------------- 1 | # Idempotent jobs 2 | 3 | In order to take advantage of [the ability to retry failed jobs](../guide/retrying-failing-jobs.md), your jobs should be designed with failure in mind. 4 | 5 | This means that it should not make a difference to the final state of the system if a job successfully completes on its first attempt, or if it fails initially and succeeds when retried. This is called _Idempotence_. 6 | 7 | To achieve this behaviour, your jobs should be as atomic and simple as possible. Performing many different actions \(such as database updates, API calls, ...\) at once makes it hard to keep track of the process flow and, if needed, rollback partial progress when an exception occurs. 8 | 9 | Simpler jobs also means simpler debugging, identifying bottlenecks, etc. 10 | 11 | If necessary, split complex jobs [as described in the flow pattern](flows.md). 12 | -------------------------------------------------------------------------------- /docs/gitbook/patterns/named-processor.md: -------------------------------------------------------------------------------- 1 | # Named processor 2 | 3 | When a Worker is instantiated, the most common usage is to specify a process function. 
4 | 5 | Sometimes however, it is useful to be able to specify more than one function to process a job for a specific condition: 6 | 7 | ```typescript 8 | const worker = new Worker( 9 | 'queueName', 10 | async job => { 11 | switch (job.name) { 12 | case 'taskType1': { 13 | await doSomeLogic1(); 14 | break; 15 | } 16 | case 'taskType2': { 17 | await doSomeLogic2(); 18 | break; 19 | } 20 | } 21 | }, 22 | { connection }, 23 | ); 24 | ``` 25 | 26 | You could use a simple switch case to differentiate your logic, in this example we are using the job name. 27 | 28 | {% hint style="warning" %} 29 | This was a feature in the Bull package, but it creates a lot of confusion, so in order to provide an alternative, you can use this pattern. See [#297](https://github.com/taskforcesh/bullmq/issues/297) and [#69](https://github.com/taskforcesh/bullmq/issues/69) as reference 30 | {% endhint %} 31 | -------------------------------------------------------------------------------- /docs/gitbook/patterns/redis-cluster.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Important considerations when using Redis™ Cluster mode. 3 | --- 4 | 5 | # Redis Cluster 6 | 7 | Bull internals require atomic operations that span different keys. This behavior breaks Redis's rules for cluster configurations. However, it is still possible to use a cluster environment by using the proper bull prefix option as a cluster "hash tag". Hash tags are used to guarantee that certain keys are placed in the same hash slot, read more about hash tags in the [redis cluster tutorial](https://redis.io/topics/cluster-tutorial). A hash tag is defined with brackets. I.e. a key that has a substring inside brackets will use that substring to determine in which hash slot the key will be placed. 8 | 9 | In summary, to make bull compatible with Redis cluster, use a queue prefix inside brackets. 
For example: 10 | 11 | You can use two approaches in order to make the Queues compatible with Cluster. Either define a queue prefix: 12 | 13 | ```typescript 14 | const queue = new Queue('cluster', { 15 | prefix: '{myprefix}', 16 | }); 17 | ``` 18 | 19 | or wrap the queue name itself: 20 | 21 | ```typescript 22 | const queue = new Queue('{cluster}'); 23 | ``` 24 | 25 | Note that If you use several queues in the same cluster, you should use different prefixes so that the queues are evenly placed in the cluster nodes, potentially increasing performance and memory usage. 26 | -------------------------------------------------------------------------------- /mocha.setup.ts: -------------------------------------------------------------------------------- 1 | import * as chai from 'chai'; 2 | import * as chaiAsPromised from 'chai-as-promised'; 3 | 4 | chai.use(chaiAsPromised); 5 | -------------------------------------------------------------------------------- /python/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | bullmq/commands 3 | build 4 | bullmq.egg-info 5 | dist -------------------------------------------------------------------------------- /python/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ../docs/gitbook/python/changelog.md -------------------------------------------------------------------------------- /python/README.md: -------------------------------------------------------------------------------- 1 | # BullMQ For Python 2 | 3 | This is the official BullMQ Python library. It is a close port of the NodeJS version of the library. 4 | Python Queues are interoperable with NodeJS Queues, as both libraries use the same .lua scripts that 5 | power all the functionality. 6 | 7 | ## Features 8 | 9 | Currently, the library does not support all the features available in the NodeJS version. 
The following 10 | have been ported so far: 11 | 12 | - [ ] Add jobs to queues. 13 | 14 | - [x] Regular jobs. 15 | - [x] Delayed jobs. 16 | - [ ] Job priority. 17 | - [ ] Repeatable. 18 | 19 | - [x] Workers 20 | - [ ] Job events. 21 | - [x] Job progress. 22 | - [ ] Job retries. 23 | - [x] Job backoff. 24 | - [x] Getters. 25 | 26 | ## Installation 27 | 28 | ```bash 29 | pip install bullmq 30 | ``` 31 | 32 | ## Usage 33 | 34 | ```python 35 | from bullmq import Queue 36 | 37 | queue = Queue('my-queue') 38 | 39 | job = await queue.add('my-job', {'foo': 'bar'}) 40 | 41 | ``` 42 | 43 | ## Documentation 44 | 45 | The documentation is available at [https://docs.bullmq.io](https://docs.bullmq.io/python) 46 | 47 | ## License 48 | 49 | MIT 50 | 51 | ## Copyright 52 | 53 | Copyright (c) 2018-2023, Taskforce.sh Inc. and other contributors. 54 | -------------------------------------------------------------------------------- /python/bullmq/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | BullMQ 3 | 4 | A background job processor and message queue for Python based on Redis. 5 | """ 6 | __version__ = "2.15.0" 7 | __author__ = 'Taskforce.sh Inc.' 8 | __credits__ = 'Taskforce.sh Inc.' 
9 | 10 | from bullmq.queue import Queue 11 | from bullmq.job import Job 12 | from bullmq.flow_producer import FlowProducer 13 | from bullmq.worker import Worker 14 | from bullmq.custom_errors import WaitingChildrenError 15 | -------------------------------------------------------------------------------- /python/bullmq/backoffs.py: -------------------------------------------------------------------------------- 1 | from bullmq.types import BackoffOptions 2 | 3 | import math 4 | 5 | 6 | class Backoffs: 7 | 8 | builtin_strategies = { 9 | "fixed": lambda delay: lambda attempts_made, type, err, job: delay, 10 | "exponential": lambda delay: lambda attempts_made, type, err, job: int(round(pow(2, attempts_made - 1) * delay)) 11 | } 12 | 13 | @staticmethod 14 | def normalize(backoff: int | BackoffOptions): 15 | if type(backoff) == int and math.isfinite(backoff): 16 | return { 17 | "type": "fixed", 18 | "delay": backoff 19 | } 20 | elif backoff: 21 | return backoff 22 | 23 | @staticmethod 24 | async def calculate(backoff: BackoffOptions, attempts_made: int, err, job, customStrategy): 25 | if backoff: 26 | strategy = lookup_strategy(backoff, customStrategy) 27 | return strategy(attempts_made, backoff.get("type"), err, job) 28 | 29 | 30 | def lookup_strategy(backoff: BackoffOptions, custom_strategy): 31 | backoff_type = backoff.get("type") 32 | if backoff_type in Backoffs.builtin_strategies: 33 | return Backoffs.builtin_strategies[backoff_type](backoff.get("delay")) 34 | elif custom_strategy: 35 | return custom_strategy 36 | else: 37 | raise Exception(f"Unknown backoff strategy {backoff_type}. 
" + 38 | "If a custom backoff strategy is used, specify it when the queue is created.") 39 | -------------------------------------------------------------------------------- /python/bullmq/custom_errors/__init__.py: -------------------------------------------------------------------------------- 1 | from bullmq.custom_errors.unrecoverable_error import UnrecoverableError 2 | from bullmq.custom_errors.waiting_children_error import WaitingChildrenError 3 | -------------------------------------------------------------------------------- /python/bullmq/custom_errors/unrecoverable_error.py: -------------------------------------------------------------------------------- 1 | class UnrecoverableError(Exception): 2 | "Raised when job is moved to failed without more retries" 3 | pass -------------------------------------------------------------------------------- /python/bullmq/custom_errors/waiting_children_error.py: -------------------------------------------------------------------------------- 1 | class WaitingChildrenError(Exception): 2 | "Raised when job is moved to waiting-children" 3 | pass 4 | -------------------------------------------------------------------------------- /python/bullmq/error_code.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class ErrorCode(Enum): 5 | JobNotExist = -1 6 | JobLockNotExist = -2 7 | JobNotInState = -3 8 | JobPendingDependencies = -4 9 | ParentJobNotExist = -5 10 | JobLockMismatch = -6 11 | ParentJobCannotBeReplaced = -7 12 | -------------------------------------------------------------------------------- /python/bullmq/event_emitter.py: -------------------------------------------------------------------------------- 1 | # Credits: https://gist.github.com/marc-x-andre/1c55b3fafd1d00cfdaa205ec53a08cf3 2 | from typing import Dict 3 | 4 | 5 | class EventEmitter: 6 | 7 | def __init__(self): 8 | self._callbacks: Dict[str, callable] = {} 9 | 10 | def on(self, 
event_name: str, function): 11 | self._callbacks[event_name] = self._callbacks.get( 12 | event_name, []) + [function] 13 | return function 14 | 15 | def emit(self, event_name: str, *args, **kwargs): 16 | [function(*args, **kwargs) 17 | for function in self._callbacks.get(event_name, [])] 18 | 19 | def off(self, event_name: str, function): 20 | self._callbacks.get(event_name, []).remove(function) 21 | -------------------------------------------------------------------------------- /python/bullmq/queue_keys.py: -------------------------------------------------------------------------------- 1 | class QueueKeys: 2 | """ 3 | This class handles all keys parser logic. 4 | """ 5 | 6 | def __init__(self, prefix: str = 'bull'): 7 | self.prefix = prefix 8 | 9 | def getKeys(self, name: str): 10 | names = ["", "active", "wait", "waiting-children", "paused", "completed", "failed", "delayed", "repeat", 11 | "stalled", "limiter", "prioritized", "id", "stalled-check", "meta", "pc", "events", "marker"] 12 | keys = {} 13 | for name_type in names: 14 | keys[name_type] = self.toKey(name, name_type) 15 | 16 | return keys 17 | 18 | def toKey(self, name: str, name_type: str): 19 | return f"{self.getQueueQualifiedName(name)}:{name_type}" 20 | 21 | def getQueueQualifiedName(self, name: str): 22 | return f"{self.prefix}:{name}" 23 | -------------------------------------------------------------------------------- /python/bullmq/timer.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | # Credits: https://stackoverflow.com/questions/45419723/python-timer-with-asyncio-coroutine 4 | 5 | 6 | class Timer: 7 | def __init__(self, interval: int, callback, emit_callback, *args, **kwargs): 8 | self.interval = interval 9 | self.args = args 10 | self.kwargs = kwargs 11 | self.callback = callback 12 | self.emit = emit_callback 13 | self._ok = True 14 | self._task = asyncio.ensure_future(self._job()) 15 | 16 | async def _job(self): 17 | try: 18 | 
while self._ok: 19 | await asyncio.sleep(self.interval) 20 | await self.callback(*self.args, **self.kwargs) 21 | except Exception as err: 22 | self.emit("error", err) 23 | pass 24 | 25 | def stop(self): 26 | self._ok = False 27 | self._task.cancel() 28 | -------------------------------------------------------------------------------- /python/bullmq/types/__init__.py: -------------------------------------------------------------------------------- 1 | from bullmq.types.backoff_options import BackoffOptions 2 | from bullmq.types.keep_jobs import KeepJobs 3 | from bullmq.types.job_options import JobOptions 4 | from bullmq.types.promote_jobs_options import PromoteJobsOptions 5 | from bullmq.types.queue_options import QueueBaseOptions 6 | from bullmq.types.worker_options import WorkerOptions 7 | from bullmq.types.retry_jobs_options import RetryJobsOptions 8 | -------------------------------------------------------------------------------- /python/bullmq/types/backoff_options.py: -------------------------------------------------------------------------------- 1 | from typing import TypedDict 2 | 3 | 4 | class BackoffOptions(TypedDict, total=False): 5 | type: str | dict 6 | """ 7 | Name of the backoff strategy. 8 | """ 9 | 10 | delay: int 11 | """ 12 | Delay in milliseconds. 13 | """ 14 | -------------------------------------------------------------------------------- /python/bullmq/types/keep_jobs.py: -------------------------------------------------------------------------------- 1 | from typing import TypedDict 2 | 3 | 4 | class KeepJobs(TypedDict, total=False): 5 | """ 6 | Specify which jobs to keep after finishing. If both age and count are 7 | specified, then the jobs kept will be the ones that satisfies both 8 | properties. 9 | """ 10 | 11 | age: int 12 | """ 13 | Maximum age in seconds for job to be kept. 14 | """ 15 | 16 | count: int 17 | """ 18 | Maximum count of jobs to be kept. 
19 | """ 20 | -------------------------------------------------------------------------------- /python/bullmq/types/promote_jobs_options.py: -------------------------------------------------------------------------------- 1 | 2 | from typing import TypedDict 3 | 4 | 5 | class PromoteJobsOptions(TypedDict, total=False): 6 | count: int 7 | -------------------------------------------------------------------------------- /python/bullmq/types/queue_options.py: -------------------------------------------------------------------------------- 1 | 2 | from typing import TypedDict, Any 3 | import redis.asyncio as redis 4 | 5 | 6 | class QueueBaseOptions(TypedDict, total=False): 7 | """ 8 | Options for the Queue class. 9 | """ 10 | 11 | prefix: str 12 | """ 13 | Prefix for all queue keys. 14 | """ 15 | 16 | connection: dict[str, Any] | redis.Redis | str 17 | """ 18 | Options for connecting to a Redis instance. 19 | """ 20 | -------------------------------------------------------------------------------- /python/bullmq/types/retry_jobs_options.py: -------------------------------------------------------------------------------- 1 | 2 | from typing import TypedDict 3 | 4 | 5 | class RetryJobsOptions(TypedDict, total=False): 6 | state: str 7 | count: int 8 | timestamp: int 9 | -------------------------------------------------------------------------------- /python/bullmq/types/worker_options.py: -------------------------------------------------------------------------------- 1 | 2 | from typing import TypedDict, Any 3 | import redis.asyncio as redis 4 | 5 | 6 | class WorkerOptions(TypedDict, total=False): 7 | autorun: bool 8 | """ 9 | Condition to start processor at instance creation 10 | 11 | @default true 12 | """ 13 | 14 | concurrency: int 15 | """ 16 | Amount of jobs that a single worker is allowed to work on 17 | in parallel. 
18 | 19 | @default 1 20 | @see https://docs.bullmq.io/guide/workers/concurrency 21 | """ 22 | 23 | maxStalledCount: int 24 | """ 25 | Amount of times a job can be recovered from a stalled state 26 | to the `wait` state. If this is exceeded, the job is moved 27 | to `failed`. 28 | 29 | @default 1 30 | """ 31 | 32 | stalledInterval: int 33 | """ 34 | Number of milliseconds between stallness checks. 35 | 36 | @default 30000 37 | """ 38 | 39 | lockDuration: int 40 | """ 41 | Duration of the lock for the job in milliseconds. The lock represents that 42 | a worker is processing the job. If the lock is lost, the job will be eventually 43 | be picked up by the stalled checker and move back to wait so that another worker 44 | can process it again. 45 | 46 | @default 30000 47 | """ 48 | 49 | prefix: str 50 | """ 51 | Prefix for all queue keys. 52 | """ 53 | 54 | connection: dict[str, Any] | redis.Redis | str 55 | """ 56 | Options for connecting to a Redis instance. 57 | """ 58 | -------------------------------------------------------------------------------- /python/bullmq/utils.py: -------------------------------------------------------------------------------- 1 | import semver 2 | import traceback 3 | import json 4 | 5 | 6 | def isRedisVersionLowerThan(current_version, minimum_version): 7 | return semver.VersionInfo.parse(current_version).compare(minimum_version) == -1 8 | 9 | def extract_result(job_task, emit_callback): 10 | try: 11 | return job_task.result() 12 | except Exception as e: 13 | if not str(e).startswith('Connection closed by server'): 14 | # lets use a simple-but-effective error handling: 15 | # ignore the job 16 | traceback.print_exc() 17 | emit_callback("error", e) 18 | 19 | def get_parent_key(opts: dict): 20 | if opts: 21 | return f"{opts.get('queue')}:{opts.get('id')}" 22 | 23 | def parse_json_string_values(input_dict: dict[str, str]) -> dict[str, dict]: 24 | return {key: json.loads(value) for key, value in input_dict.items()} 
-------------------------------------------------------------------------------- /python/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | timeout = 30 3 | timeout_method = thread 4 | timeout_func_only = true -------------------------------------------------------------------------------- /python/release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | rm -Rf dist 3 | rm -Rf bullmq.egg-info 4 | yarn build bullmq # latest version 5 | python -m build 6 | twine upload dist/* 7 | -------------------------------------------------------------------------------- /python/requirements.txt: -------------------------------------------------------------------------------- 1 | async-timeout==4.0.2 2 | certifi==2024.7.4 3 | distlib==0.3.7 4 | filelock==3.12.2 5 | msgpack==1.0.5 6 | pipenv==2023.7.4 7 | platformdirs==3.9.1 8 | pre-commit==3.3.3 9 | pytest==8.3.5 10 | pytest-timeout==2.4.0 11 | python-semantic-release==7.28.1 12 | redis==4.6.0 13 | semver==2.13.0 14 | six==1.16.0 15 | virtualenv==20.26.6 16 | virtualenv-clone==0.5.7 17 | -------------------------------------------------------------------------------- /python/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | redis-cli flushall 3 | python3 -m unittest -v tests.bulk_tests 4 | python3 -m unittest -v tests.delay_tests 5 | python3 -m unittest -v tests.flow_tests 6 | python3 -m unittest -v tests.job_tests 7 | python3 -m unittest -v tests.queue_tests 8 | python3 -m unittest -v tests.worker_tests -------------------------------------------------------------------------------- /python/run_tests_dragonfly.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export BULLMQ_TEST_PREFIX="{b}" 3 | python3 -m unittest -v tests.bulk_tests 4 | python3 -m unittest -v tests.delay_tests 5 | python3 
-m unittest -v tests.flow_tests 6 | python3 -m unittest -v tests.job_tests 7 | python3 -m unittest -v tests.queue_tests 8 | python3 -m unittest -v tests.worker_tests -------------------------------------------------------------------------------- /python/setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | Needed to allow setuptools to build the project form pyproject.toml 3 | For more info https://setuptools.pypa.io/en/latest/userguide/pyproject_config.html 4 | """ 5 | from setuptools import setup 6 | 7 | setup() 8 | -------------------------------------------------------------------------------- /python/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/python/tests/__init__.py -------------------------------------------------------------------------------- /scripts/generateRawScripts.ts: -------------------------------------------------------------------------------- 1 | import { ScriptLoader } from '../src/commands/index'; 2 | import * as path from 'path'; 3 | import * as fs from 'fs'; 4 | import { promisify } from 'util'; 5 | 6 | const writeFile = promisify(fs.writeFile); 7 | 8 | export class RawScriptLoader extends ScriptLoader { 9 | /** 10 | * Transpile lua scripts in one file, specifying an specific directory to be saved 11 | * @param pathname - the path to the directory containing the scripts 12 | * @param writeDir - the path to the directory where scripts will be saved 13 | */ 14 | async transpileScripts(pathname: string, writeDir: string): Promise { 15 | const writeFilenamePath = path.normalize(writeDir); 16 | 17 | if (!fs.existsSync(writeFilenamePath)) { 18 | fs.mkdirSync(writeFilenamePath); 19 | } 20 | 21 | const paths = new Set(); 22 | if (!paths.has(pathname)) { 23 | paths.add(pathname); 24 | const scripts = await this.loadScripts(pathname); 25 | for (const 
command of scripts) { 26 | const { 27 | name, 28 | options: { numberOfKeys, lua }, 29 | } = command; 30 | await writeFile( 31 | path.join(writeFilenamePath, `${name}-${numberOfKeys}.lua`), 32 | lua, 33 | ); 34 | } 35 | } 36 | } 37 | } 38 | 39 | const scriptLoader = new RawScriptLoader(); 40 | 41 | scriptLoader.transpileScripts( 42 | path.join(__dirname, '../src/commands'), 43 | path.join(__dirname, '../rawScripts'), 44 | ); 45 | -------------------------------------------------------------------------------- /scripts/updateVersion.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs'); 2 | const path = require('path'); 3 | 4 | const newVersion = process.argv[2]; 5 | const versionFilePath = path.join(__dirname, '../src/version.ts'); 6 | 7 | const content = `export const version = '${newVersion}';\n`; 8 | 9 | fs.writeFileSync(versionFilePath, content, 'utf8'); 10 | 11 | console.log(`Updated version file to version ${newVersion}`); 12 | -------------------------------------------------------------------------------- /src/classes/errors/delayed-error.ts: -------------------------------------------------------------------------------- 1 | export const DELAYED_ERROR = 'bullmq:movedToDelayed'; 2 | 3 | /** 4 | * DelayedError 5 | * 6 | * Error to be thrown when job is moved to delayed state 7 | * from job in active state. 
8 | * 9 | */ 10 | export class DelayedError extends Error { 11 | constructor(message: string = DELAYED_ERROR) { 12 | super(message); 13 | this.name = this.constructor.name; 14 | Object.setPrototypeOf(this, new.target.prototype); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/classes/errors/index.ts: -------------------------------------------------------------------------------- 1 | export * from './delayed-error'; 2 | export * from './unrecoverable-error'; 3 | export * from './rate-limit-error'; 4 | export * from './waiting-children-error'; 5 | -------------------------------------------------------------------------------- /src/classes/errors/rate-limit-error.ts: -------------------------------------------------------------------------------- 1 | export const RATE_LIMIT_ERROR = 'bullmq:rateLimitExceeded'; 2 | 3 | /** 4 | * RateLimitError 5 | * 6 | * Error to be thrown when queue reaches a rate limit. 7 | * 8 | */ 9 | export class RateLimitError extends Error { 10 | constructor(message: string = RATE_LIMIT_ERROR) { 11 | super(message); 12 | this.name = this.constructor.name; 13 | Object.setPrototypeOf(this, new.target.prototype); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /src/classes/errors/unrecoverable-error.ts: -------------------------------------------------------------------------------- 1 | export const UNRECOVERABLE_ERROR = 'bullmq:unrecoverable'; 2 | 3 | /** 4 | * UnrecoverableError 5 | * 6 | * Error to move a job to failed even if the attemptsMade 7 | * are lower than the expected limit. 
8 | * 9 | */ 10 | export class UnrecoverableError extends Error { 11 | constructor(message: string = UNRECOVERABLE_ERROR) { 12 | super(message); 13 | this.name = this.constructor.name; 14 | Object.setPrototypeOf(this, new.target.prototype); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/classes/errors/waiting-children-error.ts: -------------------------------------------------------------------------------- 1 | export const WAITING_CHILDREN_ERROR = 'bullmq:movedToWaitingChildren'; 2 | 3 | /** 4 | * WaitingChildrenError 5 | * 6 | * Error to be thrown when job is moved to waiting-children state 7 | * from job in active state. 8 | * 9 | */ 10 | export class WaitingChildrenError extends Error { 11 | constructor(message: string = WAITING_CHILDREN_ERROR) { 12 | super(message); 13 | this.name = this.constructor.name; 14 | Object.setPrototypeOf(this, new.target.prototype); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/classes/index.ts: -------------------------------------------------------------------------------- 1 | export * from './async-fifo-queue'; 2 | export * from './backoffs'; 3 | export * from './child'; 4 | export * from './child-pool'; 5 | export * from './child-processor'; 6 | export * from './errors'; 7 | export * from './flow-producer'; 8 | export * from './job'; 9 | export * from './job-scheduler'; 10 | // export * from './main'; this file must not be exported 11 | // export * from './main-worker'; this file must not be exported 12 | export * from './queue-base'; 13 | export * from './queue-events'; 14 | export * from './queue-events-producer'; 15 | export * from './queue-getters'; 16 | export * from './queue-keys'; 17 | export * from './queue'; 18 | export * from './redis-connection'; 19 | export * from './repeat'; 20 | export * from './sandbox'; 21 | export * from './scripts'; 22 | export * from './worker'; 23 | 
-------------------------------------------------------------------------------- /src/classes/main-base.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Wrapper for sandboxing. 3 | * 4 | */ 5 | import { ChildProcessor } from './child-processor'; 6 | import { ParentCommand, ChildCommand } from '../enums'; 7 | import { errorToJSON, toString } from '../utils'; 8 | import { Receiver } from '../interfaces'; 9 | 10 | export default (send: (msg: any) => Promise, receiver: Receiver) => { 11 | const childProcessor = new ChildProcessor(send, receiver); 12 | 13 | receiver?.on('message', async msg => { 14 | try { 15 | switch (msg.cmd as ChildCommand) { 16 | case ChildCommand.Init: 17 | await childProcessor.init(msg.value); 18 | break; 19 | case ChildCommand.Start: 20 | await childProcessor.start(msg.job, msg?.token); 21 | break; 22 | case ChildCommand.Stop: 23 | break; 24 | } 25 | } catch (err) { 26 | console.error('Error handling child message'); 27 | } 28 | }); 29 | 30 | process.on('SIGTERM', () => childProcessor.waitForCurrentJobAndExit()); 31 | process.on('SIGINT', () => childProcessor.waitForCurrentJobAndExit()); 32 | 33 | process.on('uncaughtException', async (err: any) => { 34 | if (typeof err !== 'object') { 35 | err = new Error(toString(err)); 36 | } 37 | 38 | await send({ 39 | cmd: ParentCommand.Failed, 40 | value: errorToJSON(err), 41 | }); 42 | 43 | // An uncaughException leaves this process in a potentially undetermined state so 44 | // we must exit 45 | process.exit(); 46 | }); 47 | }; 48 | -------------------------------------------------------------------------------- /src/classes/main-worker.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Worker Thread wrapper for sandboxing 3 | * 4 | */ 5 | import { parentPort } from 'worker_threads'; 6 | import mainBase from './main-base'; 7 | 8 | mainBase(async (msg: any) => parentPort.postMessage(msg), parentPort); 9 | 
-------------------------------------------------------------------------------- /src/classes/main.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Child process wrapper for sandboxing. 3 | * 4 | */ 5 | import { childSend } from '../utils'; 6 | import mainBase from './main-base'; 7 | 8 | mainBase((msg: any) => childSend(process, msg), process); 9 | -------------------------------------------------------------------------------- /src/classes/queue-keys.ts: -------------------------------------------------------------------------------- 1 | export type KeysMap = { [index in string]: string }; 2 | 3 | export class QueueKeys { 4 | constructor(public readonly prefix = 'bull') {} 5 | 6 | getKeys(name: string): KeysMap { 7 | const keys: { [index: string]: string } = {}; 8 | [ 9 | '', 10 | 'active', 11 | 'wait', 12 | 'waiting-children', 13 | 'paused', 14 | 'id', 15 | 'delayed', 16 | 'prioritized', 17 | 'stalled-check', 18 | 'completed', 19 | 'failed', 20 | 'stalled', 21 | 'repeat', 22 | 'limiter', 23 | 'meta', 24 | 'events', 25 | 'pc', // priority counter key 26 | 'marker', // marker key 27 | 'de', // deduplication key 28 | ].forEach(key => { 29 | keys[key] = this.toKey(name, key); 30 | }); 31 | 32 | return keys; 33 | } 34 | 35 | toKey(name: string, type: string): string { 36 | return `${this.getQueueQualifiedName(name)}:${type}`; 37 | } 38 | 39 | getQueueQualifiedName(name: string): string { 40 | return `${this.prefix}:${name}`; 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/commands/addLog-2.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Add job log 3 | 4 | Input: 5 | KEYS[1] job id key 6 | KEYS[2] job logs key 7 | 8 | ARGV[1] id 9 | ARGV[2] log 10 | ARGV[3] keepLogs 11 | 12 | Output: 13 | -1 - Missing job. 
14 | ]] 15 | local rcall = redis.call 16 | 17 | if rcall("EXISTS", KEYS[1]) == 1 then -- // Make sure job exists 18 | local logCount = rcall("RPUSH", KEYS[2], ARGV[2]) 19 | 20 | if ARGV[3] ~= '' then 21 | local keepLogs = tonumber(ARGV[3]) 22 | rcall("LTRIM", KEYS[2], -keepLogs, -1) 23 | 24 | return math.min(keepLogs, logCount) 25 | end 26 | 27 | return logCount 28 | else 29 | return -1 30 | end 31 | -------------------------------------------------------------------------------- /src/commands/changeDelay-4.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Change job delay when it is in delayed set. 3 | Input: 4 | KEYS[1] delayed key 5 | KEYS[2] meta key 6 | KEYS[3] marker key 7 | KEYS[4] events stream 8 | 9 | ARGV[1] delay 10 | ARGV[2] timestamp 11 | ARGV[3] the id of the job 12 | ARGV[4] job key 13 | 14 | Output: 15 | 0 - OK 16 | -1 - Missing job. 17 | -3 - Job not in delayed set. 18 | 19 | Events: 20 | - delayed key. 21 | ]] 22 | local rcall = redis.call 23 | 24 | -- Includes 25 | --- @include "includes/addDelayMarkerIfNeeded" 26 | --- @include "includes/getDelayedScore" 27 | --- @include "includes/getOrSetMaxEvents" 28 | 29 | if rcall("EXISTS", ARGV[4]) == 1 then 30 | local jobId = ARGV[3] 31 | 32 | local delay = tonumber(ARGV[1]) 33 | local score, delayedTimestamp = getDelayedScore(KEYS[1], ARGV[2], delay) 34 | 35 | local numRemovedElements = rcall("ZREM", KEYS[1], jobId) 36 | 37 | if numRemovedElements < 1 then 38 | return -3 39 | end 40 | 41 | rcall("HSET", ARGV[4], "delay", delay) 42 | rcall("ZADD", KEYS[1], score, jobId) 43 | 44 | local maxEvents = getOrSetMaxEvents(KEYS[2]) 45 | 46 | rcall("XADD", KEYS[4], "MAXLEN", "~", maxEvents, "*", "event", "delayed", 47 | "jobId", jobId, "delay", delayedTimestamp) 48 | 49 | -- mark that a delayed job is available 50 | addDelayMarkerIfNeeded(KEYS[3], KEYS[1]) 51 | 52 | return 0 53 | else 54 | return -1 55 | end 
-------------------------------------------------------------------------------- /src/commands/drain-5.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Drains the queue, removes all jobs that are waiting 3 | or delayed, but not active, completed or failed 4 | 5 | Input: 6 | KEYS[1] 'wait', 7 | KEYS[2] 'paused' 8 | KEYS[3] 'delayed' 9 | KEYS[4] 'prioritized' 10 | KEYS[5] 'jobschedulers' (repeat) 11 | 12 | ARGV[1] queue key prefix 13 | ARGV[2] should clean delayed jobs 14 | ]] 15 | local rcall = redis.call 16 | local queueBaseKey = ARGV[1] 17 | 18 | --- @include "includes/removeListJobs" 19 | --- @include "includes/removeZSetJobs" 20 | 21 | -- We must not remove delayed jobs if they are associated to a job scheduler. 22 | local scheduledJobs = {} 23 | local jobSchedulers = rcall("ZRANGE", KEYS[5], 0, -1, "WITHSCORES") 24 | 25 | -- For every job scheduler, get the current delayed job id. 26 | for i = 1, #jobSchedulers, 2 do 27 | local jobSchedulerId = jobSchedulers[i] 28 | local jobSchedulerMillis = jobSchedulers[i + 1] 29 | 30 | local delayedJobId = "repeat:" .. jobSchedulerId .. ":" .. jobSchedulerMillis 31 | scheduledJobs[delayedJobId] = true 32 | end 33 | 34 | removeListJobs(KEYS[1], true, queueBaseKey, 0, scheduledJobs) -- wait 35 | removeListJobs(KEYS[2], true, queueBaseKey, 0, scheduledJobs) -- paused 36 | 37 | if ARGV[2] == "1" then 38 | removeZSetJobs(KEYS[3], true, queueBaseKey, 0, scheduledJobs) -- delayed 39 | end 40 | 41 | removeZSetJobs(KEYS[4], true, queueBaseKey, 0, scheduledJobs) -- prioritized 42 | -------------------------------------------------------------------------------- /src/commands/extendLock-2.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Extend lock and removes the job from the stalled set. 
3 | 4 | Input: 5 | KEYS[1] 'lock', 6 | KEYS[2] 'stalled' 7 | 8 | ARGV[1] token 9 | ARGV[2] lock duration in milliseconds 10 | ARGV[3] jobid 11 | 12 | Output: 13 | "1" if lock extented succesfully. 14 | ]] 15 | local rcall = redis.call 16 | if rcall("GET", KEYS[1]) == ARGV[1] then 17 | -- if rcall("SET", KEYS[1], ARGV[1], "PX", ARGV[2], "XX") then 18 | if rcall("SET", KEYS[1], ARGV[1], "PX", ARGV[2]) then 19 | rcall("SREM", KEYS[2], ARGV[3]) 20 | return 1 21 | end 22 | end 23 | return 0 24 | -------------------------------------------------------------------------------- /src/commands/extendLocks-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Extend locks for multiple jobs and remove them from the stalled set if successful. 3 | Return the list of job IDs for which the operation failed. 4 | 5 | KEYS[1] = stalledKey 6 | 7 | ARGV[1] = baseKey 8 | ARGV[2] = tokens 9 | ARGV[3] = jobIds 10 | ARGV[4] = lockDuration (ms) 11 | 12 | Output: 13 | An array of failed job IDs. If empty, all succeeded. 14 | ]] 15 | local rcall = redis.call 16 | 17 | local stalledKey = KEYS[1] 18 | local baseKey = ARGV[1] 19 | local tokens = cmsgpack.unpack(ARGV[2]) 20 | local jobIds = cmsgpack.unpack(ARGV[3]) 21 | local lockDuration = ARGV[4] 22 | 23 | local jobCount = #jobIds 24 | local failedJobs = {} 25 | 26 | for i = 1, jobCount, 1 do 27 | local lockKey = baseKey .. jobIds[i] .. 
':lock' 28 | local jobId = jobIds[i] 29 | local token = tokens[i] 30 | 31 | local currentToken = rcall("GET", lockKey) 32 | if currentToken == token then 33 | local setResult = rcall("SET", lockKey, token, "PX", lockDuration) 34 | if setResult then 35 | rcall("SREM", stalledKey, jobId) 36 | else 37 | table.insert(failedJobs, jobId) 38 | end 39 | else 40 | table.insert(failedJobs, jobId) 41 | end 42 | end 43 | 44 | return failedJobs 45 | -------------------------------------------------------------------------------- /src/commands/getCounts-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Get counts per provided states 3 | 4 | Input: 5 | KEYS[1] 'prefix' 6 | 7 | ARGV[1...] types 8 | ]] 9 | local rcall = redis.call; 10 | local prefix = KEYS[1] 11 | local results = {} 12 | 13 | for i = 1, #ARGV do 14 | local stateKey = prefix .. ARGV[i] 15 | if ARGV[i] == "wait" or ARGV[i] == "paused" then 16 | -- Markers in waitlist DEPRECATED in v5: Remove in v6. 17 | local marker = rcall("LINDEX", stateKey, -1) 18 | if marker and string.sub(marker, 1, 2) == "0:" then 19 | local count = rcall("LLEN", stateKey) 20 | if count > 1 then 21 | rcall("RPOP", stateKey) 22 | results[#results+1] = count-1 23 | else 24 | results[#results+1] = 0 25 | end 26 | else 27 | results[#results+1] = rcall("LLEN", stateKey) 28 | end 29 | elseif ARGV[i] == "active" then 30 | results[#results+1] = rcall("LLEN", stateKey) 31 | else 32 | results[#results+1] = rcall("ZCARD", stateKey) 33 | end 34 | end 35 | 36 | return results 37 | -------------------------------------------------------------------------------- /src/commands/getCountsPerPriority-4.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Get counts per provided states 3 | 4 | Input: 5 | KEYS[1] wait key 6 | KEYS[2] paused key 7 | KEYS[3] meta key 8 | KEYS[4] prioritized key 9 | 10 | ARGV[1...] 
priorities 11 | ]] 12 | local rcall = redis.call 13 | local results = {} 14 | local waitKey = KEYS[1] 15 | local pausedKey = KEYS[2] 16 | local prioritizedKey = KEYS[4] 17 | 18 | -- Includes 19 | --- @include "includes/isQueuePaused" 20 | 21 | for i = 1, #ARGV do 22 | local priority = tonumber(ARGV[i]) 23 | if priority == 0 then 24 | if isQueuePaused(KEYS[3]) then 25 | results[#results+1] = rcall("LLEN", pausedKey) 26 | else 27 | results[#results+1] = rcall("LLEN", waitKey) 28 | end 29 | else 30 | results[#results+1] = rcall("ZCOUNT", prioritizedKey, 31 | priority * 0x100000000, (priority + 1) * 0x100000000 - 1) 32 | end 33 | end 34 | 35 | return results 36 | -------------------------------------------------------------------------------- /src/commands/getDependencyCounts-4.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Get counts per child states 3 | 4 | Input: 5 | KEYS[1] processed key 6 | KEYS[2] unprocessed key 7 | KEYS[3] ignored key 8 | KEYS[4] failed key 9 | 10 | ARGV[1...] types 11 | ]] 12 | local rcall = redis.call; 13 | local processedKey = KEYS[1] 14 | local unprocessedKey = KEYS[2] 15 | local ignoredKey = KEYS[3] 16 | local failedKey = KEYS[4] 17 | local results = {} 18 | 19 | for i = 1, #ARGV do 20 | if ARGV[i] == "processed" then 21 | results[#results+1] = rcall("HLEN", processedKey) 22 | elseif ARGV[i] == "unprocessed" then 23 | results[#results+1] = rcall("SCARD", unprocessedKey) 24 | elseif ARGV[i] == "ignored" then 25 | results[#results+1] = rcall("HLEN", ignoredKey) 26 | else 27 | results[#results+1] = rcall("ZCARD", failedKey) 28 | end 29 | end 30 | 31 | return results 32 | -------------------------------------------------------------------------------- /src/commands/getJobScheduler-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Get job scheduler record. 
3 | 4 | Input: 5 | KEYS[1] 'repeat' key 6 | 7 | ARGV[1] id 8 | ]] 9 | 10 | local rcall = redis.call 11 | local jobSchedulerKey = KEYS[1] .. ":" .. ARGV[1] 12 | 13 | local score = rcall("ZSCORE", KEYS[1], ARGV[1]) 14 | 15 | if score then 16 | return {rcall("HGETALL", jobSchedulerKey), score} -- get job data 17 | end 18 | 19 | return {nil, nil} 20 | -------------------------------------------------------------------------------- /src/commands/getRateLimitTtl-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Get rate limit ttl 3 | 4 | Input: 5 | KEYS[1] 'limiter' 6 | 7 | ARGV[1] maxJobs 8 | ]] 9 | 10 | local rcall = redis.call 11 | 12 | -- Includes 13 | --- @include "includes/getRateLimitTTL" 14 | 15 | local rateLimiterKey = KEYS[1] 16 | if ARGV[1] ~= "0" then 17 | return getRateLimitTTL(tonumber(ARGV[1]), rateLimiterKey) 18 | else 19 | return rcall("PTTL", rateLimiterKey) 20 | end 21 | -------------------------------------------------------------------------------- /src/commands/getState-8.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Get a job state 3 | 4 | Input: 5 | KEYS[1] 'completed' key, 6 | KEYS[2] 'failed' key 7 | KEYS[3] 'delayed' key 8 | KEYS[4] 'active' key 9 | KEYS[5] 'wait' key 10 | KEYS[6] 'paused' key 11 | KEYS[7] 'waiting-children' key 12 | KEYS[8] 'prioritized' key 13 | 14 | ARGV[1] job id 15 | Output: 16 | 'completed' 17 | 'failed' 18 | 'delayed' 19 | 'active' 20 | 'prioritized' 21 | 'waiting' 22 | 'waiting-children' 23 | 'unknown' 24 | ]] 25 | local rcall = redis.call 26 | 27 | if rcall("ZSCORE", KEYS[1], ARGV[1]) then 28 | return "completed" 29 | end 30 | 31 | if rcall("ZSCORE", KEYS[2], ARGV[1]) then 32 | return "failed" 33 | end 34 | 35 | if rcall("ZSCORE", KEYS[3], ARGV[1]) then 36 | return "delayed" 37 | end 38 | 39 | if rcall("ZSCORE", KEYS[8], ARGV[1]) then 40 | return "prioritized" 41 | end 42 | 43 | -- Includes 44 | --- @include 
"includes/checkItemInList" 45 | 46 | local active_items = rcall("LRANGE", KEYS[4] , 0, -1) 47 | if checkItemInList(active_items, ARGV[1]) ~= nil then 48 | return "active" 49 | end 50 | 51 | local wait_items = rcall("LRANGE", KEYS[5] , 0, -1) 52 | if checkItemInList(wait_items, ARGV[1]) ~= nil then 53 | return "waiting" 54 | end 55 | 56 | local paused_items = rcall("LRANGE", KEYS[6] , 0, -1) 57 | if checkItemInList(paused_items, ARGV[1]) ~= nil then 58 | return "waiting" 59 | end 60 | 61 | if rcall("ZSCORE", KEYS[7], ARGV[1]) then 62 | return "waiting-children" 63 | end 64 | 65 | return "unknown" 66 | -------------------------------------------------------------------------------- /src/commands/getStateV2-8.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Get a job state 3 | 4 | Input: 5 | KEYS[1] 'completed' key, 6 | KEYS[2] 'failed' key 7 | KEYS[3] 'delayed' key 8 | KEYS[4] 'active' key 9 | KEYS[5] 'wait' key 10 | KEYS[6] 'paused' key 11 | KEYS[7] 'waiting-children' key 12 | KEYS[8] 'prioritized' key 13 | 14 | ARGV[1] job id 15 | Output: 16 | 'completed' 17 | 'failed' 18 | 'delayed' 19 | 'active' 20 | 'waiting' 21 | 'waiting-children' 22 | 'unknown' 23 | ]] 24 | local rcall = redis.call 25 | 26 | if rcall("ZSCORE", KEYS[1], ARGV[1]) then 27 | return "completed" 28 | end 29 | 30 | if rcall("ZSCORE", KEYS[2], ARGV[1]) then 31 | return "failed" 32 | end 33 | 34 | if rcall("ZSCORE", KEYS[3], ARGV[1]) then 35 | return "delayed" 36 | end 37 | 38 | if rcall("ZSCORE", KEYS[8], ARGV[1]) then 39 | return "prioritized" 40 | end 41 | 42 | if rcall("LPOS", KEYS[4] , ARGV[1]) then 43 | return "active" 44 | end 45 | 46 | if rcall("LPOS", KEYS[5] , ARGV[1]) then 47 | return "waiting" 48 | end 49 | 50 | if rcall("LPOS", KEYS[6] , ARGV[1]) then 51 | return "waiting" 52 | end 53 | 54 | if rcall("ZSCORE", KEYS[7] , ARGV[1]) then 55 | return "waiting-children" 56 | end 57 | 58 | return "unknown" 59 | 
-------------------------------------------------------------------------------- /src/commands/includes/addBaseMarkerIfNeeded.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Add marker if needed when a job is available. 3 | ]] 4 | 5 | local function addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed) 6 | if not isPausedOrMaxed then 7 | rcall("ZADD", markerKey, 0, "0") 8 | end 9 | end 10 | -------------------------------------------------------------------------------- /src/commands/includes/addDelayMarkerIfNeeded.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Add delay marker if needed. 3 | ]] 4 | 5 | -- Includes 6 | --- @include "getNextDelayedTimestamp" 7 | 8 | local function addDelayMarkerIfNeeded(markerKey, delayedKey) 9 | local nextTimestamp = getNextDelayedTimestamp(delayedKey) 10 | if nextTimestamp ~= nil then 11 | -- Replace the score of the marker with the newest known 12 | -- next timestamp. 13 | rcall("ZADD", markerKey, nextTimestamp, "1") 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /src/commands/includes/addDelayedJob.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Adds a delayed job to the queue by doing the following: 3 | - Creates a new job key with the job data. 4 | - adds to delayed zset. 5 | - Emits a global event 'delayed' if the job is delayed. 
6 | ]] 7 | 8 | -- Includes 9 | --- @include "addDelayMarkerIfNeeded" 10 | --- @include "getDelayedScore" 11 | 12 | local function addDelayedJob(jobId, delayedKey, eventsKey, timestamp, 13 | maxEvents, markerKey, delay) 14 | 15 | local score, delayedTimestamp = getDelayedScore(delayedKey, timestamp, tonumber(delay)) 16 | 17 | rcall("ZADD", delayedKey, score, jobId) 18 | rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "delayed", 19 | "jobId", jobId, "delay", delayedTimestamp) 20 | 21 | -- mark that a delayed job is available 22 | addDelayMarkerIfNeeded(markerKey, delayedKey) 23 | end 24 | -------------------------------------------------------------------------------- /src/commands/includes/addJobFromScheduler.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Add delay marker if needed. 3 | ]] 4 | 5 | -- Includes 6 | --- @include "addDelayedJob" 7 | --- @include "addJobWithPriority" 8 | --- @include "isQueuePaused" 9 | --- @include "storeJob" 10 | --- @include "getTargetQueueList" 11 | --- @include "addJobInTargetList" 12 | 13 | local function addJobFromScheduler(jobKey, jobId, rawOpts, waitKey, pausedKey, activeKey, metaKey, 14 | prioritizedKey, priorityCounter, delayedKey, markerKey, eventsKey, name, maxEvents, timestamp, 15 | data, jobSchedulerId) 16 | local opts = cmsgpack.unpack(rawOpts) 17 | 18 | local delay, priority = storeJob(eventsKey, jobKey, jobId, name, data, 19 | opts, timestamp, nil, nil, jobSchedulerId) 20 | 21 | if delay ~= 0 then 22 | addDelayedJob(jobId, delayedKey, eventsKey, timestamp, maxEvents, markerKey, delay) 23 | else 24 | local target, isPausedOrMaxed = getTargetQueueList(metaKey, activeKey, waitKey, pausedKey) 25 | 26 | -- Standard or priority add 27 | if priority == 0 then 28 | local pushCmd = opts['lifo'] and 'RPUSH' or 'LPUSH' 29 | addJobInTargetList(target, markerKey, pushCmd, isPausedOrMaxed, jobId) 30 | else 31 | -- Priority add 32 | addJobWithPriority(markerKey, 
prioritizedKey, priority, jobId, priorityCounter, isPausedOrMaxed) 33 | end 34 | -- Emit waiting event 35 | rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "waiting", "jobId", jobId) 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /src/commands/includes/addJobInTargetList.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to add job in target list and add marker if needed. 3 | ]] 4 | 5 | -- Includes 6 | --- @include "addBaseMarkerIfNeeded" 7 | 8 | local function addJobInTargetList(targetKey, markerKey, pushCmd, isPausedOrMaxed, jobId) 9 | rcall(pushCmd, targetKey, jobId) 10 | addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed) 11 | end 12 | -------------------------------------------------------------------------------- /src/commands/includes/addJobWithPriority.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to add job considering priority. 3 | ]] 4 | 5 | -- Includes 6 | --- @include "addBaseMarkerIfNeeded" 7 | --- @include "getPriorityScore" 8 | 9 | local function addJobWithPriority(markerKey, prioritizedKey, priority, jobId, priorityCounterKey, 10 | isPausedOrMaxed) 11 | local score = getPriorityScore(priority, priorityCounterKey) 12 | rcall("ZADD", prioritizedKey, score, jobId) 13 | addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed) 14 | end 15 | -------------------------------------------------------------------------------- /src/commands/includes/batches.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to loop in batches. 3 | Just a bit of warning, some commands as ZREM 4 | could receive a maximum of 7000 parameters per call. 
5 | ]] 6 | 7 | local function batches(n, batchSize) 8 | local i = 0 9 | 10 | return function() 11 | local from = i * batchSize + 1 12 | i = i + 1 13 | if (from <= n) then 14 | local to = math.min(from + batchSize - 1, n) 15 | return from, to 16 | end 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /src/commands/includes/checkItemInList.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Functions to check if a item belongs to a list. 3 | ]] 4 | 5 | local function checkItemInList(list, item) 6 | for _, v in pairs(list) do 7 | if v == item then 8 | return 1 9 | end 10 | end 11 | return nil 12 | end 13 | -------------------------------------------------------------------------------- /src/commands/includes/deduplicateJob.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to debounce a job. 3 | ]] 4 | local function deduplicateJob(deduplicationOpts, jobId, deduplicationKey, eventsKey, maxEvents) 5 | local deduplicationId = deduplicationOpts and deduplicationOpts['id'] 6 | if deduplicationId then 7 | local ttl = deduplicationOpts['ttl'] 8 | local deduplicationKeyExists 9 | if ttl then 10 | deduplicationKeyExists = not rcall('SET', deduplicationKey, jobId, 'PX', ttl, 'NX') 11 | else 12 | deduplicationKeyExists = not rcall('SET', deduplicationKey, jobId, 'NX') 13 | end 14 | if deduplicationKeyExists then 15 | local currentDebounceJobId = rcall('GET', deduplicationKey) 16 | rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "debounced", "jobId", currentDebounceJobId, 17 | "debounceId", deduplicationId) 18 | rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "deduplicated", "jobId", 19 | currentDebounceJobId, "deduplicationId", deduplicationId, "deduplicatedJobId", jobId) 20 | return currentDebounceJobId 21 | end 22 | end 23 | end 24 | 
-------------------------------------------------------------------------------- /src/commands/includes/destructureJobKey.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Functions to destructure job key. 3 | Just a bit of warning, these functions may be a bit slow and affect performance significantly. 4 | ]] 5 | 6 | local getJobIdFromKey = function (jobKey) 7 | return string.match(jobKey, ".*:(.*)") 8 | end 9 | 10 | local getJobKeyPrefix = function (jobKey, jobId) 11 | return string.sub(jobKey, 0, #jobKey - #jobId) 12 | end 13 | -------------------------------------------------------------------------------- /src/commands/includes/filterOutJobsToIgnore.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to filter out jobs to ignore from a table. 3 | ]] 4 | 5 | local function filterOutJobsToIgnore(jobs, jobsToIgnore) 6 | local filteredJobs = {} 7 | for i = 1, #jobs do 8 | if not jobsToIgnore[jobs[i]] then 9 | table.insert(filteredJobs, jobs[i]) 10 | end 11 | end 12 | return filteredJobs 13 | end 14 | -------------------------------------------------------------------------------- /src/commands/includes/getDelayedScore.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Bake in the job id first 12 bits into the timestamp 3 | to guarantee correct execution order of delayed jobs 4 | (up to 4096 jobs per given timestamp or 4096 jobs apart per timestamp) 5 | WARNING: Jobs that are so far apart that they wrap around will cause FIFO to fail 6 | ]] 7 | local function getDelayedScore(delayedKey, timestamp, delay) 8 | local delayedTimestamp = (delay > 0 and (tonumber(timestamp) + delay)) or tonumber(timestamp) 9 | local minScore = delayedTimestamp * 0x1000 10 | local maxScore = (delayedTimestamp + 1 ) * 0x1000 - 1 11 | 12 | local result = rcall("ZREVRANGEBYSCORE", delayedKey, maxScore, 13 | minScore, "WITHSCORES","LIMIT", 0, 
1) 14 | if #result then 15 | local currentMaxScore = tonumber(result[2]) 16 | if currentMaxScore ~= nil then 17 | if currentMaxScore >= maxScore then 18 | return maxScore, delayedTimestamp 19 | else 20 | return currentMaxScore + 1, delayedTimestamp 21 | end 22 | end 23 | end 24 | return minScore, delayedTimestamp 25 | end 26 | -------------------------------------------------------------------------------- /src/commands/includes/getJobsInZset.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | We use ZRANGEBYSCORE to make the case where we're deleting a limited number 3 | of items in a sorted set only run a single iteration. If we simply used 4 | ZRANGE, we may take a long time traversing through jobs that are within the 5 | grace period. 6 | ]] 7 | local function getJobsInZset(zsetKey, rangeEnd, limit) 8 | if limit > 0 then 9 | return rcall("ZRANGEBYSCORE", zsetKey, 0, rangeEnd, "LIMIT", 0, limit) 10 | else 11 | return rcall("ZRANGEBYSCORE", zsetKey, 0, rangeEnd) 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /src/commands/includes/getNextDelayedTimestamp.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to return the next delayed job timestamp. 3 | ]] 4 | local function getNextDelayedTimestamp(delayedKey) 5 | local result = rcall("ZRANGE", delayedKey, 0, 0, "WITHSCORES") 6 | if #result then 7 | local nextTimestamp = tonumber(result[2]) 8 | if nextTimestamp ~= nil then 9 | return nextTimestamp / 0x1000 10 | end 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /src/commands/includes/getOrSetMaxEvents.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to get max events value or set by default 10000. 
3 | ]] 4 | local function getOrSetMaxEvents(metaKey) 5 | local maxEvents = rcall("HGET", metaKey, "opts.maxLenEvents") 6 | if not maxEvents then 7 | maxEvents = 10000 8 | rcall("HSET", metaKey, "opts.maxLenEvents", maxEvents) 9 | end 10 | return maxEvents 11 | end 12 | -------------------------------------------------------------------------------- /src/commands/includes/getPriorityScore.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to get priority score. 3 | ]] 4 | 5 | local function getPriorityScore(priority, priorityCounterKey) 6 | local prioCounter = rcall("INCR", priorityCounterKey) 7 | return priority * 0x100000000 + prioCounter % 0x100000000 8 | end 9 | -------------------------------------------------------------------------------- /src/commands/includes/getRateLimitTTL.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to get current rate limit ttl. 3 | ]] 4 | local function getRateLimitTTL(maxJobs, rateLimiterKey) 5 | if maxJobs and maxJobs <= tonumber(rcall("GET", rateLimiterKey) or 0) then 6 | local pttl = rcall("PTTL", rateLimiterKey) 7 | 8 | if pttl == 0 then 9 | rcall("DEL", rateLimiterKey) 10 | end 11 | 12 | if pttl > 0 then 13 | return pttl 14 | end 15 | end 16 | return 0 17 | end 18 | -------------------------------------------------------------------------------- /src/commands/includes/getTargetQueueList.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to check for the meta.paused key to decide if we are paused or not 3 | (since an empty list and !EXISTS are not really the same). 
4 | ]] 5 | 6 | local function getTargetQueueList(queueMetaKey, activeKey, waitKey, pausedKey) 7 | local queueAttributes = rcall("HMGET", queueMetaKey, "paused", "concurrency") 8 | 9 | if queueAttributes[1] then 10 | return pausedKey, true 11 | else 12 | if queueAttributes[2] then 13 | local activeCount = rcall("LLEN", activeKey) 14 | if activeCount >= tonumber(queueAttributes[2]) then 15 | return waitKey, true 16 | else 17 | return waitKey, false 18 | end 19 | end 20 | end 21 | return waitKey, false 22 | end 23 | -------------------------------------------------------------------------------- /src/commands/includes/getTimestamp.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to get the latest saved timestamp. 3 | ]] 4 | 5 | local function getTimestamp(jobKey, attributes) 6 | if #attributes == 1 then 7 | return rcall("HGET", jobKey, attributes[1]) 8 | end 9 | 10 | local jobTs 11 | for _, ts in ipairs(rcall("HMGET", jobKey, unpack(attributes))) do 12 | if (ts) then 13 | jobTs = ts 14 | break 15 | end 16 | end 17 | 18 | return jobTs 19 | end 20 | -------------------------------------------------------------------------------- /src/commands/includes/getZSetItems.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to get ZSet items. 3 | ]] 4 | 5 | local function getZSetItems(keyName, max) 6 | return rcall('ZRANGE', keyName, 0, max - 1) 7 | end 8 | -------------------------------------------------------------------------------- /src/commands/includes/handleDuplicatedJob.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to handle the case when job is duplicated. 
3 | ]] 4 | 5 | -- Includes 6 | --- @include "updateExistingJobsParent" 7 | 8 | local function handleDuplicatedJob(jobKey, jobId, currentParentKey, currentParent, 9 | parentData, parentDependenciesKey, completedKey, eventsKey, maxEvents, timestamp) 10 | local existedParentKey = rcall("HGET", jobKey, "parentKey") 11 | 12 | if not existedParentKey or existedParentKey == currentParentKey then 13 | updateExistingJobsParent(currentParentKey, currentParent, parentData, 14 | parentDependenciesKey, completedKey, jobKey, 15 | jobId, timestamp) 16 | else 17 | if currentParentKey ~= nil and currentParentKey ~= existedParentKey 18 | and (rcall("EXISTS", existedParentKey) == 1) then 19 | return -7 20 | end 21 | end 22 | rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", 23 | "duplicated", "jobId", jobId) 24 | 25 | return jobId .. "" -- convert to string 26 | end 27 | -------------------------------------------------------------------------------- /src/commands/includes/isJobSchedulerJob.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to check if the job belongs to a job scheduler and 3 | current delayed job matches with jobId 4 | ]] 5 | local function isJobSchedulerJob(jobId, jobKey, jobSchedulersKey) 6 | local repeatJobKey = rcall("HGET", jobKey, "rjk") 7 | if repeatJobKey then 8 | local prevMillis = rcall("ZSCORE", jobSchedulersKey, repeatJobKey) 9 | if prevMillis then 10 | local currentDelayedJobId = "repeat:" .. repeatJobKey .. ":" .. prevMillis 11 | return jobId == currentDelayedJobId 12 | end 13 | end 14 | return false 15 | end 16 | -------------------------------------------------------------------------------- /src/commands/includes/isLocked.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to recursively check if there are no locks 3 | on the jobs to be removed. 
4 | 5 | returns: 6 | boolean 7 | ]] 8 | --- @include "destructureJobKey" 9 | 10 | local function isLocked( prefix, jobId, removeChildren) 11 | local jobKey = prefix .. jobId; 12 | 13 | -- Check if this job is locked 14 | local lockKey = jobKey .. ':lock' 15 | local lock = rcall("GET", lockKey) 16 | if not lock then 17 | if removeChildren == "1" then 18 | local dependencies = rcall("SMEMBERS", jobKey .. ":dependencies") 19 | if (#dependencies > 0) then 20 | for i, childJobKey in ipairs(dependencies) do 21 | -- We need to get the jobId for this job. 22 | local childJobId = getJobIdFromKey(childJobKey) 23 | local childJobPrefix = getJobKeyPrefix(childJobKey, childJobId) 24 | local result = isLocked( childJobPrefix, childJobId, removeChildren ) 25 | if result then 26 | return true 27 | end 28 | end 29 | end 30 | end 31 | return false 32 | end 33 | return true 34 | end 35 | -------------------------------------------------------------------------------- /src/commands/includes/isQueueMaxed.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to check if queue is maxed or not. 3 | ]] 4 | local function isQueueMaxed(queueMetaKey, activeKey) 5 | local maxConcurrency = rcall("HGET", queueMetaKey, "concurrency") 6 | 7 | if maxConcurrency then 8 | local activeCount = rcall("LLEN", activeKey) 9 | if activeCount >= tonumber(maxConcurrency) then 10 | return true 11 | end 12 | end 13 | 14 | return false 15 | end 16 | -------------------------------------------------------------------------------- /src/commands/includes/isQueuePaused.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to check for the meta.paused key to decide if we are paused or not 3 | (since an empty list and !EXISTS are not really the same). 
4 | ]] 5 | local function isQueuePaused(queueMetaKey) 6 | return rcall("HEXISTS", queueMetaKey, "paused") == 1 7 | end 8 | -------------------------------------------------------------------------------- /src/commands/includes/isQueuePausedOrMaxed.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to check if queue is paused or maxed 3 | (since an empty list and !EXISTS are not really the same). 4 | ]] 5 | 6 | local function isQueuePausedOrMaxed(queueMetaKey, activeKey) 7 | local queueAttributes = rcall("HMGET", queueMetaKey, "paused", "concurrency") 8 | 9 | if queueAttributes[1] then 10 | return true 11 | else 12 | if queueAttributes[2] then 13 | local activeCount = rcall("LLEN", activeKey) 14 | return activeCount >= tonumber(queueAttributes[2]) 15 | end 16 | end 17 | return false 18 | end 19 | -------------------------------------------------------------------------------- /src/commands/includes/moveJobFromPrioritizedToActive.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to move job from prioritized state to active. 3 | ]] 4 | 5 | local function moveJobFromPrioritizedToActive(priorityKey, activeKey, priorityCounterKey) 6 | local prioritizedJob = rcall("ZPOPMIN", priorityKey) 7 | if #prioritizedJob > 0 then 8 | rcall("LPUSH", activeKey, prioritizedJob[1]) 9 | return prioritizedJob[1] 10 | else 11 | rcall("DEL", priorityCounterKey) 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /src/commands/includes/moveParentToWaitIfNeeded.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Validate and move parent to a wait status (waiting, delayed or prioritized) if needed. 
3 | ]] 4 | -- Includes 5 | --- @include "moveParentToWait" 6 | local function moveParentToWaitIfNeeded(parentQueueKey, parentKey, parentId, timestamp) 7 | if rcall("EXISTS", parentKey) == 1 then 8 | local parentWaitingChildrenKey = parentQueueKey .. ":waiting-children" 9 | if rcall("ZSCORE", parentWaitingChildrenKey, parentId) then 10 | rcall("ZREM", parentWaitingChildrenKey, parentId) 11 | moveParentToWait(parentQueueKey, parentKey, parentId, timestamp) 12 | end 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /src/commands/includes/moveParentToWaitIfNoPendingDependencies.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Validate and move parent to a wait status (waiting, delayed or prioritized) 3 | if no pending dependencies. 4 | ]] 5 | -- Includes 6 | --- @include "moveParentToWaitIfNeeded" 7 | local function moveParentToWaitIfNoPendingDependencies(parentQueueKey, parentDependenciesKey, parentKey, 8 | parentId, timestamp) 9 | local doNotHavePendingDependencies = rcall("SCARD", parentDependenciesKey) == 0 10 | if doNotHavePendingDependencies then 11 | moveParentToWaitIfNeeded(parentQueueKey, parentKey, parentId, timestamp) 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /src/commands/includes/pushBackJobWithPriority.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to push back job considering priority in front of same prioritized jobs. 
3 | ]] 4 | local function pushBackJobWithPriority(prioritizedKey, priority, jobId) 5 | -- in order to put it at front of same prioritized jobs 6 | -- we consider prioritized counter as 0 7 | local score = priority * 0x100000000 8 | rcall("ZADD", prioritizedKey, score, jobId) 9 | end 10 | -------------------------------------------------------------------------------- /src/commands/includes/removeDeduplicationKeyIfNeededOnFinalization.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to remove deduplication key if needed 3 | when a job is moved to completed or failed states. 4 | ]] 5 | 6 | local function removeDeduplicationKeyIfNeededOnFinalization(prefixKey, 7 | deduplicationId, jobId) 8 | if deduplicationId then 9 | local deduplicationKey = prefixKey .. "de:" .. deduplicationId 10 | local pttl = rcall("PTTL", deduplicationKey) 11 | 12 | if pttl == 0 then 13 | return rcall("DEL", deduplicationKey) 14 | end 15 | 16 | if pttl == -1 then 17 | local currentJobId = rcall('GET', deduplicationKey) 18 | if currentJobId and currentJobId == jobId then 19 | return rcall("DEL", deduplicationKey) 20 | end 21 | end 22 | end 23 | end 24 | -------------------------------------------------------------------------------- /src/commands/includes/removeDeduplicationKeyIfNeededOnRemoval.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to remove deduplication key if needed 3 | when a job is being removed. 4 | ]] 5 | 6 | local function removeDeduplicationKeyIfNeededOnRemoval(prefixKey, 7 | jobKey, jobId) 8 | local deduplicationId = rcall("HGET", jobKey, "deid") 9 | if deduplicationId then 10 | local deduplicationKey = prefixKey .. "de:" .. 
deduplicationId 11 | local currentJobId = rcall('GET', deduplicationKey) 12 | if currentJobId and currentJobId == jobId then 13 | return rcall("DEL", deduplicationKey) 14 | end 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /src/commands/includes/removeJob.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to remove job. 3 | ]] 4 | 5 | -- Includes 6 | --- @include "removeDeduplicationKeyIfNeededOnRemoval" 7 | --- @include "removeJobKeys" 8 | --- @include "removeParentDependencyKey" 9 | 10 | local function removeJob(jobId, hard, baseKey, shouldRemoveDeduplicationKey) 11 | local jobKey = baseKey .. jobId 12 | removeParentDependencyKey(jobKey, hard, nil, baseKey) 13 | if shouldRemoveDeduplicationKey then 14 | removeDeduplicationKeyIfNeededOnRemoval(baseKey, jobKey, jobId) 15 | end 16 | removeJobKeys(jobKey) 17 | end 18 | -------------------------------------------------------------------------------- /src/commands/includes/removeJobFromAnyState.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to remove from any state. 3 | 4 | returns: 5 | prev state 6 | ]] 7 | 8 | local function removeJobFromAnyState( prefix, jobId) 9 | -- We start with the ZSCORE checks, since they have O(1) complexity 10 | if rcall("ZSCORE", prefix .. "completed", jobId) then 11 | rcall("ZREM", prefix .. "completed", jobId) 12 | return "completed" 13 | elseif rcall("ZSCORE", prefix .. "waiting-children", jobId) then 14 | rcall("ZREM", prefix .. "waiting-children", jobId) 15 | return "waiting-children" 16 | elseif rcall("ZSCORE", prefix .. "delayed", jobId) then 17 | rcall("ZREM", prefix .. "delayed", jobId) 18 | return "delayed" 19 | elseif rcall("ZSCORE", prefix .. "failed", jobId) then 20 | rcall("ZREM", prefix .. "failed", jobId) 21 | return "failed" 22 | elseif rcall("ZSCORE", prefix .. 
"prioritized", jobId) then 23 | rcall("ZREM", prefix .. "prioritized", jobId) 24 | return "prioritized" 25 | -- We remove only 1 element from the list, since we assume they are not added multiple times 26 | elseif rcall("LREM", prefix .. "wait", 1, jobId) == 1 then 27 | return "wait" 28 | elseif rcall("LREM", prefix .. "paused", 1, jobId) == 1 then 29 | return "paused" 30 | elseif rcall("LREM", prefix .. "active", 1, jobId) == 1 then 31 | return "active" 32 | end 33 | 34 | return "unknown" 35 | end 36 | -------------------------------------------------------------------------------- /src/commands/includes/removeJobKeys.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to remove job keys. 3 | ]] 4 | 5 | local function removeJobKeys(jobKey) 6 | return rcall("DEL", jobKey, jobKey .. ':logs', jobKey .. ':dependencies', 7 | jobKey .. ':processed', jobKey .. ':failed', jobKey .. ':unsuccessful') 8 | end 9 | -------------------------------------------------------------------------------- /src/commands/includes/removeJobs.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Functions to remove jobs. 3 | ]] 4 | 5 | -- Includes 6 | --- @include "removeJob" 7 | 8 | local function removeJobs(keys, hard, baseKey, max) 9 | for i, key in ipairs(keys) do 10 | removeJob(key, hard, baseKey, true --[[remove debounce key]]) 11 | end 12 | return max - #keys 13 | end 14 | -------------------------------------------------------------------------------- /src/commands/includes/removeJobsByMaxAge.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Functions to remove jobs by max age. 
3 | ]] 4 | 5 | -- Includes 6 | --- @include "removeJob" 7 | 8 | local function removeJobsByMaxAge(timestamp, maxAge, targetSet, prefix, 9 | shouldRemoveDebounceKey) 10 | local start = timestamp - maxAge * 1000 11 | local jobIds = rcall("ZREVRANGEBYSCORE", targetSet, start, "-inf") 12 | for i, jobId in ipairs(jobIds) do 13 | removeJob(jobId, false, prefix, false --[[remove debounce key]]) 14 | end 15 | rcall("ZREMRANGEBYSCORE", targetSet, "-inf", start) 16 | end 17 | -------------------------------------------------------------------------------- /src/commands/includes/removeJobsByMaxCount.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Functions to remove jobs by max count. 3 | ]] 4 | 5 | -- Includes 6 | --- @include "removeJob" 7 | 8 | local function removeJobsByMaxCount(maxCount, targetSet, prefix) 9 | local start = maxCount 10 | local jobIds = rcall("ZREVRANGE", targetSet, start, -1) 11 | for i, jobId in ipairs(jobIds) do 12 | removeJob(jobId, false, prefix, false --[[remove debounce key]]) 13 | end 14 | rcall("ZREMRANGEBYRANK", targetSet, 0, -(maxCount + 1)) 15 | end 16 | -------------------------------------------------------------------------------- /src/commands/includes/removeJobsOnFail.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Functions to remove jobs when removeOnFail option is provided. 
3 | ]] 4 | 5 | -- Includes 6 | --- @include "removeJob" 7 | --- @include "removeJobsByMaxAge" 8 | --- @include "removeJobsByMaxCount" 9 | 10 | local function removeJobsOnFail(queueKeyPrefix, failedKey, jobId, opts, timestamp) 11 | local removeOnFailType = type(opts["removeOnFail"]) 12 | if removeOnFailType == "number" then 13 | removeJobsByMaxCount(opts["removeOnFail"], 14 | failedKey, queueKeyPrefix) 15 | elseif removeOnFailType == "boolean" then 16 | if opts["removeOnFail"] then 17 | removeJob(jobId, false, queueKeyPrefix, 18 | false --[[remove debounce key]]) 19 | rcall("ZREM", failedKey, jobId) 20 | end 21 | elseif removeOnFailType ~= "nil" then 22 | local maxAge = opts["removeOnFail"]["age"] 23 | local maxCount = opts["removeOnFail"]["count"] 24 | 25 | if maxAge ~= nil then 26 | removeJobsByMaxAge(timestamp, maxAge, 27 | failedKey, queueKeyPrefix) 28 | end 29 | 30 | if maxCount ~= nil and maxCount > 0 then 31 | removeJobsByMaxCount(maxCount, failedKey, 32 | queueKeyPrefix) 33 | end 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /src/commands/includes/removeListJobs.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Functions to remove jobs. 
3 | ]] 4 | 5 | -- Includes 6 | --- @include "filterOutJobsToIgnore" 7 | --- @include "removeJobs" 8 | 9 | local function getListItems(keyName, max) 10 | return rcall('LRANGE', keyName, 0, max - 1) 11 | end 12 | 13 | local function removeListJobs(keyName, hard, baseKey, max, jobsToIgnore) 14 | local jobs = getListItems(keyName, max) 15 | 16 | if jobsToIgnore then 17 | jobs = filterOutJobsToIgnore(jobs, jobsToIgnore) 18 | end 19 | 20 | local count = removeJobs(jobs, hard, baseKey, max) 21 | rcall("LTRIM", keyName, #jobs, -1) 22 | return count 23 | end 24 | -------------------------------------------------------------------------------- /src/commands/includes/removeLock.lua: -------------------------------------------------------------------------------- 1 | local function removeLock(jobKey, stalledKey, token, jobId) 2 | if token ~= "0" then 3 | local lockKey = jobKey .. ':lock' 4 | local lockToken = rcall("GET", lockKey) 5 | if lockToken == token then 6 | rcall("DEL", lockKey) 7 | rcall("SREM", stalledKey, jobId) 8 | else 9 | if lockToken then 10 | -- Lock exists but token does not match 11 | return -6 12 | else 13 | -- Lock is missing completely 14 | return -2 15 | end 16 | end 17 | end 18 | return 0 19 | end 20 | -------------------------------------------------------------------------------- /src/commands/includes/removeZSetJobs.lua: -------------------------------------------------------------------------------- 1 | -- Includes 2 | --- @include "batches" 3 | --- @include "filterOutJobsToIgnore" 4 | --- @include "getZSetItems" 5 | --- @include "removeJobs" 6 | 7 | local function removeZSetJobs(keyName, hard, baseKey, max, jobsToIgnore) 8 | local jobs = getZSetItems(keyName, max) 9 | 10 | if jobsToIgnore then 11 | jobs = filterOutJobsToIgnore(jobs, jobsToIgnore) 12 | end 13 | 14 | local count = removeJobs(jobs, hard, baseKey, max) 15 | if(#jobs > 0) then 16 | for from, to in batches(#jobs, 7000) do 17 | rcall("ZREM", keyName, unpack(jobs, from, to)) 18 | end 19 | 
end 20 | return count 21 | end 22 | -------------------------------------------------------------------------------- /src/commands/includes/storeJob.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to store a job 3 | ]] 4 | local function storeJob(eventsKey, jobIdKey, jobId, name, data, opts, timestamp, 5 | parentKey, parentData, repeatJobKey) 6 | local jsonOpts = cjson.encode(opts) 7 | local delay = opts['delay'] or 0 8 | local priority = opts['priority'] or 0 9 | local debounceId = opts['de'] and opts['de']['id'] 10 | 11 | local optionalValues = {} 12 | if parentKey ~= nil then 13 | table.insert(optionalValues, "parentKey") 14 | table.insert(optionalValues, parentKey) 15 | table.insert(optionalValues, "parent") 16 | table.insert(optionalValues, parentData) 17 | end 18 | 19 | if repeatJobKey then 20 | table.insert(optionalValues, "rjk") 21 | table.insert(optionalValues, repeatJobKey) 22 | end 23 | 24 | if debounceId then 25 | table.insert(optionalValues, "deid") 26 | table.insert(optionalValues, debounceId) 27 | end 28 | 29 | rcall("HMSET", jobIdKey, "name", name, "data", data, "opts", jsonOpts, 30 | "timestamp", timestamp, "delay", delay, "priority", priority, 31 | unpack(optionalValues)) 32 | 33 | rcall("XADD", eventsKey, "*", "event", "added", "jobId", jobId, "name", name) 34 | 35 | return delay, priority 36 | end 37 | -------------------------------------------------------------------------------- /src/commands/includes/storeJobScheduler.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to store a job scheduler 3 | ]] 4 | local function storeJobScheduler(schedulerId, schedulerKey, repeatKey, nextMillis, opts, 5 | templateData, templateOpts) 6 | rcall("ZADD", repeatKey, nextMillis, schedulerId) 7 | 8 | local optionalValues = {} 9 | if opts['tz'] then 10 | table.insert(optionalValues, "tz") 11 | table.insert(optionalValues, opts['tz']) 12 | end 13 
| 14 | if opts['limit'] then 15 | table.insert(optionalValues, "limit") 16 | table.insert(optionalValues, opts['limit']) 17 | end 18 | 19 | if opts['pattern'] then 20 | table.insert(optionalValues, "pattern") 21 | table.insert(optionalValues, opts['pattern']) 22 | end 23 | 24 | if opts['endDate'] then 25 | table.insert(optionalValues, "endDate") 26 | table.insert(optionalValues, opts['endDate']) 27 | end 28 | 29 | if opts['every'] then 30 | table.insert(optionalValues, "every") 31 | table.insert(optionalValues, opts['every']) 32 | end 33 | 34 | local jsonTemplateOpts = cjson.encode(templateOpts) 35 | if jsonTemplateOpts and jsonTemplateOpts ~= '{}' then 36 | table.insert(optionalValues, "opts") 37 | table.insert(optionalValues, jsonTemplateOpts) 38 | end 39 | 40 | if templateData and templateData ~= '{}' then 41 | table.insert(optionalValues, "data") 42 | table.insert(optionalValues, templateData) 43 | end 44 | 45 | rcall("DEL", schedulerKey) -- remove all attributes and then re-insert new ones 46 | rcall("HMSET", schedulerKey, "name", opts['name'], "ic", 1, unpack(optionalValues)) 47 | end 48 | -------------------------------------------------------------------------------- /src/commands/includes/trimEvents.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to trim events, default 10000. 
3 | ]] 4 | 5 | -- Includes 6 | --- @include "getOrSetMaxEvents" 7 | 8 | local function trimEvents(metaKey, eventStreamKey) 9 | local maxEvents = getOrSetMaxEvents(metaKey) 10 | if maxEvents then 11 | rcall("XTRIM", eventStreamKey, "MAXLEN", "~", maxEvents) 12 | else 13 | rcall("XTRIM", eventStreamKey, "MAXLEN", "~", 10000) 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /src/commands/includes/updateExistingJobsParent.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | This function is used to update the parent's dependencies if the job 3 | is already completed and about to be ignored. The parent must get its 4 | dependencies updated to avoid the parent job being stuck forever in 5 | the waiting-children state. 6 | ]] 7 | 8 | -- Includes 9 | --- @include "updateParentDepsIfNeeded" 10 | 11 | local function updateExistingJobsParent(parentKey, parent, parentData, 12 | parentDependenciesKey, completedKey, 13 | jobIdKey, jobId, timestamp) 14 | if parentKey ~= nil then 15 | if rcall("ZSCORE", completedKey, jobId) then 16 | local returnvalue = rcall("HGET", jobIdKey, "returnvalue") 17 | updateParentDepsIfNeeded(parentKey, parent['queueKey'], 18 | parentDependenciesKey, parent['id'], 19 | jobIdKey, returnvalue, timestamp) 20 | else 21 | if parentDependenciesKey ~= nil then 22 | rcall("SADD", parentDependenciesKey, jobIdKey) 23 | end 24 | end 25 | rcall("HMSET", jobIdKey, "parentKey", parentKey, "parent", parentData) 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /src/commands/includes/updateJobFields.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Function to update a bunch of fields in a job. 
3 | ]] 4 | local function updateJobFields(jobKey, msgpackedFields) 5 | if msgpackedFields and #msgpackedFields > 0 then 6 | local fieldsToUpdate = cmsgpack.unpack(msgpackedFields) 7 | if fieldsToUpdate then 8 | rcall("HMSET", jobKey, unpack(fieldsToUpdate)) 9 | end 10 | end 11 | end 12 | -------------------------------------------------------------------------------- /src/commands/includes/updateParentDepsIfNeeded.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Validate and move or add dependencies to parent. 3 | ]] 4 | 5 | -- Includes 6 | --- @include "moveParentToWaitIfNoPendingDependencies" 7 | 8 | local function updateParentDepsIfNeeded(parentKey, parentQueueKey, parentDependenciesKey, 9 | parentId, jobIdKey, returnvalue, timestamp ) 10 | local processedSet = parentKey .. ":processed" 11 | rcall("HSET", processedSet, jobIdKey, returnvalue) 12 | moveParentToWaitIfNoPendingDependencies(parentQueueKey, parentDependenciesKey, parentKey, parentId, timestamp) 13 | end 14 | -------------------------------------------------------------------------------- /src/commands/index.ts: -------------------------------------------------------------------------------- 1 | import { ScriptLoader } from './script-loader'; 2 | export { ScriptMetadata, Command, ScriptLoaderError } from './script-loader'; 3 | 4 | const scriptLoader = new ScriptLoader(); 5 | 6 | export { ScriptLoader, scriptLoader }; 7 | -------------------------------------------------------------------------------- /src/commands/isFinished-3.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Checks if a job is finished (.i.e. is in the completed or failed set) 3 | 4 | Input: 5 | KEYS[1] completed key 6 | KEYS[2] failed key 7 | KEYS[3] job key 8 | 9 | ARGV[1] job id 10 | ARGV[2] return value? 11 | Output: 12 | 0 - Not finished. 13 | 1 - Completed. 14 | 2 - Failed. 15 | -1 - Missing job. 
16 | ]] 17 | local rcall = redis.call 18 | if rcall("EXISTS", KEYS[3]) ~= 1 then 19 | if ARGV[2] == "1" then 20 | 21 | return {-1,"Missing key for job " .. KEYS[3] .. ". isFinished"} 22 | end 23 | return -1 24 | end 25 | 26 | if rcall("ZSCORE", KEYS[1], ARGV[1]) then 27 | if ARGV[2] == "1" then 28 | local returnValue = rcall("HGET", KEYS[3], "returnvalue") 29 | 30 | return {1,returnValue} 31 | end 32 | return 1 33 | end 34 | 35 | if rcall("ZSCORE", KEYS[2], ARGV[1]) then 36 | if ARGV[2] == "1" then 37 | local failedReason = rcall("HGET", KEYS[3], "failedReason") 38 | 39 | return {2,failedReason} 40 | end 41 | return 2 42 | end 43 | 44 | if ARGV[2] == "1" then 45 | return {0} 46 | end 47 | 48 | return 0 49 | -------------------------------------------------------------------------------- /src/commands/isJobInList-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Checks if job is in a given list. 3 | 4 | Input: 5 | KEYS[1] 6 | ARGV[1] 7 | 8 | Output: 9 | 1 if element found in the list. 10 | ]] 11 | 12 | -- Includes 13 | --- @include "includes/checkItemInList" 14 | 15 | local items = redis.call("LRANGE", KEYS[1] , 0, -1) 16 | return checkItemInList(items, ARGV[1]) 17 | -------------------------------------------------------------------------------- /src/commands/isMaxed-2.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Checks if queue is maxed. 3 | 4 | Input: 5 | KEYS[1] meta key 6 | KEYS[2] active key 7 | 8 | Output: 9 | 1 if the queue is maxed, nothing otherwise.
10 | ]] 11 | 12 | local rcall = redis.call 13 | 14 | -- Includes 15 | --- @include "includes/isQueueMaxed" 16 | 17 | return isQueueMaxed(KEYS[1], KEYS[2]) 18 | -------------------------------------------------------------------------------- /src/commands/paginate-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Paginate a set or hash 3 | 4 | Input: 5 | KEYS[1] key pointing to the set or hash to be paginated. 6 | 7 | ARGV[1] page start offset 8 | ARGV[2] page end offset (-1 for all the elements) 9 | ARGV[3] cursor 10 | ARGV[4] offset 11 | ARGV[5] max iterations 12 | ARGV[6] fetch jobs? 13 | 14 | Output: 15 | [cursor, offset, items, numItems] 16 | ]] 17 | local rcall = redis.call 18 | 19 | -- Includes 20 | --- @include "includes/findPage" 21 | 22 | local key = KEYS[1] 23 | local scanCommand = "SSCAN" 24 | local countCommand = "SCARD" 25 | local type = rcall("TYPE", key)["ok"] 26 | 27 | if type == "none" then 28 | return {0, 0, {}, 0} 29 | elseif type == "hash" then 30 | scanCommand = "HSCAN" 31 | countCommand = "HLEN" 32 | elseif type ~= "set" then 33 | return 34 | redis.error_reply("Pagination is only supported for sets and hashes.") 35 | end 36 | 37 | local numItems = rcall(countCommand, key) 38 | local startOffset = tonumber(ARGV[1]) 39 | local endOffset = tonumber(ARGV[2]) 40 | if endOffset == -1 then 41 | endOffset = numItems 42 | end 43 | local pageSize = (endOffset - startOffset) + 1 44 | 45 | local cursor, offset, items, jobs = findPage(key, scanCommand, startOffset, 46 | pageSize, ARGV[3], tonumber(ARGV[4]), 47 | tonumber(ARGV[5]), ARGV[6]) 48 | 49 | return {cursor, offset, items, numItems, jobs} 50 | -------------------------------------------------------------------------------- /src/commands/pause-7.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Pauses or resumes a queue globably. 
3 | 4 | Input: 5 | KEYS[1] 'wait' or 'paused' 6 | KEYS[2] 'paused' or 'wait' 7 | KEYS[3] 'meta' 8 | KEYS[4] 'prioritized' 9 | KEYS[5] events stream key 10 | KEYS[6] 'delayed' 11 | KEYS[7] 'marker' 12 | 13 | ARGV[1] 'paused' or 'resumed' 14 | 15 | Event: 16 | publish paused or resumed event. 17 | ]] 18 | local rcall = redis.call 19 | 20 | -- Includes 21 | --- @include "includes/addDelayMarkerIfNeeded" 22 | 23 | local markerKey = KEYS[7] 24 | local hasJobs = rcall("EXISTS", KEYS[1]) == 1 25 | --TODO: check this logic to be reused when changing a delay 26 | if hasJobs then rcall("RENAME", KEYS[1], KEYS[2]) end 27 | 28 | if ARGV[1] == "paused" then 29 | rcall("HSET", KEYS[3], "paused", 1) 30 | rcall("DEL", markerKey) 31 | else 32 | rcall("HDEL", KEYS[3], "paused") 33 | 34 | if hasJobs or rcall("ZCARD", KEYS[4]) > 0 then 35 | -- Add marker if there are waiting or priority jobs 36 | rcall("ZADD", markerKey, 0, "0") 37 | else 38 | addDelayMarkerIfNeeded(markerKey, KEYS[6]) 39 | end 40 | end 41 | 42 | rcall("XADD", KEYS[5], "*", "event", ARGV[1]); 43 | -------------------------------------------------------------------------------- /src/commands/releaseLock-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Release lock 3 | 4 | Input: 5 | KEYS[1] 'lock', 6 | 7 | ARGV[1] token 8 | ARGV[2] lock duration in milliseconds 9 | 10 | Output: 11 | "OK" if lock extended successfully.
12 | ]] 13 | local rcall = redis.call 14 | 15 | if rcall("GET", KEYS[1]) == ARGV[1] then 16 | return rcall("DEL", KEYS[1]) 17 | else 18 | return 0 19 | end 20 | -------------------------------------------------------------------------------- /src/commands/removeChildDependency-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Break parent-child dependency by removing 3 | child reference from parent 4 | 5 | Input: 6 | KEYS[1] 'key' prefix, 7 | 8 | ARGV[1] job key 9 | ARGV[2] parent key 10 | 11 | Output: 12 | 0 - OK 13 | 1 - There is no relationship. 14 | -1 - Missing job key 15 | -5 - Missing parent key 16 | ]] 17 | local rcall = redis.call 18 | local jobKey = ARGV[1] 19 | local parentKey = ARGV[2] 20 | 21 | -- Includes 22 | --- @include "includes/removeParentDependencyKey" 23 | 24 | if rcall("EXISTS", jobKey) ~= 1 then return -1 end 25 | 26 | if rcall("EXISTS", parentKey) ~= 1 then return -5 end 27 | 28 | if removeParentDependencyKey(jobKey, false, parentKey, KEYS[1], nil) then 29 | rcall("HDEL", jobKey, "parentKey", "parent") 30 | 31 | return 0 32 | else 33 | return 1 34 | end -------------------------------------------------------------------------------- /src/commands/removeJob-2.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Remove a job from all the statuses it may be in as well as all its data. 3 | In order to be able to remove a job, it cannot be active.
4 | 5 | Input: 6 | KEYS[1] jobKey 7 | KEYS[2] repeat key 8 | 9 | ARGV[1] jobId 10 | ARGV[2] remove children 11 | ARGV[3] queue prefix 12 | 13 | Events: 14 | 'removed' 15 | ]] 16 | 17 | local rcall = redis.call 18 | 19 | -- Includes 20 | --- @include "includes/isJobSchedulerJob" 21 | --- @include "includes/isLocked" 22 | --- @include "includes/removeJobWithChildren" 23 | 24 | local jobId = ARGV[1] 25 | local shouldRemoveChildren = ARGV[2] 26 | local prefix = ARGV[3] 27 | local jobKey = KEYS[1] 28 | local repeatKey = KEYS[2] 29 | 30 | if isJobSchedulerJob(jobId, jobKey, repeatKey) then 31 | return -8 32 | end 33 | 34 | if not isLocked(prefix, jobId, shouldRemoveChildren) then 35 | local options = { 36 | removeChildren = shouldRemoveChildren == "1", 37 | ignoreProcessed = false, 38 | ignoreLocked = false 39 | } 40 | 41 | removeJobWithChildren(prefix, jobId, nil, options) 42 | return 1 43 | end 44 | return 0 45 | -------------------------------------------------------------------------------- /src/commands/removeJobScheduler-3.lua: -------------------------------------------------------------------------------- 1 | 2 | --[[ 3 | Removes a job scheduler and its next scheduled job. 4 | Input: 5 | KEYS[1] job schedulers key 6 | KEYS[2] delayed jobs key 7 | KEYS[3] events key 8 | 9 | ARGV[1] job scheduler id 10 | ARGV[2] prefix key 11 | 12 | Output: 13 | 0 - OK 14 | 1 - Missing repeat job 15 | 16 | Events: 17 | 'removed' 18 | ]] 19 | local rcall = redis.call 20 | 21 | -- Includes 22 | --- @include "includes/removeJobKeys" 23 | 24 | local jobSchedulerId = ARGV[1] 25 | local prefix = ARGV[2] 26 | 27 | local millis = rcall("ZSCORE", KEYS[1], jobSchedulerId) 28 | 29 | if millis then 30 | -- Delete next programmed job. 31 | local delayedJobId = "repeat:" .. jobSchedulerId .. ":" .. millis 32 | if(rcall("ZREM", KEYS[2], delayedJobId) == 1) then 33 | removeJobKeys(prefix .. 
delayedJobId) 34 | rcall("XADD", KEYS[3], "*", "event", "removed", "jobId", delayedJobId, "prev", "delayed") 35 | end 36 | end 37 | 38 | if(rcall("ZREM", KEYS[1], jobSchedulerId) == 1) then 39 | rcall("DEL", KEYS[1] .. ":" .. jobSchedulerId) 40 | return 0 41 | end 42 | 43 | return 1 44 | -------------------------------------------------------------------------------- /src/commands/removeRepeatable-3.lua: -------------------------------------------------------------------------------- 1 | 2 | --[[ 3 | Removes a repeatable job 4 | Input: 5 | KEYS[1] repeat jobs key 6 | KEYS[2] delayed jobs key 7 | KEYS[3] events key 8 | 9 | ARGV[1] old repeat job id 10 | ARGV[2] options concat 11 | ARGV[3] repeat job key 12 | ARGV[4] prefix key 13 | 14 | Output: 15 | 0 - OK 16 | 1 - Missing repeat job 17 | 18 | Events: 19 | 'removed' 20 | ]] 21 | local rcall = redis.call 22 | local millis = rcall("ZSCORE", KEYS[1], ARGV[2]) 23 | 24 | -- Includes 25 | --- @include "includes/removeJobKeys" 26 | 27 | -- legacy removal TODO: remove in next breaking change 28 | if millis then 29 | -- Delete next programmed job. 30 | local repeatJobId = ARGV[1] .. millis 31 | if(rcall("ZREM", KEYS[2], repeatJobId) == 1) then 32 | removeJobKeys(ARGV[4] .. repeatJobId) 33 | rcall("XADD", KEYS[3], "*", "event", "removed", "jobId", repeatJobId, "prev", "delayed"); 34 | end 35 | end 36 | 37 | if(rcall("ZREM", KEYS[1], ARGV[2]) == 1) then 38 | return 0 39 | end 40 | 41 | -- new removal 42 | millis = rcall("ZSCORE", KEYS[1], ARGV[3]) 43 | 44 | if millis then 45 | -- Delete next programmed job. 46 | local repeatJobId = "repeat:" .. ARGV[3] .. ":" .. millis 47 | if(rcall("ZREM", KEYS[2], repeatJobId) == 1) then 48 | removeJobKeys(ARGV[4] .. repeatJobId) 49 | rcall("XADD", KEYS[3], "*", "event", "removed", "jobId", repeatJobId, "prev", "delayed") 50 | end 51 | end 52 | 53 | if(rcall("ZREM", KEYS[1], ARGV[3]) == 1) then 54 | rcall("DEL", KEYS[1] .. ":" .. 
ARGV[3]) 55 | return 0 56 | end 57 | 58 | return 1 59 | -------------------------------------------------------------------------------- /src/commands/removeUnprocessedChildren-2.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Remove a job from all the statuses it may be in as well as all its data. 3 | In order to be able to remove a job, it cannot be active. 4 | 5 | Input: 6 | KEYS[1] jobKey 7 | KEYS[2] meta key 8 | 9 | ARGV[1] prefix 10 | ARGV[2] jobId 11 | 12 | Events: 13 | 'removed' for every children removed 14 | ]] 15 | 16 | -- Includes 17 | --- @include "includes/removeJobWithChildren" 18 | 19 | local prefix = ARGV[1] 20 | local jobId = ARGV[2] 21 | 22 | local jobKey = KEYS[1] 23 | local metaKey = KEYS[2] 24 | 25 | local options = { 26 | removeChildren = "1", 27 | ignoreProcessed = true, 28 | ignoreLocked = true 29 | } 30 | 31 | removeJobChildren(prefix, jobKey, options) 32 | -------------------------------------------------------------------------------- /src/commands/reprocessJob-8.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Attempts to reprocess a job 3 | 4 | Input: 5 | KEYS[1] job key 6 | KEYS[2] events stream 7 | KEYS[3] job state 8 | KEYS[4] wait key 9 | KEYS[5] meta 10 | KEYS[6] paused key 11 | KEYS[7] active key 12 | KEYS[8] marker key 13 | 14 | ARGV[1] job.id 15 | ARGV[2] (job.opts.lifo ? 'R' : 'L') + 'PUSH' 16 | ARGV[3] propVal - failedReason/returnvalue 17 | ARGV[4] prev state - failed/completed 18 | 19 | Output: 20 | 1 means the operation was a success 21 | -1 means the job does not exist 22 | -3 means the job was not found in the expected set. 
23 | ]] 24 | local rcall = redis.call; 25 | 26 | -- Includes 27 | --- @include "includes/addJobInTargetList" 28 | --- @include "includes/getOrSetMaxEvents" 29 | --- @include "includes/getTargetQueueList" 30 | 31 | if rcall("EXISTS", KEYS[1]) == 1 then 32 | local jobId = ARGV[1] 33 | if (rcall("ZREM", KEYS[3], jobId) == 1) then 34 | rcall("HDEL", KEYS[1], "finishedOn", "processedOn", ARGV[3]) 35 | 36 | local target, isPausedOrMaxed = getTargetQueueList(KEYS[5], KEYS[7], KEYS[4], KEYS[6]) 37 | addJobInTargetList(target, KEYS[8], ARGV[2], isPausedOrMaxed, jobId) 38 | 39 | local maxEvents = getOrSetMaxEvents(KEYS[5]) 40 | -- Emit waiting event 41 | rcall("XADD", KEYS[2], "MAXLEN", "~", maxEvents, "*", "event", "waiting", 42 | "jobId", jobId, "prev", ARGV[4]); 43 | return 1 44 | else 45 | return -3 46 | end 47 | else 48 | return -1 49 | end 50 | -------------------------------------------------------------------------------- /src/commands/saveStacktrace-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Save stacktrace and failedReason. 3 | Input: 4 | KEYS[1] job key 5 | ARGV[1] stacktrace 6 | ARGV[2] failedReason 7 | Output: 8 | 0 - OK 9 | -1 - Missing key 10 | ]] 11 | local rcall = redis.call 12 | 13 | if rcall("EXISTS", KEYS[1]) == 1 then 14 | rcall("HMSET", KEYS[1], "stacktrace", ARGV[1], "failedReason", ARGV[2]) 15 | 16 | return 0 17 | else 18 | return -1 19 | end 20 | -------------------------------------------------------------------------------- /src/commands/updateData-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Update job data 3 | 4 | Input: 5 | KEYS[1] Job id key 6 | 7 | ARGV[1] data 8 | 9 | Output: 10 | 0 - OK 11 | -1 - Missing job. 
12 | ]] 13 | local rcall = redis.call 14 | 15 | if rcall("EXISTS",KEYS[1]) == 1 then -- // Make sure job exists 16 | rcall("HSET", KEYS[1], "data", ARGV[1]) 17 | return 0 18 | else 19 | return -1 20 | end 21 | -------------------------------------------------------------------------------- /src/commands/updateProgress-3.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Update job progress 3 | 4 | Input: 5 | KEYS[1] Job id key 6 | KEYS[2] event stream key 7 | KEYS[3] meta key 8 | 9 | ARGV[1] id 10 | ARGV[2] progress 11 | 12 | Output: 13 | 0 - OK 14 | -1 - Missing job. 15 | 16 | Event: 17 | progress(jobId, progress) 18 | ]] 19 | local rcall = redis.call 20 | 21 | -- Includes 22 | --- @include "includes/getOrSetMaxEvents" 23 | 24 | if rcall("EXISTS", KEYS[1]) == 1 then -- // Make sure job exists 25 | local maxEvents = getOrSetMaxEvents(KEYS[3]) 26 | 27 | rcall("HSET", KEYS[1], "progress", ARGV[2]) 28 | rcall("XADD", KEYS[2], "MAXLEN", "~", maxEvents, "*", "event", "progress", 29 | "jobId", ARGV[1], "data", ARGV[2]); 30 | return 0 31 | else 32 | return -1 33 | end 34 | -------------------------------------------------------------------------------- /src/commands/updateRepeatableJobMillis-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Adds a repeatable job 3 | 4 | Input: 5 | KEYS[1] 'repeat' key 6 | 7 | ARGV[1] next milliseconds 8 | ARGV[2] custom key 9 | ARGV[3] legacy custom key TODO: remove this logic in next breaking change 10 | 11 | Output: 12 | repeatableKey - OK 13 | ]] 14 | local rcall = redis.call 15 | local repeatKey = KEYS[1] 16 | local nextMillis = ARGV[1] 17 | local customKey = ARGV[2] 18 | local legacyCustomKey = ARGV[3] 19 | 20 | if rcall("ZSCORE", repeatKey, customKey) then 21 | rcall("ZADD", repeatKey, nextMillis, customKey) 22 | return customKey 23 | elseif rcall("ZSCORE", repeatKey, legacyCustomKey) ~= false then 24 | rcall("ZADD", repeatKey, 
nextMillis, legacyCustomKey) 25 | return legacyCustomKey 26 | end 27 | 28 | return '' 29 | -------------------------------------------------------------------------------- /src/enums/child-command.ts: -------------------------------------------------------------------------------- 1 | export enum ChildCommand { 2 | Init, 3 | Start, 4 | Stop, 5 | GetChildrenValuesResponse, 6 | GetIgnoredChildrenFailuresResponse, 7 | } 8 | -------------------------------------------------------------------------------- /src/enums/error-code.ts: -------------------------------------------------------------------------------- 1 | export enum ErrorCode { 2 | JobNotExist = -1, 3 | JobLockNotExist = -2, 4 | JobNotInState = -3, 5 | JobPendingChildren = -4, 6 | ParentJobNotExist = -5, 7 | JobLockMismatch = -6, 8 | ParentJobCannotBeReplaced = -7, 9 | JobBelongsToJobScheduler = -8, 10 | JobFailedChildren = -9, 11 | } 12 | -------------------------------------------------------------------------------- /src/enums/index.ts: -------------------------------------------------------------------------------- 1 | export * from './child-command'; 2 | export * from './error-code'; 3 | export * from './parent-command'; 4 | export * from './metrics-time'; 5 | export * from './telemetry-attributes'; 6 | -------------------------------------------------------------------------------- /src/enums/metrics-time.ts: -------------------------------------------------------------------------------- 1 | export enum MetricsTime { 2 | ONE_MINUTE = 1, 3 | FIVE_MINUTES = 5, 4 | FIFTEEN_MINUTES = 15, 5 | THIRTY_MINUTES = 30, 6 | ONE_HOUR = 60, 7 | ONE_WEEK = 60 * 24 * 7, 8 | TWO_WEEKS = 60 * 24 * 7 * 2, 9 | ONE_MONTH = 60 * 24 * 7 * 2 * 4, 10 | } 11 | -------------------------------------------------------------------------------- /src/enums/parent-command.ts: -------------------------------------------------------------------------------- 1 | export enum ParentCommand { 2 | Completed, 3 | Error, 4 | Failed, 5 | 
InitFailed, 6 | InitCompleted, 7 | Log, 8 | MoveToDelayed, 9 | Progress, 10 | Update, 11 | GetChildrenValues, 12 | GetIgnoredChildrenFailures, 13 | } 14 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | export * from './classes'; 2 | export * from './enums'; 3 | export * from './interfaces'; 4 | export * from './types'; 5 | export * from './utils'; 6 | -------------------------------------------------------------------------------- /src/interfaces/advanced-options.ts: -------------------------------------------------------------------------------- 1 | import { BackoffStrategy, RepeatStrategy } from '../types'; 2 | 3 | export interface AdvancedRepeatOptions { 4 | /** 5 | * A custom cron strategy. 6 | */ 7 | repeatStrategy?: RepeatStrategy; 8 | 9 | /** 10 | * A hash algorithm to be used when trying to create the job redis key. 11 | * Default - md5 12 | */ 13 | repeatKeyHashAlgorithm?: string; 14 | } 15 | 16 | export interface AdvancedOptions extends AdvancedRepeatOptions { 17 | /** 18 | * A custom backoff strategy. 19 | */ 20 | backoffStrategy?: BackoffStrategy; 21 | } 22 | -------------------------------------------------------------------------------- /src/interfaces/backoff-options.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Settings for backing off failed jobs. 3 | * 4 | * @see {@link https://docs.bullmq.io/guide/retrying-failing-jobs} 5 | */ 6 | export interface BackoffOptions { 7 | /** 8 | * Name of the backoff strategy. 9 | */ 10 | type: 'fixed' | 'exponential' | (string & {}); 11 | /** 12 | * Delay in milliseconds. 
13 | */ 14 | delay?: number; 15 | } 16 | -------------------------------------------------------------------------------- /src/interfaces/child-message.ts: -------------------------------------------------------------------------------- 1 | import { ParentCommand } from '../enums/parent-command'; 2 | 3 | export interface ChildMessage { 4 | cmd: ParentCommand; 5 | requestId?: string; 6 | value?: any; 7 | err?: Record; 8 | } 9 | -------------------------------------------------------------------------------- /src/interfaces/connection.ts: -------------------------------------------------------------------------------- 1 | import { EventEmitter } from 'events'; 2 | import { Cluster, Redis } from 'ioredis'; 3 | 4 | export type RedisClient = Redis | Cluster; 5 | 6 | export interface IConnection extends EventEmitter { 7 | waitUntilReady(): Promise; 8 | client: Promise; 9 | } 10 | -------------------------------------------------------------------------------- /src/interfaces/debounce-options.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Debounce options 3 | */ 4 | export interface DebounceOptions { 5 | /** 6 | * ttl in milliseconds 7 | */ 8 | ttl?: number; 9 | 10 | /** 11 | * Identifier 12 | */ 13 | id: string; 14 | } 15 | -------------------------------------------------------------------------------- /src/interfaces/flow-job.ts: -------------------------------------------------------------------------------- 1 | import { JobsOptions } from '../types'; 2 | import { QueueOptions } from './queue-options'; 3 | 4 | export interface FlowJobBase { 5 | name: string; 6 | queueName: string; 7 | data?: any; 8 | prefix?: string; 9 | opts?: Omit; 10 | children?: FlowChildJob[]; 11 | } 12 | 13 | export type FlowChildJob = FlowJobBase>; 14 | 15 | export type FlowJob = FlowJobBase; 16 | 17 | export type FlowQueuesOpts = Record< 18 | string, 19 | Omit 20 | >; 21 | 22 | export interface FlowOpts { 23 | /** 24 | * Map of options for 
Queue classes. 25 | */ 26 | queuesOptions: FlowQueuesOpts; 27 | } 28 | -------------------------------------------------------------------------------- /src/interfaces/index.ts: -------------------------------------------------------------------------------- 1 | export * from './advanced-options'; 2 | export * from './backoff-options'; 3 | export * from './base-job-options'; 4 | export * from './child-message'; 5 | export * from './connection'; 6 | export * from './debounce-options'; 7 | export * from './flow-job'; 8 | export * from './ioredis-events'; 9 | export * from './job-json'; 10 | export * from './job-scheduler-json'; 11 | export * from './keep-jobs'; 12 | export * from './metrics-options'; 13 | export * from './metrics'; 14 | export * from './minimal-job'; 15 | export * from './parent-message'; 16 | export * from './parent'; 17 | export * from './parent-options'; 18 | export * from './queue-options'; 19 | export * from './rate-limiter-options'; 20 | export * from './redis-options'; 21 | export * from './redis-streams'; 22 | export * from './repeatable-job'; 23 | export * from './repeatable-options'; 24 | export * from './repeat-options'; 25 | export * from './sandboxed-job-processor'; 26 | export * from './sandboxed-job'; 27 | export * from './sandboxed-options'; 28 | export * from './worker-options'; 29 | export * from './telemetry'; 30 | export * from './receiver'; 31 | -------------------------------------------------------------------------------- /src/interfaces/ioredis-events.ts: -------------------------------------------------------------------------------- 1 | export interface IoredisListener { 2 | /** 3 | * Listen to 'ioredis:close' event. 4 | * 5 | * This event is triggered when ioredis is closed. 
6 | */ 7 | 'ioredis:close': () => void; 8 | } 9 | -------------------------------------------------------------------------------- /src/interfaces/job-json.ts: -------------------------------------------------------------------------------- 1 | import { JobProgress, RedisJobOptions } from '../types'; 2 | import { ParentKeys } from './parent'; 3 | 4 | export interface JobJson { 5 | id: string; 6 | name: string; 7 | data: string; 8 | opts: RedisJobOptions; 9 | progress: JobProgress; 10 | attemptsMade: number; 11 | attemptsStarted: number; 12 | finishedOn?: number; 13 | processedOn?: number; 14 | timestamp: number; 15 | failedReason: string; 16 | stacktrace: string; 17 | returnvalue: string; 18 | parent?: ParentKeys; 19 | parentKey?: string; 20 | repeatJobKey?: string; 21 | nextRepeatableJobKey?: string; 22 | debounceId?: string; 23 | deduplicationId?: string; 24 | processedBy?: string; 25 | stalledCounter: number; 26 | } 27 | 28 | export interface JobJsonRaw { 29 | id: string; 30 | name: string; 31 | data: string; 32 | delay: string; 33 | opts: string; 34 | progress: string; 35 | attemptsMade?: string; 36 | finishedOn?: string; 37 | processedOn?: string; 38 | priority: string; 39 | timestamp: string; 40 | failedReason: string; 41 | stacktrace: string[]; 42 | returnvalue: string; 43 | parentKey?: string; 44 | parent?: string; 45 | deid?: string; 46 | rjk?: string; 47 | nrjid?: string; 48 | atm?: string; 49 | defa?: string; 50 | stc?: string; 51 | ats?: string; 52 | pb?: string; // Worker name 53 | } 54 | -------------------------------------------------------------------------------- /src/interfaces/job-scheduler-json.ts: -------------------------------------------------------------------------------- 1 | import { JobSchedulerTemplateOptions } from '../types'; 2 | 3 | export interface JobSchedulerTemplateJson { 4 | data?: D; 5 | opts?: JobSchedulerTemplateOptions; 6 | } 7 | 8 | export interface JobSchedulerJson { 9 | key: string; // key is actually the job scheduler 
id 10 | name: string; 11 | id?: string | null; 12 | iterationCount?: number; 13 | limit?: number; 14 | endDate?: number; 15 | tz?: string; 16 | pattern?: string; 17 | every?: string; 18 | next?: number; 19 | template?: JobSchedulerTemplateJson; 20 | } 21 | -------------------------------------------------------------------------------- /src/interfaces/keep-jobs.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * KeepJobs 3 | * 4 | * Specify which jobs to keep after finishing. If both age and count are 5 | * specified, then the jobs kept will be the ones that satisfies both 6 | * properties. 7 | */ 8 | export interface KeepJobs { 9 | /** 10 | * Maximum age in seconds for job to be kept. 11 | */ 12 | age?: number; 13 | 14 | /** 15 | * Maximum count of jobs to be kept. 16 | */ 17 | count?: number; 18 | } 19 | -------------------------------------------------------------------------------- /src/interfaces/metrics-options.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * 3 | * 4 | */ 5 | export interface MetricsOptions { 6 | /** 7 | * Enable gathering metrics for finished jobs. 8 | * Output refers to all finished jobs, completed or 9 | * failed. 
10 | */ 11 | maxDataPoints?: number; 12 | } 13 | -------------------------------------------------------------------------------- /src/interfaces/metrics.ts: -------------------------------------------------------------------------------- 1 | export interface Metrics { 2 | meta: { 3 | count: number; 4 | prevTS: number; 5 | prevCount: number; 6 | }; 7 | data: number[]; 8 | count: number; 9 | } 10 | -------------------------------------------------------------------------------- /src/interfaces/parent-message.ts: -------------------------------------------------------------------------------- 1 | import { ChildCommand } from '../enums/child-command'; 2 | import { JobJson } from './job-json'; 3 | 4 | export interface ParentMessage { 5 | cmd: ChildCommand; 6 | value?: any; 7 | err?: Error; 8 | job?: JobJson; 9 | } 10 | -------------------------------------------------------------------------------- /src/interfaces/parent-options.ts: -------------------------------------------------------------------------------- 1 | export type ParentOptions = { 2 | /** 3 | * Parent identifier. 4 | */ 5 | id: string; 6 | 7 | /** 8 | * It includes the prefix, the namespace separator :, and queue name. 9 | * @see {@link https://www.gnu.org/software/gawk/manual/html_node/Qualified-Names.html} 10 | */ 11 | queue: string; 12 | }; 13 | -------------------------------------------------------------------------------- /src/interfaces/parent.ts: -------------------------------------------------------------------------------- 1 | import { JobsOptions } from '../types'; 2 | 3 | /** 4 | * Describes the parent for a Job. 
5 | */ 6 | export interface Parent { 7 | name: string; 8 | prefix?: string; 9 | queue?: string; 10 | data?: T; 11 | opts?: JobsOptions; 12 | } 13 | 14 | export interface ParentKeys { 15 | id?: string; 16 | queueKey: string; 17 | fpof?: boolean; 18 | rdof?: boolean; 19 | idof?: boolean; 20 | cpof?: boolean; 21 | } 22 | 23 | export type ParentKeyOpts = { 24 | waitChildrenKey?: string; 25 | parentDependenciesKey?: string; 26 | parentKey?: string; 27 | }; 28 | -------------------------------------------------------------------------------- /src/interfaces/rate-limiter-options.ts: -------------------------------------------------------------------------------- 1 | export interface RateLimiterOptions { 2 | /** 3 | * Max number of jobs to process in the time period 4 | * specified in `duration`. 5 | */ 6 | max: number; 7 | 8 | /** 9 | * Time in milliseconds. During this time, a maximum 10 | * of `max` jobs will be processed. 11 | */ 12 | duration: number; 13 | } 14 | -------------------------------------------------------------------------------- /src/interfaces/receiver.ts: -------------------------------------------------------------------------------- 1 | export interface Receiver { 2 | on: (evt: 'message', cb: (msg: any) => void) => void; 3 | off: (evt: 'message', cb: (msg: any) => void) => void; 4 | } 5 | -------------------------------------------------------------------------------- /src/interfaces/redis-options.ts: -------------------------------------------------------------------------------- 1 | import type * as IORedis from 'ioredis'; 2 | 3 | export interface BaseOptions { 4 | skipVersionCheck?: boolean; 5 | url?: string; 6 | } 7 | 8 | export type RedisOptions = IORedis.RedisOptions & BaseOptions; 9 | 10 | export type ClusterOptions = IORedis.ClusterOptions & BaseOptions; 11 | 12 | export type ConnectionOptions = 13 | | RedisOptions 14 | | ClusterOptions 15 | | IORedis.Redis 16 | | IORedis.Cluster; 17 | 
-------------------------------------------------------------------------------- /src/interfaces/redis-streams.ts: -------------------------------------------------------------------------------- 1 | // The types declaration is wrong for xread, so we need to cast returns until its fixed 2 | // 3 | // https://github.com/DefinitelyTyped/DefinitelyTyped/issues/44301 4 | // 5 | export type StreamName = string; 6 | export type EntryId = string; 7 | export type EntryRaw = [EntryId, string[]]; 8 | export type StreamReadRaw = [StreamName, EntryRaw[]][] | null | undefined; // [string, [string, string[]][]][] 9 | -------------------------------------------------------------------------------- /src/interfaces/repeat-options.ts: -------------------------------------------------------------------------------- 1 | import { ParserOptions } from 'cron-parser'; 2 | 3 | /** 4 | * Settings for repeatable jobs 5 | * 6 | * @see {@link https://docs.bullmq.io/guide/jobs/repeatable} 7 | */ 8 | export interface RepeatOptions extends Omit { 9 | /** 10 | * A repeat pattern 11 | */ 12 | pattern?: string; 13 | 14 | /** 15 | * Custom repeatable key. This is the key that holds the "metadata" 16 | * of a given repeatable job. This key is normally auto-generated but 17 | * it is sometimes useful to specify a custom key for easier retrieval 18 | * of repeatable jobs. 19 | */ 20 | key?: string; 21 | 22 | /** 23 | * Number of times the job should repeat at max. 24 | */ 25 | limit?: number; 26 | 27 | /** 28 | * Repeat after this amount of milliseconds 29 | * (`pattern` setting cannot be used together with this setting.) 30 | */ 31 | every?: number; 32 | 33 | /** 34 | * Repeated job should start right now 35 | * ( work only with cron settings) 36 | */ 37 | immediately?: boolean; 38 | 39 | /** 40 | * The start value for the repeat iteration count. 
41 | */ 42 | count?: number; 43 | 44 | /** 45 | * Offset in milliseconds to affect the next iteration time 46 | * 47 | * */ 48 | offset?: number; 49 | 50 | /** 51 | * Internal property to store the previous time the job was executed. 52 | */ 53 | prevMillis?: number; 54 | 55 | /** 56 | * Internal property to store de job id 57 | * @deprecated not in use anymore 58 | */ 59 | jobId?: string; 60 | } 61 | -------------------------------------------------------------------------------- /src/interfaces/repeatable-job.ts: -------------------------------------------------------------------------------- 1 | // TODO: remove this type in favor of JobSchedulerJson in next breaking change 2 | export type RepeatableJob = { 3 | key: string; 4 | name: string; 5 | id?: string | null; 6 | endDate: number | null; 7 | tz: string | null; 8 | pattern: string | null; 9 | every?: string | null; 10 | next?: number; 11 | }; 12 | -------------------------------------------------------------------------------- /src/interfaces/repeatable-options.ts: -------------------------------------------------------------------------------- 1 | export type RepeatableOptions = { 2 | name: string; 3 | endDate?: number; 4 | tz?: string; 5 | limit?: number; 6 | pattern?: string; 7 | every?: number; 8 | }; 9 | -------------------------------------------------------------------------------- /src/interfaces/sandboxed-job-processor.ts: -------------------------------------------------------------------------------- 1 | import { SandboxedJob } from './sandboxed-job'; 2 | 3 | /** 4 | * @see {@link https://docs.bullmq.io/guide/workers/sandboxed-processors} 5 | */ 6 | export type SandboxedJobProcessor = 7 | | ((job: SandboxedJob) => R | PromiseLike) 8 | | (( 9 | job: SandboxedJob, 10 | callback: (error: unknown, result: R) => void, 11 | ) => void); 12 | -------------------------------------------------------------------------------- /src/interfaces/sandboxed-job.ts: 
-------------------------------------------------------------------------------- 1 | import { JobJsonSandbox, JobProgress, JobsOptions } from '../types'; 2 | 3 | /** 4 | * @see {@link https://docs.bullmq.io/guide/workers/sandboxed-processors} 5 | */ 6 | export interface SandboxedJob 7 | extends Omit { 8 | data: T; 9 | opts: JobsOptions; 10 | moveToDelayed: (timestamp: number, token?: string) => Promise; 11 | log: (row: any) => void; 12 | updateData: (data: any) => Promise; 13 | updateProgress: (value: JobProgress) => Promise; 14 | returnValue: R; 15 | } 16 | -------------------------------------------------------------------------------- /src/interfaces/sandboxed-options.ts: -------------------------------------------------------------------------------- 1 | import { ForkOptions } from 'child_process'; 2 | import { WorkerOptions as WorkerThreadsOptions } from 'worker_threads'; 3 | 4 | export interface SandboxedOptions { 5 | /** 6 | * Use Worker Threads instead of Child Processes. 7 | * Note: This option can only be used when specifying 8 | * a file for the processor argument. 9 | * 10 | * @defaultValue false 11 | */ 12 | useWorkerThreads?: boolean; 13 | 14 | /** 15 | * Support passing Worker Fork Options. 16 | * Note: This option can only be used when specifying 17 | * a file for the processor argument and useWorkerThreads is passed as false (default value). 18 | * @see {@link https://nodejs.org/api/child_process.html#child_processforkmodulepath-args-options} 19 | */ 20 | workerForkOptions?: ForkOptions; 21 | 22 | /** 23 | * Support passing Worker Threads Options. 24 | * Note: This option can only be used when specifying 25 | * a file for the processor argument and useWorkerThreads is passed as true. 
26 | * @see {@link https://nodejs.org/api/worker_threads.html#new-workerfilename-options} 27 | */ 28 | workerThreadsOptions?: WorkerThreadsOptions; 29 | } 30 | -------------------------------------------------------------------------------- /src/types/backoff-strategy.ts: -------------------------------------------------------------------------------- 1 | import { MinimalJob } from '../interfaces/minimal-job'; 2 | 3 | export type BackoffStrategy = ( 4 | attemptsMade: number, 5 | type?: string, 6 | err?: Error, 7 | job?: MinimalJob, 8 | ) => Promise | number; 9 | -------------------------------------------------------------------------------- /src/types/finished-status.ts: -------------------------------------------------------------------------------- 1 | export type FinishedStatus = 'completed' | 'failed'; 2 | 3 | export type FinishedPropValAttribute = 'returnvalue' | 'failedReason'; 4 | -------------------------------------------------------------------------------- /src/types/index.ts: -------------------------------------------------------------------------------- 1 | export * from './backoff-strategy'; 2 | export * from './finished-status'; 3 | export * from './minimal-queue'; 4 | export * from './job-json-sandbox'; 5 | export * from './job-options'; 6 | export * from './job-scheduler-template-options'; 7 | export * from './job-type'; 8 | export * from './repeat-strategy'; 9 | export * from './job-progress'; 10 | -------------------------------------------------------------------------------- /src/types/job-json-sandbox.ts: -------------------------------------------------------------------------------- 1 | import { JobJson } from '../interfaces'; 2 | 3 | export type JobJsonSandbox = JobJson & { 4 | queueName: string; 5 | prefix: string; 6 | }; 7 | -------------------------------------------------------------------------------- /src/types/job-progress.ts: -------------------------------------------------------------------------------- 1 | export type 
JobProgress = string | boolean | number | object; 2 | -------------------------------------------------------------------------------- /src/types/job-scheduler-template-options.ts: -------------------------------------------------------------------------------- 1 | import { JobsOptions } from './job-options'; 2 | 3 | export type JobSchedulerTemplateOptions = Omit< 4 | JobsOptions, 5 | 'jobId' | 'repeat' | 'delay' | 'deduplication' | 'debounce' 6 | >; 7 | -------------------------------------------------------------------------------- /src/types/job-type.ts: -------------------------------------------------------------------------------- 1 | import { FinishedStatus } from './finished-status'; 2 | 3 | export type JobState = 4 | | FinishedStatus 5 | | 'active' 6 | | 'delayed' 7 | | 'prioritized' 8 | | 'waiting' 9 | | 'waiting-children'; 10 | 11 | export type JobType = JobState | 'paused' | 'repeat' | 'wait'; 12 | -------------------------------------------------------------------------------- /src/types/minimal-queue.ts: -------------------------------------------------------------------------------- 1 | import { QueueBase } from '../classes/queue-base'; 2 | 3 | export type MinimalQueue = Pick< 4 | QueueBase, 5 | | 'name' 6 | | 'client' 7 | | 'toKey' 8 | | 'keys' 9 | | 'opts' 10 | | 'qualifiedName' 11 | | 'closing' 12 | | 'waitUntilReady' 13 | | 'removeListener' 14 | | 'emit' 15 | | 'on' 16 | | 'redisVersion' 17 | | 'trace' 18 | >; 19 | -------------------------------------------------------------------------------- /src/types/net.d.ts: -------------------------------------------------------------------------------- 1 | /* TODO: remove as soon as node 12 is deprecated */ 2 | declare module 'node:net' { 3 | export * from 'net'; 4 | } 5 | -------------------------------------------------------------------------------- /src/types/repeat-strategy.ts: -------------------------------------------------------------------------------- 1 | import { RepeatOptions } from 
'../interfaces/repeat-options'; 2 | 3 | export type RepeatStrategy = ( 4 | millis: number, 5 | opts: RepeatOptions, 6 | name?: string, 7 | ) => number | undefined | Promise; 8 | -------------------------------------------------------------------------------- /src/version.ts: -------------------------------------------------------------------------------- 1 | export const version = '5.53.1'; 2 | -------------------------------------------------------------------------------- /tests/fixtures/delay.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function delay(ms) { 4 | return new Promise(function (resolve) { 5 | return setTimeout(resolve, ms); 6 | }); 7 | }; 8 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor.cjs: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (/*job*/) { 10 | return delay(500).then(() => { 11 | return 42; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (/*job*/) { 10 | return delay(1000).then(() => { 11 | return 42; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_bar.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 
3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (/*job*/) { 10 | return delay(500).then(() => { 11 | return 'bar'; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_broken.js: -------------------------------------------------------------------------------- 1 | throw new Error('Broken file processor'); 2 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_crash.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | module.exports = function (job) { 8 | setTimeout(() => { 9 | if (typeof job.data.exitCode !== 'number') { 10 | throw new Error('boom!'); 11 | } 12 | process.exit(job.data.exitCode); 13 | }, 100); 14 | 15 | return new Promise(() => { 16 | // do nothing 17 | }); 18 | }; 19 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_env.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (/*job*/) { 10 | return delay(500).then(() => { 11 | if (process.env.variable === 'variable') { 12 | return 'variable'; 13 | } 14 | throw new Error('Manually failed processor'); 15 | }); 16 | }; 17 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_exit.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 
3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (/*job*/) { 10 | return delay(200).then(() => { 11 | delay(100).then(() => { 12 | process.exit(0); 13 | }); 14 | }); 15 | }; 16 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_fail.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | class TestError extends Error { 10 | metadata = 'metadata'; 11 | } 12 | 13 | module.exports = function (/*job*/) { 14 | return delay(500).then(() => { 15 | throw new TestError('Manually failed processor'); 16 | }); 17 | }; 18 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_fail_with_circular_reference.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (/*job*/) { 10 | return delay(500).then(() => { 11 | const error = new Error('error'); 12 | const value = {}; 13 | value.ref = value; 14 | error.custom = value; 15 | error.reference = error; 16 | 17 | throw error; 18 | }); 19 | }; 20 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_foo.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 
3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (/*job*/) { 10 | return delay(500).then(() => { 11 | return 'foo'; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_get_children_failures.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | module.exports = async function (job) { 8 | const values = await job.getIgnoredChildrenFailures(); 9 | return values; 10 | }; 11 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_get_children_failures_child.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | module.exports = function (job) { 8 | throw new Error('child error'); 9 | }; 10 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_get_children_values.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | module.exports = async function (job) { 8 | const values = await job.getChildrenValues(); 9 | return values; 10 | }; 11 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_get_children_values_child.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 
3 | * 4 | */ 5 | 'use strict'; 6 | 7 | module.exports = function (job) { 8 | return { childResult: 'bar' }; 9 | }; 10 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_missing_function.js: -------------------------------------------------------------------------------- 1 | module.exports = {}; 2 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_move_to_delayed.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const { DelayedError } = require('../../dist/cjs/classes'); 8 | const delay = require('./delay'); 9 | 10 | module.exports = function (job, token) { 11 | if (job.attemptsStarted == 1) { 12 | return delay(250) 13 | .then(() => { 14 | job.moveToDelayed(Date.now() + 2500, token); 15 | return delay(500); 16 | }) 17 | .then(() => { 18 | throw new DelayedError(); 19 | }); 20 | } 21 | }; 22 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_parent.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (job) { 10 | return delay(200).then(() => { 11 | return job.parent; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_queueName.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 
3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (job) { 10 | return delay(500).then(() => { 11 | return job.queueName; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_slow.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (/*job*/) { 10 | return delay(1000).then(() => { 11 | return 42; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_stderr.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (/*job*/) { 10 | return delay(500).then(() => { 11 | console.error('error message'); 12 | return 1; 13 | }); 14 | }; 15 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_stdout.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 
3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (/*job*/) { 10 | return delay(500).then(() => { 11 | console.log('message'); 12 | return 1; 13 | }); 14 | }; 15 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_steps.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const delay = require('./delay'); 4 | 5 | module.exports = async function (job) { 6 | let step = job.data.step; 7 | while (step !== 'FINISH') { 8 | switch (step) { 9 | case 'INITIAL': { 10 | await delay(200); 11 | const data = { 12 | ...job.data, 13 | step: 'SECOND', 14 | extraDataSecondStep: 'second data', 15 | }; 16 | await job.updateData(data); 17 | step = 'SECOND'; 18 | break; 19 | } 20 | case 'SECOND': { 21 | await delay(200); 22 | const data = { 23 | ...job.data, 24 | extraDataFinishedStep: 'finish data', 25 | step: 'FINISH', 26 | }; 27 | 28 | await job.updateData(data); 29 | step = 'FINISH'; 30 | return; 31 | } 32 | default: { 33 | throw new Error('invalid step'); 34 | } 35 | } 36 | } 37 | }; 38 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_ttl.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | // This processor will timeout in 10 seconds. 
8 | const MAX_TTL = 1_000; 9 | const CLEANUP_TTL = 500; 10 | 11 | const TTL_EXIT_CODE = 10; 12 | 13 | module.exports = async function (job) { 14 | let hasCompleted = false; 15 | const harKillTimeout = setTimeout(() => { 16 | if (!hasCompleted) { 17 | process.exit(TTL_EXIT_CODE); 18 | } 19 | }, MAX_TTL); 20 | 21 | const softKillTimeout = setTimeout(async () => { 22 | await doCleanup(job); 23 | }, CLEANUP_TTL); 24 | 25 | try { 26 | // If doAsyncWork is CPU intensive and blocks NodeJS loop forever, the timeout will never be triggered. 27 | await doAsyncWork(job); 28 | hasCompleted = true; 29 | } finally { 30 | // Important to clear the timeouts before returning as this process will be reused. 31 | clearTimeout(harKillTimeout); 32 | clearTimeout(softKillTimeout); 33 | } 34 | }; 35 | 36 | const doAsyncWork = async job => { 37 | // Simulate a long running operation. 38 | await new Promise(resolve => setTimeout(resolve, 10000)); 39 | }; 40 | 41 | const doCleanup = async job => { 42 | // Simulate a cleanup operation. 43 | await job.updateProgress(50); 44 | }; 45 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_unrecoverable.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const { UnrecoverableError } = require('../../dist/cjs/classes'); 8 | const delay = require('./delay'); 9 | 10 | module.exports = function (job) { 11 | return delay(500).then(() => { 12 | if (job.attemptsMade < 1) { 13 | throw new Error('Not yet!'); 14 | } 15 | if (job.attemptsMade < 2) { 16 | throw new UnrecoverableError(); 17 | } 18 | }); 19 | }; 20 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_update_data.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 
3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (job) { 10 | return delay(50) 11 | .then(() => { 12 | job.updateData({ foo: 'bar' }); 13 | return delay(100); 14 | }) 15 | .then(() => { 16 | job.updateData({ foo: 'baz' }); 17 | delay(100); 18 | return 'result'; 19 | }); 20 | }; 21 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_update_progress.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (job) { 10 | return delay(50) 11 | .then(() => { 12 | job.updateProgress(10); 13 | return delay(100); 14 | }) 15 | .then(() => { 16 | job.updateProgress(27); 17 | return delay(150); 18 | }) 19 | .then(() => { 20 | job.updateProgress(78); 21 | return delay(100); 22 | }) 23 | .then(() => { 24 | job.updateProgress(100); 25 | }) 26 | .then(() => { 27 | return job.progress; 28 | }); 29 | }; 30 | -------------------------------------------------------------------------------- /tests/fixtures/fixture_processor_with_extra_param.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A processor file to be used in tests. 
3 | * 4 | */ 5 | 'use strict'; 6 | 7 | const delay = require('./delay'); 8 | 9 | module.exports = function (job, token, extraParam) { 10 | return delay(500).then(() => { 11 | return 42; 12 | }); 13 | }; 14 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/dir-test/empty/test.js: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/tests/fixtures/scripts/dir-test/empty/test.js -------------------------------------------------------------------------------- /tests/fixtures/scripts/dir-test/non-lua/fixture_non_lua_file.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taskforcesh/bullmq/483c9325597c21a9e7bb881d34e7f38615a87c43/tests/fixtures/scripts/dir-test/non-lua/fixture_non_lua_file.txt -------------------------------------------------------------------------------- /tests/fixtures/scripts/dir-test/non-lua/test.lua: -------------------------------------------------------------------------------- 1 | local name = 'test.lua' 2 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/dir-test/one-0.lua: -------------------------------------------------------------------------------- 1 | local one = 1 2 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/dir-test/three-2.lua: -------------------------------------------------------------------------------- 1 | local three = 3 2 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/dir-test/two-1.lua: -------------------------------------------------------------------------------- 1 | --- 2 | --- Generated by EmmyLua(https://github.com/EmmyLua) 3 | --- Created by ccollie. 
4 | --- DateTime: 11/20/21 8:34 PM 5 | --- 6 | local two = 2 7 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/fixture_circular_dependency.lua: -------------------------------------------------------------------------------- 1 | --- file: fixture_circular_dependency.lua 2 | --- @include "fixture_circular_dependency_child" 3 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/fixture_circular_dependency_child.lua: -------------------------------------------------------------------------------- 1 | --- file: fixture_circlular_dependency_child.lua 2 | --- @include "fixture_circular_dependency" 3 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/fixture_duplicate_elimination.lua: -------------------------------------------------------------------------------- 1 | --- 2 | --- Fixture for script deduplication 3 | --- We include multiple scripts which directly or 4 | --- transitively include "strings.lua". 
It should only be included once 5 | --- 6 | --- @include "includes/fixture_recursive_grandchild" 7 | --- @include "includes/utils" 8 | --- @include "includes/strings" 9 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/fixture_duplicate_include.lua: -------------------------------------------------------------------------------- 1 | --- file: fixture_duplicate_include.lua 2 | --- @include "includes/utils" 3 | --- @include "includes/utils" 4 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/fixture_glob_includes.lua: -------------------------------------------------------------------------------- 1 | --- @include "includes/fixture_glob_*.lua" 2 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/fixture_missing_include.lua: -------------------------------------------------------------------------------- 1 | --- file: fixture_missing_include.lua 2 | --- @include "includes/non-existent" 3 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/fixture_path_mapped.lua: -------------------------------------------------------------------------------- 1 | --- 2 | --- Expects a path mapping of map-glob to be set in the test file 3 | --- addScriptPathMapping('includes', './fixtures/scripts/includes'); 4 | --- 5 | --- @include "/math.lua" 6 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/fixture_path_mapped_glob.lua: -------------------------------------------------------------------------------- 1 | --- 2 | --- Expects a path mapping of map-glob to be set in the test file 3 | --- addScriptPathMapping('map-glob', './fixtures/mapped'); 4 | --- 5 | --- @include "/fixture_mapped*.lua" 6 | -------------------------------------------------------------------------------- 
/tests/fixtures/scripts/fixture_recursive_parent.lua: -------------------------------------------------------------------------------- 1 | --- @include "includes/fixture_recursive_child" 2 | --- file: fixture_recursive_parent.lua 3 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/fixture_simple_include.lua: -------------------------------------------------------------------------------- 1 | --- file: fixture_simple_include.lua 2 | --- @include "fixture_simple_include_child" 3 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/fixture_simple_include_child.lua: -------------------------------------------------------------------------------- 1 | --- file: fixture_simple_include_child.lua 2 | --- @include "includes/math.lua" 3 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/includes/fixture_glob_include_1.lua: -------------------------------------------------------------------------------- 1 | --- file: fixture_glob_include_1.lua 2 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/includes/fixture_glob_include_2.lua: -------------------------------------------------------------------------------- 1 | --- file: fixture_glob_include_2.lua 2 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/includes/fixture_recursive_child.lua: -------------------------------------------------------------------------------- 1 | --- @include "fixture_recursive_grandchild" 2 | --- file: fixture_recursive_child.lua 3 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/includes/fixture_recursive_grandchild.lua: -------------------------------------------------------------------------------- 1 | --- @include "fixture_recursive_great_grandchild" 2 | --- file: 
fixture_recursive_grandchild.lua 3 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/includes/fixture_recursive_great_grandchild.lua: -------------------------------------------------------------------------------- 1 | --- @include "strings" 2 | --- file: fixture_recursive_great_grandchild.lua 3 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/includes/math.lua: -------------------------------------------------------------------------------- 1 | --- file: math.lua 2 | local function sign(x) 3 | x = tonumber(x) 4 | if x == 0 then return 0 end 5 | return x < 0 and -1 or 1 6 | end 7 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/includes/strings.lua: -------------------------------------------------------------------------------- 1 | --- file: strings.lua 2 | local function isString(x) 3 | return type(x) == 'string' 4 | end 5 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/includes/utils.lua: -------------------------------------------------------------------------------- 1 | --- @include "strings" 2 | --- @include "math" 3 | --- file: utils.lua 4 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/load/broadcastEvent-1.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Broadcast a message. 
3 | 4 | Input: 5 | KEYS[1] channel, 6 | ARGV[1] event 7 | ARGV[2] payload 8 | ]] 9 | local channel = KEYS[1] 10 | local event = ARGV[1] 11 | local payload = ARGV[2] 12 | 13 | -- Emit event 14 | redis.call("XADD", channel, "*", "event", event, "payload", payload); 15 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/mapped/fixture_mapped_include_1.lua: -------------------------------------------------------------------------------- 1 | -- file: fixture_mapped_include_1.lua 2 | -------------------------------------------------------------------------------- /tests/fixtures/scripts/mapped/fixture_mapped_include_2.lua: -------------------------------------------------------------------------------- 1 | -- file: fixture_mapped_include_2.lua 2 | -------------------------------------------------------------------------------- /tests/utils/repeat_utils.ts: -------------------------------------------------------------------------------- 1 | import { createHash } from 'crypto'; 2 | 3 | export function createRepeatableJobKey( 4 | jobName: string, 5 | jobId: string, 6 | endDate: string, 7 | tz: string, 8 | suffix: number, 9 | ): string { 10 | return `${jobName}:${jobId}:${endDate}:${tz}:${suffix}`; 11 | } 12 | 13 | export function getRepeatableJobKeyPrefix( 14 | prefix: string, 15 | queueName: string, 16 | ): string { 17 | return `${prefix}:${queueName}:repeat:`; 18 | } 19 | 20 | export function extractRepeatableJobChecksumFromRedisKey( 21 | redisKey: string, 22 | ): string { 23 | return redisKey.split(':')[3]; 24 | } 25 | 26 | export function hash(repeatKeyHashAlgorithm: string, payload: string): string { 27 | return createHash(repeatKeyHashAlgorithm).update(payload).digest('hex'); 28 | } 29 | 30 | export function getRepeatJobIdCheckum( 31 | repeatJobKey: string, 32 | repeatKeyHashAlgorithm: string, 33 | ): string { 34 | return hash(repeatKeyHashAlgorithm, repeatJobKey); 35 | } 36 | 
-------------------------------------------------------------------------------- /tsconfig-cjs.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./tsconfig.json", 3 | "compilerOptions": { 4 | "module": "node16", 5 | "outDir": "dist/cjs", 6 | "declaration": false, 7 | "declarationDir": null, 8 | "moduleResolution": "node16" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "types": ["node"], 4 | "target": "ES2017", 5 | "module": "ES2020", 6 | "incremental": true, 7 | "declaration": true, 8 | "outDir": "dist/esm", 9 | "sourceMap": true, 10 | "experimentalDecorators": true, 11 | "emitDecoratorMetadata": true, 12 | "strict": true, 13 | "jsx": "preserve", 14 | "importHelpers": true, 15 | "moduleResolution": "node", 16 | "esModuleInterop": false, 17 | "allowSyntheticDefaultImports": false, 18 | "strictNullChecks": false, 19 | "baseUrl": ".", 20 | "lib": ["esnext", "DOM"] 21 | }, 22 | "include": ["src"], 23 | "exclude": ["node_modules", "dist", "tests/*", "src/commands/*.ts"] 24 | } 25 | --------------------------------------------------------------------------------