├── docs
│ ├── .nojekyll
│ ├── images
│ │ ├── job-states.png
│ │ └── job-states.drawio
│ ├── _sidebar.md
│ ├── api
│ │ ├── pubsub.md
│ │ ├── utils.md
│ │ ├── events.md
│ │ ├── scheduling.md
│ │ ├── ops.md
│ │ ├── workers.md
│ │ ├── queues.md
│ │ ├── constructor.md
│ │ ├── testing.md
│ │ └── jobs.md
│ ├── index.html
│ ├── install.md
│ ├── sql.md
│ ├── introduction.md
│ └── README.md
├── .github
│ ├── FUNDING.yml
│ └── workflows
│   └── ci.yml
├── .gitignore
├── test
│ ├── config.json
│ ├── moduleTest.ts
│ ├── test-types.d.ts
│ ├── backgroundErrorTest.ts
│ ├── speedTest.ts
│ ├── databaseTest.ts
│ ├── toolsTest.ts
│ ├── hooks.ts
│ ├── multiMasterTest.ts
│ ├── priorityTest.ts
│ ├── resumeTest.ts
│ ├── deleteTest.ts
│ ├── delayTest.ts
│ ├── configTest.ts
│ ├── cancelTest.ts
│ ├── expireTest.ts
│ ├── opsTest.ts
│ ├── queueStatsTest.ts
│ ├── testHelper.ts
│ ├── throttleTest.ts
│ ├── retryTest.ts
│ ├── publishTest.ts
│ ├── completeTest.ts
│ ├── monitoringTest.ts
│ ├── insertTest.ts
│ ├── sendTest.ts
│ ├── failureTest.ts
│ ├── fetchTest.ts
│ ├── migrationTest.ts
│ ├── workTest.ts
│ └── queueTest.ts
├── .mocharc.json
├── .nycrc.json
├── tsconfig.json
├── eslint.config.js
├── tsconfig.build.json
├── docker-compose.yaml
├── examples
│ ├── readme.cjs
│ ├── readme.mjs
│ ├── schedule.js
│ └── load
│   └── index.ts
├── LICENSE
├── src
│ ├── db.ts
│ ├── tools.ts
│ ├── contractor.ts
│ ├── spy.ts
│ ├── worker.ts
│ ├── boss.ts
│ ├── migrationStore.ts
│ ├── timekeeper.ts
│ └── types.ts
├── package.json
└── README.md
/docs/.nojekyll:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | github: timgit
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | .nyc_output
3 | .vscode
4 | *.tgz
5 | *.log
6 | coverage
7 | dist
8 |
--------------------------------------------------------------------------------
/docs/images/job-states.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timgit/pg-boss/HEAD/docs/images/job-states.png
--------------------------------------------------------------------------------
/test/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "host": "127.0.0.1",
3 | "port": 5432,
4 | "database": "pgboss",
5 | "user": "postgres",
6 | "password": "postgres",
7 | "max": 3,
8 | "debug": false
9 | }
10 |
--------------------------------------------------------------------------------
/.mocharc.json:
--------------------------------------------------------------------------------
1 | {
2 | "timeout": 10000,
3 | "slow": 10000,
4 | "bail": true,
5 | "parallel": true,
6 | "require": [
7 | "./test/hooks.ts"
8 | ],
9 | "node-option": [
10 | "import=tsx"
11 | ]
12 | }
--------------------------------------------------------------------------------
/.nycrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "@istanbuljs/nyc-config-typescript",
3 | "include": [
4 | "src/**/*.ts"
5 | ],
6 | "sourceMap": true,
7 | "instrument": true,
8 | "reporter": [
9 | "lcov",
10 | "text-summary",
11 | "text"
12 | ]
13 | }
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": [
3 | "@tsconfig/node22/tsconfig.json",
4 | "@tsconfig/node-ts/tsconfig.json"
5 | ],
6 | "compilerOptions": {
7 | "outDir": "dist",
8 | "declaration": true,
9 | "declarationMap": true,
10 | "stripInternal": true,
11 | "resolveJsonModule": true,
12 | "noEmit": true
13 | },
14 | "include": ["src", "test"]
15 | }
--------------------------------------------------------------------------------
/eslint.config.js:
--------------------------------------------------------------------------------
1 | import neostandard from 'neostandard'
2 | import { defineConfig } from 'eslint/config'
3 |
4 | const config = neostandard({
5 | ts: true,
6 | env: ['mocha'],
7 | ignores: neostandard.resolveIgnoresFromGitignore(),
8 | noJsx: true,
9 | })
10 |
11 | export default defineConfig(config, {
12 | languageOptions: {
13 | ecmaVersion: 2025,
14 | },
15 | })
16 |
--------------------------------------------------------------------------------
/tsconfig.build.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": [
3 | "@tsconfig/node22/tsconfig.json",
4 | "@tsconfig/node-ts/tsconfig.json"
5 | ],
6 | "compilerOptions": {
7 | "rootDir": "src",
8 | "outDir": "dist",
9 | "declaration": true,
10 | "declarationMap": true,
11 | "stripInternal": true,
12 | "resolveJsonModule": true
13 | },
14 | "include": ["src"]
15 | }
16 |
--------------------------------------------------------------------------------
/test/moduleTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import { states } from '../src/index.ts'
3 |
4 | describe('module', function () {
5 | it('should export states object', function () {
6 | assert(states.created)
7 | assert(states.retry)
8 | assert(states.active)
9 | assert(states.completed)
10 | assert(states.cancelled)
11 | assert(states.failed)
12 | })
13 | })
14 |
--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | db:
3 | image: postgres:18
4 | ports:
5 | - 5432:5432
6 | volumes:
7 | - db_volume:/var/lib/postgresql/data18
8 | environment:
9 | - POSTGRES_DB=pgboss
10 | - POSTGRES_NAME=pgboss
11 | - POSTGRES_USER=postgres
12 | - POSTGRES_PASSWORD=postgres
13 | command: -c 'max_connections=400'
14 |
15 | volumes:
16 | db_volume:
17 |
--------------------------------------------------------------------------------
/docs/_sidebar.md:
--------------------------------------------------------------------------------
1 | * [Home](/)
2 | * [Introduction](introduction.md)
3 | * [Install](install.md)
4 | * API
5 | * * [Constructor](./api/constructor.md)
6 | * * [Events](./api/events.md)
7 | * * [Operations](./api/ops.md)
8 | * * [Queues](./api/queues.md)
9 | * * [Jobs](./api/jobs.md)
10 | * * [Scheduling](./api/scheduling.md)
11 | * * [PubSub](./api/pubsub.md)
12 | * * [Workers](./api/workers.md)
13 | * * [Testing](./api/testing.md)
14 | * * [Utils](./api/utils.md)
15 | * [SQL](sql.md)
--------------------------------------------------------------------------------
/docs/api/pubsub.md:
--------------------------------------------------------------------------------
1 | # Pub-sub
2 |
3 | Pub-sub in pg-boss is a light abstraction for creating jobs in more than one queue from a single published event. If you don't need this fan-out behavior, use `send()` or `insert()` instead.
4 |
5 | ### `publish(event, data, options)`
6 |
7 | Publish an event with optional data and options (same as the `send()` arguments). Looks up all subscriptions for the event and sends a job to each subscribed queue.
8 |
9 | ### `subscribe(event, name)`
10 |
11 | Subscribe queue `name` to `event`.
12 |
13 | ### `unsubscribe(event, name)`
14 |
15 | Remove the subscription of queue `name` to `event`.
16 |
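17 | For example (a minimal sketch; the event and queue names are illustrative), a single published event fans out to every subscribed queue:
18 |
19 | ```js
20 | await boss.createQueue('email-queue')
21 | await boss.createQueue('analytics-queue')
22 |
23 | await boss.subscribe('order-created', 'email-queue')
24 | await boss.subscribe('order-created', 'analytics-queue')
25 |
26 | // one publish creates a job in each subscribed queue
27 | await boss.publish('order-created', { orderId: 123 })
28 | ```
29 |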
--------------------------------------------------------------------------------
/test/test-types.d.ts:
--------------------------------------------------------------------------------
1 | import { type PgBoss } from '../src/index.ts'
2 | import type { ConstructorOptions } from '../src/types.ts'
3 |
4 | // Extend Mocha's interfaces to include custom properties directly on 'this'
5 | // Both Context and Test need to be augmented since Context extends Test
6 | declare module 'mocha' {
7 | interface Context {
8 | boss?: PgBoss
9 | bossConfig: ConstructorOptions & { schema: string }
10 | schema: string
11 | }
12 |
13 | interface Test {
14 | boss?: PgBoss
15 | bossConfig: ConstructorOptions & { schema: string }
16 | schema: string
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/examples/readme.cjs:
--------------------------------------------------------------------------------
1 | const { PgBoss } = require('../')
2 |
3 | async function readme () {
4 | const boss = new PgBoss('postgres://postgres:postgres@localhost/pgboss')
5 |
6 | boss.on('error', console.error)
7 |
8 | await boss.start()
9 |
10 | const queue = 'readme-queue'
11 |
12 | await boss.createQueue(queue)
13 |
14 | const id = await boss.send(queue, { arg1: 'read me' })
15 |
16 | console.log(`created job ${id} in queue ${queue}`)
17 |
18 | await boss.work(queue, async ([job]) => {
19 | console.log(`received job ${job.id} with data ${JSON.stringify(job.data)}`)
20 | })
21 | }
22 |
23 | readme()
24 | .catch(err => {
25 | console.log(err)
26 | process.exit(1)
27 | })
28 |
--------------------------------------------------------------------------------
/examples/readme.mjs:
--------------------------------------------------------------------------------
1 | import { PgBoss } from '../dist/index.js'
2 |
3 | async function readme () {
4 | const boss = new PgBoss('postgres://postgres:postgres@localhost/pgboss')
5 |
6 | boss.on('error', console.error)
7 |
8 | await boss.start()
9 |
10 | const queue = 'readme-queue'
11 |
12 | await boss.createQueue(queue)
13 |
14 | const id = await boss.send(queue, { arg1: 'read me' })
15 |
16 | console.log(`created job ${id} in queue ${queue}`)
17 |
18 | await boss.work(queue, async ([job]) => {
19 | console.log(`received job ${job.id} with data ${JSON.stringify(job.data)}`)
20 | })
21 | }
22 |
23 | readme()
24 | .catch(err => {
25 | console.log(err)
26 | process.exit(1)
27 | })
28 |
--------------------------------------------------------------------------------
/examples/schedule.js:
--------------------------------------------------------------------------------
1 | import { PgBoss } from '../dist/index.js'
2 | import * as helper from '../test/testHelper.js'
3 |
4 | async function schedule () {
5 | const boss = new PgBoss(helper.getConnectionString())
6 |
7 | boss.on('error', console.error)
8 |
9 | await boss.start()
10 |
11 | const queue = 'scheduled-queue'
12 |
13 | await boss.createQueue(queue)
14 |
15 | await boss.schedule(queue, '*/2 * * * *', { arg1: 'schedule me' })
16 |
17 | await boss.work(queue, async ([job]) => {
18 | console.log(`received job ${job.id} with data ${JSON.stringify(job.data)} on ${new Date().toISOString()}`)
19 | })
20 | }
21 |
22 | schedule()
23 | .catch(err => {
24 | console.log(err)
25 | process.exit(1)
26 | })
27 |
--------------------------------------------------------------------------------
/test/backgroundErrorTest.ts:
--------------------------------------------------------------------------------
1 | import { strictEqual } from 'node:assert'
2 | import { PgBoss } from '../src/index.ts'
3 | import { delay } from '../src/tools.ts'
4 |
5 | describe('background processing error handling', function () {
6 | it('maintenance error handling works', async function () {
7 | const defaults = {
8 | superviseIntervalSeconds: 1,
9 | supervise: true,
10 | __test__throw_maint: 'my maintenance error'
11 | }
12 |
13 | const config = { ...this.bossConfig, ...defaults }
14 | this.boss = new PgBoss(config)
15 |
16 | let errorCount = 0
17 |
18 | this.boss.once('error', (error) => {
19 | strictEqual(error.message, config.__test__throw_maint)
20 | errorCount++
21 | })
22 |
23 | await this.boss.start()
24 |
25 | await delay(3000)
26 |
27 | strictEqual(errorCount, 1)
28 | })
29 | })
30 |
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | pg-boss Docs
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
23 |
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/test/speedTest.ts:
--------------------------------------------------------------------------------
1 | import * as helper from './testHelper.ts'
2 | import assert from 'node:assert'
3 |
4 | describe('speed', function () {
5 | const expectedSeconds = 9
6 | const jobCount = 5_000
7 | const queue = 'speedTest'
8 | const data = new Array(jobCount).fill(null).map((item, index) => ({ name: queue, data: { index } }))
9 | const testTitle = `should be able to fetch and complete ${jobCount} jobs in ${expectedSeconds} seconds`
10 |
11 | it(testTitle, async function () {
12 | this.timeout(expectedSeconds * 1000)
13 | this.slow(0)
14 |
15 | const config = { ...this.bossConfig, min: 10, max: 10, noDefault: true }
16 | this.boss = await helper.start(config)
17 | await this.boss.createQueue(queue)
18 | await this.boss.insert(queue, data)
19 | const jobs = await this.boss.fetch(queue, { batchSize: jobCount })
20 |
21 | assert.strictEqual(jobCount, jobs.length)
22 |
23 | await this.boss.complete(queue, jobs.map(job => job.id))
24 | })
25 | })
26 |
--------------------------------------------------------------------------------
/docs/api/utils.md:
--------------------------------------------------------------------------------
1 | # Utility functions
2 |
3 | The following static functions are not required during normal operations, but are intended to assist in schema creation or migration if run-time privileges do not allow schema changes.
4 |
5 | ### `getConstructionPlans(schema)`
6 |
7 | **Arguments**
8 | - `schema`: string, database schema name
9 |
10 | Returns the SQL commands required for manual creation of the required schema.
11 |
12 | ### `getMigrationPlans(schema, version)`
13 |
14 | **Arguments**
15 | - `schema`: string, database schema name
16 | - `version`: string, target schema version to migrate
17 |
18 | Returns the SQL commands required to manually migrate from the specified version to the latest version.
19 |
20 | ### `getRollbackPlans(schema, version)`
21 |
22 | **Arguments**
23 | - `schema`: string, database schema name
24 | - `version`: string, target schema version to uninstall
25 |
26 | Returns the SQL commands required to manually roll back the specified version to the previous version.
27 |
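28 | For example, a minimal sketch (assuming these functions are exposed as statics on the exported `PgBoss` class; the schema name and version are placeholders) that prints the SQL for a DBA to run manually:
29 |
30 | ```js
31 | import { PgBoss } from 'pg-boss'
32 |
33 | // SQL to create the schema objects from scratch
34 | console.log(PgBoss.getConstructionPlans('pgboss'))
35 |
36 | // SQL to migrate an existing installation from a given version to the latest
37 | console.log(PgBoss.getMigrationPlans('pgboss', 25))
38 | ```
39 |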
--------------------------------------------------------------------------------
/test/databaseTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import { PgBoss } from '../src/index.ts'
3 |
4 | describe('database', function () {
5 | it('should fail on invalid database host', async function () {
6 | const boss = new PgBoss({
7 | connectionString: 'postgres://bobby:tables@wat:12345/northwind',
8 | connectionTimeoutMillis: 3000
9 | })
10 |
11 | await assert.rejects(async () => {
12 | await boss.start()
13 | })
14 | })
15 |
16 | it('can be swapped out via BYODB', async function () {
17 | const query = 'SELECT something FROM somewhere'
18 |
19 | const mydb = {
20 | executeSql: async (text: string, values: []): Promise<{ rows: any[]; text: string }> => {
21 | assert.strictEqual(text, query)
22 | return { rows: [], text }
23 | }
24 | }
25 |
26 | const boss = new PgBoss({ db: mydb })
27 | const response = await boss.getDb().executeSql(query)
28 |
29 | // @ts-ignore
30 | assert(response.text === query)
31 | })
32 | })
33 |
--------------------------------------------------------------------------------
/test/toolsTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import { unwrapSQLResult } from '../src/tools.ts'
3 |
4 | describe('tools.unwrapSQLResult', function () {
5 | it('should return the same object when input is an object with rows', function () {
6 | const input = { rows: [{ id: 1 }, { id: 2 }] }
7 | const output = unwrapSQLResult(input)
8 |
9 | assert.strictEqual(output, input)
10 | assert.deepStrictEqual(output, input)
11 | })
12 |
13 | it('should flatten an array of results into a single rows array', function () {
14 | const part1 = { rows: [{ id: 'a' }] }
15 | const part2 = { rows: [{ id: 'b' }, { id: 'c' }] }
16 | const output = unwrapSQLResult([part1, part2])
17 |
18 | assert.deepStrictEqual(output, { rows: [part1.rows, part2.rows].flat() })
19 | })
20 |
21 | it('should handle empty array by returning empty rows', function () {
22 | const output = unwrapSQLResult([])
23 | assert.deepStrictEqual(output, { rows: [] })
24 | })
25 | })
26 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 Tim Jones
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/test/hooks.ts:
--------------------------------------------------------------------------------
1 | import * as helper from './testHelper.ts'
2 | import type { Context, Test } from 'mocha'
3 | import type { ConstructorOptions } from '../src/types.ts'
4 |
5 | export type { Context as TestContext }
6 |
7 | export const mochaHooks = {
8 | beforeAll,
9 | beforeEach,
10 | afterEach
11 | }
12 |
13 | async function beforeAll (this: Context): Promise<void> {
14 | await helper.init()
15 | }
16 |
17 | async function beforeEach (this: Context): Promise<void> {
18 | this.timeout(2000)
19 | const config = helper.getConfig({ testKey: getTestKey(this.currentTest!) })
20 | console.log(` ${this.currentTest!.title} (schema: ${config.schema})...`)
21 | await helper.dropSchema(config.schema!)
22 |
23 | // Set properties directly on context for easy access in tests
24 | this.bossConfig = config as ConstructorOptions & { schema: string }
25 | this.schema = config.schema!
26 | }
27 |
28 | async function afterEach (this: Context): Promise<void> {
29 | this.timeout(10000)
30 |
31 | const { boss } = this.currentTest!.ctx!
32 |
33 | if (boss) {
34 | await boss.stop({ timeout: 2000 })
35 | }
36 |
37 | if (this.currentTest!.state === 'passed') {
38 | await helper.dropSchema(this.schema)
39 | }
40 | }
41 |
42 | function getTestKey (ctx: Test): string {
43 | return ctx.file! + ctx.parent!.title + ctx.title
44 | }
45 |
--------------------------------------------------------------------------------
/examples/load/index.ts:
--------------------------------------------------------------------------------
1 | import { PgBoss } from '../../dist/index.js'
2 | import * as helper from '../../test/testHelper.js'
3 | import { delay } from '../../src/tools.ts'
4 |
5 | const SCHEMA_COUNT = 60
6 | const QUEUE_COUNT = 200
7 |
8 | loadTest()
9 | .catch(err => {
10 | console.log(err)
11 | process.exit(1)
12 | })
13 |
14 | async function loadTest () {
15 | const schemas = new Array(SCHEMA_COUNT).fill(null).map((_, index) => `schema${index}`)
16 |
17 | for (const schema of schemas) {
18 | setImmediate(() => init(schema))
19 | }
20 | }
21 |
22 | async function init (schema: string) {
23 | const config = helper.getConfig()
24 | const boss = new PgBoss({ ...config, schema, supervise: false, schedule: false })
25 |
26 | boss.on('error', console.error)
27 |
28 | await boss.start()
29 |
30 | console.log('creating queues')
31 |
32 | const queues = new Array(QUEUE_COUNT).fill(null).map((_, index) => `queue${index}`)
33 |
34 | for (const queue of queues) {
35 | console.log(`creating queue ${schema}.${queue}`)
36 | await boss.createQueue(queue)
37 | await boss.work(queue, async () => {})
38 | }
39 |
40 | console.log('created queues')
41 |
42 | while (true) {
43 | console.log(`${schema}: sending a job to each one: ${new Date()}`)
44 |
45 | await Promise.all(queues.map(async queue => {
46 | await boss.send(queue)
47 | await boss.fetch(queue)
48 | }))
49 |
50 | await delay(1000)
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | on:
2 | push:
3 | branches: [master]
4 | tags-ignore: ['**']
5 | pull_request:
6 | branches: [master]
7 |
8 | name: CI
9 |
10 | jobs:
11 | test:
12 | runs-on: ubuntu-latest
13 | container: node:22
14 | strategy:
15 | matrix:
16 | node: [ 22 ]
17 | services:
18 | postgres:
19 | image: postgres
20 | env:
21 | POSTGRES_PASSWORD: postgres
22 | options: >-
23 | --health-cmd pg_isready
24 | --health-interval 10s
25 | --health-timeout 5s
26 | --health-retries 5
27 |
28 | steps:
29 | - name: Checkout code
30 | uses: actions/checkout@v4
31 |
32 | - name: Set up Node.js
33 | uses: actions/setup-node@v4
34 | with:
35 | node-version: ${{ matrix.node }}
36 |
37 | - name: Install
38 | run: npm install
39 |
40 | - name: Test
41 | run: npm run cover
42 | env:
43 | POSTGRES_HOST: postgres
44 |
45 | - name: Coveralls
46 | uses: coverallsapp/github-action@v2
47 | with:
48 | flag-name: run-${{ join(matrix.*, '-') }}
49 | parallel: true
50 |
51 | coverage:
52 | needs: test
53 | if: ${{ always() }}
54 | runs-on: ubuntu-latest
55 | steps:
56 | - name: Coveralls Finished
57 | uses: coverallsapp/github-action@v2
58 | with:
59 | parallel-finished: true
60 | carryforward: "run-1,run-2"
61 |
--------------------------------------------------------------------------------
/docs/install.md:
--------------------------------------------------------------------------------
1 | # Database install
2 |
3 | pg-boss will automatically create a dedicated schema (`pgboss` is the default name) in the target database. This requires the user in the database connection to have the [CREATE](http://www.postgresql.org/docs/current/static/sql-grant.html) privilege.
4 |
5 | ```sql
6 | GRANT CREATE ON DATABASE db1 TO leastprivuser;
7 | ```
8 |
9 | If the CREATE privilege is not available or desired, you can use the included [static functions](#static-functions) to export the SQL commands to manually create or upgrade the required database schema. **This means you will also need to monitor future releases for schema changes**.
10 |
11 | NOTE: Using an existing schema is supported for advanced use cases **but discouraged**, as this opens up the possibility that creation will fail on an object name collision, and it will add more steps to the uninstallation process.
12 |
13 | # Database uninstall
14 |
15 | If you need to uninstall pg-boss from a database, just run the following command.
16 |
17 | ```sql
18 | DROP SCHEMA $1 CASCADE
19 | ```
20 |
21 | Where `$1` is the name of your schema if you've customized it. Otherwise, the default schema is `pgboss`.
22 |
23 | NOTE: If an existing schema was used during installation, created objects will need to be removed manually using the following commands.
24 |
25 | ```sql
26 | DROP TABLE pgboss.version;
27 | DROP TABLE pgboss.job;
28 | DROP TYPE pgboss.job_state;
29 | DROP TABLE pgboss.subscription;
30 | DROP TABLE pgboss.schedule;
31 | DROP FUNCTION pgboss.create_queue;
32 | DROP FUNCTION pgboss.delete_queue;
33 | DROP TABLE pgboss.queue;
34 | ```
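35 |
36 | After `start()`, the state of the installation can also be confirmed from Node.js (a minimal sketch; the connection string is a placeholder):
37 |
38 | ```js
39 | const boss = new PgBoss('postgres://user:password@host/db')
40 |
41 | await boss.start()
42 |
43 | console.log(await boss.isInstalled())   // true once the schema objects exist
44 | console.log(await boss.schemaVersion()) // currently installed schema version
45 | ```
46 |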
--------------------------------------------------------------------------------
/src/db.ts:
--------------------------------------------------------------------------------
1 | import EventEmitter from 'node:events'
2 | import pg from 'pg'
3 | import assert from 'node:assert'
4 | import type * as types from './types.ts'
5 |
6 | class Db extends EventEmitter implements types.IDatabase, types.EventsMixin {
7 | private pool!: pg.Pool
8 | private config: types.DatabaseOptions
9 | /** @internal */
10 | readonly _pgbdb: true
11 | opened: boolean
12 |
13 | constructor (config: types.DatabaseOptions) {
14 | super()
15 |
16 | config.application_name = config.application_name || 'pgboss'
17 | config.connectionTimeoutMillis = config.connectionTimeoutMillis || 10000
18 | // config.maxUses = config.maxUses || 1000
19 |
20 | this.config = config
21 | this._pgbdb = true
22 | this.opened = false
23 | }
24 |
25 | events = {
26 | error: 'error'
27 | }
28 |
29 | async open () {
30 | this.pool = new pg.Pool(this.config)
31 | this.pool.on('error', error => this.emit('error', error))
32 | this.opened = true
33 | }
34 |
35 | async close () {
36 | if (!this.pool.ending) {
37 | this.opened = false
38 | await this.pool.end()
39 | }
40 | }
41 |
42 | async executeSql (text: string, values?: unknown[]) {
43 | assert(this.opened, 'Database not opened. Call open() before executing SQL.')
44 |
45 | // if (this.config.debug === true) {
46 | // console.log(`${new Date().toISOString()}: DEBUG SQL`)
47 | // console.log(text)
48 |
49 | // if (values) {
50 | // console.log(`${new Date().toISOString()}: DEBUG VALUES`)
51 | // console.log(values)
52 | // }
53 | // }
54 |
55 | return await this.pool.query(text, values)
56 | }
57 | }
58 |
59 | export default Db
60 |
--------------------------------------------------------------------------------
/src/tools.ts:
--------------------------------------------------------------------------------
1 | import { setTimeout } from 'node:timers/promises'
2 |
3 | /**
4 | * When sql contains multiple queries, result is an array of objects with rows property
5 | * This function unwraps the result into a single object with rows property
6 | */
7 | function unwrapSQLResult (result: { rows: any[] } | { rows: any[] }[]): { rows: any[] } {
8 | if (Array.isArray(result)) {
9 | return { rows: result.flatMap(i => i.rows) }
10 | }
11 |
12 | return result
13 | }
14 |
15 | export interface AbortablePromise extends Promise<void> {
16 | abort: () => void
17 | }
18 |
19 | function delay (ms: number, error?: string, abortController?: AbortController): AbortablePromise {
20 | const ac = abortController || new AbortController()
21 |
22 | const promise = new Promise<void>((resolve, reject) => {
23 | setTimeout(ms, null, { signal: ac.signal })
24 | .then(() => {
25 | if (error) {
26 | reject(new Error(error))
27 | } else {
28 | resolve()
29 | }
30 | })
31 | .catch(resolve)
32 | }) as AbortablePromise
33 |
34 | promise.abort = () => {
35 | if (!ac.signal.aborted) {
36 | ac.abort()
37 | }
38 | }
39 |
40 | return promise
41 | }
42 |
43 | async function resolveWithinSeconds (promise: Promise<any>, seconds: number, message?: string, abortController?: AbortController): Promise<any> {
44 | const timeout = Math.max(1, seconds) * 1000
45 | const reject = delay(timeout, message, abortController)
46 |
47 | let result
48 |
49 | try {
50 | result = await Promise.race([promise, reject])
51 | } finally {
52 | reject.abort()
53 | }
54 |
55 | return result
56 | }
57 |
58 | export {
59 | delay,
60 | resolveWithinSeconds,
61 | unwrapSQLResult
62 | }
63 |
--------------------------------------------------------------------------------
/docs/api/events.md:
--------------------------------------------------------------------------------
1 | # Events
2 |
3 | Each pg-boss instance is an EventEmitter, and contains the following events.
4 |
5 | ## `error`
6 | The `error` event may be raised during internal processing, such as scheduling and maintenance. Adding a listener for the `error` event is strongly encouraged because of Node's default behavior.
7 |
8 | > If an EventEmitter does not have at least one listener registered for the 'error' event, and an 'error' event is emitted, the error is thrown, a stack trace is printed, and the Node.js process exits.
9 | >
10 | >Source: [Node.js Events > Error Events](https://nodejs.org/api/events.html#events_error_events)
11 |
12 | Ideally, code similar to the following example would be used after creating your instance, but before `start()` is called.
13 |
14 | ```js
15 | boss.on('error', error => logger.error(error));
16 | ```
17 | ## `warning`
18 |
19 | During monitoring and maintenance, pg-boss may raise warning events.
20 |
21 | Examples are slow queries, large queues, and scheduling clock skew.
22 |
23 | ## `wip`
24 |
25 | Emitted at most once every 2 seconds when workers are receiving jobs. The payload is an array that represents each worker in this instance of pg-boss.
26 |
27 | ```js
28 | [
29 | {
30 | id: 'fc738fb0-1de5-4947-b138-40d6a790749e',
31 | name: 'my-queue',
32 | options: { pollingInterval: 2000 },
33 | state: 'active',
34 | count: 1,
35 | createdOn: 1620149137015,
36 | lastFetchedOn: 1620149137015,
37 | lastJobStartedOn: 1620149137015,
38 | lastJobEndedOn: null,
39 | lastJobDuration: 343,
40 | lastError: null,
41 | lastErrorOn: null
42 | }
43 | ]
44 | ```
45 |
46 | ## `stopped`
47 |
48 | Emitted after `stop()` once all workers have completed their work and maintenance has been shut down.
49 |
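50 | For example, a shutdown routine might wait for this event after calling `stop()` (a minimal sketch):
51 |
52 | ```js
53 | const stopped = new Promise(resolve => boss.once('stopped', resolve));
54 |
55 | await boss.stop();
56 | await stopped;
57 | ```
58 |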
--------------------------------------------------------------------------------
/test/multiMasterTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import { getDb } from './testHelper.ts'
3 | import { PgBoss } from '../src/index.ts'
4 | import Contractor from '../src/contractor.ts'
5 | import { getAll } from '../src/migrationStore.ts'
6 | import packageJson from '../package.json' with { type: 'json' }
7 |
8 | const currentSchemaVersion = packageJson.pgboss.schema
9 |
10 | describe('multi-master', function () {
11 | it('should only allow 1 master to start at a time', async function () {
12 | const replicaCount = 20
13 | const config = { ...this.bossConfig, supervise: true, max: 2 }
14 | const instances = []
15 |
16 | for (let i = 0; i < replicaCount; i++) {
17 | instances.push(new PgBoss(config))
18 | }
19 |
20 | await Promise.all(instances.map(i => i.start()))
21 | await Promise.all(instances.map(i => i.stop({ graceful: false })))
22 | })
23 |
24 | it.skip('should only allow 1 master to migrate to latest at a time', async function () {
25 | const config = {
26 | ...this.bossConfig,
27 | supervise: true,
28 | superviseIntervalSeconds: 1,
29 | max: 2
30 | }
31 |
32 | const db = await getDb()
33 | // @ts-ignore
34 | const contractor = new Contractor(db, config)
35 |
36 | await contractor.create()
37 |
38 | await contractor.rollback(currentSchemaVersion)
39 |
40 | const oldVersion = await contractor.schemaVersion()
41 |
42 | assert.notStrictEqual(oldVersion, currentSchemaVersion)
43 |
44 | config.migrations = getAll(config.schema)
45 | config.migrations[0].install.push('select pg_sleep(1)')
46 |
47 | const instances = []
48 |
49 | for (let i = 0; i < 5; i++) {
50 | instances.push(new PgBoss(config))
51 | }
52 |
53 | await Promise.all(instances.map(i => i.start()))
54 | await Promise.all(instances.map(i => i.stop({ graceful: false })))
55 | })
56 | })
57 |
--------------------------------------------------------------------------------
/test/priorityTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 |
4 | describe('priority', function () {
5 | it('higher priority job', async function () {
6 | this.boss = await helper.start(this.bossConfig)
7 |
8 | await this.boss.send(this.schema)
9 |
10 | const high = await this.boss.send(this.schema, null, { priority: 1 })
11 |
12 | const [job] = await this.boss.fetch(this.schema)
13 |
14 | assert.strictEqual(job.id, high)
15 | })
16 |
17 | it('descending priority order', async function () {
18 | this.boss = await helper.start(this.bossConfig)
19 |
20 | const low = await this.boss.send(this.schema, null, { priority: 1 })
21 | const medium = await this.boss.send(this.schema, null, { priority: 5 })
22 | const high = await this.boss.send(this.schema, null, { priority: 10 })
23 |
24 | const [job1] = await this.boss.fetch(this.schema)
25 | const [job2] = await this.boss.fetch(this.schema)
26 | const [job3] = await this.boss.fetch(this.schema)
27 |
28 | assert.strictEqual(job1.id, high)
29 | assert.strictEqual(job2.id, medium)
30 | assert.strictEqual(job3.id, low)
31 | })
32 |
33 | it('bypasses priority when priority option used in fetch', async function () {
34 | this.boss = await helper.start(this.bossConfig)
35 |
36 | const low = await this.boss.send(this.schema, null, { priority: 1 })
37 | const medium = await this.boss.send(this.schema, null, { priority: 5 })
38 | const high = await this.boss.send(this.schema, null, { priority: 10 })
39 |
40 | const [job1] = await this.boss.fetch(this.schema, { priority: false })
41 | const [job2] = await this.boss.fetch(this.schema, { priority: false })
42 | const [job3] = await this.boss.fetch(this.schema, { priority: false })
43 |
44 | assert.strictEqual(job1.id, low)
45 | assert.strictEqual(job2.id, medium)
46 | assert.strictEqual(job3.id, high)
47 | })
48 | })
49 |
--------------------------------------------------------------------------------
/test/resumeTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 |
4 | describe('resume', function () {
5 | it('should reject missing id argument', async function () {
6 | this.boss = await helper.start(this.bossConfig)
7 |
8 | await assert.rejects(async () => {
9 | // @ts-ignore
10 | await this.boss.resume()
11 | })
12 | })
13 |
14 | it('should cancel and resume a pending job', async function () {
15 | this.boss = await helper.start(this.bossConfig)
16 |
17 | const jobId = await this.boss.send(this.schema, null, { startAfter: 1 })
18 |
19 | assert(jobId)
20 |
21 | await this.boss.cancel(this.schema, jobId)
22 |
23 | const job = await this.boss.getJobById(this.schema, jobId)
24 |
25 | assert(job && job.state === 'cancelled')
26 |
27 | await this.boss.resume(this.schema, jobId)
28 |
29 | const job2 = await this.boss.getJobById(this.schema, jobId)
30 |
31 | assert(job2 && job2.state === 'created')
32 | })
33 |
34 | it('should cancel and resume a pending job with custom connection', async function () {
35 | this.boss = await helper.start(this.bossConfig)
36 |
37 | const jobId = await this.boss.send(this.schema, null, { startAfter: 1 })
38 |
39 | assert(jobId)
40 |
41 | let callCount = 0
42 | const _db = await helper.getDb()
43 | const db = {
44 | // @ts-ignore
45 | async executeSql (sql, values) {
46 | callCount++
47 | // @ts-ignore
48 | return _db.pool.query(sql, values)
49 | }
50 | }
51 |
52 | await this.boss.cancel(this.schema, jobId, { db })
53 |
54 | const job = await this.boss.getJobById(this.schema, jobId, { db })
55 |
56 | assert(job && job.state === 'cancelled')
57 |
58 | await this.boss.resume(this.schema, jobId, { db })
59 |
60 | const job2 = await this.boss.getJobById(this.schema, jobId, { db })
61 |
62 | assert(job2 && job2.state === 'created')
63 | assert.strictEqual(callCount, 4)
64 | })
65 | })
66 |
--------------------------------------------------------------------------------
/docs/sql.md:
--------------------------------------------------------------------------------
1 | # SQL
2 |
3 | If you need to interact with pg-boss outside of Node.js, such as other clients or even using triggers within PostgreSQL itself, most functionality is supported even when working directly against the internal tables. Additionally, you may even decide to do this within Node.js. For example, if you wanted to bulk load jobs into pg-boss and skip calling `send()` or `insert()`, you could use SQL `INSERT` or `COPY` commands.
4 |
5 | ## Job table
6 |
7 | The following command is the definition of the primary job table. For manual job creation, the only required column is `name`. All other columns are nullable or have defaults.
8 |
9 | ```sql
10 | CREATE TABLE pgboss.job (
11 | id uuid not null default gen_random_uuid(),
12 | name text not null,
13 | priority integer not null default(0),
14 | data jsonb,
15 | state pgboss.job_state not null default('created'),
16 | retry_limit integer not null default(2),
17 | retry_count integer not null default(0),
18 | retry_delay integer not null default(0),
19 | retry_backoff boolean not null default false,
20 | retry_delay_max integer,
21 | expire_seconds integer not null default (900),
22 | deletion_seconds integer not null default (60 * 60 * 24 * 7),
23 | singleton_key text,
24 | singleton_on timestamp without time zone,
25 | start_after timestamp with time zone not null default now(),
26 | created_on timestamp with time zone not null default now(),
27 | started_on timestamp with time zone,
28 | completed_on timestamp with time zone,
29 | keep_until timestamp with time zone NOT NULL default now() + interval '14 days',
30 | output jsonb,
31 | dead_letter text,
32 | policy text,
33 | CONSTRAINT job_pkey PRIMARY KEY (name, id)
34 | ) PARTITION BY LIST (name)
35 | ```
36 |
37 | ## Queue functions
38 |
39 | Queues can be created or deleted from SQL functions.
40 |
41 | `pgboss.create_queue(queue_name text, options jsonb)`
42 |
43 | options: Same as options in [`createQueue()`](./api/queues?id=createqueuename-queue)
44 |
45 | `pgboss.delete_queue(queue_name text)`
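46 |
47 | For example, a bulk load from Node.js could bypass `send()` and `insert()` and write directly to the job table (a minimal sketch; the queue name is illustrative and the queue must already exist, since the table is partitioned by queue name):
48 |
49 | ```js
50 | await boss.getDb().executeSql(
51 | `INSERT INTO pgboss.job (name, data)
52 | SELECT 'bulk-queue', jsonb_build_object('index', n)
53 | FROM generate_series(1, 1000) AS n`
54 | )
55 | ```
56 |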
--------------------------------------------------------------------------------
/test/deleteTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import { delay } from '../src/tools.ts'
4 |
5 | describe('delete', function () {
6 | it('should delete a completed job via maintenance', async function () {
7 | const config = {
8 | ...this.bossConfig,
9 | maintenanceIntervalSeconds: 1
10 | }
11 |
12 | this.boss = await helper.start(config)
13 |
14 | const jobId = await this.boss.send(this.schema, null, { deleteAfterSeconds: 1 })
15 |
16 | assert(jobId)
17 |
18 | await this.boss.fetch(this.schema)
19 | await this.boss.complete(this.schema, jobId)
20 |
21 | await delay(1000)
22 |
23 | await this.boss.supervise(this.schema)
24 |
25 | const job = await this.boss.getJobById(this.schema, jobId)
26 |
27 | assert(!job)
28 | })
29 |
30 | it('should delete a completed job via maintenance - cascade config from queue', async function () {
31 | const config = {
32 | ...this.bossConfig,
33 | maintenanceIntervalSeconds: 1,
34 | noDefault: true
35 | }
36 |
37 | this.boss = await helper.start(config)
38 |
39 | await this.boss.createQueue(this.schema, { deleteAfterSeconds: 1 })
40 |
41 | const jobId = await this.boss.send(this.schema)
42 | assert(jobId)
43 | await this.boss.fetch(this.schema)
44 | await this.boss.complete(this.schema, jobId)
45 |
46 | await delay(1000)
47 |
48 | await this.boss.supervise(this.schema)
49 |
50 | const job = await this.boss.getJobById(this.schema, jobId)
51 |
52 | assert(!job)
53 | })
54 |
55 | it('should delete a job via deleteJob()', async function () {
56 | const config = { ...this.bossConfig }
57 | this.boss = await helper.start(config)
58 |
59 | const jobId = await this.boss.send(this.schema)
60 |
61 | assert(jobId)
62 |
63 | await this.boss.fetch(this.schema)
64 |
65 | await this.boss.deleteJob(this.schema, jobId)
66 |
67 | const job = await this.boss.getJobById(this.schema, jobId)
68 |
69 | assert(!job)
70 | })
71 | })
72 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "pg-boss",
3 | "version": "12.5.3",
4 | "description": "Queueing jobs in Postgres from Node.js like a boss",
5 | "type": "module",
6 | "main": "./dist/index.js",
7 | "types": "./dist/index.d.ts",
8 | "engines": {
9 | "node": ">=22.12.0"
10 | },
11 | "dependencies": {
12 | "cron-parser": "^5.4.0",
13 | "pg": "^8.16.3",
14 | "serialize-error": "^12.0.0"
15 | },
16 | "devDependencies": {
17 | "@istanbuljs/nyc-config-typescript": "^1.0.2",
18 | "@tsconfig/node-ts": "^23.6.2",
19 | "@tsconfig/node22": "^22.0.5",
20 | "@types/luxon": "^3.7.1",
21 | "@types/mocha": "^10.0.10",
22 | "@types/node": "^22.19.3",
23 | "@types/pg": "^8.16.0",
24 | "eslint": "^9.39.2",
25 | "luxon": "^3.7.2",
26 | "mocha": "^11.7.5",
27 | "neostandard": "^0.12.2",
28 | "nyc": "^17.1.0",
29 | "tsx": "^4.21.0",
30 | "typescript": "^5.9.3"
31 | },
32 | "scripts": {
33 | "build": "npm run clean && tsc --project tsconfig.build.json",
34 | "clean": "node -e \"fs.rmSync('dist',{recursive:true,force:true})\"",
35 | "prepublishOnly": "npm test && npm run build",
36 | "postpublish": "npm run clean",
37 | "test": "eslint . && mocha test/**/*.ts",
38 | "cover": "nyc npm test",
39 | "tsc": "tsc --noEmit",
40 | "readme": "node ./examples/readme.js",
41 | "db:migrate": "node --import=tsx -e 'console.log(require(\"./src\").getMigrationPlans())'",
42 | "db:construct": "node --import=tsx -e 'console.log(require(\"./src\").getConstructionPlans())'"
43 | },
44 | "pgboss": {
45 | "schema": 26
46 | },
47 | "repository": {
48 | "type": "git",
49 | "url": "git+https://github.com/timgit/pg-boss.git"
50 | },
51 | "keywords": [
52 | "postgresql",
53 | "postgres",
54 | "queue",
55 | "job"
56 | ],
57 | "author": "timgit",
58 | "license": "MIT",
59 | "bugs": {
60 | "url": "https://github.com/timgit/pg-boss/issues"
61 | },
62 | "homepage": "https://timgit.github.io/pg-boss",
63 | "files": [
64 | "dist",
65 | "README.md",
66 | "LICENSE",
67 | "package.json"
68 | ]
69 | }
70 |
--------------------------------------------------------------------------------
/docs/images/job-states.drawio:
--------------------------------------------------------------------------------
1 | 7Vtdc5s4FP01fkwGEBj70U7SdGeyM9nNzqR96sggGzWAWCEndn/9SlgyH8IudgjgdvMSuAgZ7rnn6OpKjMBNtLmnMAn+JD4KR5bhb0bgdmRZpm2N+T9h2UrLdGzsLCuKfWnLDU/4B5JG1WyNfZSWGjJCQoaTstEjcYw8VrJBSslbudmShOVfTeAKaYYnD4a69Rn7LNhZJ46R2z8jvArUL5uGvBJB1Vga0gD65K1gAncjcEMJYbujaHODQuE95ZfdfZ8OXN0/GEUxa3JD7K4ev3y9/3e7Nb58nc82Ifjj+Ur28grDtXxh+bBsqzzAnzsRh7w37nR+NA9YFHKDyQ+XOAxvSEho1hYY2Z+wk5hJMM2JPC+0W2Z/3K6/hHoiRBnaFEzype4RiRCjW95EXlX+lRF2NVah85bjZUtTUIBK2aCMkNW+59yJ/ED68RSfTjUfIp8HlTwllAVkRWIY3uXWOSXr2EeiW+G+vM0DIYn09XfE2FY6Fa4ZKSNx0JMpWVMPHXleCTmDdIXYsfeSHYqXOQoMRSFk+LXMoDo3Z7fOKIXbQoOE4JilhZ4fhaGA97gC+MQ0KpjtuswR3D/b+aBaGlE8iiDjiFWxzpEUsLwFmKGnBGYIvHGRbIjaKfFfJYClE8C0ahgw/igGAM1ZkHqBiIjBOQt06azN9O55tvnL+ufp/vUeLL797bqflQT3oRYVJp8nH3ZT+bD6UAu7OjwAt121qMXU+V0wNXrBdFLBVBk+FNOxJmp4yc9jIp5siWOcBmI04ELCAhwL7wVc78Yh9998QfnRShzxPHWJV2uaNUWbBFPuKCKaJ4hiog8nAs8HuOB5dQlsGOJVzI89ji3iWdVcaCDmeetMXoiw7+/iCqX4B1xk/YnIkt7lnTvzkXO7DxxNRPdJtbx5tE+1iiF1OP4PivCVcW3Z7riEoUouToscLTRcs9Sr5ZY7IMtlygO6KtktRIf7mzB+OgjCj6cdEN7UJ0cZ47+ThWA8xGEq6Z45gP/YA46weGwVYxfPY/coj41rw7UqStwKi8vSMP4ADh/jQDFt9dgQslYwHVyKb/YoeOfom9NQ30CtpnYteJNq1upMK8Wdn91gtayQ9UEw0RgTkdcsteHIiQKcnPUZcCnUTcuGZIMbEiUh4pPpmWj2hLhe+Wl/ClqMr6PB31hqj0ioaRiTMnSgFQ3l861yv6bTlY46WlR4CuDepXRcrR71L6X6xEIP/difiWK2CPMQpin22tJFVVP/mS5aDXWx4EWnxonK9s7gBtqs3qygs3txedsR4VTlgLynKs4712g9tUUWJSW9oG81RN8eFPqmXR0dJ2eiv19mOthTe+jXzzF0rUyw95INoetEHzCXhAo3E/rS3/B45gTjcPQfKxQA25q2EjRueTC0uxoM9RSpQ343ndU7g+I3AG3x266W+zvnt74UsisfqIwo1Tmerj0PpelyHYbbX4Plk6MsN64NE4AyPVuJo6tKibGzYuCkFrbLKQZOT0kLel/RcVte/60nsp6nlQZq44JH5sMxfGxkNsHULgPxTtaqbqZd0VQlf788T81eiliuRtQualJTjad0P0sfVOHBBR0WHurj377w+FcEblbV7b2IW1mmHkoR19AYk61LD7FY59Ts7euWM+DSKdO04DcMylT29g2FMvrKsAdjj985RNJM7L5JU1PyuCzSWBdFmsoWt6GQxtJII/ZQDJEx+7JRf8vret2osNusPPM8YcMZT4W5m47vN+t9saHpEvz/S02nBpW+2KDBr772iDYr8UXPdbpNo/AaeuwT5uJbDgVVr8hqG48kxVlkgdsFYYxEvEEoLsyh97LKKF33AUi15sGEun/EZyGg989C9HVeH8lFcbU/ogGFs3t2DGY40vcmfRx/QUP+mk1XE7ZlpLrhs1VloeOcyWetJ7shn/Oe3lPj4qf5x2O75vk3eODuPw==
--------------------------------------------------------------------------------
/test/delayTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import { delay } from '../src/tools.ts'
4 |
5 | describe('delayed jobs', function () {
6 | it('should wait until after an int (in seconds)', async function () {
7 | this.boss = await helper.start(this.bossConfig)
8 |
9 | const startAfter = 2
10 |
11 | await this.boss.send(this.schema, null, { startAfter })
12 |
13 | const [job] = await this.boss.fetch(this.schema)
14 |
15 | assert(!job)
16 |
17 | await delay(startAfter * 1000)
18 |
19 | const [job2] = await this.boss.fetch(this.schema)
20 |
21 | assert(job2)
22 | })
23 |
24 | it('should wait until after a date time string', async function () {
25 | this.boss = await helper.start(this.bossConfig)
26 |
27 | const date = new Date()
28 |
29 | date.setUTCSeconds(date.getUTCSeconds() + 2)
30 |
31 | const startAfter = date.toISOString()
32 |
33 | await this.boss.send(this.schema, null, { startAfter })
34 |
35 | const [job] = await this.boss.fetch(this.schema)
36 |
37 | assert(!job)
38 |
39 | await delay(5000)
40 |
41 | const [job2] = await this.boss.fetch(this.schema)
42 |
43 | assert(job2)
44 | })
45 |
46 | it('should wait until after a date object', async function () {
47 | this.boss = await helper.start(this.bossConfig)
48 |
49 | const date = new Date()
50 | date.setUTCSeconds(date.getUTCSeconds() + 2)
51 |
52 | const startAfter = date
53 |
54 | await this.boss.send(this.schema, null, { startAfter })
55 |
56 | const [job] = await this.boss.fetch(this.schema)
57 |
58 | assert(!job)
59 |
60 | await delay(2000)
61 |
62 | const [job2] = await this.boss.fetch(this.schema)
63 |
64 | assert(job2)
65 | })
66 |
67 | it('should work with sendAfter() and a date object', async function () {
68 | this.boss = await helper.start(this.bossConfig)
69 |
70 | const date = new Date()
71 | date.setUTCSeconds(date.getUTCSeconds() + 2)
72 |
73 | const startAfter = date
74 |
75 | await this.boss.sendAfter(this.schema, { something: 1 }, { retryLimit: 0 }, startAfter)
76 |
77 | const [job] = await this.boss.fetch(this.schema)
78 |
79 | assert(!job)
80 |
81 | await delay(2000)
82 |
83 | const [job2] = await this.boss.fetch(this.schema)
84 |
85 | assert(job2)
86 | })
87 | })
88 |
--------------------------------------------------------------------------------
/test/configTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import Db from '../src/db.ts'
3 | import { PgBoss } from '../src/index.ts'
4 | import * as helper from './testHelper.ts'
5 | import packageJson from '../package.json' with { type: 'json' }
6 |
7 | describe('config', function () {
8 | it('should allow a 50 character custom schema name', async function () {
9 | const config = this.bossConfig
10 |
11 | config.schema = 'thisisareallylongschemanamefortestingmaximumlength'
12 |
13 | await helper.dropSchema(config.schema)
14 |
15 | assert.strictEqual(config.schema.length, 50)
16 |
17 | this.boss = new PgBoss(config)
18 |
19 | await this.boss.start()
20 |
21 | await helper.dropSchema(config.schema)
22 | })
23 |
24 | it('should not allow more than 50 characters in schema name', async function () {
25 | const config = this.bossConfig
26 |
27 | config.schema = 'thisisareallylongschemanamefortestingmaximumlengthb'
28 |
29 | await helper.dropSchema(config.schema)
30 |
31 | assert(config.schema.length > 50)
32 |
33 | assert.throws(() => new PgBoss(config))
34 | })
35 |
36 | it('should accept a connectionString property', async function () {
37 | const connectionString = helper.getConnectionString()
38 | this.boss = new PgBoss({ connectionString, schema: this.bossConfig.schema })
39 |
40 | await this.boss.start()
41 | })
42 |
43 | it('should not allow calling job instance functions if not started', async function () {
44 | const boss = new PgBoss(this.bossConfig)
45 |
46 | await assert.rejects(async () => {
47 | await boss.send('queue1')
48 | })
49 | })
50 |
51 | it('start() should return instance after', async function () {
52 | this.boss = await helper.start(this.bossConfig)
53 | const result2 = await this.boss.start()
54 | assert(result2)
55 | })
56 |
57 | it('isInstalled() should indicate whether db schema is installed', async function () {
58 | const db = new Db(this.bossConfig)
59 | await db.open()
60 |
61 | this.boss = new PgBoss({ ...this.bossConfig, db })
62 | assert.strictEqual(await this.boss.isInstalled(), false)
63 | await this.boss.start()
64 | assert.strictEqual(await this.boss.isInstalled(), true)
65 | })
66 |
67 | it('schemaVersion() should return current version', async function () {
68 | this.boss = await helper.start(this.bossConfig)
69 | const version = await this.boss.schemaVersion()
70 | assert.strictEqual(version, packageJson.pgboss.schema)
71 | })
72 | })
73 |
--------------------------------------------------------------------------------
/test/cancelTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import type { TestContext } from './hooks.ts'
4 |
5 | describe('cancel', function () {
6 | it('should reject missing arguments', async function (this: TestContext) {
7 | this.boss = await helper.start(this.bossConfig)
8 | await assert.rejects(async () => {
9 | // @ts-ignore
10 | await this.boss.cancel()
11 | })
12 | })
13 |
14 | it('should cancel a pending job', async function (this: TestContext) {
15 | this.boss = await helper.start(this.bossConfig)
16 |
17 | const jobId = await this.boss.send(this.schema, {}, { startAfter: 1 })
18 |
19 | await this.boss.cancel(this.schema, jobId!)
20 |
21 | const job = await this.boss.getJobById(this.schema, jobId!)
22 |
23 | assert(job && job.state === 'cancelled')
24 | })
25 |
26 | it('should not cancel a completed job', async function (this: TestContext) {
27 | this.boss = await helper.start(this.bossConfig)
28 |
29 | await this.boss.send(this.schema)
30 |
31 | const [job] = await this.boss.fetch(this.schema)
32 |
33 | const completeResult = await this.boss.complete(this.schema, job.id)
34 |
35 | assert.strictEqual(completeResult.affected, 1)
36 |
37 | const cancelResult = await this.boss.cancel(this.schema, job.id)
38 |
39 | assert.strictEqual(cancelResult.affected, 0)
40 | })
41 |
42 | it('should cancel a batch of jobs', async function (this: TestContext) {
43 | this.boss = await helper.start(this.bossConfig)
44 |
45 | const jobs = await Promise.all([
46 | this.boss.send(this.schema),
47 | this.boss.send(this.schema),
48 | this.boss.send(this.schema)
49 | ])
50 |
51 | await this.boss.cancel(this.schema, jobs as string[])
52 | })
53 |
54 | it('should cancel a pending job with custom connection', async function (this: TestContext) {
55 | this.boss = await helper.start(this.bossConfig)
56 |
57 | let called = false
58 | const _db = await helper.getDb()
59 | const db = {
60 | async executeSql (sql: string, values: any[]) {
61 | called = true
62 | return (_db as any).pool.query(sql, values)
63 | }
64 | }
65 |
66 | const jobId = await this.boss.send(this.schema, {}, { startAfter: 1 })
67 |
68 | await this.boss.cancel(this.schema, jobId!, { db })
69 |
70 | const job = await this.boss.getJobById(this.schema, jobId!)
71 |
72 | assert(job && job.state === 'cancelled')
73 | assert.strictEqual(called, true)
74 | })
75 | })
76 |
--------------------------------------------------------------------------------
/test/expireTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import { delay } from '../src/tools.ts'
4 |
5 | describe('expire', function () {
6 | it('should expire a job', async function () {
7 | this.boss = await helper.start({ ...this.bossConfig, monitorIntervalSeconds: 1 })
8 |
9 | const jobId = await this.boss.send(this.schema, null, { retryLimit: 0, expireInSeconds: 1 })
10 |
11 | assert(jobId)
12 |
13 | const [job1] = await this.boss.fetch(this.schema)
14 |
15 | assert(job1)
16 |
17 | await delay(1000)
18 |
19 | await this.boss.supervise(this.schema)
20 |
21 | const job = await this.boss.getJobById(this.schema, jobId)
22 |
23 | assert.strictEqual('failed', job!.state)
24 | })
25 |
26 | it('should expire a job - cascaded config', async function () {
27 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
28 |
29 | await this.boss.createQueue(this.schema, { expireInSeconds: 1, retryLimit: 0 })
30 | const jobId = await this.boss.send(this.schema)
31 |
32 | assert(jobId)
33 |
34 | // fetch the job but don't complete it
35 | await this.boss.fetch(this.schema)
36 |
37 | await delay(1000)
38 |
39 | await this.boss.supervise(this.schema)
40 |
41 | const job = await this.boss.getJobById(this.schema, jobId)
42 |
43 | assert.strictEqual('failed', job!.state)
44 | })
45 |
46 | it('should expire a job via supervise option', async function () {
47 | this.boss = await helper.start({
48 | ...this.bossConfig,
49 | noDefault: true,
50 | supervise: true,
51 | monitorIntervalSeconds: 1,
52 | superviseIntervalSeconds: 1
53 | })
54 |
55 | await this.boss.createQueue(this.schema, { expireInSeconds: 1, retryLimit: 0 })
56 | const jobId = await this.boss.send(this.schema)
57 |
58 | assert(jobId)
59 |
60 | // fetch the job but don't complete it
61 | await this.boss.fetch(this.schema)
62 |
63 | await delay(4000)
64 |
65 | const job = await this.boss.getJobById(this.schema, jobId)
66 |
67 | assert.strictEqual('failed', job!.state)
68 | })
69 |
70 | it('should abort signal when job handler times out', async function () {
71 | this.boss = await helper.start({ ...this.bossConfig, monitorIntervalSeconds: 1 })
72 |
73 | const jobId = await this.boss.send(this.schema, null, { retryLimit: 0, expireInSeconds: 1 })
74 |
75 | assert(jobId)
76 |
77 | let signalAborted = false
78 |
79 | await this.boss.work(this.schema, async ([job]) => {
80 | job.signal.addEventListener('abort', () => {
81 | signalAborted = true
82 | })
83 | await delay(2000)
84 | })
85 |
86 | await delay(3000)
87 |
88 | assert.strictEqual(signalAborted, true)
89 | })
90 | })
91 |
--------------------------------------------------------------------------------
/docs/api/scheduling.md:
--------------------------------------------------------------------------------
1 | # Scheduling
2 |
3 | Jobs may be created automatically based on a cron expression. As with other cron-based systems, at least one instance needs to be running for scheduling to work. To reduce the number of evaluations, schedules are checked every 30 seconds, which means the 6-placeholder (seconds-level) format is discouraged in favor of the minute-level 5-placeholder format.
4 |
5 | For example, use this format, which implies "any second during 3:30 am every day"
6 |
7 | ```
8 | 30 3 * * *
9 | ```
10 |
11 | but **not** this format which is parsed as "only run exactly at 3:30:30 am every day"
12 |
13 | ```
14 | 30 30 3 * * *
15 | ```
16 |
17 | To change how often schedules are checked, you can set `cronMonitorIntervalSeconds`. To change how often cron jobs are run, you can set `cronWorkerIntervalSeconds`.
18 |
19 | In order to mitigate clock skew and drift, every 10 minutes the clocks of each instance are compared to the database server's clock. The skew, if any, is stored and used as an offset during cron evaluation to ensure all instances are synchronized. Internally, job throttling options are then used to make sure only 1 job is sent even if multiple instances are running.
20 |
21 | If needed, the default clock monitoring interval can be adjusted using `clockMonitorIntervalSeconds`. Additionally, to disable scheduling on an instance completely, use the following in the constructor options.
22 |
23 | ```js
24 | {
25 | schedule: false
26 | }
27 | ```
28 |
29 | For more cron documentation and examples see the docs for the [cron-parser package](https://www.npmjs.com/package/cron-parser).
30 |
31 | ### `schedule(name, cron, data, options)`
32 |
33 | Schedules a job to be sent to the specified queue based on a cron expression. If the schedule already exists, it's updated to the new cron expression.
34 |
35 | **Arguments**
36 |
37 | - `name`: string, *required*
38 | - `cron`: string, *required*
39 | - `data`: object
40 | - `options`: object
41 |
42 | `options` supports all properties in `send()` as well as the following additional options.
43 |
44 | * **tz**
45 |
46 | An optional time zone name. If not specified, the default is UTC.
47 |
48 | * **key**
49 |
50 |   An optional unique key if more than one schedule is needed for this queue.
51 |
52 |
53 | For example, the following code will send a job at 3:00am in the US central time zone into the queue `notification-abc`.
54 |
55 | ```js
56 | await boss.schedule('notification-abc', `0 3 * * *`, null, { tz: 'America/Chicago' })
57 | ```
58 |
59 | ### `unschedule(name)`
60 |
61 | Removes all scheduled jobs for the specified queue name.
62 |
63 | ### `unschedule(name, key)`
64 |
65 | Removes a schedule by queue name and unique key.
66 |
67 | ### `getSchedules()`
68 |
69 | Returns all scheduled jobs.
70 |
71 | ### `getSchedules(name)`
72 |
73 | Returns all scheduled jobs by queue name.
74 |
75 | ### `getSchedules(name, key)`
76 |
77 | Returns all scheduled jobs by queue name and unique key.
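|
| For example, a minimal sketch of managing schedules with a per-queue key (queue and key names are illustrative):
|
| ```js
| // two schedules for the same queue, distinguished by key
| await boss.schedule('notification-abc', '0 3 * * *', null, { tz: 'America/Chicago', key: 'daily' })
| await boss.schedule('notification-abc', '0 * * * *', null, { key: 'hourly' })
|
| // inspect what is currently scheduled
| const all = await boss.getSchedules()
| const forQueue = await boss.getSchedules('notification-abc')
| const hourlyOnly = await boss.getSchedules('notification-abc', 'hourly')
|
| // remove just the hourly schedule, then everything else for the queue
| await boss.unschedule('notification-abc', 'hourly')
| await boss.unschedule('notification-abc')
| ```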
--------------------------------------------------------------------------------
/test/opsTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import { randomUUID } from 'node:crypto'
4 | import { PgBoss } from '../src/index.ts'
5 |
6 | describe('ops', function () {
7 | it('should emit error in worker', async function () {
8 | this.boss = await helper.start({ ...this.bossConfig, __test__throw_worker: true })
9 |
10 | await this.boss.send(this.schema)
11 | await this.boss.work(this.schema, async () => {})
12 |
13 | await new Promise(resolve => this.boss!.once('error', resolve))
14 | })
15 |
16 | it('should return null from getJobById if not found', async function () {
17 | this.boss = await helper.start(this.bossConfig)
18 |
19 | const jobId = await this.boss.getJobById(this.schema, randomUUID())
20 |
21 | assert(!jobId)
22 | })
23 |
24 | it('should force stop', async function () {
25 | this.boss = await helper.start(this.bossConfig)
26 | await this.boss.stop({ graceful: false })
27 | })
28 |
29 | it('should close the connection pool', async function () {
30 | this.boss = await helper.start(this.bossConfig)
31 | await this.boss.stop({ graceful: false })
32 |
33 | // @ts-ignore
34 | assert(this.boss.getDb().pool.totalCount === 0)
35 | })
36 |
37 | it('should close the connection pool gracefully', async function () {
38 | this.boss = await helper.start(this.bossConfig)
39 | await this.boss.stop()
40 |
41 | // @ts-ignore
42 | assert(this.boss.getDb().pool.totalCount === 0)
43 | })
44 |
45 | it('should not close the connection pool after stop with close option', async function () {
46 | this.boss = await helper.start(this.bossConfig)
47 | await this.boss.stop({ close: false })
48 |
49 | const jobId = await this.boss.send(this.schema)
50 | const [job] = await this.boss.fetch(this.schema)
51 |
52 | assert.strictEqual(jobId, job.id)
53 | })
54 |
55 | it('should be able to run an arbitrary query via getDb()', async function () {
56 | this.boss = await helper.start(this.bossConfig)
57 | const { rows } = await this.boss.getDb().executeSql('select 1')
58 | assert.strictEqual(1, rows.length)
59 | })
60 |
61 | it('should start and stop immediately', async function () {
62 | const boss = new PgBoss(this.bossConfig)
63 | await boss.start()
64 | await boss.stop()
65 | })
66 |
67 | it('should not leave open handles after starting and stopping', async function () {
68 | const resourcesBefore = process.getActiveResourcesInfo()
69 |
70 | const boss = new PgBoss({ ...this.bossConfig, supervise: true, schedule: true })
71 | await boss.start()
72 | await boss.createQueue(this.schema)
73 | await boss.work(this.schema, async () => {})
74 | await boss.stop()
75 |
76 | const resourcesAfter = process.getActiveResourcesInfo()
77 |
78 | assert.strictEqual(resourcesAfter.length, resourcesBefore.length, `Should not leave open async resources. Before: ${resourcesBefore.length}, After: ${resourcesAfter.length}`)
79 | })
80 | })
81 |
--------------------------------------------------------------------------------
/test/queueStatsTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import { randomUUID } from 'node:crypto'
4 | import type { ConstructorOptions } from '../src/types.ts'
5 |
6 | describe('queueStats', function () {
7 | const queue1 = `q${randomUUID().replaceAll('-', '')}`
8 | const queue2 = `q${randomUUID().replaceAll('-', '')}`
9 |
10 |   async function init (config: (ConstructorOptions & { schema: string }) | (Partial<ConstructorOptions> & { testKey?: string; noDefault?: boolean }) | undefined) {
11 | const boss = await helper.start(config)
12 |
13 | await boss.createQueue(queue1)
14 | await boss.createQueue(queue2)
15 |
16 | await boss.send(queue1)
17 | await boss.send(queue1)
18 | await boss.send(queue2)
19 | await boss.send(queue2)
20 |
21 | return boss
22 | }
23 |
24 | it('should get accurate stats', async function () {
25 | this.boss = await init(this.bossConfig)
26 | const queueData = await this.boss.getQueueStats(queue1)
27 | assert.notEqual(queueData, undefined)
28 |
29 | const {
30 | name,
31 | deferredCount,
32 | queuedCount,
33 | activeCount,
34 | totalCount
35 | } = queueData!
36 |
37 | assert.equal(name, queue1)
38 | assert.equal(deferredCount, 0)
39 | assert.equal(queuedCount, 2)
40 | assert.equal(activeCount, 0)
41 | assert.equal(totalCount, 2)
42 | })
43 |
44 | it('should get accurate stats on an empty queue', async function () {
45 | this.boss = await init(this.bossConfig)
46 | const queue3 = randomUUID()
47 | await this.boss.createQueue(queue3)
48 |
49 | const queueData = await this.boss.getQueueStats(queue3)
50 | assert.notEqual(queueData, undefined)
51 |
52 | const {
53 | name,
54 | deferredCount,
55 | queuedCount,
56 | activeCount,
57 | totalCount
58 | } = queueData
59 |
60 | assert.equal(name, queue3)
61 | assert.equal(deferredCount, 0)
62 | assert.equal(queuedCount, 0)
63 | assert.equal(activeCount, 0)
64 | assert.equal(totalCount, 0)
65 | })
66 |
67 | it('should properly get queue stats when all jobs are deleted', async function () {
68 | this.boss = await helper.start({ ...this.bossConfig, monitorIntervalSeconds: 1, queueCacheIntervalSeconds: 1 })
69 |
70 | const queue4 = randomUUID()
71 | await this.boss.createQueue(queue4)
72 |
73 | await this.boss.send(queue4)
74 | await this.boss.send(queue4)
75 | await this.boss.send(queue4)
76 |
77 | await this.boss.supervise(queue4)
78 |
79 | await this.boss.deleteAllJobs(queue4)
80 |
81 | await this.boss.supervise(queue4)
82 |
83 | // wait for a second for queueCache to update
84 | await new Promise(resolve => setTimeout(resolve, 1000))
85 |
86 | const queueData = await this.boss.getQueueStats(queue4)
87 | assert(queueData)
88 |
89 | assert.equal(queueData.deferredCount, 0)
90 | assert.equal(queueData.queuedCount, 0)
91 | assert.equal(queueData.activeCount, 0)
92 | assert.equal(queueData.totalCount, 0)
93 | })
94 | })
95 |
--------------------------------------------------------------------------------
/docs/api/ops.md:
--------------------------------------------------------------------------------
1 | # Operations
2 |
3 | ### `start()`
4 |
5 | Returns the same PgBoss instance used during invocation
6 |
7 | Prepares the target database and begins job monitoring.
8 |
9 | ```js
10 | await boss.start()
11 | await boss.send('hey-there', { msg:'this came for you' })
12 | ```
13 |
14 | If the required database objects do not exist in the specified database, **`start()` will automatically create them**. The same process is true for updates as well. If a new schema version is required, pg-boss will automatically migrate the internal storage to the latest installed version.
15 |
16 | > While this is most likely a welcome feature, be aware of this during upgrades since this could delay the promise resolution by however long the migration script takes to run against your data. For example, if you happened to have millions of jobs in the job table just hanging around for archiving and the next version of the schema had a couple of new indexes, it may take a few seconds before `start()` resolves. Most migrations are very quick, however, and are designed with performance in mind.
17 |
18 | Additionally, all schema operations, both first-time provisioning and migrations, are nested within advisory locks to prevent race conditions during `start()`. Internally, these locks are created using `pg_advisory_xact_lock()` which auto-unlock at the end of the transaction and don't require a persistent session or the need to issue an unlock.
19 |
20 | One example of how this is useful would be including `start()` inside the bootstrapping of a pod in a ReplicaSet in Kubernetes. Being able to scale up your job processing using a container orchestration tool like k8s is becoming more and more popular, and pg-boss can be dropped into this system without any special startup handling.
21 |
22 | ### `stop(options)`
23 |
24 | Stops all background processing, such as maintenance and scheduling, as well as all polling workers started with `work()`.
25 |
26 | By default, calling `stop()` without any arguments will gracefully wait for all workers to finish processing active jobs before resolving. A `stopped` event is emitted once shutdown is complete.
27 |
28 | **Arguments**
29 |
30 | * `options`: object
31 |
32 | * `graceful`, bool
33 |
34 | Default: `true`. If `true`, the PgBoss instance will wait for any workers that are currently processing jobs to finish, up to the specified timeout. During this period, new jobs will not be processed, but active jobs will be allowed to finish.
35 |
36 | * `close`, bool
37 | Default: `true`. If the database connection is managed by pg-boss, it will close the connection pool. Use `false` if needed to continue allowing operations such as `send()` and `fetch()`.
38 |
39 | * `timeout`, int
40 |
41 | Default: 30000. Maximum time (in milliseconds) to wait for workers to finish job processing before shutting down the PgBoss instance.
42 |
43 | Note: This option is ignored when `graceful` is set to `false`.
44 |
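| A minimal sketch of a graceful shutdown (the signal handling is illustrative):
|
| ```js
| boss.on('stopped', () => console.log('pg-boss shutdown complete'))
|
| process.once('SIGTERM', async () => {
|   // wait up to 60 seconds for active jobs to finish, then close the pool
|   await boss.stop({ graceful: true, close: true, timeout: 60000 })
| })
| ```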
45 |
46 | ### `isInstalled()`
47 |
48 | Utility function to see if pg-boss is installed in the configured database.
49 |
50 | ### `schemaVersion()`
51 |
52 | Utility function to get the database schema version.
53 |
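| A small sketch using both utilities before starting workers (the logging is illustrative):
|
| ```js
| if (await boss.isInstalled()) {
|   console.log(`pg-boss schema version: ${await boss.schemaVersion()}`)
| } else {
|   console.log('pg-boss is not installed in this database yet')
| }
| ```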
--------------------------------------------------------------------------------
/docs/introduction.md:
--------------------------------------------------------------------------------
1 | # Intro
2 | pg-boss is a job queue powered by Postgres, operated by 1 or more Node.js instances.
3 |
4 | pg-boss relies on [SKIP LOCKED](https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE), a feature built specifically for message queues to resolve record locking challenges inherent with relational databases. This provides exactly-once delivery and the safety of guaranteed atomic commits to asynchronous job processing.
5 |
6 | This will likely cater the most to teams already familiar with the simplicity of relational database semantics and operations (SQL, querying, and backups). It will be especially useful to those already relying on PostgreSQL that want to limit how many systems are required to monitor and support in their architecture.
7 |
8 | Internally, pg-boss uses declarative list-based partitioning to expose a single logical `job` table. By default, all queues' jobs will be stored together in a shared table, but this could affect performance if 1 or more of your queues grows significantly or experiences an unexpected backlog.
9 |
10 | If a queue needs to be scaled out, you can create it with a `partition` option that will create a dedicated physical table within the partitioning hierarchy. This storage strategy should offer a balance between maintenance operations and query plan optimization. According to [the docs](https://www.postgresql.org/docs/current/ddl-partitioning.html#DDL-PARTITIONING-DECLARATIVE-BEST-PRACTICES), Postgres should scale to thousands of queues in a partitioning hierarchy quite well, but the decision on how many dedicated tables to use should be based on your specific needs. If your usage somehow exceeds what Postgres partitioning is capable of (congrats!), consider provisioning queues into separate schemas in the target database.
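|
| For example, a minimal sketch of opting a single queue into its own partition (the queue name is illustrative):
|
| ```js
| // store this queue's jobs in a dedicated physical table within the partition hierarchy
| await boss.createQueue('video-transcoding', { partition: true })
| ```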
11 |
12 | You may use as many Node.js instances as desired to connect to the same Postgres database, even running them inside serverless functions if needed. Each instance maintains a client-side connection pool (or you can substitute your own database client), limited to the maximum number of connections your database server (or server-side connection pooler) can accept. If you find yourself needing even more connections, pg-boss can easily be used behind your custom web API.
13 |
14 | ## Job states
15 |
16 | All jobs start out in the `created` state and become `active` via [`fetch(name, options)`](#fetchname-options) or in a polling worker via [`work()`](#work).
17 |
18 | In a worker, when your handler function completes, jobs will be marked `completed` automatically unless previously deleted via [`deleteJob(name, id)`](#deletejobname-id-options). If an unhandled error is thrown in your handler, the job will usually enter the `retry` state, and then the `failed` state once all retries have been attempted.
19 |
20 | Uncompleted jobs may also be assigned to `cancelled` state via [`cancel(name, id)`](#cancelname-id-options), where they can be moved back into `created` via [`resume(name, id)`](#resumename-id-options). Failed jobs can be retried via [`retry(name, id)`](#retryname-id-options).
21 |
22 | All jobs that are not actively deleted during processing will remain in `completed`, `cancelled` or `failed` state until they are automatically removed.
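|
| A minimal sketch of these transitions (queue name, data, and options are illustrative):
|
| ```js
| const queue = 'intro-example'
|
| // created
| const jobId = await boss.send(queue, { hello: 'world' }, { retryLimit: 0 })
|
| // created -> active
| const [job] = await boss.fetch(queue)
|
| // active -> failed (no retries left), then explicitly marked for retry
| await boss.fail(queue, job.id)
| await boss.retry(queue, job.id)
|
| // queued jobs can also be cancelled and later resumed
| // await boss.cancel(queue, jobId)
| // await boss.resume(queue, jobId)
| ```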
23 |
--------------------------------------------------------------------------------
/docs/api/workers.md:
--------------------------------------------------------------------------------
1 | # Workers
2 |
3 | ### `work()`
4 |
5 | Adds a new polling worker for a queue and executes the provided callback function when jobs are found. Each call to work() will add a new worker and resolve a unique worker id.
6 |
7 | Workers can be stopped via `offWork()` all at once by queue name or individually by using the worker id. Worker activity may be monitored by listening to the `wip` event.
8 |
9 | By default, `work()` fetches 1 job every 2 seconds.
10 |
11 | ### `work(name, options, handler)`
12 |
13 | **Arguments**
14 | - `name`: string, *required*
15 | - `options`: object
16 | - `handler`: function(jobs), *required*
17 |
18 | **Options**
19 |
20 | * **batchSize**, int, *(default=1)*
21 |
22 | Same as in [`fetch()`](#fetch)
23 |
24 | * **includeMetadata**, bool, *(default=false)*
25 |
26 | Same as in [`fetch()`](#fetch)
27 |
28 | * **priority**, bool, *(default=true)*
29 |
30 | Same as in [`fetch()`](#fetch)
31 |
32 | * **pollingIntervalSeconds**, int, *(default=2)*
33 |
34 | Interval to check for new jobs in seconds, must be >=0.5 (500ms)
35 |
36 |
37 | **Handler function**
38 |
39 | `handler` should return a promise (Usually this is an `async` function). If an unhandled error occurs in a handler, `fail()` will automatically be called for the jobs, storing the error in the `output` property, making the job or jobs available for retry.
40 |
41 | The jobs argument is an array of jobs with the following properties.
42 |
43 | | Prop | Type |
44 | | - | - |
45 | |`id`| string, uuid |
46 | |`name`| string |
47 | |`data`| object |
48 | |`signal`| AbortSignal |
49 |
50 |
51 | An example of a worker that checks for a job every 10 seconds.
52 |
53 | ```js
54 | await boss.work('email-welcome', { pollingIntervalSeconds: 10 }, ([ job ]) => myEmailService.sendWelcomeEmail(job.data))
55 | ```
56 |
57 | An example of a worker that returns a maximum of 5 jobs in a batch.
58 |
59 | ```js
60 | await boss.work('email-welcome', { batchSize: 5 }, (jobs) => myEmailService.sendWelcomeEmails(jobs.map(job => job.data)))
61 | ```
62 |
63 | ### `work(name, handler)`
64 |
65 | Simplified work() without an options argument
66 |
67 | ```js
68 | await boss.work('email-welcome', ([ job ]) => emailer.sendWelcomeEmail(job.data))
69 | ```
70 |
71 | work() with active job deletion
72 |
73 | ```js
74 | const queue = 'email-welcome'
75 |
76 | await boss.work(queue, async ([ job ]) => {
77 | await emailer.sendWelcomeEmail(job.data)
78 | await boss.deleteJob(queue, job.id)
79 | })
80 | ```
81 |
82 | work() with abort signal
83 |
84 | ```js
85 | await boss.work('process-video', async ([ job ]) => {
86 | const result = await fetch('https://api.example.com/process', { signal: job.signal })
87 | })
88 | ```
89 |
90 | ### `notifyWorker(id)`
91 |
92 | Notifies a worker by id to bypass the job polling interval (see `pollingIntervalSeconds`) for this iteration in the loop.
93 |
94 |
95 | ### `offWork(name, options)`
96 |
97 | Removes a worker by name or id and stops polling.
98 |
99 | **Arguments**
100 | - `name`: string
101 | - `options`: object
102 |
103 | **Options**
104 |
105 | * **wait**, boolean, *(default=true)*
106 |
107 | If the promise should wait until current jobs finish
108 |
109 | * **id**, string
110 |
111 | Only stop polling by worker id
112 |
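| A small sketch tying these together (the queue name and handler are illustrative):
|
| ```js
| const queue = 'email-welcome'
|
| // work() resolves a unique worker id
| const workerId = await boss.work(queue, async ([ job ]) => emailer.sendWelcomeEmail(job.data))
|
| // skip the current polling delay and check for jobs right away
| boss.notifyWorker(workerId)
|
| // stop only this worker, waiting for any active jobs to finish
| await boss.offWork(queue, { id: workerId, wait: true })
| ```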
--------------------------------------------------------------------------------
/src/contractor.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as plans from './plans.ts'
3 | import * as migrationStore from './migrationStore.ts'
4 | import packageJson from '../package.json' with { type: 'json' }
5 | import type * as types from './types.ts'
6 |
7 | const schemaVersion = packageJson.pgboss.schema as number
8 |
9 | class Contractor {
10 | static constructionPlans (schema = plans.DEFAULT_SCHEMA, options = { createSchema: true }) {
11 | return plans.create(schema, schemaVersion, options)
12 | }
13 |
14 | static migrationPlans (schema = plans.DEFAULT_SCHEMA, version = schemaVersion - 1) {
15 | return migrationStore.migrate(schema, version)
16 | }
17 |
18 | static rollbackPlans (schema = plans.DEFAULT_SCHEMA, version = schemaVersion) {
19 | return migrationStore.rollback(schema, version)
20 | }
21 |
22 | private config: types.ResolvedConstructorOptions
23 | private db: types.IDatabase
24 | private migrations: types.Migration[]
25 |
26 | constructor (db: types.IDatabase, config: types.ResolvedConstructorOptions) {
27 | this.config = config
28 | this.db = db
29 | this.migrations = this.config.migrations || migrationStore.getAll(this.config.schema)
30 | }
31 |
32 | async schemaVersion () {
33 | const result = await this.db.executeSql(plans.getVersion(this.config.schema))
34 | return result.rows.length ? parseInt(result.rows[0].version) : null
35 | }
36 |
37 | async isInstalled () {
38 | const result = await this.db.executeSql(plans.versionTableExists(this.config.schema))
39 | return !!result.rows[0].name
40 | }
41 |
42 | async start () {
43 | const installed = await this.isInstalled()
44 |
45 | if (installed) {
46 | const version = await this.schemaVersion()
47 |
48 | if (version !== null && schemaVersion > version) {
49 | await this.migrate(version)
50 | }
51 | } else {
52 | await this.create()
53 | }
54 | }
55 |
56 | async check () {
57 | const installed = await this.isInstalled()
58 |
59 | if (!installed) {
60 | throw new Error('pg-boss is not installed')
61 | }
62 |
63 | const version = await this.schemaVersion()
64 |
65 | if (schemaVersion !== version) {
66 | throw new Error('pg-boss database requires migrations')
67 | }
68 | }
69 |
70 | async create () {
71 | try {
72 | const commands = plans.create(this.config.schema, schemaVersion, this.config)
73 | await this.db.executeSql(commands)
74 | } catch (err: any) {
75 | assert(err.message.includes(plans.CREATE_RACE_MESSAGE), err)
76 | }
77 | }
78 |
79 | async migrate (version: number) {
80 | try {
81 | const commands = migrationStore.migrate(this.config.schema, version, this.migrations)
82 | await this.db.executeSql(commands)
83 | } catch (err: any) {
84 | assert(err.message.includes(plans.MIGRATE_RACE_MESSAGE), err)
85 | }
86 | }
87 |
88 | async next (version: number) {
89 | const commands = migrationStore.next(this.config.schema, version, this.migrations)
90 | await this.db.executeSql(commands)
91 | }
92 |
93 | async rollback (version: number) {
94 | const commands = migrationStore.rollback(this.config.schema, version, this.migrations)
95 | await this.db.executeSql(commands)
96 | }
97 | }
98 |
99 | export default Contractor
100 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Queueing jobs in Postgres from Node.js like a boss.
2 |
3 | [](https://nodei.co/npm/pg-boss/)
4 | [](https://github.com/timgit/pg-boss/actions/workflows/ci.yml)
5 | [](https://coveralls.io/github/timgit/pg-boss?branch=master)
6 |
7 | ```js
8 | async function readme() {
9 | const { PgBoss } = require('pg-boss');
10 | const boss = new PgBoss('postgres://user:pass@host/database');
11 |
12 | boss.on('error', console.error)
13 |
14 | await boss.start()
15 |
16 | const queue = 'readme-queue'
17 |
18 | await boss.createQueue(queue)
19 |
20 | const id = await boss.send(queue, { arg1: 'read me' })
21 |
22 | console.log(`created job ${id} in queue ${queue}`)
23 |
24 | await boss.work(queue, async ([ job ]) => {
25 | console.log(`received job ${job.id} with data ${JSON.stringify(job.data)}`)
26 | })
27 | }
28 |
29 | readme()
30 | .catch(err => {
31 | console.log(err)
32 | process.exit(1)
33 | })
34 | ```
35 |
36 | pg-boss is a job queue built in Node.js on top of PostgreSQL in order to provide background processing and reliable asynchronous execution to Node.js applications.
37 |
38 | pg-boss relies on Postgres's SKIP LOCKED, a feature built specifically for message queues to resolve record locking challenges inherent with relational databases. This provides exactly-once delivery and the safety of guaranteed atomic commits to asynchronous job processing.
39 |
40 | This will likely cater the most to teams already familiar with the simplicity of relational database semantics and operations (SQL, querying, and backups). It will be especially useful to those already relying on PostgreSQL that want to limit how many systems are required to monitor and support in their architecture.
41 |
42 |
43 | ## Summary
44 | * Exactly-once job delivery
45 | * Create jobs within your existing database transaction
46 | * Backpressure-compatible polling workers
47 | * Cron scheduling
48 | * Queue storage policies to support a variety of rate limiting, debouncing, and concurrency use cases
49 | * Priority queues, dead letter queues, job deferral, automatic retries with exponential backoff
50 | * Pub/sub API for fan-out queue relationships
51 | * SQL support for non-Node.js runtimes for most operations
52 | * Serverless function compatible
53 | * Multi-master compatible (for example, in a Kubernetes ReplicaSet)
54 |
55 | ## Requirements
56 | * Node 22.12 or higher for CommonJS's require(esm)
57 | * PostgreSQL 13 or higher
58 |
59 | ## Documentation
60 | * [Docs](https://timgit.github.io/pg-boss/)
61 |
62 | ## Contributing
63 | To set up a development environment for this library:
64 |
65 | ```bash
66 | git clone https://github.com/timgit/pg-boss.git
67 | npm install
68 | ```
69 |
70 | To run the test suite, linter and code coverage:
71 | ```bash
72 | npm run cover
73 | ```
74 |
75 | The test suite will try and create a new database named pgboss. The [config.json](https://github.com/timgit/pg-boss/blob/master/test/config.json) file has the default credentials to connect to postgres.
76 |
77 | The [Docker Compose](https://github.com/timgit/pg-boss/blob/master/docker-compose.yaml) file can be used to start a local postgres instance for testing:
78 |
79 | ```bash
80 | docker compose up
81 | ```
82 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | Queueing jobs in Postgres from Node.js like a boss.
2 |
3 | [](https://nodei.co/npm/pg-boss/)
4 | [](https://github.com/timgit/pg-boss/actions/workflows/ci.yml)
5 | [](https://coveralls.io/github/timgit/pg-boss?branch=master)
6 |
7 | ```js
8 | async function readme() {
9 | const { PgBoss } = require('pg-boss');
10 | const boss = new PgBoss('postgres://user:pass@host/database');
11 |
12 | boss.on('error', console.error)
13 |
14 | await boss.start()
15 |
16 | const queue = 'readme-queue'
17 |
18 | await boss.createQueue(queue)
19 |
20 | const id = await boss.send(queue, { arg1: 'read me' })
21 |
22 | console.log(`created job ${id} in queue ${queue}`)
23 |
24 | await boss.work(queue, async ([ job ]) => {
25 | console.log(`received job ${job.id} with data ${JSON.stringify(job.data)}`)
26 | })
27 | }
28 |
29 | readme()
30 | .catch(err => {
31 | console.log(err)
32 | process.exit(1)
33 | })
34 | ```
35 |
36 | pg-boss is a job queue built in Node.js on top of PostgreSQL in order to provide background processing and reliable asynchronous execution to Node.js applications.
37 |
38 | pg-boss relies on Postgres's SKIP LOCKED, a feature built specifically for message queues to resolve record locking challenges inherent with relational databases. This provides exactly-once delivery and the safety of guaranteed atomic commits to asynchronous job processing.
39 |
40 | This will likely cater the most to teams already familiar with the simplicity of relational database semantics and operations (SQL, querying, and backups). It will be especially useful to those already relying on PostgreSQL that want to limit how many systems are required to monitor and support in their architecture.
41 |
42 |
43 | ## Summary
44 | * Exactly-once job delivery
45 | * Create jobs within your existing database transaction
46 | * Backpressure-compatible polling workers
47 | * Cron scheduling
48 | * Queue storage policies to support a variety of rate limiting, debouncing, and concurrency use cases
49 | * Priority queues, dead letter queues, job deferral, automatic retries with exponential backoff
50 | * Pub/sub API for fan-out queue relationships
51 | * SQL support for non-Node.js runtimes for most operations
52 | * Serverless function compatible
53 | * Multi-master compatible (for example, in a Kubernetes ReplicaSet)
54 |
55 | ## Requirements
56 | * Node 22.12 or higher for CommonJS's require(esm)
57 | * PostgreSQL 13 or higher
58 |
59 | ## Documentation
60 | * [Docs](https://timgit.github.io/pg-boss/)
61 |
62 | ## Contributing
63 | To set up a development environment for this library:
64 |
65 | ```bash
66 | git clone https://github.com/timgit/pg-boss.git
67 | npm install
68 | ```
69 |
70 | To run the test suite, linter and code coverage:
71 | ```bash
72 | npm run cover
73 | ```
74 |
75 | The test suite will try and create a new database named pgboss. The [config.json](https://github.com/timgit/pg-boss/blob/master/test/config.json) file has the default credentials to connect to postgres.
76 |
77 | The [Docker Compose](https://github.com/timgit/pg-boss/blob/master/docker-compose.yaml) file can be used to start a local postgres instance for testing:
78 |
79 | ```bash
80 | docker compose up
81 | ```
82 |
--------------------------------------------------------------------------------
/test/testHelper.ts:
--------------------------------------------------------------------------------
1 | import Db from '../src/db.ts'
2 | import { PgBoss } from '../src/index.ts'
3 | import crypto from 'node:crypto'
4 | import configJson from './config.json' with { type: 'json' }
5 | import type { ConstructorOptions } from '../src/types.ts'
6 |
7 | const sha1 = (value: string): string => crypto.createHash('sha1').update(value).digest('hex')
8 |
9 | function getConnectionString (): string {
10 | const config = getConfig()
11 |
12 | return `postgres://${config.user}:${config.password}@${config.host}:${config.port}/${config.database}`
13 | }
14 |
15 | function getConfig (options: Partial<ConstructorOptions> & { testKey?: string } = {}): ConstructorOptions {
16 | const config: any = { ...configJson }
17 |
18 | config.host = process.env.POSTGRES_HOST || config.host
19 | config.port = process.env.POSTGRES_PORT || config.port
20 | config.password = process.env.POSTGRES_PASSWORD || config.password
21 |
22 | if (options.testKey) {
23 | config.schema = `pgboss${sha1(options.testKey)}`
24 | }
25 |
26 | config.schema = config.schema || 'pgboss'
27 |
28 | config.supervise = false
29 | config.schedule = false
30 | config.createSchema = true
31 |
32 | return Object.assign(config, options)
33 | }
34 |
35 | async function init (): Promise<void> {
36 | const { database } = getConfig()
37 |
38 | await tryCreateDb(database!)
39 | }
40 |
41 | async function getDb ({ database, debug }: { database?: string; debug?: boolean } = {}): Promise<Db> {
42 | const config = getConfig()
43 |
44 | config.database = database || config.database
45 |
46 | const db = new Db({ ...config, debug })
47 |
48 | await db.open()
49 |
50 | return db
51 | }
52 |
53 | async function dropSchema (schema: string): Promise<void> {
54 | const db = await getDb()
55 | await db.executeSql(`DROP SCHEMA IF EXISTS ${schema} CASCADE`)
56 | await db.close()
57 | }
58 |
59 | async function findJobs (schema: string, where: string, values?: any[]): Promise<{ rows: any[] }> {
60 | const db = await getDb()
61 | const jobs = await db.executeSql(`select * from ${schema}.job where ${where}`, values)
62 | await db.close()
63 | return jobs
64 | }
65 |
66 | async function countJobs (schema: string, table: string, where: string, values?: any[]): Promise<number> {
67 | const db = await getDb()
68 | const result = await db.executeSql(`select count(*) as count from ${schema}.${table} where ${where}`, values)
69 | await db.close()
70 | return parseFloat(result.rows[0].count)
71 | }
72 |
73 | async function tryCreateDb (database: string): Promise<void> {
74 | const db = await getDb({ database: 'postgres' })
75 |
76 | try {
77 | await db.executeSql(`CREATE DATABASE ${database}`)
78 | } catch {} finally {
79 | await db.close()
80 | }
81 | }
82 |
83 | async function start (options?: Partial<ConstructorOptions> & { testKey?: string; noDefault?: boolean }): Promise<PgBoss> {
84 | try {
85 | const config = getConfig(options)
86 |
87 | const boss = new PgBoss(config)
88 | // boss.on('error', err => console.log({ schema: config.schema, message: err.message }))
89 |
90 | await boss.start()
91 |
92 | if (!options?.noDefault) {
93 | await boss.createQueue(config.schema!)
94 | }
95 | return boss
96 | } catch (err) {
97 |     // this is nice for occasional debugging, Mr. Linter
98 | if (err) {
99 | throw err
100 | }
101 | throw new Error('Unexpected error')
102 | }
103 | }
104 |
105 | export {
106 | dropSchema,
107 | start,
108 | getDb,
109 | countJobs,
110 | findJobs,
111 | getConfig,
112 | getConnectionString,
113 | tryCreateDb,
114 | init
115 | }
116 |
--------------------------------------------------------------------------------
/docs/api/queues.md:
--------------------------------------------------------------------------------
1 | # Queues
2 |
3 | ### `createQueue(name, Queue)`
4 |
5 | Creates a queue.
6 |
7 | ```ts
8 | type Queue = {
9 | name: string;
10 | policy?: QueuePolicy;
11 | partition?: boolean;
12 | deadLetter?: string;
13 | warningQueueSize?: number;
14 | } & QueueOptions
15 | ```
16 |
17 | Allowed policy values:
18 |
19 | | Policy | Description |
20 | | - | - |
21 | | `standard` | (Default) Supports all standard features such as deferral, priority, and throttling |
22 | | `short` | Only allows 1 job to be queued, unlimited active. Can be extended with `singletonKey` |
23 | | `singleton` | Only allows 1 job to be active, unlimited queued. Can be extended with `singletonKey` |
24 | | `stately` | Combination of short and singleton: Only allows 1 job per state, queued and/or active. Can be extended with `singletonKey` |
25 | | `exclusive` | Only allows 1 job to be queued or active. Can be extended with `singletonKey` |
26 |
27 | > `stately` queues are special in how retries are handled. By definition, stately queues will not allow multiple jobs to occupy `retry` state. Once a job exists in `retry`, failing another `active` job will bypass the retry mechanism and force the job to `failed`. If this job requires retries, consider a custom retry implementation using a dead letter queue.
28 |
29 | * **partition**, boolean, default false
30 |
31 |   If set to true, a dedicated table will be created in the partitioning scheme. This is most useful for a large queue, to keep it from becoming a "noisy neighbor" to other queues.
32 |
33 | * **deadLetter**, string
34 |
35 |   When a job fails after all retries, if the queue has a `deadLetter` property, the job's payload will be copied into that queue, along with the same retention and retry configuration as the original job.
36 |
37 | * **warningQueueSize**, int
38 |
39 | How many items can exist in the created or retry state before emitting a warning event.
40 |
41 | **Retry options**
42 |
43 | * **retryLimit**, int
44 |
45 |   Default: 2. Maximum number of retries before a job is failed.
46 |
47 | * **retryDelay**, int
48 |
49 | Default: 0. Delay between retries of failed jobs, in seconds.
50 |
51 | * **retryBackoff**, bool
52 |
53 | Default: false. Enables exponential backoff retries based on retryDelay instead of a fixed delay. Sets initial retryDelay to 1 if not set.
54 |
55 | * **retryDelayMax**, int
56 |
57 | Default: no limit. Maximum delay between retries of failed jobs, in seconds. Only used when retryBackoff is true.
58 |
59 | **Expiration options**
60 |
61 | * **expireInSeconds**, number
62 |
63 | Default: 15 minutes. How many seconds a job may be in active state before being retried or failed. Must be >=1
64 |
65 | **Retention options**
66 |
67 | * **retentionSeconds**, number
68 |
69 | Default: 14 days. How many seconds a job may be in created or retry state before it's deleted. Must be >=1
70 |
71 | * **deleteAfterSeconds**, int
72 |
73 | Default: 7 days. How long a job should be retained in the database after it's completed.
74 |
75 | * All retry, expiration, and retention options set on the queue will be inherited by each job, unless overridden on the job itself.
76 |
77 | ### `updateQueue(name, options)`
78 |
79 | Updates options on an existing queue, with the exception of the `policy` and `partition` settings, which cannot be changed.
80 |
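| For example, a minimal sketch combining `createQueue()` and `updateQueue()` (names and values are illustrative):
|
| ```js
| await boss.createQueue('payment-webhooks-dlq')
|
| await boss.createQueue('payment-webhooks', {
|   policy: 'standard',
|   deadLetter: 'payment-webhooks-dlq',
|   retryLimit: 5,
|   retryDelay: 2,
|   retryBackoff: true,
|   expireInSeconds: 60
| })
|
| // policy and partition cannot be changed later, but other options can
| await boss.updateQueue('payment-webhooks', { retryLimit: 10 })
| ```
|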
81 | ### `deleteQueue(name)`
82 |
83 | Deletes a queue and all jobs.
84 |
85 | ### `getQueues()`
86 |
87 | Returns all queues
88 |
89 | ### `getQueue(name)`
90 |
91 | Returns a queue by name
92 |
93 | ### `getQueueStats(name)`
94 |
95 | Returns the number of jobs in various states in a queue. The result matches the results from getQueue(), but ignores the cached data and forces the stats to be retrieved immediately.
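|
| A small sketch (the queue name is illustrative):
|
| ```js
| const stats = await boss.getQueueStats('payment-webhooks')
| console.log(`queued: ${stats.queuedCount}, active: ${stats.activeCount}, total: ${stats.totalCount}`)
| ```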
--------------------------------------------------------------------------------
/test/throttleTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import { delay } from '../src/tools.ts'
4 |
5 | describe('throttle', function () {
6 | it('should only create 1 job for interval', async function () {
7 | this.boss = await helper.start(this.bossConfig)
8 |
9 | const singletonSeconds = 2
10 | const sendCount = 4
11 |
12 | for (let i = 0; i < sendCount; i++) {
13 | await this.boss.send(this.schema, null, { singletonSeconds })
14 | await delay(1000)
15 | }
16 |
17 | const { length } = await this.boss.fetch(this.schema, { batchSize: sendCount })
18 |
19 | assert(length < sendCount)
20 | })
21 |
22 | it('should process at most 1 job per second', async function () {
23 | this.boss = await helper.start(this.bossConfig)
24 |
25 | const singletonSeconds = 1
26 | const jobCount = 3
27 | const sendInterval = 100
28 | const assertTimeout = jobCount * 1000
29 |
30 |     const sendCount = jobCount
31 | let processCount = 0
32 |
33 | this.boss.work(this.schema, async () => processCount++)
34 |
35 | for (let i = 0; i < sendCount; i++) {
36 | await this.boss.send(this.schema, null, { singletonSeconds })
37 | await delay(sendInterval)
38 | }
39 |
40 | await delay(assertTimeout)
41 |
42 | assert(processCount <= jobCount + 1)
43 | })
44 |
45 | it('should debounce', async function () {
46 | this.boss = await helper.start(this.bossConfig)
47 |
48 | const jobId = await this.boss.send(this.schema, null, { singletonSeconds: 300 })
49 |
50 | assert(jobId)
51 |
52 | const jobId2 = await this.boss.send(this.schema, null, { singletonSeconds: 300, singletonNextSlot: true })
53 |
54 | assert(jobId2)
55 | })
56 |
57 | it('should debounce via sendDebounced()', async function () {
58 | this.boss = await helper.start(this.bossConfig)
59 |
60 | const seconds = 60
61 |
62 | const jobId = await this.boss.sendDebounced(this.schema, null, null, seconds)
63 |
64 | assert(jobId)
65 |
66 | const jobId2 = await this.boss.sendDebounced(this.schema, null, null, seconds)
67 |
68 | assert(jobId2)
69 |
70 | const jobId3 = await this.boss.sendDebounced(this.schema, null, null, seconds)
71 |
72 | assert.strictEqual(jobId3, null)
73 | })
74 |
75 | it('should reject 2nd request in the same time slot', async function () {
76 | this.boss = await helper.start(this.bossConfig)
77 |
78 | const jobId1 = await this.boss.send(this.schema, null, { singletonSeconds: 300 })
79 |
80 | assert(jobId1)
81 |
82 | const jobId2 = await this.boss.send(this.schema, null, { singletonSeconds: 300 })
83 |
84 | assert.strictEqual(jobId2, null)
85 | })
86 |
87 | it('should throttle via sendThrottled()', async function () {
88 | this.boss = await helper.start(this.bossConfig)
89 |
90 | const seconds = 60
91 |
92 | const jobId1 = await this.boss.sendThrottled(this.schema, null, null, seconds)
93 |
94 | assert(jobId1)
95 |
96 | const jobId2 = await this.boss.sendThrottled(this.schema, null, null, seconds)
97 |
98 | assert.strictEqual(jobId2, null)
99 | })
100 |
101 | it('should not allow more than 1 complete job with the same key with an interval', async function () {
102 | this.boss = await helper.start(this.bossConfig)
103 |
104 | const singletonKey = 'a'
105 | const singletonSeconds = 60
106 |
107 | await this.boss.send(this.schema, null, { singletonKey, singletonSeconds })
108 | const [job] = await this.boss.fetch(this.schema)
109 |
110 | await this.boss.complete(this.schema, job.id)
111 |
112 | const jobId = await this.boss.send(this.schema, null, { singletonKey, singletonSeconds })
113 |
114 | assert.strictEqual(jobId, null)
115 | })
116 | })
117 |
--------------------------------------------------------------------------------
/docs/api/constructor.md:
--------------------------------------------------------------------------------
1 | # Constructor
2 |
3 | ### `new(connectionString)`
4 |
5 | Passing a string argument to the constructor implies a PostgreSQL connection string in one of the formats specified by the [pg](https://github.com/brianc/node-postgres) package. Some examples are currently posted in the [pg docs](https://github.com/brianc/node-postgres/wiki/pg).
6 |
7 | ```js
8 | const boss = new PgBoss('postgres://user:pass@host:port/database?ssl=require');
9 | ```
10 |
11 | ### `new(options)`
12 |
13 | The following options can be set as properties in an object for additional configurations.
14 |
15 | **Connection options**
16 |
17 | * **host** - string, defaults to "127.0.0.1"
18 |
19 | * **port** - int, defaults to 5432
20 |
21 | * **ssl** - boolean or object
22 |
23 | * **database** - string, *required*
24 |
25 | * **user** - string, *required*
26 |
27 | * **password** - string
28 |
29 | * **connectionString** - string
30 |
31 | PostgreSQL connection string will be parsed and used instead of `host`, `port`, `ssl`, `database`, `user`, `password`.
32 |
33 | * **max** - int, defaults to 10
34 |
35 | Maximum number of connections that will be shared by all operations in this instance
36 |
37 | * **application_name** - string, defaults to "pgboss"
38 |
39 | * **db** - object
40 |
41 |   Passing an object named db allows you to "bring your own database connection". This option may be beneficial if you'd like to use an existing database service with its own connection pool. Setting this option will bypass the above configuration.
42 |
43 | The expected interface is a function named `executeSql` that allows the following code to run without errors.
44 |
45 |
46 | ```js
47 | const text = "select $1 as input"
48 | const values = ['arg1']
49 |
50 | const { rows } = await executeSql(text, values)
51 |
52 | assert(rows[0].input === 'arg1')
53 | ```
54 |
55 | * **schema** - string, defaults to "pgboss"
56 |
57 | Database schema that contains all required storage objects. Only alphanumeric and underscore allowed, length: <= 50 characters
58 |
59 |
60 | **Operations options**
61 |
62 | * **supervise**, bool, default true
63 |
64 | If this is set to false, maintenance and monitoring operations will be disabled on this instance. This is an advanced use case, as bypassing maintenance operations is not something you would want to do under normal circumstances.
65 |
66 | * **schedule**, bool, default true
67 |
68 |   If this is set to false, this instance will not monitor or create scheduled jobs. This is an advanced use case you may want for testing, or if the clock of the server is skewed and you would like to disable the skew warnings.
69 |
70 | * **migrate**, bool, default true
71 |
72 |   If this is set to false, this instance will skip attempts to run schema migrations during `start()`. If pending schema migrations exist, `start()` will throw an error and block usage. This is an advanced use case when the configured user account does not have schema mutation privileges.
73 |
74 | The following configuration options should not normally need to be changed, but are still available for special use cases.
75 |
76 | * **createSchema**, bool, default true
77 |
78 | If set to false, the `CREATE SCHEMA` statement will not be issued during installation. This may be useful if this privilege is not granted to the role.
79 |
80 | * **superviseIntervalSeconds**, int, default 60 seconds
81 |
82 | Entry point for how often queues are monitored and maintained.
83 |
84 | * **maintenanceIntervalSeconds**, int, default 1 day
85 |
86 | How often maintenance will be run against queue tables to drop queued and completed jobs.
87 |
88 | * **monitorIntervalSeconds**, int, default 60 seconds
89 |
90 | How often each queue is monitored for backlogs, expired jobs, and calculating stats.
91 |
92 | * **queueCacheIntervalSeconds**, int, default 60 seconds
93 |
94 | How often queue metadata is refreshed in memory.
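|
| A minimal sketch of an options-based constructor pulling several of these together (values are illustrative):
|
| ```js
| const boss = new PgBoss({
|   host: 'localhost',
|   port: 5432,
|   database: 'app',
|   user: 'app_user',
|   password: process.env.PGPASSWORD,
|   schema: 'pgboss',
|   max: 10,                      // connection pool size for this instance
|   application_name: 'pgboss',
|   supervise: true,              // keep maintenance and monitoring enabled
|   schedule: true                // keep cron scheduling enabled
| })
| ```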
--------------------------------------------------------------------------------
/test/retryTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import { delay } from '../src/tools.ts'
4 |
5 | describe('retries', function () {
6 | it('should retry a job that didn\'t complete', async function () {
7 | this.boss = await helper.start(this.bossConfig)
8 |
9 | const jobId = await this.boss.send({ name: this.schema, options: { expireInSeconds: 1, retryLimit: 1 } })
10 |
11 | const [try1] = await this.boss.fetch(this.schema)
12 |
13 | await delay(1000)
14 | await this.boss.supervise()
15 |
16 | const [try2] = await this.boss.fetch(this.schema)
17 |
18 | assert.strictEqual(try1.id, jobId)
19 | assert.strictEqual(try2.id, jobId)
20 | })
21 |
22 | it('should retry a job that failed', async function () {
23 | this.boss = await helper.start(this.bossConfig)
24 |
25 | const jobId = await this.boss.send(this.schema, null, { retryLimit: 1 })
26 |
27 | await this.boss.fetch(this.schema)
28 | await this.boss.fail(this.schema, jobId!)
29 |
30 | const [job] = await this.boss.fetch(this.schema)
31 |
32 | assert.strictEqual(job.id, jobId)
33 | })
34 |
35 | it('should retry with a fixed delay', async function () {
36 | this.boss = await helper.start(this.bossConfig)
37 |
38 | const jobId = await this.boss.send(this.schema, null, { retryLimit: 1, retryDelay: 1 })
39 |
40 | await this.boss.fetch(this.schema)
41 | await this.boss.fail(this.schema, jobId!)
42 |
43 | const [job1] = await this.boss.fetch(this.schema)
44 |
45 | assert(!job1)
46 |
47 | await delay(1000)
48 |
49 | const [job2] = await this.boss.fetch(this.schema)
50 |
51 | assert(job2)
52 | })
53 |
54 |   it('should retry with an exponential backoff', async function () {
55 | this.boss = await helper.start(this.bossConfig)
56 |
57 | let processCount = 0
58 | const retryLimit = 4
59 |
60 | await this.boss.work(this.schema, { pollingIntervalSeconds: 1 }, async () => {
61 | ++processCount
62 | throw new Error('retry')
63 | })
64 |
65 | await this.boss.send(this.schema, null, { retryLimit, retryDelay: 2, retryBackoff: true })
66 |
67 | await delay(8000)
68 |
69 | assert(processCount < retryLimit)
70 | })
71 |
72 | it('should limit retry delay with exponential backoff', async function () {
73 | this.boss = await helper.start(this.bossConfig)
74 |
75 | const startAfters: Date[] = []
76 | const retryDelayMax = 3
77 |
78 | await this.boss.work(this.schema, { pollingIntervalSeconds: 0.5, includeMetadata: true }, async ([job]) => {
79 | startAfters.push(job.startAfter)
80 | throw new Error('retry')
81 | })
82 |
83 | await this.boss.send(this.schema, null, {
84 | retryLimit: 4,
85 | retryDelay: 1,
86 | retryBackoff: true,
87 | retryDelayMax
88 | })
89 |
90 | await delay(13000)
91 |
92 | const delays = startAfters.map((startAfter, index) =>
93 | index === 0 ? 0 : (startAfter.getTime() - startAfters[index - 1].getTime()) / 1000)
94 |
95 | for (const d of delays) {
96 | // the +1 eval here is to allow latency from the work() polling interval
97 | assert(d < (retryDelayMax + 1), `Expected delay to be less than ${retryDelayMax + 1} seconds, but got ${d}`)
98 | }
99 | }).timeout(15000)
100 |
101 | it('should mark a failed job to be retried', async function () {
102 | this.boss = await helper.start(this.bossConfig)
103 | const jobId = await this.boss.send(this.schema, null, { retryLimit: 0 })
104 | await this.boss.fail(this.schema, jobId!)
105 | await this.boss.retry(this.schema, jobId!)
106 | const job = await this.boss.getJobById(this.schema, jobId!)
107 | assert(job)
108 | const { state, retryLimit } = job
109 | assert(state === 'retry')
110 | assert(retryLimit === 1)
111 | })
112 | })
113 |
--------------------------------------------------------------------------------
/test/publishTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 |
4 | describe('pubsub', function () {
5 | it('should fail with no arguments', async function () {
6 | this.boss = await helper.start(this.bossConfig)
7 | await assert.rejects(async () => {
8 | // @ts-ignore
9 | await this.boss.publish()
10 | })
11 | })
12 |
13 | it('should accept single string argument', async function () {
14 | this.boss = await helper.start(this.bossConfig)
15 | await this.boss.publish(this.schema)
16 | })
17 |
18 | it('should not send to the same named queue', async function () {
19 | this.boss = await helper.start(this.bossConfig)
20 |
21 | const message = 'hi'
22 |
23 | await this.boss.publish(this.schema, { message })
24 |
25 | const [job] = await this.boss.fetch(this.schema)
26 |
27 | assert(!job)
28 | })
29 |
30 | it('should use subscriptions to map to a single queue', async function () {
31 | this.boss = await helper.start(this.bossConfig)
32 |
33 | const event = 'event'
34 | const message = 'hi'
35 |
36 | await this.boss.subscribe(event, this.schema)
37 | await this.boss.publish(event, { message })
38 |
39 | const [job] = await this.boss.fetch<{ message: string }>(this.schema)
40 |
41 | assert.strictEqual(message, job.data.message)
42 | })
43 |
44 | it('should use subscriptions to map to more than one queue', async function () {
45 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
46 |
47 | interface Message {
48 | message: string
49 | }
50 |
51 | const queue1 = 'subqueue1'
52 | const queue2 = 'subqueue2'
53 |
54 | await this.boss.createQueue(queue1)
55 | await this.boss.createQueue(queue2)
56 |
57 | const event = 'event'
58 | const message = 'hi'
59 |
60 | await this.boss.subscribe(event, queue1)
61 | await this.boss.subscribe(event, queue2)
62 | await this.boss.publish(event, { message })
63 |
64 | const [job1] = await this.boss.fetch(queue1)
65 | const [job2] = await this.boss.fetch(queue2)
66 |
67 | assert.strictEqual(message, job1.data.message)
68 | assert.strictEqual(message, job2.data.message)
69 | })
70 | })
71 |
72 | it('should fail if unsubscribe is called without args', async function () {
73 | this.boss = await helper.start(this.bossConfig)
74 | await assert.rejects(async () => {
75 | // @ts-ignore
76 | await this.boss.unsubscribe()
77 | })
78 | })
79 |
80 | it('should fail if unsubscribe is called without both args', async function () {
81 | this.boss = await helper.start(this.bossConfig)
82 | await assert.rejects(async () => {
83 | // @ts-ignore
84 | await this.boss.unsubscribe('foo')
85 | })
86 | })
87 |
88 | it('unsubscribe works', async function () {
89 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
90 |
91 | const event = 'foo'
92 |
93 | const queue1 = 'queue1'
94 | const queue2 = 'queue2'
95 |
96 | await this.boss.createQueue(queue1)
97 | await this.boss.createQueue(queue2)
98 |
99 | await this.boss.subscribe(event, queue1)
100 | await this.boss.subscribe(event, queue2)
101 |
102 | await this.boss.publish(event)
103 |
104 | const [job1] = await this.boss.fetch(queue1)
105 |
106 | assert(job1)
107 |
108 | const [job2] = await this.boss.fetch(queue2)
109 |
110 | assert(job2)
111 |
112 | await this.boss.unsubscribe(event, queue2)
113 |
114 | await this.boss.publish(event)
115 |
116 | const [job3] = await this.boss.fetch(queue1)
117 |
118 | assert(job3)
119 |
120 | const [job4] = await this.boss.fetch(queue2)
121 |
122 | assert(!job4)
123 |
124 | await this.boss.unsubscribe(event, queue1)
125 |
126 | await this.boss.publish(event)
127 |
128 | const [job5] = await this.boss.fetch(queue1)
129 | assert(!job5)
130 | })
131 |
--------------------------------------------------------------------------------
/src/spy.ts:
--------------------------------------------------------------------------------
1 | export type JobSpyState = 'created' | 'active' | 'completed' | 'failed'
2 |
3 | export type JobDataSelector<T = object> = (jobData: T) => boolean
4 |
5 | export type JobSelector<T = object> = (job: SpyJob<T>) => boolean
6 |
7 | export interface SpyJob<T = object> {
8 | id: string
9 | name: string
10 | data: T
11 | state: JobSpyState
12 | output?: object
13 | }
14 |
15 | export interface JobSpyInterface<T = object> {
16 |   clear(): void
17 |   waitForJob(
18 |     selector: JobDataSelector<T>,
19 |     state: JobSpyState
20 |   ): Promise<SpyJob<T>>
21 |   waitForJobWithId(
22 |     id: string,
23 |     state: JobSpyState
24 |   ): Promise<SpyJob<T>>
25 | }
26 |
27 | type SpyPromise<T> = {
28 |   selector: JobSelector<T>
29 |   awaitedState: JobSpyState
30 |   resolve: (job: SpyJob<T>) => void
31 | }
32 |
33 | export class JobSpy<T = object> implements JobSpyInterface<T> {
34 |   #jobResults: Map<string, SpyJob<T>> = new Map()
35 |   #pendingPromises: SpyPromise<T>[] = []
36 |
37 | clear (): void {
38 | this.#jobResults.clear()
39 | this.#pendingPromises = []
40 | }
41 |
42 |   waitForJobWithId (id: string, awaitedState: JobSpyState): Promise<SpyJob<T>> {
43 | return this.waitForJob(() => true, awaitedState, id)
44 | }
45 |
46 |   waitForJob (
47 |     dataSelector: JobDataSelector<T>,
48 |     awaitedState: JobSpyState,
49 |     specificId?: string
50 |   ): Promise<SpyJob<T>> {
51 |     const selector: JobSelector<T> = (job) => {
52 | if (specificId && job.id !== specificId) {
53 | return false
54 | }
55 | return dataSelector(job.data)
56 | }
57 |
58 | // Check if we already have a matching job
59 | for (const job of this.#jobResults.values()) {
60 | if (job.state === awaitedState && selector(job)) {
61 | return Promise.resolve(this.#cloneJob(job))
62 | }
63 | }
64 |
65 | // Register promise to be resolved when job arrives
66 | return this.#registerPromise(selector, awaitedState)
67 | }
68 |
69 |   #registerPromise (
70 |     selector: JobSelector<T>,
71 |     awaitedState: JobSpyState
72 |   ): Promise<SpyJob<T>> {
73 |     let resolve!: (job: SpyJob<T>) => void
74 |
75 |     const promise = new Promise<SpyJob<T>>((_resolve) => {
76 | resolve = _resolve
77 | })
78 |
79 | this.#pendingPromises.push({ selector, awaitedState, resolve })
80 |
81 | return promise
82 | }
83 |
84 | #getJobResultKey (id: string, state: JobSpyState): string {
85 | return `${id}:${state}`
86 | }
87 |
88 |   #cloneJob (job: SpyJob<T>): SpyJob<T> {
89 | return {
90 | id: job.id,
91 | name: job.name,
92 | data: structuredClone(job.data),
93 | state: job.state,
94 | output: job.output ? structuredClone(job.output) : undefined
95 | }
96 | }
97 |
98 | addJob (
99 | id: string,
100 | name: string,
101 | data: T,
102 | state: JobSpyState,
103 | output?: object
104 | ): void {
105 |     const job: SpyJob<T> = {
106 | id,
107 | name,
108 | data: structuredClone(data),
109 | state,
110 | output: output ? structuredClone(output) : undefined
111 | }
112 |
113 | const key = this.#getJobResultKey(id, state)
114 | this.#jobResults.set(key, job)
115 |
116 | // Resolve any pending promises that match this job
117 |     const matchingPromises: SpyPromise<T>[] = []
118 |     const remainingPromises: SpyPromise<T>[] = []
119 |
120 | for (const pending of this.#pendingPromises) {
121 | if (pending.awaitedState === state && pending.selector(job)) {
122 | matchingPromises.push(pending)
123 | } else {
124 | remainingPromises.push(pending)
125 | }
126 | }
127 |
128 | this.#pendingPromises = remainingPromises
129 |
130 | for (const pending of matchingPromises) {
131 | pending.resolve(this.#cloneJob(job))
132 | }
133 | }
134 | }
135 |
--------------------------------------------------------------------------------
/test/completeTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import { states } from '../src/index.ts'
4 | import type { TestContext } from './hooks.ts'
5 |
6 | describe('complete', function () {
7 | it('should reject missing id argument', async function (this: TestContext) {
8 | this.boss = await helper.start(this.bossConfig)
9 | await assert.rejects(async () => {
10 | // @ts-ignore
11 | await this.boss.complete(this.schema)
12 | })
13 | })
14 |
15 | it('should complete a batch of jobs', async function (this: TestContext) {
16 | this.boss = await helper.start(this.bossConfig)
17 |
18 | const batchSize = 3
19 |
20 | await Promise.all([
21 | this.boss.send(this.schema),
22 | this.boss.send(this.schema),
23 | this.boss.send(this.schema)
24 | ])
25 |
26 | const { table } = (await this.boss.getQueue(this.schema))!
27 |
28 | const countJobs = (state: string) => helper.countJobs(this.schema, table, 'name = $1 AND state = $2', [this.schema, state])
29 |
30 | const jobs = await this.boss.fetch(this.schema, { batchSize })
31 |
32 | const activeCount = await countJobs(states.active)
33 |
34 | assert.strictEqual(activeCount, batchSize)
35 |
36 | const result = await this.boss.complete(this.schema, jobs.map(job => job.id))
37 |
38 | assert.strictEqual(batchSize, result.jobs.length)
39 | })
40 |
41 | it('should store job output in job.output from complete()', async function (this: TestContext) {
42 | this.boss = await helper.start(this.bossConfig)
43 |
44 | const jobId = await this.boss.send(this.schema)
45 |
46 | const [job] = await this.boss.fetch(this.schema)
47 |
48 | assert.strictEqual(jobId, job.id)
49 |
50 | const completionData = { msg: 'i am complete' }
51 |
52 | await this.boss.complete(this.schema, jobId, completionData)
53 |
54 | const jobWithMetadata = await this.boss.getJobById(this.schema, jobId)
55 | assert(jobWithMetadata)
56 |
57 | assert.strictEqual((jobWithMetadata as any).output.msg, completionData.msg)
58 | })
59 |
60 | it('should store job error in job.output from fail()', async function (this: TestContext) {
61 | this.boss = await helper.start(this.bossConfig)
62 |
63 | const jobId = await this.boss.send(this.schema)
64 |
65 | const [job] = await this.boss.fetch(this.schema)
66 |
67 | assert.strictEqual(jobId, job.id)
68 |
69 | const completionError = new Error('i am complete')
70 |
71 | await this.boss.fail(this.schema, jobId, completionError)
72 |
73 | const jobWithMetadata = await this.boss.getJobById(this.schema, jobId)
74 | assert(jobWithMetadata)
75 |
76 | assert.strictEqual((jobWithMetadata as any).output.message, completionError.message)
77 | })
78 |
79 | it('should complete a batch of jobs with custom connection', async function (this: TestContext) {
80 | this.boss = await helper.start(this.bossConfig)
81 |
82 | const batchSize = 3
83 |
84 | await Promise.all([
85 | this.boss.send(this.schema),
86 | this.boss.send(this.schema),
87 | this.boss.send(this.schema)
88 | ])
89 |
90 | const { table } = (await this.boss.getQueue(this.schema))!
91 |
92 | const countJobs = (state: string) => helper.countJobs(this.schema, table, 'name = $1 AND state = $2', [this.schema, state])
93 |
94 | const jobs = await this.boss.fetch(this.schema, { batchSize })
95 |
96 | const activeCount = await countJobs(states.active)
97 |
98 | assert.strictEqual(activeCount, batchSize)
99 |
100 | let called = false
101 | const _db = await helper.getDb()
102 | const db = {
103 | async executeSql (sql: string, values: any[]) {
104 | called = true
105 | return (_db as any).pool.query(sql, values)
106 | }
107 | }
108 |
109 | const result = await this.boss.complete(this.schema, jobs.map(job => job.id), undefined, { db })
110 |
111 | assert.strictEqual(batchSize, result.jobs.length)
112 | assert.strictEqual(called, true)
113 | })
114 | })
115 |
--------------------------------------------------------------------------------
/src/worker.ts:
--------------------------------------------------------------------------------
1 | import { type AbortablePromise, delay } from './tools.ts'
2 | import type * as types from './types.ts'
3 |
4 | const WORKER_STATES = {
5 | created: 'created',
6 | active: 'active',
7 | stopping: 'stopping',
8 | stopped: 'stopped'
9 | } as const
10 |
11 | interface WorkerOptions {
12 | id: string
13 | name: string
14 | options: types.WorkOptions
15 | interval: number
16 |   fetch: () => Promise<types.Job[]>
17 |   onFetch: (jobs: types.Job[]) => Promise<unknown>
18 | onError: (err: any) => void
19 | }
20 |
21 | class Worker {
22 | readonly id: string
23 | readonly name: string
24 | readonly options: types.WorkOptions
25 |   readonly fetch: () => Promise<types.Job[]>
26 |   readonly onFetch: (jobs: types.Job[]) => Promise<unknown>
27 | readonly onError: (err: any) => void
28 | readonly interval: number
29 |
30 | jobs: types.Job[] = []
31 | createdOn = Date.now()
32 | state: types.WorkerState = WORKER_STATES.created
33 | lastFetchedOn: number | null = null
34 | lastJobStartedOn: number | null = null
35 | lastJobEndedOn: number | null = null
36 | lastJobDuration: number | null = null
37 | lastError: any = null
38 | lastErrorOn: number | null = null
39 | stopping = false
40 | stopped = false
41 | private loopDelayPromise: AbortablePromise | null = null
42 | private beenNotified = false
43 |   private runPromise: Promise<void> | null = null
44 |
45 | constructor ({ id, name, options, interval, fetch, onFetch, onError }: WorkerOptions) {
46 | this.id = id
47 | this.name = name
48 | this.options = options
49 | this.fetch = fetch
50 | this.onFetch = onFetch
51 | this.onError = onError
52 | this.interval = interval
53 | }
54 |
55 | start () {
56 | this.runPromise = this.run()
57 | }
58 |
59 | private async run () {
60 | this.state = WORKER_STATES.active
61 |
62 | while (!this.stopping) {
63 | const started = Date.now()
64 |
65 | try {
66 | this.beenNotified = false
67 | const jobs = await this.fetch()
68 |
69 | this.lastFetchedOn = Date.now()
70 |
71 | if (jobs) {
72 | this.jobs = jobs
73 |
74 | this.lastJobStartedOn = this.lastFetchedOn
75 |
76 | await this.onFetch(jobs)
77 |
78 | this.lastJobEndedOn = Date.now()
79 |
80 | this.jobs = []
81 | }
82 | } catch (err: any) {
83 | this.lastErrorOn = Date.now()
84 | this.lastError = err
85 |
86 | err.message = `${err.message} (Queue: ${this.name}, Worker: ${this.id})`
87 |
88 | this.onError(err)
89 | }
90 |
91 | const duration = Date.now() - started
92 |
93 | this.lastJobDuration = duration
94 |
95 | if (!this.stopping && !this.beenNotified && (this.interval - duration) > 100) {
96 | this.loopDelayPromise = delay(this.interval - duration)
97 | await this.loopDelayPromise
98 | this.loopDelayPromise = null
99 | }
100 | }
101 |
102 | this.stopping = false
103 | this.stopped = true
104 | this.state = WORKER_STATES.stopped
105 | }
106 |
107 | notify () {
108 | this.beenNotified = true
109 |
110 | if (this.loopDelayPromise) {
111 | this.loopDelayPromise.abort()
112 | }
113 | }
114 |
115 |   async stop (): Promise<void> {
116 | this.stopping = true
117 | this.state = WORKER_STATES.stopping
118 |
119 | if (this.loopDelayPromise) {
120 | this.loopDelayPromise.abort()
121 | }
122 |
123 | await this.runPromise
124 | }
125 |
126 | toWipData (): types.WipData {
127 | return {
128 | id: this.id,
129 | name: this.name,
130 | options: this.options,
131 | state: this.state,
132 | count: this.jobs.length,
133 | createdOn: this.createdOn,
134 | lastFetchedOn: this.lastFetchedOn,
135 | lastJobStartedOn: this.lastJobStartedOn,
136 | lastJobEndedOn: this.lastJobEndedOn,
137 | lastError: this.lastError,
138 | lastErrorOn: this.lastErrorOn,
139 | lastJobDuration: this.lastJobDuration
140 | }
141 | }
142 | }
143 |
144 | export default Worker
145 |
--------------------------------------------------------------------------------
/test/monitoringTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import { delay } from '../src/tools.ts'
4 |
5 | describe('monitoring', function () {
6 | it('should cache job counts into queue', async function () {
7 | const config = {
8 | ...this.bossConfig,
9 | monitorIntervalSeconds: 1
10 | }
11 |
12 | this.boss = await helper.start(config)
13 |
14 | await this.boss.send(this.schema)
15 | await this.boss.send(this.schema)
16 | await this.boss.send(this.schema)
17 | await this.boss.fetch(this.schema)
18 |
19 | await delay(1000)
20 | await this.boss.supervise()
21 | const result1 = await this.boss.getQueue(this.schema)
22 |
23 | assert(result1)
24 |
25 | assert.strictEqual(2, result1.queuedCount)
26 | assert.strictEqual(1, result1.activeCount)
27 | assert.strictEqual(3, result1.totalCount)
28 |
29 | const [job] = await this.boss.fetch(this.schema)
30 | await this.boss.complete(this.schema, job.id)
31 |
32 | await delay(1000)
33 | await this.boss.supervise(this.schema)
34 | const result2 = await this.boss.getQueue(this.schema)
35 |
36 | assert(result2)
37 |
38 | assert.strictEqual(1, result2.queuedCount)
39 | assert.strictEqual(1, result2.activeCount)
40 | assert.strictEqual(3, result2.totalCount)
41 | })
42 |
43 | it('queue cache should emit error', async function () {
44 | const config = {
45 | ...this.bossConfig,
46 | queueCacheIntervalSeconds: 1,
47 | __test__throw_queueCache: true
48 | }
49 |
50 | let errorCount = 0
51 |
52 | this.boss = await helper.start(config)
53 |
54 | this.boss.on('error', () => errorCount++)
55 |
56 | await delay(2000)
57 |
58 | assert(errorCount > 0)
59 | })
60 |
61 | it('slow maintenance should emit warning', async function () {
62 | const config = {
63 | ...this.bossConfig,
64 | __test__warn_slow_query: true,
65 | warningSlowQuerySeconds: 1
66 | }
67 |
68 | this.boss = await helper.start(config)
69 |
70 | let eventCount = 0
71 | this.boss.on('warning', (event) => {
72 | assert(event.message.includes('slow'))
73 | eventCount++
74 | })
75 |
76 | await this.boss.supervise(this.schema)
77 |
78 | assert(eventCount > 0)
79 | })
80 |
81 | it('large queue should emit warning using global default', async function () {
82 | const config = {
83 | ...this.bossConfig,
84 | monitorIntervalSeconds: 1,
85 | warningQueueSize: 1
86 | }
87 |
88 | this.boss = await helper.start(config)
89 |
90 | await this.boss.send(this.schema)
91 | await this.boss.send(this.schema)
92 |
93 | let eventCount = 0
94 |
95 | this.boss.on('warning', (event) => {
96 | assert(event.message.includes('queue'))
97 | eventCount++
98 | })
99 |
100 | await this.boss.supervise(this.schema)
101 |
102 | await delay(1000)
103 |
104 | assert(eventCount > 0)
105 | })
106 |
107 | it('large queue should emit warning via queue config', async function () {
108 | const config = {
109 | ...this.bossConfig,
110 | monitorIntervalSeconds: 1,
111 | noDefault: true
112 | }
113 |
114 | this.boss = await helper.start(config)
115 | await this.boss.createQueue(this.schema, { warningQueueSize: 1 })
116 |
117 | await this.boss.send(this.schema)
118 | await this.boss.send(this.schema)
119 |
120 | let eventCount = 0
121 |
122 | this.boss.on('warning', (event) => {
123 | assert(event.message.includes('queue'))
124 | eventCount++
125 | })
126 |
127 | await this.boss.supervise(this.schema)
128 |
129 | await delay(1000)
130 |
131 | assert(eventCount > 0)
132 | })
133 |
134 | it('should reset cached counts to zero when all jobs are deleted for given queue', async function () {
135 | const config = {
136 | ...this.bossConfig,
137 | monitorIntervalSeconds: 1
138 | }
139 |
140 | this.boss = await helper.start(config)
141 |
142 | await this.boss.send(this.schema)
143 | await this.boss.send(this.schema)
144 | await this.boss.send(this.schema)
145 |
146 | await this.boss.supervise()
147 |
148 | await this.boss.deleteAllJobs(this.schema)
149 |
150 | await delay(1000)
151 | await this.boss.supervise()
152 | const result = await this.boss.getQueue(this.schema)
153 | assert(result)
154 |
155 | assert.strictEqual(0, result.queuedCount)
156 | assert.strictEqual(0, result.activeCount)
157 | assert.strictEqual(0, result.deferredCount)
158 | assert.strictEqual(0, result.totalCount)
159 | })
160 | })
161 |
--------------------------------------------------------------------------------
/docs/api/testing.md:
--------------------------------------------------------------------------------
1 | # Testing
2 |
3 | pg-boss includes built-in spy support to help write fast, deterministic tests without polling or arbitrary delays.
4 |
5 | ## Enabling Spies
6 |
7 | Spies must be explicitly enabled via the `__test__enableSpies` constructor option. This ensures zero overhead in production.
8 |
9 | ```js
10 | const boss = new PgBoss({
11 | connectionString: 'postgres://...',
12 | __test__enableSpies: true
13 | })
14 | ```
15 |
16 | > **Note:** Calling `getSpy()` without enabling spies will throw an error.
17 |
18 | ## `getSpy(name)`
19 |
20 | Returns a spy instance for the specified queue. The spy tracks all job state transitions (created, active, completed, failed) for that queue.
21 |
22 | **Arguments**
23 | - `name`: string, queue name
24 |
25 | **Returns**
26 |
27 | A spy object with the following interface:
28 |
29 | ```ts
30 | interface JobSpyInterface<T = object> {
31 |   clear(): void
32 |   waitForJob(selector: (data: T) => boolean, state: JobSpyState): Promise<SpyJob<T>>
33 |   waitForJobWithId(id: string, state: JobSpyState): Promise<SpyJob<T>>
34 | }
35 |
36 | type JobSpyState = 'created' | 'active' | 'completed' | 'failed'
37 |
38 | interface SpyJob<T = object> {
39 | id: string
40 | name: string
41 | data: T
42 | state: JobSpyState
43 | output?: object
44 | }
45 | ```
46 |
47 | ### `spy.waitForJob(selector, state)`
48 |
49 | Waits for a job matching the selector function to reach the specified state. If a matching job already reached that state before this method was called, the promise resolves immediately.
50 |
51 | **Arguments**
52 | - `selector`: function(data) => boolean, filters jobs by their data payload
53 | - `state`: string, one of 'created', 'active', 'completed', 'failed'
54 |
55 | ```js
56 | const boss = new PgBoss({ ..., __test__enableSpies: true })
57 | await boss.start()
58 |
59 | const spy = boss.getSpy('my-queue')
60 |
61 | // Wait for any job with userId '123' to complete
62 | const job = await spy.waitForJob(
63 | (data) => data.userId === '123',
64 | 'completed'
65 | )
66 |
67 | console.log(job.output) // handler result
68 | ```
69 |
70 | ### `spy.waitForJobWithId(id, state)`
71 |
72 | Waits for a specific job by id to reach the specified state. Like `waitForJob()`, if the job already reached the specified state before this method was called, the promise will resolve immediately.
73 |
74 | **Arguments**
75 | - `id`: string, job id
76 | - `state`: string, one of 'created', 'active', 'completed', 'failed'
77 |
78 | ```js
79 | const spy = boss.getSpy('my-queue')
80 |
81 | const jobId = await boss.send('my-queue', { userId: '123' })
82 |
83 | // Wait for this specific job to complete
84 | const job = await spy.waitForJobWithId(jobId, 'completed')
85 | ```
86 |
87 | ### `spy.clear()`
88 |
89 | Clears all tracked job data from the spy. Useful for resetting state between tests.
90 |
91 | ```js
92 | afterEach(() => {
93 | spy.clear()
94 | })
95 | ```
96 |
97 | ## `clearSpies()`
98 |
99 | Clears all spies and their tracked data across all queues.
100 |
101 | ```js
102 | afterEach(() => {
103 | boss.clearSpies()
104 | })
105 | ```
106 |
107 | ## Example Test
108 |
109 | ```js
110 | const PgBoss = require('pg-boss')
111 | const assert = require('assert')
112 |
113 | describe('email notifications', () => {
114 | let boss
115 |
116 | before(async () => {
117 | boss = new PgBoss({
118 | connectionString: process.env.DATABASE_URL,
119 | __test__enableSpies: true
120 | })
121 | await boss.start()
122 | })
123 |
124 | after(async () => {
125 | await boss.stop()
126 | })
127 |
128 | afterEach(() => {
129 | boss.clearSpies()
130 | })
131 |
132 | it('should send welcome email when user signs up', async () => {
133 | const spy = boss.getSpy('email-welcome')
134 |
135 | // Start the worker
136 | await boss.work('email-welcome', async ([job]) => {
137 | await sendEmail(job.data.email, 'Welcome!')
138 | return { sent: true }
139 | })
140 |
141 | // Trigger the action that creates the job
142 | await userService.signUp({ email: 'test@example.com' })
143 |
144 | // Wait for job to complete - no polling needed
145 | const job = await spy.waitForJob(
146 | (data) => data.email === 'test@example.com',
147 | 'completed'
148 | )
149 |
150 | assert.deepStrictEqual(job.output, { sent: true })
151 | })
152 |
153 | it('should handle email failures', async () => {
154 | const spy = boss.getSpy('email-welcome')
155 |
156 | await boss.work('email-welcome', async () => {
157 | throw new Error('SMTP connection failed')
158 | })
159 |
160 | const jobId = await boss.send('email-welcome', { email: 'test@example.com' })
161 |
162 | const job = await spy.waitForJobWithId(jobId, 'failed')
163 |
164 | assert.strictEqual(job.output.message, 'SMTP connection failed')
165 | })
166 | })
167 | ```
168 |
169 | ## Race Condition Safety
170 |
171 | The spy is designed to handle race conditions gracefully. You can call `waitForJob()` or `waitForJobWithId()` before or after the job reaches the desired state:
172 |
173 | ```js
174 | const spy = boss.getSpy('my-queue')
175 |
176 | // This works even if job completes before waitForJob is called
177 | const waitPromise = spy.waitForJob((data) => data.id === '123', 'completed')
178 |
179 | await boss.send('my-queue', { id: '123' })
180 | await boss.work('my-queue', async () => {})
181 |
182 | const job = await waitPromise // Resolves correctly
183 | ```
184 |
185 | ## Tracked States
186 |
187 | | State | When Tracked |
188 | | - | - |
189 | | `created` | Job inserted via `send()` or `insert()` |
190 | | `active` | Job fetched by a worker and handler started |
191 | | `completed` | Handler finished successfully |
192 | | `failed` | Handler threw an error or job expired |
193 |
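194 | For illustration, the intermediate states can be awaited in sequence for a single job. The sketch below is not part of the API above; it reuses the `my-queue` queue from the earlier examples and assumes `boss` was started with spies enabled:
195 |
196 | ```js
197 | const spy = boss.getSpy('my-queue')
198 |
199 | const jobId = await boss.send('my-queue', { userId: '123' })
200 |
201 | // 'created' is tracked as soon as the job is inserted
202 | await spy.waitForJobWithId(jobId, 'created')
203 |
204 | await boss.work('my-queue', async () => ({ ok: true }))
205 |
206 | // 'active' is tracked when a worker picks the job up,
207 | // 'completed' once the handler returns
208 | await spy.waitForJobWithId(jobId, 'active')
209 | const job = await spy.waitForJobWithId(jobId, 'completed')
210 |
211 | console.log(job.output) // { ok: true }
212 | ```
213 |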
--------------------------------------------------------------------------------
/src/boss.ts:
--------------------------------------------------------------------------------
1 | import EventEmitter from 'node:events'
2 | import type Manager from './manager.ts'
3 | import * as plans from './plans.ts'
4 | import { unwrapSQLResult } from './tools.ts'
5 | import * as types from './types.ts'
6 |
7 | const events = {
8 | error: 'error',
9 | warning: 'warning'
10 | }
11 |
12 | const WARNINGS = {
13 | SLOW_QUERY: { seconds: 30, message: 'Warning: slow query. Your queues and/or database server should be reviewed' },
14 | LARGE_QUEUE: { size: 10_000, message: 'Warning: large queue backlog. Your queue should be reviewed' }
15 | }
16 |
17 | class Boss extends EventEmitter implements types.EventsMixin {
18 | #stopped: boolean
19 | #maintaining: boolean | undefined
20 | #superviseInterval: NodeJS.Timeout | undefined
21 | #db: types.IDatabase
22 | #config: types.ResolvedConstructorOptions
23 | #manager: Manager
24 |
25 | events = events
26 |
27 | constructor (
28 | db: types.IDatabase,
29 | manager: Manager,
30 | config: types.ResolvedConstructorOptions
31 | ) {
32 | super()
33 |
34 | this.#db = db
35 | this.#config = config
36 | this.#manager = manager
37 | this.#stopped = true
38 |
39 | if (config.warningSlowQuerySeconds) {
40 | WARNINGS.SLOW_QUERY.seconds = config.warningSlowQuerySeconds
41 | }
42 |
43 | if (config.warningQueueSize) {
44 | WARNINGS.LARGE_QUEUE.size = config.warningQueueSize
45 | }
46 | }
47 |
48 | async start () {
49 | if (this.#stopped) {
50 | this.#superviseInterval = setInterval(
51 | () => this.#onSupervise(),
52 | this.#config.superviseIntervalSeconds! * 1000
53 | )
54 | this.#stopped = false
55 | }
56 | }
57 |
58 | async stop () {
59 | if (!this.#stopped) {
60 | if (this.#superviseInterval) clearInterval(this.#superviseInterval)
61 | this.#stopped = true
62 | }
63 | }
64 |
65 | async #executeSql (sql: string) {
66 | const started = Date.now()
67 |
68 | const result = unwrapSQLResult(await this.#db.executeSql(sql))
69 |
70 | const elapsed = (Date.now() - started) / 1000
71 |
72 | if (
73 | elapsed > WARNINGS.SLOW_QUERY.seconds ||
74 | this.#config.__test__warn_slow_query
75 | ) {
76 | this.emit(events.warning, {
77 | message: WARNINGS.SLOW_QUERY.message,
78 | data: { elapsed, sql },
79 | })
80 | }
81 |
82 | return result
83 | }
84 |
85 | async #executeQuery (query: plans.SqlQuery) {
86 | const started = Date.now()
87 |
88 | const result = unwrapSQLResult(await this.#db.executeSql(query.text, query.values))
89 |
90 | const elapsed = (Date.now() - started) / 1000
91 |
92 | if (
93 | elapsed > WARNINGS.SLOW_QUERY.seconds ||
94 | this.#config.__test__warn_slow_query
95 | ) {
96 | this.emit(events.warning, {
97 | message: WARNINGS.SLOW_QUERY.message,
98 | data: { elapsed, sql: query.text, values: query.values },
99 | })
100 | }
101 |
102 | return result
103 | }
104 |
105 | async #onSupervise () {
106 | try {
107 | if (this.#stopped) return
108 | if (this.#maintaining) return
109 | if (this.#config.__test__throw_maint) { throw new Error(this.#config.__test__throw_maint) }
110 |
111 | this.#maintaining = true
112 |
113 | const queues = await this.#manager.getQueues()
114 |
115 | !this.#stopped && (await this.supervise(queues))
116 | } catch (err) {
117 | this.emit(events.error, err)
118 | } finally {
119 | this.#maintaining = false
120 | }
121 | }
122 |
123 | async supervise (value?: string | types.QueueResult[]) {
124 | let queues: types.QueueResult[]
125 |
126 | if (Array.isArray(value)) {
127 | queues = value
128 | } else {
129 | queues = await this.#manager.getQueues(value)
130 | }
131 |
132 | const queueGroups = queues.reduce<
133 |       Record<string, { table: string, queues: types.QueueResult[] }>
134 | >((acc, q) => {
135 | const { table } = q
136 | acc[table] = acc[table] || { table, queues: [] }
137 | acc[table].queues.push(q)
138 | return acc
139 | }, {})
140 |
141 | for (const queueGroup of Object.values(queueGroups)) {
142 | const { table, queues } = queueGroup
143 | const names = queues.map((i) => i.name)
144 |
145 | while (names.length) {
146 | const chunk = names.splice(0, 100)
147 |
148 | await this.#monitor(table, chunk)
149 | await this.#maintain(table, chunk)
150 | }
151 | }
152 | }
153 |
154 | async #monitor (table: string, names: string[]) {
155 | const command = plans.trySetQueueMonitorTime(
156 | this.#config.schema,
157 | names,
158 | this.#config.monitorIntervalSeconds
159 | )
160 | const { rows } = await this.#executeQuery(command)
161 |
162 | if (rows.length) {
163 | const queues = rows.map((q) => q.name)
164 |
165 | const cacheStatsSql = plans.cacheQueueStats(this.#config.schema, table, queues)
166 | const { rows: rowsCacheStats } = await this.#executeSql(cacheStatsSql)
167 | const warnings = rowsCacheStats.filter(i => i.queuedCount > (i.warningQueueSize || WARNINGS.LARGE_QUEUE.size))
168 |
169 | for (const warning of warnings) {
170 | this.emit(events.warning, {
171 | message: WARNINGS.LARGE_QUEUE.message,
172 | data: warning,
173 | })
174 | }
175 |
176 | const sql = plans.failJobsByTimeout(this.#config.schema, table, queues)
177 | await this.#executeSql(sql)
178 | }
179 | }
180 |
181 | async #maintain (table: string, names: string[]) {
182 | const command = plans.trySetQueueDeletionTime(
183 | this.#config.schema,
184 | names,
185 | this.#config.maintenanceIntervalSeconds
186 | )
187 | const { rows } = await this.#executeQuery(command)
188 |
189 | if (rows.length) {
190 | const queues = rows.map((q) => q.name)
191 | const sql = plans.deletion(this.#config.schema, table, queues)
192 | await this.#executeSql(sql)
193 | }
194 | }
195 | }
196 |
197 | export default Boss
198 |
--------------------------------------------------------------------------------
/test/insertTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import { randomUUID } from 'node:crypto'
3 | import * as helper from './testHelper.ts'
4 |
5 | describe('insert', function () {
6 | it('should create jobs from an array', async function () {
7 | this.boss = await helper.start(this.bossConfig)
8 |
9 | const input = [{}, {}, {}]
10 |
11 | await this.boss.insert(this.schema, input)
12 |
13 | const { queuedCount } = await this.boss.getQueueStats(this.schema)
14 |
15 | assert.strictEqual(queuedCount, 3)
16 | })
17 |
18 | it('should create jobs from an array with all properties', async function () {
19 | this.boss = await helper.start(this.bossConfig)
20 |
21 | const deadLetter = `${this.schema}_dlq`
22 | await this.boss.createQueue(deadLetter)
23 | await this.boss.updateQueue(this.schema, { deadLetter })
24 |
25 | const input = {
26 | id: randomUUID(),
27 | priority: 1,
28 | data: { some: 'data' },
29 | retryLimit: 1,
30 | retryDelay: 2,
31 | retryBackoff: true,
32 | retryDelayMax: 3,
33 | startAfter: new Date().toISOString(),
34 | expireInSeconds: 5,
35 | deleteAfterSeconds: 60,
36 | singletonKey: '123',
37 | retentionSeconds: 60
38 | }
39 |
40 | const keepUntil = new Date(new Date(input.startAfter).getTime() + (input.retentionSeconds * 1000)).toISOString()
41 |
42 | await this.boss.insert(this.schema, [input])
43 |
44 | const job = await this.boss.getJobById(this.schema, input.id)
45 |
46 | assert(job)
47 |
48 | assert.strictEqual(job.id, input.id, `id input ${input.id} didn't match job ${job.id}`)
49 | assert.strictEqual(job.priority, input.priority, `priority input ${input.priority} didn't match job ${job.priority}`)
50 | assert.strictEqual(JSON.stringify(job.data), JSON.stringify(input.data), `data input ${input.data} didn't match job ${job.data}`)
51 | assert.strictEqual(job.retryLimit, input.retryLimit, `retryLimit input ${input.retryLimit} didn't match job ${job.retryLimit}`)
52 | assert.strictEqual(job.retryDelay, input.retryDelay, `retryDelay input ${input.retryDelay} didn't match job ${job.retryDelay}`)
53 | assert.strictEqual(job.retryBackoff, input.retryBackoff, `retryBackoff input ${input.retryBackoff} didn't match job ${job.retryBackoff}`)
54 | assert.strictEqual(job.retryDelayMax, input.retryDelayMax, `retryDelayMax input ${input.retryDelayMax} didn't match job ${job.retryDelayMax}`)
55 | assert.strictEqual(new Date(job.startAfter).toISOString(), input.startAfter, `startAfter input ${input.startAfter} didn't match job ${job.startAfter}`)
56 | assert.strictEqual(job.expireInSeconds, input.expireInSeconds, `expireInSeconds input ${input.expireInSeconds} didn't match job ${job.expireInSeconds}`)
57 | assert.strictEqual(job.deleteAfterSeconds, input.deleteAfterSeconds, `deleteAfterSeconds input ${input.deleteAfterSeconds} didn't match job ${job.deleteAfterSeconds}`)
58 | assert.strictEqual(job.singletonKey, input.singletonKey, `name input ${input.singletonKey} didn't match job ${job.singletonKey}`)
59 | assert.strictEqual(new Date(job.keepUntil).toISOString(), keepUntil, `keepUntil input ${keepUntil} didn't match job ${job.keepUntil}`)
60 | })
61 |
62 | it('should create jobs from an array with all properties and custom connection', async function () {
63 | this.boss = await helper.start(this.bossConfig)
64 |
65 | const deadLetter = `${this.schema}_dlq`
66 | await this.boss.createQueue(deadLetter)
67 | await this.boss.updateQueue(this.schema, { deadLetter })
68 |
69 | const input = {
70 | id: randomUUID(),
71 | priority: 1,
72 | data: { some: 'data' },
73 | retryLimit: 1,
74 | retryDelay: 2,
75 | retryBackoff: true,
76 | retryDelayMax: 3,
77 | startAfter: new Date().toISOString(),
78 | expireInSeconds: 5,
79 | deleteAfterSeconds: 45,
80 | singletonKey: '123',
81 | retentionSeconds: 60
82 | }
83 |
84 | const keepUntil = new Date(new Date(input.startAfter).getTime() + (input.retentionSeconds * 1000)).toISOString()
85 |
86 | let called = false
87 | const db = await helper.getDb()
88 | const options = {
89 | db: {
90 | // @ts-ignore
91 | async executeSql (sql, values) {
92 | called = true
93 | // @ts-ignore
94 | return db.pool.query(sql, values)
95 | }
96 | }
97 | }
98 |
99 | await this.boss.insert(this.schema, [input], options)
100 |
101 | const job = await this.boss.getJobById(this.schema, input.id)
102 |
103 | assert(job)
104 |
105 | assert.strictEqual(job.id, input.id, `id input ${input.id} didn't match job ${job.id}`)
106 | assert.strictEqual(job.priority, input.priority, `priority input ${input.priority} didn't match job ${job.priority}`)
107 | assert.strictEqual(JSON.stringify(job.data), JSON.stringify(input.data), `data input ${input.data} didn't match job ${job.data}`)
108 | assert.strictEqual(job.retryLimit, input.retryLimit, `retryLimit input ${input.retryLimit} didn't match job ${job.retryLimit}`)
109 | assert.strictEqual(job.retryDelay, input.retryDelay, `retryDelay input ${input.retryDelay} didn't match job ${job.retryDelay}`)
110 | assert.strictEqual(job.retryBackoff, input.retryBackoff, `retryBackoff input ${input.retryBackoff} didn't match job ${job.retryBackoff}`)
111 | assert.strictEqual(job.retryDelayMax, input.retryDelayMax, `retryDelayMax input ${input.retryDelayMax} didn't match job ${job.retryDelayMax}`)
112 | assert.strictEqual(new Date(job.startAfter).toISOString(), input.startAfter, `startAfter input ${input.startAfter} didn't match job ${job.startAfter}`)
113 | assert.strictEqual(job.expireInSeconds, input.expireInSeconds, `expireInSeconds input ${input.expireInSeconds} didn't match job ${job.expireInSeconds}`)
114 | assert.strictEqual(job.deleteAfterSeconds, input.deleteAfterSeconds, `deleteAfterSeconds input ${input.deleteAfterSeconds} didn't match job ${job.deleteAfterSeconds}`)
115 | assert.strictEqual(job.singletonKey, input.singletonKey, `name input ${input.singletonKey} didn't match job ${job.singletonKey}`)
116 | assert.strictEqual(new Date(job.keepUntil).toISOString(), keepUntil, `keepUntil input ${keepUntil} didn't match job ${job.keepUntil}`)
117 | assert.strictEqual(called, true)
118 | })
119 | })
120 |
--------------------------------------------------------------------------------
/src/migrationStore.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as plans from './plans.ts'
3 | import * as types from './types.ts'
4 |
5 | function flatten (schema: string, commands: string[], version: number) {
6 | commands.unshift(plans.assertMigration(schema, version))
7 | commands.push(plans.setVersion(schema, version))
8 |
9 | return plans.locked(schema, commands)
10 | }
11 |
12 | function rollback (schema: string, version: number, migrations?: types.Migration[]) {
13 | migrations = migrations || getAll(schema)
14 |
15 | const result = migrations.find(i => i.version === version)
16 |
17 | assert(result, `Version ${version} not found.`)
18 |
19 | return flatten(schema, result.uninstall || [], result.previous)
20 | }
21 |
22 | function next (schema: string, version: number, migrations: types.Migration[] | undefined) {
23 | migrations = migrations || getAll(schema)
24 |
25 | const result = migrations.find(i => i.previous === version)
26 |
27 | assert(result, `Version ${version} not found.`)
28 |
29 | return flatten(schema, result.install, result.version)
30 | }
31 |
32 | function migrate (schema: string, version: number, migrations?: types.Migration[]) {
33 | migrations = migrations || getAll(schema)
34 |
35 | const result = migrations
36 | .filter(i => i.previous >= version!)
37 | .sort((a, b) => a.version - b.version)
38 | .reduce((acc, i) => {
39 | acc.install = acc.install.concat(i.install)
40 | acc.version = i.version
41 | return acc
42 | }, { install: [] as string[], version })
43 |
44 | assert(result.install.length > 0, `Version ${version} not found.`)
45 |
46 | return flatten(schema, result.install, result.version!)
47 | }
48 |
49 | function getAll (schema: string): types.Migration[] {
50 | return [
51 | {
52 | release: '11.1.0',
53 | version: 26,
54 | previous: 25,
55 | install: [
56 | `
57 | CREATE OR REPLACE FUNCTION ${schema}.create_queue(queue_name text, options jsonb)
58 | RETURNS VOID AS
59 | $$
60 | DECLARE
61 | tablename varchar := CASE WHEN options->>'partition' = 'true'
62 | THEN 'j' || encode(sha224(queue_name::bytea), 'hex')
63 | ELSE 'job_common'
64 | END;
65 | queue_created_on timestamptz;
66 | BEGIN
67 |
68 | WITH q as (
69 | INSERT INTO ${schema}.queue (
70 | name,
71 | policy,
72 | retry_limit,
73 | retry_delay,
74 | retry_backoff,
75 | retry_delay_max,
76 | expire_seconds,
77 | retention_seconds,
78 | deletion_seconds,
79 | warning_queued,
80 | dead_letter,
81 | partition,
82 | table_name
83 | )
84 | VALUES (
85 | queue_name,
86 | options->>'policy',
87 | COALESCE((options->>'retryLimit')::int, 2),
88 | COALESCE((options->>'retryDelay')::int, 0),
89 | COALESCE((options->>'retryBackoff')::bool, false),
90 | (options->>'retryDelayMax')::int,
91 | COALESCE((options->>'expireInSeconds')::int, 900),
92 | COALESCE((options->>'retentionSeconds')::int, 1209600),
93 | COALESCE((options->>'deleteAfterSeconds')::int, 604800),
94 | COALESCE((options->>'warningQueueSize')::int, 0),
95 | options->>'deadLetter',
96 | COALESCE((options->>'partition')::bool, false),
97 | tablename
98 | )
99 | ON CONFLICT DO NOTHING
100 | RETURNING created_on
101 | )
102 | SELECT created_on into queue_created_on from q;
103 |
104 | IF queue_created_on IS NULL OR options->>'partition' IS DISTINCT FROM 'true' THEN
105 | RETURN;
106 | END IF;
107 |
108 | EXECUTE format('CREATE TABLE ${schema}.%I (LIKE ${schema}.job INCLUDING DEFAULTS)', tablename);
109 |
110 | EXECUTE format('ALTER TABLE ${schema}.%1$I ADD PRIMARY KEY (name, id)', tablename);
111 | EXECUTE format('ALTER TABLE ${schema}.%1$I ADD CONSTRAINT q_fkey FOREIGN KEY (name) REFERENCES ${schema}.queue (name) ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED', tablename);
112 | EXECUTE format('ALTER TABLE ${schema}.%1$I ADD CONSTRAINT dlq_fkey FOREIGN KEY (dead_letter) REFERENCES ${schema}.queue (name) ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED', tablename);
113 |
114 | EXECUTE format('CREATE INDEX %1$s_i5 ON ${schema}.%1$I (name, start_after) INCLUDE (priority, created_on, id) WHERE state < ''active''', tablename);
115 | EXECUTE format('CREATE UNIQUE INDEX %1$s_i4 ON ${schema}.%1$I (name, singleton_on, COALESCE(singleton_key, '''')) WHERE state <> ''cancelled'' AND singleton_on IS NOT NULL', tablename);
116 |
117 | IF options->>'policy' = 'short' THEN
118 | EXECUTE format('CREATE UNIQUE INDEX %1$s_i1 ON ${schema}.%1$I (name, COALESCE(singleton_key, '''')) WHERE state = ''created'' AND policy = ''short''', tablename);
119 | ELSIF options->>'policy' = 'singleton' THEN
120 | EXECUTE format('CREATE UNIQUE INDEX %1$s_i2 ON ${schema}.%1$I (name, COALESCE(singleton_key, '''')) WHERE state = ''active'' AND policy = ''singleton''', tablename);
121 | ELSIF options->>'policy' = 'stately' THEN
122 | EXECUTE format('CREATE UNIQUE INDEX %1$s_i3 ON ${schema}.%1$I (name, state, COALESCE(singleton_key, '''')) WHERE state <= ''active'' AND policy = ''stately''', tablename);
123 | ELSIF options->>'policy' = 'exclusive' THEN
124 | EXECUTE format('CREATE UNIQUE INDEX %1$s_i6 ON ${schema}.%1$I (name, COALESCE(singleton_key, '''')) WHERE state <= ''active'' AND policy = ''exclusive''', tablename);
125 | END IF;
126 |
127 | EXECUTE format('ALTER TABLE ${schema}.%I ADD CONSTRAINT cjc CHECK (name=%L)', tablename, queue_name);
128 | EXECUTE format('ALTER TABLE ${schema}.job ATTACH PARTITION ${schema}.%I FOR VALUES IN (%L)', tablename, queue_name);
129 | END;
130 | $$
131 | LANGUAGE plpgsql;
132 | `,
133 | `CREATE UNIQUE INDEX job_i6 ON ${schema}.job_common (name, COALESCE(singleton_key, '')) WHERE state <= 'active' AND policy = 'exclusive'`
134 | ],
135 | uninstall: [
136 | `DROP INDEX ${schema}.job_i6`
137 | ]
138 |     }]
139 | }
140 |
141 | export {
142 | rollback,
143 | next,
144 | migrate,
145 | getAll,
146 | }
147 |
--------------------------------------------------------------------------------
/test/sendTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import type { TestContext } from './hooks.ts'
4 |
5 | describe('send', function () {
6 | it('should fail with no arguments', async function (this: TestContext) {
7 | this.boss = await helper.start(this.bossConfig)
8 |
9 | await assert.rejects(async () => {
10 | // @ts-ignore
11 | await this.boss.send()
12 | })
13 | })
14 |
15 | it('should fail with a function for data', async function () {
16 | this.boss = await helper.start(this.bossConfig)
17 |
18 | await assert.rejects(async () => {
19 | // @ts-ignore
20 | await this.boss.send('job', () => true)
21 | })
22 | })
23 |
24 | it('should fail with a function for options', async function () {
25 | this.boss = await helper.start(this.bossConfig)
26 |
27 | await assert.rejects(async () => {
28 | // @ts-ignore
29 | await this.boss.send('job', 'data', () => true)
30 | })
31 | })
32 |
33 | it('should accept single string argument', async function () {
34 | this.boss = await helper.start(this.bossConfig)
35 |
36 | await this.boss.send(this.schema)
37 | })
38 |
39 | it('should accept job object argument with only name', async function () {
40 | this.boss = await helper.start(this.bossConfig)
41 |
42 | await this.boss.send({ name: this.schema })
43 | })
44 |
45 | it('should accept job object with name and data only', async function () {
46 | this.boss = await helper.start(this.bossConfig)
47 |
48 | const message = 'hi'
49 |
50 | await this.boss.send({ name: this.schema, data: { message } })
51 |
52 | const [job] = await this.boss.fetch<{ message: string }>(this.schema)
53 |
54 | assert.strictEqual(message, job.data.message)
55 | })
56 |
57 | it('should accept job object with name and options only', async function () {
58 | this.boss = await helper.start(this.bossConfig)
59 |
60 | const options = { retryLimit: 1 }
61 |
62 | await this.boss.send({ name: this.schema, options })
63 |
64 | const [job] = await this.boss.fetch(this.schema)
65 |
66 | assert.strictEqual(job.data, null)
67 | })
68 |
69 | it('should accept job object with name and custom connection', async function () {
70 | this.boss = await helper.start(this.bossConfig)
71 |
72 | let called = false
73 | const db = await helper.getDb()
74 | const options = {
75 | db: {
76 | // @ts-ignore
77 | async executeSql (sql, values) {
78 | called = true
79 | // @ts-ignore
80 | return db.pool.query(sql, values)
81 | }
82 | },
83 | someCrazyOption: 'whatever'
84 | }
85 |
86 | await this.boss.send({ name: this.schema, options })
87 |
88 | const [job] = await this.boss.fetch(this.schema)
89 |
90 | assert.notEqual(job, null)
91 | assert.strictEqual(job.data, null)
92 | assert.strictEqual(called, true)
93 | })
94 |
95 | it('should not create job if transaction fails', async function (this: TestContext) {
96 | this.boss = await helper.start(this.bossConfig)
97 | const { schema } = this.bossConfig
98 |
99 | const db = await helper.getDb()
100 | const client = (db as any).pool
101 | await client.query(`CREATE TABLE IF NOT EXISTS ${schema}.test (label VARCHAR(50))`)
102 |
103 | const throwError = () => { throw new Error('Error') }
104 |
105 | try {
106 | await client.query('BEGIN')
107 | const options = {
108 | db: {
109 | async executeSql (sql: string, values: any[]) {
110 | return client.query(sql, values)
111 | }
112 | },
113 | someCrazyOption: 'whatever'
114 | }
115 | const queryText = `INSERT INTO ${schema}.test(label) VALUES('Test')`
116 | await client.query(queryText)
117 |
118 | await this.boss.send({ name: this.schema, options })
119 |
120 | throwError()
121 | await client.query('COMMIT')
122 | } catch (e) {
123 | await client.query('ROLLBACK')
124 | }
125 |
126 | const [job] = await this.boss.fetch(this.schema)
127 |
128 | assert(!job)
129 | })
130 |
131 | it('should create job with all properties', async function (this: TestContext) {
132 | this.boss = await helper.start(this.bossConfig)
133 |
134 | const deadLetter = `${this.schema}_dlq`
135 | await this.boss.createQueue(deadLetter)
136 | await this.boss.updateQueue(this.schema, { deadLetter })
137 |
138 | const options = {
139 | priority: 1,
140 | retryLimit: 1,
141 | retryDelay: 2,
142 | retryBackoff: true,
143 | retryDelayMax: 3,
144 | startAfter: new Date().toISOString(),
145 | expireInSeconds: 5,
146 | deleteAfterSeconds: 60,
147 | singletonKey: '123',
148 | retentionSeconds: 10
149 | }
150 |
151 | const keepUntil = new Date(new Date(options.startAfter).getTime() + (options.retentionSeconds * 1000)).toISOString()
152 |
153 | const id = await this.boss.send(this.schema, {}, options)
154 |
155 | const job = await this.boss.getJobById(this.schema, id!)
156 | assert(job)
157 |
158 | assert.strictEqual(job.priority, options.priority, `priority input ${options.priority} didn't match job ${job.priority}`)
159 | assert.strictEqual(job.retryLimit, options.retryLimit, `retryLimit input ${options.retryLimit} didn't match job ${job.retryLimit}`)
160 | assert.strictEqual(job.retryDelay, options.retryDelay, `retryDelay input ${options.retryDelay} didn't match job ${job.retryDelay}`)
161 | assert.strictEqual(job.retryBackoff, options.retryBackoff, `retryBackoff input ${options.retryBackoff} didn't match job ${job.retryBackoff}`)
162 | assert.strictEqual(job.retryDelayMax, options.retryDelayMax, `retryDelayMax input ${options.retryDelayMax} didn't match job ${job.retryDelayMax}`)
163 | assert.strictEqual(new Date(job.startAfter).toISOString(), options.startAfter, `startAfter input ${options.startAfter} didn't match job ${job.startAfter}`)
164 | assert.strictEqual(job.expireInSeconds, options.expireInSeconds, `expireInSeconds input ${options.expireInSeconds} didn't match job ${job.expireInSeconds}`)
165 | assert.strictEqual(job.deleteAfterSeconds, options.deleteAfterSeconds, `deleteAfterSeconds input ${options.deleteAfterSeconds} didn't match job ${job.deleteAfterSeconds}`)
166 | assert.strictEqual(job.singletonKey, options.singletonKey, `name input ${options.singletonKey} didn't match job ${job.singletonKey}`)
167 | assert.strictEqual(new Date(job.keepUntil).toISOString(), keepUntil, `keepUntil input ${keepUntil} didn't match job ${job.keepUntil}`)
168 | })
169 | })
170 |
--------------------------------------------------------------------------------
/src/timekeeper.ts:
--------------------------------------------------------------------------------
1 | import { CronExpressionParser } from 'cron-parser'
2 | import EventEmitter from 'node:events'
3 |
4 | import * as Attorney from './attorney.ts'
5 | import type Manager from './manager.ts'
6 | import * as plans from './plans.ts'
7 | import * as types from './types.ts'
8 |
9 | export const QUEUES = {
10 | SEND_IT: '__pgboss__send-it'
11 | }
12 |
13 | const EVENTS = {
14 | error: 'error',
15 | schedule: 'schedule',
16 | warning: 'warning'
17 | }
18 |
19 | const WARNINGS = {
20 | CLOCK_SKEW: {
21 | message: 'Warning: Clock skew between this instance and the database server. This will not break scheduling, but is emitted any time the skew exceeds 60 seconds.'
22 | }
23 | }
24 |
25 | class Timekeeper extends EventEmitter implements types.EventsMixin {
26 | db: types.IDatabase
27 | config: types.ResolvedConstructorOptions
28 | manager: Manager
29 |
30 | private stopped = true
31 | private cronMonitorInterval: NodeJS.Timeout | null | undefined
32 | private skewMonitorInterval: NodeJS.Timeout | null | undefined
33 | private timekeeping: boolean | undefined
34 |
35 | clockSkew = 0
36 | events = EVENTS
37 |
38 | constructor (db: types.IDatabase, manager: Manager, config: types.ResolvedConstructorOptions) {
39 | super()
40 |
41 | this.db = db
42 | this.config = config
43 | this.manager = manager
44 | }
45 |
46 | async start () {
47 | this.stopped = false
48 |
49 | await this.cacheClockSkew()
50 | await this.manager.createQueue(QUEUES.SEND_IT)
51 |
52 | const options = {
53 | pollingIntervalSeconds: this.config.cronWorkerIntervalSeconds,
54 | batchSize: 50
55 | }
56 |
57 | await this.manager.work(QUEUES.SEND_IT, options, (jobs) => this.onSendIt(jobs))
58 |
59 | setImmediate(() => this.onCron())
60 |
61 | this.cronMonitorInterval = setInterval(async () => await this.onCron(), this.config.cronMonitorIntervalSeconds! * 1000)
62 | this.skewMonitorInterval = setInterval(async () => await this.cacheClockSkew(), this.config.clockMonitorIntervalSeconds! * 1000)
63 | }
64 |
65 | async stop () {
66 | if (this.stopped) {
67 | return
68 | }
69 |
70 | this.stopped = true
71 |
72 | await this.manager.offWork(QUEUES.SEND_IT, { wait: true })
73 |
74 | if (this.skewMonitorInterval) {
75 | clearInterval(this.skewMonitorInterval)
76 | this.skewMonitorInterval = null
77 | }
78 |
79 | if (this.cronMonitorInterval) {
80 | clearInterval(this.cronMonitorInterval)
81 | this.cronMonitorInterval = null
82 | }
83 | }
84 |
85 | async cacheClockSkew () {
86 | let skew = 0
87 |
88 | try {
89 | if (this.config.__test__force_clock_monitoring_error) {
90 | throw new Error(this.config.__test__force_clock_monitoring_error)
91 | }
92 |
93 | const { rows } = await this.db.executeSql(plans.getTime())
94 |
95 | const local = Date.now()
96 |
97 | const dbTime = parseFloat(rows[0].time)
98 |
99 | skew = dbTime - local
100 |
101 | const skewSeconds = Math.abs(skew) / 1000
102 |
103 | if (skewSeconds >= 60 || this.config.__test__force_clock_skew_warning) {
104 | this.emit(this.events.warning, { message: WARNINGS.CLOCK_SKEW.message, data: { seconds: skewSeconds, direction: skew > 0 ? 'slower' : 'faster' } })
105 | }
106 | } catch (err) {
107 | this.emit(this.events.error, err)
108 | } finally {
109 | this.clockSkew = skew
110 | }
111 | }
112 |
113 | async onCron () {
114 | try {
115 | if (this.stopped || this.timekeeping) return
116 |
117 | if (this.config.__test__force_cron_monitoring_error) {
118 | throw new Error(this.config.__test__force_cron_monitoring_error)
119 | }
120 |
121 | this.timekeeping = true
122 |
123 | const sql = plans.trySetCronTime(this.config.schema, this.config.cronMonitorIntervalSeconds)
124 |
125 | if (!this.stopped) {
126 | const { rows } = await this.db.executeSql(sql)
127 |
128 | if (!this.stopped && rows.length === 1) {
129 | await this.cron()
130 | }
131 | }
132 | } catch (err) {
133 | this.emit(this.events.error, err)
134 | } finally {
135 | this.timekeeping = false
136 | }
137 | }
138 |
139 | async cron () {
140 | const schedules = await this.getSchedules()
141 |
142 | const scheduled = schedules
143 | .filter(i => this.shouldSendIt(i.cron, i.timezone))
144 | .map(({ name, key, data, options }): types.JobInsert => ({ data: { name, data, options }, singletonKey: `${name}__${key}`, singletonSeconds: 60 }))
145 |
146 | if (scheduled.length > 0 && !this.stopped) {
147 | await this.manager.insert(QUEUES.SEND_IT, scheduled)
148 | }
149 | }
150 |
151 | shouldSendIt (cron: string, tz: string) {
152 | const interval = CronExpressionParser.parse(cron, { tz, strict: false })
153 |
154 | const prevTime = interval.prev()
155 |
156 | const databaseTime = Date.now() + this.clockSkew
157 |
158 | const prevDiff = (databaseTime - prevTime.getTime()) / 1000
159 |
160 | return prevDiff < 60
161 | }
162 |
163 |   private async onSendIt (jobs: types.Job<types.Request>[]): Promise<void> {
164 | await Promise.allSettled(jobs.map(({ data }) => this.manager.send(data)))
165 | }
166 |
167 |   async getSchedules (name?: string, key = ''): Promise<types.Schedule[]> {
168 | let sql = plans.getSchedules(this.config.schema)
169 | let params: unknown[] = []
170 |
171 | if (name) {
172 | sql = plans.getSchedulesByQueue(this.config.schema)
173 | params = [name, key]
174 | }
175 |
176 | const { rows } = await this.db.executeSql(sql, params)
177 |
178 | return rows
179 | }
180 |
181 |   async schedule (name: string, cron: string, data?: unknown, options: types.ScheduleOptions = {}): Promise<void> {
182 | const { tz = 'UTC', key = '', ...rest } = options
183 |
184 | CronExpressionParser.parse(cron, { tz, strict: false })
185 |
186 | Attorney.checkSendArgs([name, data, { ...rest }])
187 | Attorney.assertKey(key)
188 |
189 | try {
190 | const sql = plans.schedule(this.config.schema)
191 | await this.db.executeSql(sql, [name, key, cron, tz, data, options])
192 | } catch (err: any) {
193 | if (err.message.includes('foreign key')) {
194 | err.message = `Queue ${name} not found`
195 | }
196 |
197 | throw err
198 | }
199 | }
200 |
201 |   async unschedule (name: string, key = ''): Promise<void> {
202 | const sql = plans.unschedule(this.config.schema)
203 | await this.db.executeSql(sql, [name, key])
204 | }
205 | }
206 |
207 | export default Timekeeper
208 |
--------------------------------------------------------------------------------
/test/failureTest.ts:
--------------------------------------------------------------------------------
1 | import { delay } from '../src/tools.ts'
2 | import assert from 'node:assert'
3 | import * as helper from './testHelper.ts'
4 |
5 | describe('failure', function () {
6 | it('should reject missing id argument', async function () {
7 | this.boss = await helper.start(this.bossConfig)
8 | await assert.rejects(async () => {
9 | // @ts-ignore
10 | await this.boss.fail()
11 | })
12 | })
13 |
14 | it('should fail a job when requested', async function () {
15 | this.boss = await helper.start(this.bossConfig)
16 |
17 | await this.boss.send(this.schema)
18 |
19 | const [job] = await this.boss.fetch(this.schema)
20 |
21 | await this.boss.fail(this.schema, job.id)
22 | })
23 |
24 | it('should fail a batch of jobs', async function () {
25 | this.boss = await helper.start(this.bossConfig)
26 |
27 | await Promise.all([
28 | this.boss.send(this.schema),
29 | this.boss.send(this.schema),
30 | this.boss.send(this.schema)
31 | ])
32 |
33 | const jobs = await this.boss.fetch(this.schema, { batchSize: 3 })
34 |
35 | const result = await this.boss.fail(this.schema, jobs.map(job => job.id))
36 |
37 | assert.strictEqual(result.jobs.length, 3)
38 | })
39 |
40 | it('should fail a batch of jobs with a data arg', async function () {
41 | this.boss = await helper.start(this.bossConfig)
42 | const message = 'some error'
43 |
44 | await Promise.all([
45 | this.boss.send(this.schema),
46 | this.boss.send(this.schema),
47 | this.boss.send(this.schema)
48 | ])
49 |
50 | const jobs = await this.boss.fetch(this.schema, { batchSize: 3 })
51 |
52 | await this.boss.fail(this.schema, jobs.map(job => job.id), new Error(message))
53 |
54 | const results = await Promise.all(jobs.map(job => this.boss!.getJobById(this.schema, job.id)))
55 |
56 | // @ts-ignore
57 | assert(results.every(i => i!.output.message === message))
58 | })
59 |
60 | it('should preserve nested objects within a payload that is an instance of Error', async function () {
61 | this.boss = await helper.start(this.bossConfig)
62 |
63 | const failPayload = new Error('Something went wrong')
64 | // @ts-ignore
65 | failPayload.some = { deeply: { nested: { reason: 'nuna' } } }
66 |
67 | const jobId = await this.boss.send(this.schema)
68 |
69 | assert(jobId)
70 |
71 | await this.boss.fail(this.schema, jobId, failPayload)
72 |
73 | const job = await this.boss.getJobById(this.schema, jobId)
74 |
75 | assert(job?.output)
76 |
77 | // @ts-ignore
78 | assert.strictEqual(job.output.some.deeply.nested.reason, failPayload.some.deeply.nested.reason)
79 | })
80 |
81 | it('failure via Promise reject() should pass string wrapped in value prop', async function () {
82 | this.boss = await helper.start({ ...this.bossConfig, __test__enableSpies: true })
83 | const failPayload = 'mah error'
84 |
85 | const spy = this.boss.getSpy(this.schema)
86 | const jobId = await this.boss.send(this.schema)
87 |
88 | assert(jobId)
89 |
90 | await this.boss.work(this.schema, () => Promise.reject(failPayload))
91 |
92 | await spy.waitForJobWithId(jobId, 'failed')
93 |
94 | const job = await this.boss.getJobById(this.schema, jobId)
95 |
96 | assert.strictEqual((job!.output as { value: string }).value, failPayload)
97 | })
98 |
99 | it('failure via Promise reject() should pass object payload', async function () {
100 | this.boss = await helper.start({ ...this.bossConfig, __test__enableSpies: true })
101 | const something = 'clever'
102 |
103 | const spy = this.boss.getSpy(this.schema)
104 | const errorResponse = new Error('custom error')
105 | // @ts-ignore
106 | errorResponse.something = something
107 |
108 | const jobId = await this.boss.send(this.schema)
109 |
110 | assert(jobId)
111 |
112 | await this.boss.work(this.schema, () => Promise.reject(errorResponse))
113 |
114 | await spy.waitForJobWithId(jobId, 'failed')
115 |
116 | const job = await this.boss.getJobById(this.schema, jobId)
117 |
118 | assert.strictEqual((job!.output as { something: string }).something, something)
119 | })
120 |
121 | it('failure with Error object should be saved in the job', async function () {
122 | this.boss = await helper.start({ ...this.bossConfig, __test__enableSpies: true })
123 | const message = 'a real error!'
124 |
125 | const spy = this.boss.getSpy(this.schema)
126 | const jobId = await this.boss.send(this.schema)
127 |
128 | assert(jobId)
129 |
130 | await this.boss.work(this.schema, async () => { throw new Error(message) })
131 |
132 | await spy.waitForJobWithId(jobId, 'failed')
133 |
134 | const job = await this.boss.getJobById(this.schema, jobId)
135 |
136 | assert((job!.output as { message: string }).message.includes(message))
137 | })
138 |
139 | it('should fail a job with custom connection', async function () {
140 | this.boss = await helper.start(this.bossConfig)
141 |
142 | await this.boss.send(this.schema)
143 |
144 | const [job] = await this.boss.fetch(this.schema)
145 |
146 | let called = false
147 | const _db = await helper.getDb()
148 | const db = {
149 | // @ts-ignore
150 | async executeSql (sql, values) {
151 | called = true
152 | // @ts-ignore
153 | return _db.pool.query(sql, values)
154 | }
155 | }
156 |
157 | await this.boss.fail(this.schema, job.id, null, { db })
158 |
159 | assert.strictEqual(called, true)
160 | })
161 |
162 | it('failure with circular payload should be safely serialized', async function () {
163 | this.boss = await helper.start({ ...this.bossConfig, __test__enableSpies: true })
164 |
165 | const spy = this.boss.getSpy(this.schema)
166 | const jobId = await this.boss.send(this.schema)
167 |
168 | assert(jobId)
169 |
170 | const message = 'mhmm'
171 |
172 | await this.boss.work(this.schema, { pollingIntervalSeconds: 0.5 }, async () => {
173 | const err = { message }
174 | // @ts-ignore
175 | err.myself = err
176 | throw err
177 | })
178 |
179 | await spy.waitForJobWithId(jobId, 'failed')
180 |
181 | const job = await this.boss.getJobById(this.schema, jobId)
182 |
183 | assert.strictEqual((job!.output as { message: string }).message, message)
184 | })
185 |
186 | it('dead letter queues are working', async function () {
187 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
188 |
189 | const deadLetter = `${this.schema}_dlq`
190 |
191 | await this.boss.createQueue(deadLetter)
192 | await this.boss.createQueue(this.schema, { deadLetter })
193 |
194 | const jobId = await this.boss.send(this.schema, { key: this.schema }, { retryLimit: 0 })
195 |
196 | assert(jobId)
197 |
198 | await this.boss.fetch(this.schema)
199 | await this.boss.fail(this.schema, jobId)
200 |
201 | const [job] = await this.boss.fetch<{ key: string }>(deadLetter)
202 |
203 | assert.strictEqual(job.data.key, this.schema)
204 | })
205 |
206 | it('should fail active jobs in a worker during shutdown', async function () {
207 | this.boss = await helper.start(this.bossConfig)
208 |
209 | const jobId = await this.boss.send(this.schema, null, { retryLimit: 1 })
210 |
211 | await this.boss.work(this.schema, async () => await delay(4000))
212 |
213 | await delay(500)
214 |
215 | await this.boss.stop({ timeout: 2000 })
216 |
217 | await this.boss.start()
218 |
219 | const [job] = await this.boss.fetch(this.schema)
220 |
221 | assert.strictEqual(job?.id, jobId)
222 | })
223 | })
224 |
--------------------------------------------------------------------------------
/src/types.ts:
--------------------------------------------------------------------------------
1 | export type JobStates = {
2 | created: 'created',
3 | retry: 'retry',
4 | active: 'active',
5 | completed: 'completed',
6 | cancelled: 'cancelled',
7 | failed: 'failed'
8 | }
9 |
10 | export type Events = {
11 | error: 'error',
12 | warning: 'warning',
13 | wip: 'wip',
14 | stopped: 'stopped'
15 | }
16 |
17 | export interface IDatabase {
18 | executeSql(text: string, values?: unknown[]): Promise<{ rows: any[] }>;
19 | }
20 |
21 | export interface DatabaseOptions {
22 | application_name?: string;
23 | database?: string;
24 | user?: string;
25 |   password?: string | (() => string) | (() => Promise<string>);
26 | host?: string;
27 | port?: number;
28 | schema?: string;
29 | ssl?: any;
30 | connectionString?: string;
31 | max?: number;
32 | db?: IDatabase;
33 | connectionTimeoutMillis?: number;
34 | /** @internal */
35 | debug?: boolean;
36 | }
37 |
38 | export interface SchedulingOptions {
39 | schedule?: boolean;
40 | clockMonitorIntervalSeconds?: number;
41 | cronWorkerIntervalSeconds?: number;
42 | cronMonitorIntervalSeconds?: number;
43 | }
44 |
45 | export interface MaintenanceOptions {
46 | supervise?: boolean;
47 | migrate?: boolean;
48 | createSchema?: boolean;
49 | warningSlowQuerySeconds?: number;
50 | warningQueueSize?: number;
51 | superviseIntervalSeconds?: number;
52 | maintenanceIntervalSeconds?: number;
53 | queueCacheIntervalSeconds?: number;
54 | monitorIntervalSeconds?: number;
55 | }
56 |
57 | export interface Migration {
58 | release: string
59 | version: number
60 | previous: number
61 | install: string[]
62 | uninstall: string[]
63 | }
64 |
65 | export interface ConstructorOptions extends DatabaseOptions, SchedulingOptions, MaintenanceOptions {
66 | /** @internal */
67 | __test__warn_slow_query?: boolean;
68 | /** @internal */
69 | __test__throw_maint?: string;
70 | /** @internal */
71 | __test__throw_queueCache?: boolean;
72 | /** @internal */
73 | __test__throw_worker?: boolean;
74 | /** @internal */
75 | __test__force_cron_monitoring_error?: string;
76 | /** @internal */
77 | __test__force_clock_skew_warning?: string;
78 | /** @internal */
79 | __test__force_clock_monitoring_error?: string;
80 | /** @internal */
81 | __test__enableSpies?: boolean;
82 | /** @internal */
83 | migrations?: Migration[];
84 | }
85 |
86 | export interface ResolvedConstructorOptions extends ConstructorOptions {
87 | schema: string;
88 | monitorIntervalSeconds: number;
89 | cronMonitorIntervalSeconds: number;
90 | maintenanceIntervalSeconds: number;
91 | }
92 |
93 | export interface QueueOptions {
94 | expireInSeconds?: number;
95 | retentionSeconds?: number;
96 | deleteAfterSeconds?: number;
97 | retryLimit?: number;
98 | retryDelay?: number;
99 | retryBackoff?: boolean;
100 | retryDelayMax?: number;
101 | }
102 |
103 | export interface JobOptions {
104 | id?: string;
105 | priority?: number;
106 | startAfter?: number | string | Date;
107 | singletonKey?: string;
108 | singletonSeconds?: number;
109 | singletonNextSlot?: boolean;
110 | keepUntil?: number | string | Date;
111 | }
112 |
113 | export interface ConnectionOptions {
114 | db?: IDatabase;
115 | }
116 |
117 | export type InsertOptions = ConnectionOptions
118 |
119 | export type SendOptions = JobOptions & QueueOptions & ConnectionOptions
120 |
121 | export type QueuePolicy = 'standard' | 'short' | 'singleton' | 'stately' | 'exclusive' | (string & {})
122 |
123 | export interface Queue extends QueueOptions {
124 | name: string;
125 | policy?: QueuePolicy;
126 | partition?: boolean;
127 | deadLetter?: string;
128 | warningQueueSize?: number;
129 | }
130 |
131 | export interface QueueResult extends Queue {
132 | deferredCount: number;
133 | queuedCount: number;
134 | activeCount: number;
135 | totalCount: number
136 | table: string;
137 | createdOn: Date;
138 | updatedOn: Date;
139 | singletonsActive: string[] | null;
140 | }
141 |
142 | export type ScheduleOptions = SendOptions & { tz?: string, key?: string }
143 |
144 | export interface JobPollingOptions {
145 | pollingIntervalSeconds?: number;
146 | }
147 |
148 | export interface JobFetchOptions {
149 | includeMetadata?: boolean;
150 | priority?: boolean;
151 | batchSize?: number;
152 | ignoreStartAfter?: boolean;
153 | }
154 |
155 | export type WorkOptions = JobFetchOptions & JobPollingOptions
156 | export type FetchOptions = JobFetchOptions & ConnectionOptions
157 |
158 | export interface ResolvedWorkOptions extends WorkOptions {
159 | pollingInterval: number;
160 | }
161 |
162 | export interface WorkHandler<ReqData = object> {
163 | (job: Job<ReqData>[]): Promise<any>;
164 | }
165 |
166 | export interface WorkWithMetadataHandler<ReqData = object> {
167 | (job: JobWithMetadata<ReqData>[]): Promise<any>;
168 | }
169 |
170 | export interface Request {
171 | name: string;
172 | data?: object;
173 | options?: SendOptions;
174 | }
175 |
176 | export interface Schedule {
177 | name: string;
178 | key: string;
179 | cron: string;
180 | timezone: string;
181 | data?: object;
182 | options?: SendOptions;
183 | }
184 |
185 | export interface Job<T = object> {
186 | id: string;
187 | name: string;
188 | data: T;
189 | expireInSeconds: number;
190 | signal: AbortSignal;
191 | }
192 |
193 | export interface JobWithMetadata<T = object> extends Job<T> {
194 | priority: number;
195 | state: 'created' | 'retry' | 'active' | 'completed' | 'cancelled' | 'failed';
196 | retryLimit: number;
197 | retryCount: number;
198 | retryDelay: number;
199 | retryBackoff: boolean;
200 | retryDelayMax?: number;
201 | startAfter: Date;
202 | startedOn: Date;
203 | singletonKey: string | null;
204 | singletonOn: Date | null;
205 | expireInSeconds: number;
206 | deleteAfterSeconds: number;
207 | createdOn: Date;
208 | completedOn: Date | null;
209 | keepUntil: Date;
210 | policy: QueuePolicy;
211 | deadLetter: string;
212 | output: object;
213 | }
214 |
215 | export interface JobInsert<T = object> {
216 | id?: string;
217 | data?: T;
218 | priority?: number;
219 | retryLimit?: number;
220 | retryDelay?: number;
221 | retryBackoff?: boolean;
222 | retryDelayMax?: number;
223 | startAfter?: number | string | Date;
224 | singletonKey?: string;
225 | singletonSeconds?: number;
226 | expireInSeconds?: number;
227 | deleteAfterSeconds?: number;
228 | retentionSeconds?: number;
229 | }
230 |
231 | export type WorkerState = 'created' | 'active' | 'stopping' | 'stopped'
232 |
233 | export interface WipData {
234 | id: string;
235 | name: string;
236 | options: WorkOptions;
237 | state: WorkerState;
238 | count: number;
239 | createdOn: number;
240 | lastFetchedOn: number | null;
241 | lastJobStartedOn: number | null;
242 | lastJobEndedOn: number | null;
243 | lastJobDuration: number | null;
244 | lastError: object | null;
245 | lastErrorOn: number | null;
246 | }
247 |
248 | export interface StopOptions {
249 | close?: boolean;
250 | graceful?: boolean;
251 | timeout?: number;
252 | }
253 |
254 | export interface OffWorkOptions {
255 | id?: string,
256 | wait?: boolean
257 | }
258 |
259 | export interface EventsMixin extends NodeJS.EventEmitter {
260 | events: Record<string, string>;
261 | }
262 |
263 | export interface FunctionsMixin {
264 | functions: Function[];
265 | }
266 |
267 | export type UpdateQueueOptions = Omit<Queue, 'name' | 'policy' | 'partition'>
268 |
269 | export interface Warning { message: string, data: object }
270 |
271 | export interface CommandResponse {
272 | /** @internal */
273 | jobs: string[];
274 | /** @internal */
275 | requested: number;
276 | /** @internal */
277 | affected: number;
278 | }
279 |
280 | export type PgBossEventMap = {
281 | error: [error: Error]
282 | warning: [warning: Warning]
283 | wip: [data: WipData[]]
284 | stopped: []
285 | }
286 |
--------------------------------------------------------------------------------
/test/fetchTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 |
4 | describe('fetch', function () {
5 | it('should reject missing queue argument', async function () {
6 | this.boss = await helper.start(this.bossConfig)
7 | await assert.rejects(async () => {
8 | // @ts-ignore
9 | await this.boss.fetch()
10 | })
11 | })
12 |
13 | it('should fetch a job by name manually', async function () {
14 | this.boss = await helper.start(this.bossConfig)
15 |
16 | await this.boss.send(this.schema)
17 | const [job] = await this.boss.fetch(this.schema)
18 | assert(this.schema === job.name)
19 | })
20 |
21 | it('should get a batch of jobs as an array', async function () {
22 | this.boss = await helper.start(this.bossConfig)
23 | const batchSize = 4
24 |
25 | await Promise.all([
26 | this.boss.send(this.schema),
27 | this.boss.send(this.schema),
28 | this.boss.send(this.schema),
29 | this.boss.send(this.schema)
30 | ])
31 |
32 | const jobs = await this.boss.fetch(this.schema, { batchSize })
33 |
34 | assert(jobs.length === batchSize)
35 | })
36 |
37 | it('should fetch all metadata for a single job when requested', async function () {
38 | this.boss = await helper.start(this.bossConfig)
39 |
40 | await this.boss.send(this.schema)
41 | const [job] = await this.boss.fetch(this.schema, { includeMetadata: true })
42 |
43 | assert(this.schema === job.name)
44 | assert(job.state === 'active')
45 |
46 | assert(job.priority !== undefined)
47 | assert(job.policy !== undefined)
48 | assert(job.retryLimit !== undefined)
49 | assert(job.retryCount !== undefined)
50 | assert(job.retryDelay !== undefined)
51 | assert(job.retryBackoff === false)
52 | assert(job.retryDelayMax !== undefined)
53 | assert(job.startAfter !== undefined)
54 | assert(job.startedOn !== undefined)
55 | assert(job.singletonKey !== undefined)
56 | assert(job.singletonOn !== undefined)
57 | assert(job.expireInSeconds !== undefined)
58 | assert(job.deleteAfterSeconds !== undefined)
59 | assert(job.createdOn !== undefined)
60 | assert(job.completedOn !== undefined)
61 | assert(job.keepUntil !== undefined)
62 | assert(job.deadLetter !== undefined)
63 | })
64 |
65 | it('should fetch all metadata for a batch of jobs when requested', async function () {
66 | this.boss = await helper.start(this.bossConfig)
67 | const batchSize = 4
68 |
69 | await Promise.all([
70 | this.boss.send(this.schema),
71 | this.boss.send(this.schema),
72 | this.boss.send(this.schema),
73 | this.boss.send(this.schema)
74 | ])
75 |
76 | const jobs = await this.boss.fetch(this.schema, { batchSize, includeMetadata: true })
77 | assert(jobs.length === batchSize)
78 |
79 | for (const job of jobs) {
80 | assert(this.schema === job.name)
81 | assert(job.state === 'active')
82 | assert(job.priority !== undefined)
83 | assert(job.policy !== undefined)
84 | assert(job.retryLimit !== undefined)
85 | assert(job.retryCount !== undefined)
86 | assert(job.retryDelay !== undefined)
87 | assert(job.retryBackoff === false)
88 | assert(job.retryDelayMax !== undefined)
89 | assert(job.startAfter !== undefined)
90 | assert(job.startedOn !== undefined)
91 | assert(job.singletonKey === null)
92 | assert(job.singletonOn === null)
93 | assert(job.expireInSeconds !== undefined)
94 | assert(job.createdOn !== undefined)
95 | assert(job.completedOn === null)
96 | assert(job.keepUntil !== undefined)
97 | }
98 | })
99 |
100 | it('should fetch all metadata for a single job with exponential backoff when requested', async function () {
101 | this.boss = await helper.start(this.bossConfig)
102 |
103 | await this.boss.send(this.schema, null, { retryLimit: 1, retryDelay: 1, retryBackoff: true, retryDelayMax: 10 })
104 | const [job] = await this.boss.fetch(this.schema, { includeMetadata: true })
105 |
106 | assert.strictEqual(job.name, this.schema)
107 | assert.strictEqual(job.priority, 0)
108 | assert.strictEqual(job.state, 'active')
109 | assert(job.policy !== undefined)
110 | assert.strictEqual(job.retryLimit, 1)
111 | assert.strictEqual(job.retryCount, 0)
112 | assert.strictEqual(job.retryDelay, 1)
113 | assert.strictEqual(job.retryBackoff, true)
114 | assert.strictEqual(job.retryDelayMax, 10)
115 | assert(job.startAfter !== undefined)
116 | assert(job.startedOn !== undefined)
117 | assert.strictEqual(job.singletonKey, null)
118 | assert.strictEqual(job.singletonOn, null)
119 | assert(job.expireInSeconds !== undefined)
120 | assert(job.createdOn !== undefined)
121 | assert.strictEqual(job.completedOn, null)
122 | assert(job.keepUntil !== undefined)
123 | })
124 |
125 | it('should fetch all metadata for a batch of jobs with exponential backoff when requested', async function () {
126 | this.boss = await helper.start(this.bossConfig)
127 | const options = { retryDelay: 1, retryBackoff: true, retryDelayMax: 10 }
128 | const batchSize = 4
129 |
130 | await Promise.all([
131 | this.boss.send(this.schema, null, options),
132 | this.boss.send(this.schema, null, options),
133 | this.boss.send(this.schema, null, options),
134 | this.boss.send(this.schema, null, options)
135 | ])
136 |
137 | const jobs = await this.boss.fetch(this.schema, { batchSize, includeMetadata: true })
138 | assert(jobs.length === batchSize)
139 |
140 | for (const job of jobs) {
141 | assert(this.schema === job.name)
142 | assert(job.priority === 0)
143 | assert(job.state === 'active')
144 | assert(job.policy !== undefined)
145 | assert(job.retryLimit !== undefined)
146 | assert(job.retryCount === 0)
147 | assert(job.retryDelay === 1)
148 | assert(job.retryBackoff === true)
149 | assert(job.retryDelayMax === 10)
150 | assert(job.startAfter !== undefined)
151 | assert(job.startedOn !== undefined)
152 | assert(job.singletonKey !== undefined)
153 | assert(job.singletonOn !== undefined)
154 | assert(job.expireInSeconds !== undefined)
155 | assert(job.deleteAfterSeconds !== undefined)
156 | assert(job.createdOn !== undefined)
157 | assert(job.completedOn !== undefined)
158 | assert(job.keepUntil !== undefined)
159 | assert(job.deadLetter !== undefined)
160 | }
161 | })
162 |
163 | it('should fetch a job with custom connection', async function () {
164 | this.boss = await helper.start(this.bossConfig)
165 |
166 | let calledCounter = 0
167 | const db = await helper.getDb()
168 | const options = {
169 | db: {
170 | // @ts-ignore
171 | async executeSql (sql, values) {
172 | calledCounter++
173 | // @ts-ignore
174 | return db.pool.query(sql, values)
175 | }
176 | }
177 | }
178 |
179 | await this.boss.send(this.schema, {}, options)
180 | const [job] = await this.boss.fetch(this.schema, { ...options, batchSize: 10 })
181 | assert(this.schema === job.name)
182 | assert.strictEqual(calledCounter, 2)
183 | })
184 |
185 | it('should allow fetching jobs that have a start_after in the future', async function () {
186 | this.boss = await helper.start(this.bossConfig)
187 |
188 | await this.boss.send(this.schema, { startAfter: new Date(Date.now() + 1000) })
189 | const db = await helper.getDb()
190 | const sqlStatements : string[] = []
191 | const options = {
192 | db: {
193 | // @ts-ignore
194 | async executeSql (sql, values) {
195 | sqlStatements.push(sql)
196 | // @ts-ignore
197 | return db.pool.query(sql, values)
198 | }
199 | }
200 | }
201 |
202 | const jobs = await this.boss.fetch(this.schema, { ...options, ignoreStartAfter: true })
203 | assert(jobs.length === 1)
204 | assert(sqlStatements.length === 1)
205 | assert(!sqlStatements[0].includes('start_after < now()'))
206 | })
207 | })
208 |
--------------------------------------------------------------------------------
/test/migrationTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import { PgBoss, getConstructionPlans, getMigrationPlans, getRollbackPlans } from '../src/index.ts'
3 | import { getDb } from './testHelper.ts'
4 | import Contractor from '../src/contractor.ts'
5 | import { getAll } from '../src/migrationStore.ts'
6 | import packageJson from '../package.json' with { type: 'json' }
7 | import { setVersion } from '../src/plans.ts'
8 |
9 | const currentSchemaVersion = packageJson.pgboss.schema
10 |
11 | describe('migration', function () {
12 | beforeEach(async function () {
13 | const db = await getDb({ debug: false })
14 | // @ts-ignore
15 | this.contractor = new Contractor(db, this.bossConfig)
16 | })
17 |
18 | it('should include create schema by default ', function () {
19 | const schema = 'custom'
20 | const plans = Contractor.constructionPlans(schema)
21 | assert(plans.includes('CREATE SCHEMA'))
22 | })
23 |
24 | it('should not include create schema if createSchema=false', function () {
25 | const schema = 'custom'
26 | const plans = Contractor.constructionPlans(schema, { createSchema: false })
27 | assert(!plans.includes('CREATE SCHEMA'))
28 | })
29 |
30 | it('should not install if createSchema option is false and schema is missing', async function () {
31 | const config = { ...this.bossConfig, createSchema: false }
32 | this.boss = new PgBoss(config)
33 | await assert.rejects(async () => {
34 | await this.boss!.start()
35 | })
36 | })
37 |
38 | it('should export commands to manually build schema', function () {
39 | const schema = 'custom'
40 | const plans = getConstructionPlans(schema)
41 |
42 | assert(plans.includes(`${schema}.job`))
43 | assert(plans.includes(`${schema}.version`))
44 | })
45 |
46 | it('should fail to export migration using current version', function () {
47 | const schema = 'custom'
48 |
49 | assert.throws(() => {
50 | getMigrationPlans(schema, currentSchemaVersion)
51 | })
52 | })
53 |
54 | it('should export commands to migrate', function () {
55 | const schema = 'custom'
56 | const plans = getMigrationPlans(schema, currentSchemaVersion - 1)
57 |
58 | assert(plans)
59 | })
60 |
61 | it('should fail to export commands to roll back from invalid version', function () {
62 | const schema = 'custom'
63 |
64 | assert.throws(() => {
65 | getRollbackPlans(schema, -1)
66 | })
67 | })
68 |
69 | it('should export commands to roll back', function () {
70 | const schema = 'custom'
71 | const plans = getRollbackPlans(schema, currentSchemaVersion)
72 |
73 | assert(plans, 'rollback plans not found')
74 | })
75 |
76 | it('should not migrate when current version is not found in migration store', async function () {
77 | const { contractor } = this
78 | const config = { ...this.bossConfig }
79 |
80 | await contractor.create()
81 |
82 | const db = await getDb()
83 | // version 20 was v9 and dropped from the migration store with v10
84 | await db.executeSql(setVersion(config.schema, 20))
85 |
86 | this.boss = new PgBoss(config)
87 |
88 | await assert.rejects(async () => {
89 | await this.boss!.start()
90 | })
91 | })
92 |
93 | it.skip('should migrate to previous version and back again', async function () {
94 | const { contractor } = this
95 |
96 | await contractor.create()
97 |
98 | await contractor.rollback(currentSchemaVersion)
99 | const oldVersion = await contractor.schemaVersion()
100 |
101 | assert.notStrictEqual(oldVersion, currentSchemaVersion)
102 |
103 | await contractor.migrate(oldVersion)
104 | const newVersion = await contractor.schemaVersion()
105 |
106 | assert.strictEqual(newVersion, currentSchemaVersion)
107 | })
108 |
109 | it('should install next version via contractor', async function () {
110 | const { contractor } = this
111 |
112 | await contractor.create()
113 |
114 | await contractor.rollback(currentSchemaVersion)
115 |
116 | const oneVersionAgo = await contractor.schemaVersion()
117 |
118 | await contractor.next(oneVersionAgo)
119 |
120 | const version = await contractor.schemaVersion()
121 |
122 | assert.strictEqual(version, currentSchemaVersion)
123 | })
124 |
125 | it('should migrate to latest during start if on previous schema version', async function () {
126 | const { contractor } = this
127 |
128 | await contractor.create()
129 |
130 | await contractor.rollback(currentSchemaVersion)
131 |
132 | const config = { ...this.bossConfig }
133 |
134 | this.boss = new PgBoss(config)
135 |
136 | await this.boss.start()
137 |
138 | const version = await contractor.schemaVersion()
139 |
140 | assert.strictEqual(version, currentSchemaVersion)
141 | })
142 |
143 | it.skip('should migrate through 2 versions back and forth', async function () {
144 | const { contractor } = this
145 |
146 | const queue = 'migrate-back-2-and-forward'
147 |
148 | const config = { ...this.bossConfig }
149 |
150 | this.boss = new PgBoss(config)
151 |
152 | await this.boss.start()
153 |
154 | // creating jobs in 3 states to have data to migrate back and forth
155 |
156 | // completed job
157 | await this.boss.createQueue(queue)
158 | await this.boss.send(queue)
159 | const [job] = await this.boss.fetch(queue)
160 | await this.boss.complete(queue, job.id)
161 |
162 | // created job
163 | await this.boss.send(queue)
164 |
165 | await contractor.rollback(currentSchemaVersion)
166 | const oneVersionAgo = await contractor.schemaVersion()
167 |
168 | assert.notStrictEqual(oneVersionAgo, currentSchemaVersion)
169 |
170 | await contractor.rollback(oneVersionAgo)
171 | const twoVersionsAgo = await contractor.schemaVersion()
172 |
173 | assert.notStrictEqual(twoVersionsAgo, oneVersionAgo)
174 |
175 | await contractor.next(twoVersionsAgo)
176 | const oneVersionAgoPart2 = await contractor.schemaVersion()
177 |
178 | assert.strictEqual(oneVersionAgo, oneVersionAgoPart2)
179 |
180 | await contractor.next(oneVersionAgo)
181 | const version = await contractor.schemaVersion()
182 |
183 | assert.strictEqual(version, currentSchemaVersion)
184 |
185 | await this.boss.send(queue)
186 | const [job2] = await this.boss.fetch(queue)
187 | await this.boss.complete(queue, job2.id)
188 | })
189 |
190 | it.skip('should migrate to latest during start if on previous 2 schema versions', async function () {
191 | const { contractor } = this
192 |
193 | await contractor.create()
194 |
195 | await contractor.rollback(currentSchemaVersion)
196 | const oneVersionAgo = await contractor.schemaVersion()
197 | assert.strictEqual(oneVersionAgo, currentSchemaVersion - 1)
198 |
199 | await contractor.rollback(oneVersionAgo)
200 | const twoVersionsAgo = await contractor.schemaVersion()
201 | assert.strictEqual(twoVersionsAgo, currentSchemaVersion - 2)
202 |
203 | const config = { ...this.bossConfig }
204 | this.boss = new PgBoss(config)
205 | await this.boss.start()
206 |
207 | const version = await contractor.schemaVersion()
208 |
209 | assert.strictEqual(version, currentSchemaVersion)
210 | })
211 |
212 | it('migrating to non-existent version fails gracefully', async function () {
213 | const { contractor } = this
214 |
215 | await contractor.create()
216 |
217 | try {
218 | await contractor.migrate('¯\\_(ツ)_//¯')
219 | } catch (error: any) {
220 | assert(error.message.includes('not found'))
221 | }
222 | })
223 |
224 | it('should roll back an error during a migration', async function () {
225 | const { contractor } = this
226 |
227 | const config = { ...this.bossConfig }
228 |
229 | config.migrations = getAll(config.schema)
230 |
231 | // add invalid sql statement
232 | config.migrations[0].install.push('wat')
233 |
234 | await contractor.create()
235 | await contractor.rollback(currentSchemaVersion)
236 | const oneVersionAgo = await contractor.schemaVersion()
237 |
238 | const boss1 = new PgBoss(config)
239 |
240 | try {
241 | await boss1.start()
242 | } catch (error: any) {
243 | assert(error.message.includes('wat'))
244 | } finally {
245 | await boss1.stop({ graceful: false })
246 | }
247 |
248 | const version1 = await contractor.schemaVersion()
249 |
250 | assert.strictEqual(version1, oneVersionAgo)
251 |
252 | // remove bad sql statement
253 | config.migrations[0].install.pop()
254 |
255 | const boss2 = new PgBoss(config)
256 |
257 | try {
258 | await boss2.start()
259 |
260 | const version2 = await contractor.schemaVersion()
261 |
262 | assert.strictEqual(version2, currentSchemaVersion)
263 | } finally {
264 | await boss2.stop({ graceful: false })
265 | }
266 | })
267 |
268 | it('should not install if migrate option is false', async function () {
269 | const config = { ...this.bossConfig, migrate: false }
270 | this.boss = new PgBoss(config)
271 | await assert.rejects(async () => {
272 | await this.boss!.start()
273 | })
274 | })
275 |
276 | it('should not migrate if migrate option is false', async function () {
277 | const { contractor } = this
278 |
279 | await contractor.create()
280 |
281 | await contractor.rollback(currentSchemaVersion)
282 |
283 | const config = { ...this.bossConfig, migrate: false }
284 | this.boss = new PgBoss(config)
285 |
286 | await assert.rejects(async () => {
287 | await this.boss!.start()
288 | })
289 | })
290 |
291 | it('should still work if migrate option is false', async function () {
292 | const { contractor } = this
293 |
294 | await contractor.create()
295 |
296 | const config = { ...this.bossConfig, migrate: false }
297 |
298 | this.boss = new PgBoss(config)
299 |
300 | await this.boss.start()
301 | await this.boss.createQueue(this.schema)
302 | await this.boss.send(this.schema)
303 | const [job] = await this.boss.fetch(this.schema)
304 | await this.boss.complete(this.schema, job.id)
305 | })
306 | })
307 |
--------------------------------------------------------------------------------
/docs/api/jobs.md:
--------------------------------------------------------------------------------
1 | # Jobs
2 |
3 | ### `send()`
4 |
5 | Creates a new job and returns the job id.
6 |
7 | > `send()` will resolve a `null` for job id under some use cases when using unique jobs or throttling (see below). These options are always opt-in on the send side and therefore don't result in a promise rejection.
8 |
9 | ### `send(name, data, options)`
10 |
11 | **Arguments**
12 |
13 | - `name`: string, *required*
14 | - `data`: object
15 | - `options`: object
16 |
17 |
18 | **General options**
19 |
20 | * **priority**, int
21 |
22 | optional priority. Higher numbers have, um, higher priority
23 |
24 | * **id**, uuid
25 |
26 | optional id. If not set, a uuid will be automatically created
27 |
28 | **Retry options**
29 |
30 | * **retryLimit**, int
31 |
32 | Default: 2. Maximum number of retry attempts before a job is failed.
33 |
34 | * **retryDelay**, int
35 |
36 | Default: 0. Delay between retries of failed jobs, in seconds.
37 |
38 | * **retryBackoff**, bool
39 |
40 | Default: false. Enables exponential backoff retries based on retryDelay instead of a fixed delay. Sets initial retryDelay to 1 if not set.
41 |
42 | * **retryDelayMax**, int
43 |
44 | Default: no limit. Maximum delay between retries of failed jobs, in seconds. Only used when retryBackoff is true.
45 |
46 | **Expiration options**
47 |
48 | * **expireInSeconds**, number
49 |
50 | Default: 15 minutes. How many seconds a job may be in active state before being retried or failed. Must be >=1
51 |
52 | **Retention options**
53 |
54 | * **retentionSeconds**, number
55 |
56 | Default: 14 days. How many seconds a job may be in created or retry state before it's deleted. Must be >=1
57 |
58 | * **deleteAfterSeconds**, int
59 |
60 | Default: 7 days. How many seconds a job should be retained in the database after it's completed, before being deleted.
61 |
62 |
63 | All retry, expiration, and retention options can also be set on the queue and will be inherited by each job unless overridden, as sketched below.
64 |
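For example, a minimal sketch (assuming a started pg-boss instance named `boss`; the option values are arbitrary) of setting these defaults at the queue level so every job sent to it inherits them:

```js
// queue-level defaults; options passed to send() still override these per job
await boss.createQueue('email-send-welcome', {
  retryLimit: 3,
  retryDelay: 5,
  retryBackoff: true,
  expireInSeconds: 60,
  retentionSeconds: 60 * 60 * 24 // 1 day
})

// this job inherits the queue's retry, expiration, and retention settings
await boss.send('email-send-welcome', { name: 'Billy Bob' })
```
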
65 | **Connection options**
66 |
67 | * **db**, object
68 |
69 | Instead of using pg-boss's default adapter, you can use your own, as long as it implements the following interface (the same as the pg module).
70 |
71 | ```ts
72 | interface Db {
73 | executeSql(text: string, values: any[]): Promise<{ rows: any[] }>;
74 | }
75 | ```
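
A minimal sketch of a custom adapter (assuming an existing `pg` `Pool` instance named `pool`; the wrapper itself is illustrative, not part of pg-boss):

```js
const db = {
  // pg-boss only needs an object with a rows array back, which pool.query() provides
  async executeSql (text, values) {
    return pool.query(text, values)
  }
}

await boss.send('email-send-welcome', { name: 'Billy Bob' }, { db })
```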
76 |
77 | **Deferred jobs**
78 |
79 | * **startAfter** int, string, or Date
80 | * int: seconds to delay starting the job
81 | * string: Start after a UTC date time string in ISO 8601 format
82 | * Date: Start after a Date object
83 |
84 | Default: 0
85 |
86 | **Throttle or debounce jobs**
87 |
88 | * **singletonSeconds**, int
89 | * **singletonNextSlot**, bool
90 | * **singletonKey** string
91 |
92 | Throttling jobs to 'one per time slot'. This option is set on the send side of the API since jobs may or may not be created based on the existence of other jobs.
93 |
94 | For example, if you set `singletonSeconds` to 60 and then submit 2 jobs within the same minute, only the first job will be accepted and resolve a job id. The second request will resolve `null` instead of a job id.
95 |
96 | Setting `singletonNextSlot` to true will cause the job to be scheduled to run after the current time slot if and when a job is throttled. This option is set to true, for example, when calling the convenience function `sendDebounced()`.
97 |
98 | As with queue policies, using `singletonKey` will extend throttling to allow one job per key within the time slot.
99 |
100 | ```js
101 | const payload = {
102 | email: "billybob@veganplumbing.com",
103 | name: "Billy Bob"
104 | };
105 |
106 | const options = {
107 | startAfter: 1,
108 | retryLimit: 2
109 | };
110 |
111 | const jobId = await boss.send('email-send-welcome', payload, options)
112 | console.log(`job ${jobId} submitted`)
113 | ```
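
To illustrate throttling specifically (a hedged sketch reusing the payload above), two sends within the same 60-second slot resolve a job id and then `null`:

```js
const first = await boss.send('email-send-welcome', payload, { singletonSeconds: 60 })
const second = await boss.send('email-send-welcome', payload, { singletonSeconds: 60 })

console.log(first)  // job id
console.log(second) // null - throttled within the same 60 second slot
```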
114 |
115 | ### `send({ name, data, options })`
116 |
117 | This overload supports sending an object with name, data, and options properties.
118 |
119 | ```js
120 | const jobId = await boss.send({
121 | name: 'database-backup',
122 | options: { retryLimit: 1 }
123 | })
124 |
125 | console.log(`job ${jobId} submitted`)
126 | ```
127 |
128 | ### `sendAfter(name, data, options, value)`
129 |
130 | Send a job that should start after a number of seconds from now, or after a specific date time.
131 |
132 | This is a convenience version of `send()` with the `startAfter` option assigned.
133 |
134 | `value`: int: seconds | string: ISO date string | Date
135 |
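A brief sketch of the three accepted `value` forms (queue name and payload are placeholders):

```js
// start 30 seconds from now
await boss.sendAfter('email-daily-digest', { user: 42 }, {}, 30)

// start at a specific time, via an ISO date string or a Date
await boss.sendAfter('email-daily-digest', { user: 42 }, {}, '2030-01-01T08:00:00Z')
await boss.sendAfter('email-daily-digest', { user: 42 }, {}, new Date(Date.now() + 60 * 1000))
```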
136 |
137 | ### `sendThrottled(name, data, options, seconds, key)`
138 |
139 | Only allows one job to be sent to the same queue within a number of seconds. The first job within the interval is accepted and resolves a job id; any additional jobs within the same interval resolve `null` instead of a job id.
140 |
141 | This is a convenience version of `send()` with the `singletonSeconds` and `singletonKey` option assigned. The `key` argument is optional.
142 |
143 | ### `sendDebounced(name, data, options, seconds, key)`
144 |
145 | Like `sendThrottled()`, but instead of dropping a job when one has already been sent in the current interval, it will try to add the job to the next interval if one hasn't already been sent.
146 |
147 | This is a convenience version of `send()` with the `singletonSeconds`, `singletonKey` and `singletonNextSlot` option assigned. The `key` argument is optional.
148 |
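A hedged sketch contrasting the two convenience functions (queue name, payload, and key are placeholders):

```js
// at most one job per 60 second interval; extra sends in the interval resolve null
await boss.sendThrottled('email-daily-digest', { user: 42 }, {}, 60)

// same throttle, but a blocked send is scheduled for the next interval if that slot is still open
await boss.sendDebounced('email-daily-digest', { user: 42 }, {}, 60)

// optional key argument: throttle per key instead of per queue
await boss.sendThrottled('email-daily-digest', { user: 42 }, {}, 60, 'user-42')
```
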
149 | ### `insert(name, Job[])`
150 |
151 | Create multiple jobs in one request with an array of objects.
152 |
153 | The contract and supported features are slightly different than `send()`, which is why this function is named independently. For example, debouncing is not supported.
154 |
155 | The following contract is a TypeScript definition of the expected object. Only `name` is required, but most other properties can be set. This will likely be enhanced later with more support for deferral and retention by an offset. For now, calculate any desired timestamps for these features before insertion.
156 |
157 | ```ts
158 | interface JobInsert<T = object> {
159 | id?: string,
160 | name: string;
161 | data?: T;
162 | priority?: number;
163 | retryLimit?: number;
164 | retryDelay?: number;
165 | retryBackoff?: boolean;
166 | startAfter?: Date | string;
167 | singletonKey?: string;
168 | expireInSeconds?: number;
169 | deleteAfterSeconds?: number;
170 | keepUntil?: Date | string;
171 | }
172 | ```
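
For example, a sketch following the `insert(name, Job[])` signature above (payloads are placeholders):

```js
await boss.insert('email-daily-digest', [
  { data: { user: 1 } },
  { data: { user: 2 }, priority: 5 },
  { data: { user: 3 }, startAfter: new Date(Date.now() + 60 * 1000) }
])
```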
173 |
174 | ### `fetch(name, options)`
175 |
176 | Returns an array of jobs from a queue
177 |
178 | **Arguments**
179 | - `name`: string
180 | - `options`: object
181 |
182 | * `batchSize`, int, *default: 1*
183 |
184 | Number of jobs to return
185 |
186 | * `priority`, bool, *default: true*
187 |
188 | If true, allow jobs with a higher priority to be fetched before jobs with lower or no priority
189 |
190 | * `includeMetadata`, bool, *default: false*
191 |
192 | If `true`, all job metadata will be returned on the job object.
193 |
194 | * `ignoreStartAfter`, bool, *default: false*
195 |
196 | If `true`, jobs with a `startAfter` timestamp in the future will be fetched. Useful for fetching jobs immediately without waiting for a retry delay.
197 |
198 | ```ts
199 | interface JobWithMetadata<T = object> {
200 | id: string;
201 | name: string;
202 | data: T;
203 | priority: number;
204 | state: 'created' | 'retry' | 'active' | 'completed' | 'cancelled' | 'failed';
205 | retryLimit: number;
206 | retryCount: number;
207 | retryDelay: number;
208 | retryBackoff: boolean;
209 | startAfter: Date;
210 | startedOn: Date;
211 | singletonKey: string | null;
212 | singletonOn: Date | null;
213 | expireInSeconds: number;
214 | deleteAfterSeconds: number;
215 | createdOn: Date;
216 | completedOn: Date | null;
217 | keepUntil: Date;
218 | deadLetter: string,
219 | policy: string,
220 | output: object
221 | }
222 | ```
223 |
224 |
225 | **Notes**
226 |
227 | The following example shows how to fetch and delete up to 20 jobs.
228 |
229 | ```js
230 | const QUEUE = 'email-daily-digest'
231 | const emailer = require('./emailer.js')
232 |
233 | const jobs = await boss.fetch(QUEUE, { batchSize: 20 })
234 |
235 | await Promise.allSettled(jobs.map(async job => {
236 | try {
237 | await emailer.send(job.data)
238 | await boss.deleteJob(QUEUE, job.id)
239 | } catch(err) {
240 | await boss.fail(QUEUE, job.id, err)
241 | }
242 | }))
243 | ```
244 |
245 | ### `deleteJob(name, id, options)`
246 |
247 | Deletes a job by id.
248 |
249 | > Job deletion is offered if desired for a "fetch then delete" workflow similar to SQS. This is not the default behavior for workers so "everything just works" by default, including job throttling and debouncing, which requires jobs to exist to enforce a unique constraint. For example, if you are debouncing a queue to "only allow 1 job per hour", deleting jobs after processing would re-open that time slot, breaking your throttling policy.
250 |
251 | ### `deleteJob(name, [ids], options)`
252 |
253 | Deletes a set of jobs by id.
254 |
255 | ### `deleteQueuedJobs(name)`
256 |
257 | Deletes all queued jobs in a queue.
258 |
259 | ### `deleteStoredJobs(name)`
260 |
261 | Deletes all jobs in completed, failed, and cancelled state in a queue.
262 |
263 | ### `deleteAllJobs(name?)`
264 |
265 | Deletes all jobs in a queue, including active jobs.
266 |
267 | If no queue name is given, jobs are deleted from all queues.
268 |
269 |
270 | ### `cancel(name, id, options)`
271 |
272 | Cancels a pending or active job.
273 |
274 | ### `cancel(name, [ids], options)`
275 |
276 | Cancels a set of pending or active jobs.
277 |
278 | When passing an array of ids, it's possible that the operation may partially succeed based on the state of individual jobs requested. Consider this a best-effort attempt.
279 |
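A brief sketch of a batch cancel (the ids are placeholders from earlier sends); only jobs still in a cancellable state are affected:

```js
await boss.cancel('email-daily-digest', [jobId1, jobId2, jobId3])
```
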
280 | ### `resume(name, id, options)`
281 |
282 | Resumes a cancelled job.
283 |
284 | ### `resume(name, [ids], options)`
285 |
286 | Resumes a set of cancelled jobs.
287 |
288 | ### `retry(name, id, options)`
289 |
290 | Retries a failed job.
291 |
292 | ### `retry(name, [ids], options)`
293 |
294 | Retries a set of failed jobs.
295 |
296 | ### `complete(name, id, data, options)`
297 |
298 | Completes an active job. This would likely only be used with `fetch()`. Accepts an optional `data` argument.
299 |
300 | The promise will resolve on a successful completion, or reject if the job could not be completed.
301 |
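A short sketch of a fetch-then-complete flow (the output object is arbitrary):

```js
const [job] = await boss.fetch('email-daily-digest')

if (job) {
  const output = { delivered: true } // stored as the job's output
  await boss.complete('email-daily-digest', job.id, output)
}
```
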
302 | ### `complete(name, [ids], options)`
303 |
304 | Completes a set of active jobs.
305 |
306 | The promise will resolve on a successful completion, or reject if not all of the requested jobs could be marked as completed.
307 |
308 | > See comments above on `cancel([ids])` regarding when the promise will resolve or reject because of a batch operation.
309 |
310 | ### `fail(name, id, data, options)`
311 |
312 | Marks an active job as failed.
313 |
314 | The promise will resolve on a successful assignment of failure, or reject if the job could not be marked as failed.
315 |
316 | ### `fail(name, [ids], options)`
317 |
318 | Fails a set of active jobs.
319 |
320 | The promise will resolve on a successful failure state assignment, or reject if not all of the requested jobs could be marked as failed.
321 |
322 | > See comments above on `cancel([ids])` regarding when the promise will resolve or reject because of a batch operation.
323 |
324 |
325 | ### `getJobById(name, id, options)`
326 |
327 | Retrieves a job with all metadata by name and id
328 |
329 | **options**
330 |
331 | * **db**, object, see notes in `send()`
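
For instance, a minimal sketch of checking on a previously sent job (assuming the send was not throttled, so it resolved an id):

```js
const jobId = await boss.send('email-send-welcome', { name: 'Billy Bob' })
const job = await boss.getJobById('email-send-welcome', jobId)

console.log(job?.state) // e.g. 'created'
```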
--------------------------------------------------------------------------------
/test/workTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import { delay } from '../src/tools.ts'
4 |
5 | describe('work', function () {
6 | it('should fail with no arguments', async function () {
7 | this.boss = await helper.start(this.bossConfig)
8 | await assert.rejects(async () => {
9 | // @ts-ignore
10 | await this.boss.work()
11 | })
12 | })
13 |
14 | it('should fail if no callback provided', async function () {
15 | this.boss = await helper.start(this.bossConfig)
16 | await assert.rejects(async () => {
17 | // @ts-ignore
18 | await this.boss.work('foo')
19 | })
20 | })
21 |
22 | it('should fail if options is not an object', async function () {
23 | this.boss = await helper.start(this.bossConfig)
24 | await assert.rejects(async () => {
25 | // @ts-ignore
26 | await this.boss.work('foo', async () => {}, 'nope')
27 | })
28 | })
29 |
30 | it('offWork should fail without a name', async function () {
31 | this.boss = await helper.start(this.bossConfig)
32 | await assert.rejects(async () => {
33 | // @ts-ignore
34 | await this.boss.offWork()
35 | })
36 | })
37 |
38 | it('should honor a custom polling interval', async function () {
39 | this.boss = await helper.start({ ...this.bossConfig, __test__enableSpies: true })
40 |
41 | const spy = this.boss.getSpy(this.schema)
42 | const pollingIntervalSeconds = 1
43 | let processCount = 0
44 | const expectedProcessCount = 5
45 |
46 | const jobIds: string[] = []
47 | for (let i = 0; i < expectedProcessCount; i++) {
48 | const jobId = await this.boss.send(this.schema)
49 | jobIds.push(jobId!)
50 | }
51 |
52 | await this.boss.work(this.schema, { pollingIntervalSeconds }, async () => {
53 | processCount++
54 | })
55 |
56 | // Wait for all jobs to complete
57 | await Promise.all(jobIds.map(id => spy.waitForJobWithId(id, 'completed')))
58 |
59 | assert.strictEqual(processCount, expectedProcessCount)
60 | })
61 |
62 | it('should provide abort signal to job handler', async function () {
63 | this.boss = await helper.start({ ...this.bossConfig, __test__enableSpies: true })
64 |
65 | const spy = this.boss.getSpy(this.schema)
66 | let receivedSignal = {}
67 |
68 | const jobId = await this.boss.send(this.schema)
69 |
70 | await this.boss.work(this.schema, async ([job]) => {
71 | receivedSignal = job.signal
72 | })
73 |
74 | await spy.waitForJobWithId(jobId!, 'completed')
75 |
76 | assert(receivedSignal instanceof AbortSignal)
77 | })
78 |
79 | it('should honor when a worker is notified', async function () {
80 | this.boss = await helper.start({ ...this.bossConfig, __test__enableSpies: true })
81 |
82 | const spy = this.boss.getSpy(this.schema)
83 | let processCount = 0
84 |
85 | const jobId1 = await this.boss.send(this.schema)
86 |
87 | const workerId = await this.boss.work(this.schema, { pollingIntervalSeconds: 5 }, async () => processCount++)
88 |
89 | await spy.waitForJobWithId(jobId1!, 'completed')
90 |
91 | assert.strictEqual(processCount, 1)
92 |
93 | const jobId2 = await this.boss.send(this.schema)
94 |
95 | this.boss.notifyWorker(workerId)
96 |
97 | await spy.waitForJobWithId(jobId2!, 'completed')
98 |
99 | assert.strictEqual(processCount, 2)
100 | })
101 |
102 | it('should remove a worker', async function () {
103 | this.boss = await helper.start(this.bossConfig)
104 |
105 | let receivedCount = 0
106 |
107 | this.boss.work(this.schema, async () => {
108 | receivedCount++
109 | await this.boss!.offWork(this.schema)
110 | })
111 |
112 | await this.boss.send(this.schema)
113 | await this.boss.send(this.schema)
114 |
115 | await delay(5000)
116 |
117 | assert.strictEqual(receivedCount, 1)
118 | })
119 |
120 | it('should remove a worker by id', async function () {
121 | this.boss = await helper.start(this.bossConfig)
122 |
123 | let receivedCount = 0
124 |
125 | await this.boss.send(this.schema)
126 | await this.boss.send(this.schema)
127 |
128 | const id = await this.boss.work(this.schema, { pollingIntervalSeconds: 0.5 }, async () => {
129 | receivedCount++
130 | await this.boss!.offWork(this.schema, { id })
131 | })
132 |
133 | await delay(2000)
134 |
135 | assert.strictEqual(receivedCount, 1)
136 | })
137 |
138 | it('should handle a batch of jobs via batchSize', async function () {
139 | this.boss = await helper.start(this.bossConfig)
140 |
141 | const batchSize = 4
142 |
143 | for (let i = 0; i < batchSize; i++) {
144 | await this.boss.send(this.schema)
145 | }
146 |
147 | return new Promise((resolve) => {
148 | this.boss!.work(this.schema, { batchSize }, async jobs => {
149 | assert.strictEqual(jobs.length, batchSize)
150 | resolve()
151 | })
152 | })
153 | })
154 |
155 | it('batchSize should auto-complete the jobs', async function () {
156 | this.boss = await helper.start({ ...this.bossConfig, __test__enableSpies: true })
157 |
158 | const spy = this.boss.getSpy(this.schema)
159 | const jobId = await this.boss.send(this.schema)
160 |
161 | await this.boss.work(this.schema, { batchSize: 1 }, async jobs => {
162 | assert.strictEqual(jobs.length, 1)
163 | })
164 |
165 | const job = await spy.waitForJobWithId(jobId!, 'completed')
166 |
167 | assert.strictEqual(job.state, 'completed')
168 | })
169 |
170 | it('returning promise applies backpressure', async function () {
171 | this.boss = await helper.start(this.bossConfig)
172 |
173 | const jobCount = 4
174 | let processCount = 0
175 |
176 | for (let i = 0; i < jobCount; i++) {
177 | await this.boss.send(this.schema)
178 | }
179 |
180 | await this.boss.work(this.schema, async () => {
181 | // delay slows down process fetch
182 | await delay(2000)
183 | processCount++
184 | })
185 |
186 | await delay(7000)
187 |
188 | assert(processCount < jobCount)
189 | })
190 |
191 | it('completion should pass string wrapped in value prop', async function () {
192 | this.boss = await helper.start({ ...this.bossConfig, __test__enableSpies: true })
193 |
194 | const spy = this.boss.getSpy(this.schema)
195 | const result = 'success'
196 |
197 | const jobId = await this.boss.send(this.schema)
198 |
199 | await this.boss.work(this.schema, async () => result)
200 |
201 | await spy.waitForJobWithId(jobId!, 'completed')
202 |
203 | const job = await this.boss.getJobById(this.schema, jobId!)
204 |
205 | assert.strictEqual(job!.state, 'completed')
206 | assert.strictEqual((job!.output as { value: string }).value, result)
207 | })
208 |
209 | it('handler result should be stored in output', async function () {
210 | this.boss = await helper.start({ ...this.bossConfig, __test__enableSpies: true })
211 | const something = 'clever'
212 |
213 | const spy = this.boss.getSpy(this.schema)
214 |
215 | const jobId = await this.boss.send(this.schema)
216 | await this.boss.work(this.schema, async () => ({ something }))
217 |
218 | const job = await spy.waitForJobWithId(jobId!, 'completed')
219 |
220 | assert.strictEqual(job.state, 'completed')
221 | assert.strictEqual((job.output as { something: string }).something, something)
222 | })
223 |
224 | it('job can be deleted in handler', async function () {
225 | this.boss = await helper.start({ ...this.bossConfig, __test__enableSpies: true })
226 |
227 | const spy = this.boss.getSpy(this.schema)
228 | const jobId = await this.boss.send(this.schema)
229 |
230 | assert(jobId)
231 |
232 | await this.boss.work(this.schema, async ([job]) => this.boss!.deleteJob(this.schema, job.id))
233 |
234 | await spy.waitForJobWithId(jobId, 'completed')
235 |
236 | const job = await this.boss.getJobById(this.schema, jobId)
237 |
238 | assert(!job)
239 | })
240 |
241 | it('should allow multiple workers to the same this.schema per instance', async function () {
242 | this.boss = await helper.start(this.bossConfig)
243 |
244 | await this.boss.work(this.schema, async () => {})
245 | await this.boss.work(this.schema, async () => {})
246 | })
247 |
248 | it('should honor the includeMetadata option', async function () {
249 | this.boss = await helper.start(this.bossConfig)
250 |
251 | await this.boss.send(this.schema)
252 |
253 | return new Promise((resolve) => {
254 | this.boss!.work(this.schema, { includeMetadata: true }, async ([job]) => {
255 | assert(job.startedOn !== undefined)
256 | resolve()
257 | })
258 | })
259 | })
260 |
261 | it('should fail job at expiration in worker', async function () {
262 | this.boss = await helper.start({ ...this.bossConfig, supervise: false })
263 |
264 | const jobId = await this.boss.send(this.schema, null, { retryLimit: 0, expireInSeconds: 1 })
265 |
266 | await this.boss.work(this.schema, () => delay(2000))
267 |
268 | await delay(2000)
269 |
270 | const job = await this.boss.getJobById(this.schema, jobId!)
271 |
272 | assert.strictEqual(job!.state, 'failed')
273 | assert((job!.output as any).message!.includes('handler execution exceeded'))
274 | })
275 |
276 | it('should fail a batch of jobs at expiration in worker', async function () {
277 | this.boss = await helper.start({ ...this.bossConfig, supervise: false })
278 |
279 | const jobId1 = await this.boss.send(this.schema, null, { retryLimit: 0, expireInSeconds: 1 })
280 | const jobId2 = await this.boss.send(this.schema, null, { retryLimit: 0, expireInSeconds: 1 })
281 |
282 | await this.boss.work(this.schema, { batchSize: 2 }, () => delay(2000))
283 |
284 | await delay(2000)
285 |
286 | const job1 = await this.boss.getJobById(this.schema, jobId1!)
287 | const job2 = await this.boss.getJobById(this.schema, jobId2!)
288 |
289 | assert.strictEqual(job1!.state, 'failed')
290 | assert((job1!.output as any).message.includes('handler execution exceeded'))
291 |
292 | assert.strictEqual(job2!.state, 'failed')
293 | assert((job2!.output as any).message.includes('handler execution exceeded'))
294 | })
295 |
296 | it('should emit wip event every 2s for workers', async function () {
297 | this.boss = await helper.start(this.bossConfig)
298 |
299 | const firstWipEvent = new Promise<Array<object>>(resolve => this.boss!.once('wip', resolve))
300 |
301 | await this.boss.send(this.schema)
302 |
303 | await this.boss.work(this.schema, { pollingIntervalSeconds: 1 }, () => delay(2000))
304 |
305 | const wip1 = await firstWipEvent
306 |
307 | await this.boss.send(this.schema)
308 |
309 | assert.strictEqual(wip1.length, 1)
310 |
311 | const secondWipEvent = new Promise<Array<object>>(resolve => this.boss!.once('wip', resolve))
312 |
313 | const wip2 = await secondWipEvent
314 |
315 | assert.strictEqual(wip2.length, 1)
316 | })
317 |
318 | it('should reject work() after stopping', async function () {
319 | this.boss = await helper.start(this.bossConfig)
320 |
321 | await this.boss.stop()
322 |
323 | await assert.rejects(async () => {
324 | await this.boss!.work(this.schema, async () => {})
325 | })
326 | })
327 |
328 | it('should allow send() after stopping', async function () {
329 | this.boss = await helper.start(this.bossConfig)
330 |
331 | this.boss.stop({ close: false })
332 |
333 | await this.boss.send(this.schema)
334 | })
335 | })
336 |
--------------------------------------------------------------------------------
/test/queueTest.ts:
--------------------------------------------------------------------------------
1 | import assert from 'node:assert'
2 | import * as helper from './testHelper.ts'
3 | import { states } from '../src/index.ts'
4 |
5 | describe('queues', function () {
6 | it('should create a queue', async function () {
7 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
8 |
9 | await this.boss.createQueue(this.schema)
10 | })
11 |
12 | it('should not add a policy property when creating a queue if it is missing', async function () {
13 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
14 |
15 | const options = {}
16 |
17 | await this.boss.createQueue(this.schema, options)
18 |
19 | assert.strictEqual(Object.keys(options).length, 0)
20 | })
21 |
22 | it('createQueue should work if queue already exists', async function () {
23 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
24 |
25 | await this.boss.createQueue(this.schema)
26 | await this.boss.createQueue(this.schema)
27 | })
28 |
29 | it('should reject a queue with invalid characters', async function () {
30 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
31 | const queue = `*${this.bossConfig.schema}`
32 | await assert.rejects(async () => {
33 | await this.boss!.createQueue(queue)
34 | })
35 | })
36 |
37 | it('should reject a queue with invalid policy', async function () {
38 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
39 | await assert.rejects(async () => {
40 | // @ts-ignore
41 | await this.boss.createQueue(this.schema, { policy: 'something' })
42 | })
43 | })
44 |
45 | it('should reject using a queue if not created', async function () {
46 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
47 | await assert.rejects(async () => {
48 | await this.boss!.send(this.schema)
49 | })
50 | })
51 |
52 | it('should create a queue with standard policy', async function () {
53 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
54 |
55 | await this.boss.createQueue(this.schema, { policy: 'standard' })
56 | })
57 |
58 | it('should delete and then create a queue', async function () {
59 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
60 |
61 | await this.boss.createQueue(this.schema)
62 | assert(await this.boss.getQueue(this.schema))
63 | await this.boss.deleteQueue(this.schema)
64 | await this.boss.createQueue(this.schema)
65 | })
66 |
67 | it('should delete an empty queue', async function () {
68 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
69 |
70 | await this.boss.createQueue(this.schema)
71 | await this.boss.send(this.schema)
72 | await this.boss.deleteAllJobs(this.schema)
73 | await this.boss.deleteQueue(this.schema)
74 | })
75 |
76 | it('should truncate a partitioned queue and leave other queues alone', async function () {
77 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
78 |
79 | const queue2 = `${this.schema}2`
80 | await this.boss.createQueue(queue2)
81 | await this.boss.send(queue2)
82 |
83 | await this.boss.createQueue(this.schema, { partition: true })
84 | await this.boss.send(this.schema)
85 |
86 | await this.boss.deleteAllJobs(this.schema)
87 | await this.boss.deleteQueue(this.schema)
88 |
89 | const { queuedCount } = await this.boss.getQueueStats(queue2)
90 | assert(queuedCount)
91 | })
92 |
93 | it('should truncate a partitioned queue', async function () {
94 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
95 |
96 | await this.boss.createQueue(this.schema, { partition: true })
97 | await this.boss.send(this.schema)
98 | await this.boss.deleteAllJobs(this.schema)
99 | await this.boss.deleteQueue(this.schema)
100 | })
101 |
102 | it('should delete all jobs from all queues, included partitioned', async function () {
103 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
104 |
105 | await this.boss.createQueue(this.schema, { partition: true })
106 | await this.boss.send(this.schema)
107 |
108 | const queue2 = `${this.schema}2`
109 | await this.boss.createQueue(queue2)
110 | await this.boss.send(queue2)
111 |
112 | await this.boss.deleteAllJobs()
113 |
114 | const { queuedCount: count1 } = await this.boss.getQueueStats(this.schema)
115 | const { queuedCount: count2 } = await this.boss.getQueueStats(queue2)
116 |
117 | assert.strictEqual(count1 + count2, 0)
118 | })
119 |
120 | it('should delete a non-empty queue', async function () {
121 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
122 |
123 | await this.boss.createQueue(this.schema)
124 | await this.boss.send(this.schema)
125 | await this.boss.deleteQueue(this.schema)
126 | })
127 |
128 | it('should delete all queued jobs from a queue', async function () {
129 | this.boss = await helper.start(this.bossConfig)
130 |
131 | const getCount = () => helper.countJobs(this.bossConfig.schema, 'job', 'state = $1', [states.created])
132 |
133 | await this.boss.send(this.schema)
134 |
135 | assert.strictEqual(await getCount(), 1)
136 |
137 | await this.boss.deleteQueuedJobs(this.schema)
138 |
139 | assert.strictEqual(await getCount(), 0)
140 | })
141 |
142 | it('should delete all stored jobs from a queue', async function () {
143 | this.boss = await helper.start(this.bossConfig)
144 |
145 | const { completed, failed, cancelled } = states
146 | const inClause = [completed, failed, cancelled].map(s => `'${s}'`)
147 | const getCount = () => helper.countJobs(this.bossConfig.schema, 'job', `state IN (${inClause})`)
148 |
149 | await this.boss.send(this.schema)
150 | const [job1] = await this.boss.fetch(this.schema)
151 | assert(job1?.id)
152 |
153 | await this.boss.complete(this.schema, job1.id)
154 |
155 | assert.strictEqual(await getCount(), 1)
156 |
157 | await this.boss.send(this.schema, null, { retryLimit: 0 })
158 | const [job2] = await this.boss.fetch(this.schema)
159 | await this.boss.fail(this.schema, job2.id)
160 |
161 | assert.strictEqual(await getCount(), 2)
162 |
163 | await this.boss.deleteStoredJobs(this.schema)
164 |
165 | assert.strictEqual(await getCount(), 0)
166 | })
167 |
168 | it('getQueue() returns null when missing', async function () {
169 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
170 | const queue = await this.boss.getQueue(this.bossConfig.schema)
171 | assert.strictEqual(queue, null)
172 | })
173 |
174 | it('getQueues() returns queues array', async function () {
175 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
176 | const queue1 = `${this.bossConfig.schema}_1`
177 | const queue2 = `${this.bossConfig.schema}_2`
178 |
179 | await this.boss.createQueue(queue1)
180 | await this.boss.createQueue(queue2)
181 |
182 | const queues = await this.boss.getQueues()
183 |
184 | assert.strictEqual(queues.length, 2)
185 |
186 | assert(queues.some(q => q.name === queue1))
187 | assert(queues.some(q => q.name === queue2))
188 | })
189 |
190 | it('should update queue properties', async function () {
191 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
192 |
193 | let deadLetter = `${this.schema}_dlq1`
194 | await this.boss.createQueue(deadLetter)
195 |
196 | const createProps = {
197 | policy: 'standard',
198 | retryLimit: 1,
199 | retryBackoff: true,
200 | retryDelayMax: 3,
201 | retryDelay: 1,
202 | expireInSeconds: 1,
203 | retentionSeconds: 1,
204 | deadLetter
205 | }
206 |
207 | await this.boss.createQueue(this.schema, createProps)
208 |
209 | let queueObj = await this.boss.getQueue(this.schema)
210 |
211 | assert(queueObj)
212 |
213 | assert.strictEqual(this.schema, queueObj.name)
214 | assert.strictEqual(createProps.policy, queueObj.policy)
215 | assert.strictEqual(createProps.retryLimit, queueObj.retryLimit)
216 | assert.strictEqual(createProps.retryBackoff, queueObj.retryBackoff)
217 | assert.strictEqual(createProps.retryDelay, queueObj.retryDelay)
218 | assert.strictEqual(createProps.retryDelayMax, queueObj.retryDelayMax)
219 | assert.strictEqual(createProps.expireInSeconds, queueObj.expireInSeconds)
220 | assert.strictEqual(createProps.retentionSeconds, queueObj.retentionSeconds)
221 | assert.strictEqual(createProps.deadLetter, queueObj.deadLetter)
222 | assert(queueObj.createdOn)
223 | assert(queueObj.updatedOn)
224 |
225 | deadLetter = `${this.schema}_dlq2`
226 | await this.boss.createQueue(deadLetter)
227 |
228 | const updateProps = {
229 | retryDelay: 2,
230 | retryLimit: 2,
231 | retryBackoff: false,
232 | expireInSeconds: 2,
233 | deadLetter
234 | }
235 |
236 | await this.boss.updateQueue(this.schema, updateProps)
237 |
238 | queueObj = await this.boss.getQueue(this.schema)
239 |
240 | assert.strictEqual(updateProps.retryLimit, queueObj!.retryLimit)
241 | assert.strictEqual(updateProps.retryBackoff, queueObj!.retryBackoff)
242 | assert.strictEqual(updateProps.retryDelay, queueObj!.retryDelay)
243 | assert.strictEqual(updateProps.expireInSeconds, queueObj!.expireInSeconds)
244 | assert.strictEqual(updateProps.deadLetter, queueObj!.deadLetter)
245 | })
246 |
247 | it('should fail to change queue policy', async function () {
248 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
249 |
250 | await this.boss.createQueue(this.schema, { policy: 'standard' })
251 |
252 | await assert.rejects(async () => {
253 | // @ts-ignore
254 | await this.boss.updateQueue(this.schema, { policy: 'exclusive' })
255 | })
256 | })
257 |
258 | it('should fail to change queue partitioning', async function () {
259 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
260 | await this.boss.createQueue(this.schema, { partition: true })
261 |
262 | await assert.rejects(async () => {
263 | // @ts-ignore
264 | await this.boss.updateQueue(this.schema, { partition: false })
265 | })
266 | })
267 |
268 | it('jobs should inherit properties from queue', async function () {
269 | this.boss = await helper.start({ ...this.bossConfig, noDefault: true })
270 |
271 | const deadLetter = `${this.schema}_dlq`
272 | await this.boss.createQueue(deadLetter)
273 |
274 | const createProps = {
275 | retryLimit: 1,
276 | retryBackoff: true,
277 | retryDelay: 2,
278 | retryDelayMax: 3,
279 | expireInSeconds: 4,
280 | retentionSeconds: 4,
281 | deadLetter
282 | }
283 |
284 | await this.boss.createQueue(this.schema, createProps)
285 |
286 | const jobId = await this.boss.send(this.schema)
287 |
288 | const job = await this.boss.getJobById(this.schema, jobId!)
289 |
290 | assert(job)
291 |
292 | const retentionSeconds = (new Date(job.keepUntil).getTime() - new Date(job.createdOn).getTime()) / 1000
293 |
294 | assert.strictEqual(job.retryLimit, createProps.retryLimit)
295 | assert.strictEqual(job.retryBackoff, createProps.retryBackoff)
296 | assert.strictEqual(job.retryDelay, createProps.retryDelay)
297 | assert.strictEqual(job.retryDelayMax, createProps.retryDelayMax)
298 | assert.strictEqual(job.deadLetter, createProps.deadLetter)
299 | assert.strictEqual(job.expireInSeconds, createProps.expireInSeconds)
300 | assert.strictEqual(retentionSeconds, createProps.retentionSeconds)
301 | })
302 | })
303 |
--------------------------------------------------------------------------------