├── .gitignore ├── .eslintignore ├── .babelrc ├── interfaces ├── modules │ ├── uuid.js.flow │ ├── cls-bluebird.js.flow │ ├── ratelimiter.js.flow │ ├── redis.js.flow │ ├── monitor-dog.js.flow │ ├── continuation-local-storage.js.flow │ ├── error-cat.js.flow │ ├── joi.js.flow │ ├── 101.js.flow │ ├── amqplib.js.flow │ ├── bunyan.js.flow │ ├── bluebird.js.flow │ └── immutable.js.flow └── ponos │ └── worker.js.flow ├── .istanbul.yml ├── src ├── index.js ├── logger.js ├── rate-limiters │ └── redis.js ├── worker.js ├── server.js └── rabbitmq.js ├── .npmignore ├── .flowconfig ├── .eslintrc ├── resources ├── generate-docs.sh └── mocha-bootstrap.js ├── .codeclimate.yml ├── test ├── unit │ ├── logger.js │ ├── rate-limiters │ │ └── redis.js │ └── worker.js ├── functional │ ├── fixtures │ │ ├── worker.js │ │ ├── worker-two.js │ │ ├── worker-one.js │ │ ├── timeout-worker.js │ │ └── worker-tid.js │ ├── tid.js │ ├── rate-limiting.js │ ├── retry-limit.js │ ├── basic.js │ ├── failing.js │ └── timeout.js └── integration │ └── rate-limiters │ └── redis.js ├── LICENSE ├── examples ├── topic-exchanges-on-the-fly.js └── basic-worker.js ├── .travis.yml ├── package.json ├── docs └── Guides-Migration-v3.0.0.md ├── CHANGELOG.md └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | coverage 3 | node_modules 4 | npm-debug.log 5 | lib 6 | out 7 | -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | **/*{.,-}min.js 2 | coverage 3 | docs 4 | interfaces 5 | lib 6 | node_modules 7 | -------------------------------------------------------------------------------- /.babelrc: -------------------------------------------------------------------------------- 1 | { 2 | "plugins": [ 3 | "transform-class-properties", 4 | "transform-flow-strip-types" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /interfaces/modules/uuid.js.flow: -------------------------------------------------------------------------------- 1 | declare module 'uuid' { 2 | declare var exports: { 3 | (): string; 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /.istanbul.yml: -------------------------------------------------------------------------------- 1 | check: 2 | global: 3 | statements: 100 4 | lines: 100 5 | branches: 100 6 | functions: 100 7 | -------------------------------------------------------------------------------- /src/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const Server = require('./server') 4 | 5 | module.exports = { 6 | Server: Server 7 | } 8 | -------------------------------------------------------------------------------- /interfaces/ponos/worker.js.flow: -------------------------------------------------------------------------------- 1 | declare class WorkerError extends Error { 2 | data: ?Object 3 | } 4 | 5 | export { WorkerError } 6 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | .babelrc 2 | .flowconfig 3 | .gitignore 4 | .istanbul.yml 5 | .travis.yml 6 | coverage 7 | interfaces 8 | resources 9 | src 10 | test 11 | -------------------------------------------------------------------------------- /interfaces/modules/cls-bluebird.js.flow: 
--------------------------------------------------------------------------------
1 | declare module '@runnable/cls-bluebird' {
2 |   declare var exports: {
3 |     (cls: CLS): void;
4 |   }
5 | }
6 | 
--------------------------------------------------------------------------------
/.flowconfig:
--------------------------------------------------------------------------------
1 | [ignore]
2 | .*/node_modules/.*
3 | 
4 | [include]
5 | 
6 | [libs]
7 | interfaces/
8 | 
9 | [options]
10 | suppress_comment= \\(.\\|\n\\)*\\$FlowIgnore
11 | 
--------------------------------------------------------------------------------
/.eslintrc:
--------------------------------------------------------------------------------
1 | parser: babel-eslint
2 | 
3 | ecmaFeatures:
4 |   modules: true
5 | 
6 | env:
7 |   browser: true
8 |   es6: true
9 |   mocha: true
10 |   node: true
11 | 
12 | extends: standard
13 | 
--------------------------------------------------------------------------------
/interfaces/modules/ratelimiter.js.flow:
--------------------------------------------------------------------------------
1 | declare class RateLimiter {
2 |   get(callback: Function): void;
3 | }
4 | 
5 | declare module 'ratelimiter' {
6 |   declare var exports: typeof RateLimiter;
7 | }
8 | 
--------------------------------------------------------------------------------
/resources/generate-docs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | 
3 | rm -rf out
4 | npm run docs
5 | cd out
6 | git init .
7 | git remote add origin git@github.com:Runnable/ponos.git
8 | git checkout -b gh-pages
9 | git add .
10 | git commit -m "update docs"
11 | git push -f origin gh-pages
12 | cd ..
13 | rm -rf out
14 | 
--------------------------------------------------------------------------------
/interfaces/modules/redis.js.flow:
--------------------------------------------------------------------------------
1 | declare class Redis {
2 |   static createClient(port: string, host: string): RedisClient;
3 | }
4 | 
5 | declare class RedisClient {
6 |   on(event: string, callback: Function): void;
7 | }
8 | 
9 | declare module 'redis' {
10 |   declare var exports: typeof Redis;
11 | }
12 | 
--------------------------------------------------------------------------------
/resources/mocha-bootstrap.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | 
3 | const chai = require('chai')
4 | chai.use(require('chai-as-promised'))
5 | 
6 | require('sinon-as-promised')(require('bluebird'))
7 | 
8 | /* istanbul ignore next */
9 | process.on('unhandledRejection', (error) => {
10 |   console.error('Unhandled Promise Rejection:')
11 |   console.error(error && error.stack || error)
12 |   process.exit(2)
13 | })
14 | 
--------------------------------------------------------------------------------
/interfaces/modules/monitor-dog.js.flow:
--------------------------------------------------------------------------------
1 | declare class DDTimer {
2 |   stop(): void;
3 | }
4 | 
5 | declare class Monitor {
6 |   static constructor(): Monitor;
7 |   static increment(monitor: string, tags?: Array<string> | Object): void;
8 |   static timer(monitor: string, something: boolean, tags?: Array<string> | Object): DDTimer;
9 | }
10 | 
11 | declare module 'monitor-dog' {
12 |   declare var exports: Class<Monitor>;
13 | }
14 | 
--------------------------------------------------------------------------------
/interfaces/modules/continuation-local-storage.js.flow:
--------------------------------------------------------------------------------
1 | declare class CLS {
2 |   run(fn: Function): void;
3 |   set(key: string, value: any): void;
4 |   get(key: string): any;
5 | }
6 | 
7 | declare class CLSClass {
8 |   static createNamespace(name: string): CLS;
9 |   static getNamespace(name: string): ?CLS;
10 | }
11 | 
12 | declare module 'continuation-local-storage' {
13 |   declare var exports: Class<CLSClass>
14 | }
15 | 
16 | export {
17 |   CLS
18 | }
19 | 
--------------------------------------------------------------------------------
/interfaces/modules/error-cat.js.flow:
--------------------------------------------------------------------------------
1 | declare class ErrorCat {
2 |   report(error: Error): void;
3 | }
4 | 
5 | declare module 'error-cat' {
6 |   declare var exports: typeof ErrorCat;
7 | }
8 | 
9 | declare class WorkerStopError extends Error {
10 |   static constructor(message: string, data?: Object, reporting?: Object, queue?: string, job?: Object): WorkerStopError;
11 | }
12 | 
13 | declare module 'error-cat/errors/worker-stop-error' {
14 |   declare var exports: Class<WorkerStopError>
15 | }
16 | 
--------------------------------------------------------------------------------
/interfaces/modules/joi.js.flow:
--------------------------------------------------------------------------------
1 | declare class Joi {
2 |   static alternatives<T>(): T;
3 |   static array<T>(): T;
4 |   static assert<T>(opts: Object, schema: T): T;
5 |   static bool<T>(): T;
6 |   static func<T>(): T;
7 |   static number<T>(): T;
8 |   static object<T>(): T;
9 |   static required<T>(): T;
10 |   static string<T>(): T;
11 |   static validate<T>(opts: Object, schema: T, opts: Object): JoiValidate;
12 | }
13 | 
14 | declare class JoiValidate {
15 |   value: Object
16 | }
17 | 
18 | declare module 'joi' {
19 |   declare var exports: typeof Joi;
20 | }
21 | 
--------------------------------------------------------------------------------
/.codeclimate.yml:
--------------------------------------------------------------------------------
1 | ---
2 | engines:
3 |   duplication:
4 |     enabled: true
5 |     config:
6 |       languages:
7 |       - ruby
8 |       - javascript
9 |       - python
10 |       - php
11 |   eslint:
12 |     enabled: true
13 |     channel: eslint-2
14 |   fixme:
15 |     enabled: true
16 | ratings:
17 |   paths:
18 |   - "**.inc"
19 |   - "**.js"
20 |   - "**.jsx"
21 |   - "**.module"
22 |   - "**.php"
23 |   - "**.py"
24 |   - "**.rb"
25 | exclude_paths:
26 | - coverage/
27 | - examples/
28 | - interfaces/
29 | - lib/
30 | - node_modules/
31 | - out/
32 | - resources/
33 | - test/
34 | 
--------------------------------------------------------------------------------
/test/unit/logger.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | 
3 | const chai = require('chai')
4 | 
5 | const log = require('../../src/logger')
6 | 
7 | const assert = chai.assert
8 | 
9 | describe('logger', () => {
10 |   it('should return one stream by default', () => {
11 |     const streams = log._getStreams()
12 |     assert.lengthOf(streams, 1)
13 |     assert.equal(streams[0].stream, process.stdout)
14 |   })
15 | 
16 |   describe('when LOG_LEVEL is not defined', () => {
17 |     let prevLevel
18 | 
19 |     beforeEach(() => {
20 |       prevLevel = process.env.LOG_LEVEL
21 |       process.env.LOG_LEVEL = undefined
22 |     })
23 | 
24 |     afterEach(() => {
25 |       process.env.LOG_LEVEL = prevLevel
26 |     })
27 | 
28 |     it('should default info level', () => {
29 |       const streams = log._getStreams()
30 |       assert.equal(streams[0].level, 'info')
31 |     })
32 |   })
33 | })
34 | 
--------------------------------------------------------------------------------
/src/logger.js:
--------------------------------------------------------------------------------
1 | /* @flow */
2 | /* global Stream */
3 | 'use strict'
4 | 
5 | const bunyan = require('bunyan')
6 | 
7 | /**
8 |  * Bunyan logger for ponos.
9 |  *
10 |  * @private
11 |  * @author Bryan Kendall
12 |  * @module ponos/lib/logger
13 |  */
14 | module.exports = bunyan.createLogger({
15 |   name: 'ponos',
16 |   streams: _getStreams(),
17 |   serializers: bunyan.stdSerializers
18 | })
19 | 
20 | // Expose get streams for unit testing
21 | module.exports._getStreams = _getStreams
22 | 
23 | /**
24 |  * Streams for ponos's bunyan logger.
25 |  * @private
26 |  * @return {array} An array of streams for the bunyan logger
27 |  */
28 | function _getStreams (): Array<Stream> {
29 |   const logLevel = process.env.LOG_LEVEL
30 |   const level = parseInt(logLevel, 10) || logLevel || 'info'
31 |   return [
32 |     {
33 |       level,
34 |       stream: process.stdout
35 |     }
36 |   ]
37 | }
38 | 
--------------------------------------------------------------------------------
/test/functional/fixtures/worker.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | 
3 | const EventEmitter = require('events')
4 | const Promise = require('bluebird')
5 | const WorkerStopError = require('error-cat/errors/worker-stop-error')
6 | 
7 | /**
8 |  * A simple worker that will publish a message to a queue.
9 |  * @param {object} job Object describing the job.
10 |  * @param {string} job.queue Queue on which the message will be published.
11 |  * @returns {promise} Resolved when the message is put on the queue.
12 |  */
13 | module.exports = (job, jobMeta) => {
14 |   return Promise.resolve()
15 |     .then(() => {
16 |       if (!job.eventName) {
17 |         throw new WorkerStopError('eventName is required')
18 |       }
19 |       if (!job.message) {
20 |         throw new WorkerStopError('fail test message is required')
21 |       }
22 |     })
23 |     .then(() => {
24 |       module.exports.emitter.emit(job.eventName, job, jobMeta)
25 |     })
26 | }
27 | module.exports.emitter = new EventEmitter()
28 | 
--------------------------------------------------------------------------------
/test/functional/fixtures/worker-two.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | 
3 | const EventEmitter = require('events')
4 | const Promise = require('bluebird')
5 | const WorkerStopError = require('error-cat/errors/worker-stop-error')
6 | 
7 | /**
8 |  * A simple worker that will publish a message to a queue.
9 |  * @param {object} job Object describing the job.
10 |  * @param {string} job.queue Queue on which the message will be published.
11 |  * @returns {promise} Resolved when the message is put on the queue.
12 | */ 13 | module.exports = (job, jobMeta) => { 14 | return Promise.resolve() 15 | .then(() => { 16 | if (!job.eventName) { 17 | throw new WorkerStopError('eventName is required') 18 | } 19 | if (!job.message) { 20 | throw new WorkerStopError('fail test message is required') 21 | } 22 | }) 23 | .then(() => { 24 | module.exports.emitter.emit(job.eventName, job, jobMeta) 25 | }) 26 | } 27 | module.exports.emitter = new EventEmitter() 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Runnable 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /test/functional/fixtures/worker-one.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const EventEmitter = require('events') 4 | const Promise = require('bluebird') 5 | const WorkerStopError = require('error-cat/errors/worker-stop-error') 6 | 7 | let rabbitmq 8 | /** 9 | * A simple worker that will publish a message to a queue. 10 | * @param {object} job Object describing the job. 11 | * @param {string} job.queue Queue on which the message will be published. 12 | * @returns {promise} Resolved when the message is put on the queue. 
13 | */ 14 | module.exports = (job, jobMeta) => { 15 | return Promise.resolve() 16 | .then(() => { 17 | if (!job.eventName) { 18 | throw new WorkerStopError('eventName is required') 19 | } 20 | if (!job.message) { 21 | throw new WorkerStopError('fail test message is required') 22 | } 23 | }) 24 | .then(() => { 25 | const job = { 26 | eventName: 'task', 27 | message: 'hello world2' 28 | } 29 | rabbitmq.publishTask('ponos-test:two', job) 30 | module.exports.emitter.emit(job.eventName, job, jobMeta) 31 | }) 32 | } 33 | 34 | module.exports.setPublisher = (publisher) => { 35 | rabbitmq = publisher 36 | } 37 | module.exports.emitter = new EventEmitter() 38 | -------------------------------------------------------------------------------- /test/functional/tid.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const testWorker = require('./fixtures/worker-tid') 4 | const testWorkerEmitter = testWorker.emitter 5 | 6 | // Ponos Tooling 7 | const ponos = require('../../src') 8 | const RabbitMQ = require('../../src/rabbitmq') 9 | 10 | describe('Basic Example', () => { 11 | let server 12 | let rabbitmq 13 | 14 | before(() => { 15 | const tasks = { 16 | 'ponos-test:one': testWorker 17 | } 18 | rabbitmq = new RabbitMQ({ 19 | tasks: Object.keys(tasks) 20 | }) 21 | server = new ponos.Server({ tasks: tasks }) 22 | return rabbitmq.connect() 23 | .then(() => { 24 | return server.start() 25 | }) 26 | }) 27 | 28 | after(() => { 29 | return server.stop() 30 | .then(() => { 31 | return rabbitmq.disconnect() 32 | }) 33 | }) 34 | 35 | it('should queue a task that triggers an event', (done) => { 36 | const job = { 37 | eventName: 'task', 38 | message: 'hello world' 39 | } 40 | testWorkerEmitter.on('failed', function (err) { 41 | done(new Error(err.message)) 42 | }) 43 | testWorkerEmitter.on('passed', function () { 44 | done() 45 | }) 46 | rabbitmq.publishTask('ponos-test:one', job) 47 | }) 48 | }) 49 | -------------------------------------------------------------------------------- /examples/topic-exchanges-on-the-fly.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const getNamespace = require('continuation-local-storage').getNamespace 4 | const Promise = require('bluebird') 5 | 6 | const Ponos = require('../') 7 | 8 | function workerA (job) { 9 | return Promise.try(() => { 10 | const tid = getNamespace('ponos').get('tid') 11 | console.log('workerA got a job:', job, 'tid:', tid) 12 | }) 13 | } 14 | 15 | function workerC (job) { 16 | return Promise.try(() => { 17 | const tid = getNamespace('ponos').get('tid') 18 | console.log('workerC got a job:', job, 'tid:', tid) 19 | }) 20 | } 21 | 22 | function workerB (job, ponos) { 23 | return Promise.try(() => { 24 | const tid = getNamespace('ponos').get('tid') 25 | console.log('workerB got a job:', job, 'tid:', tid) 26 | return ponos.subscribe({ 27 | exchange: 'hello-world', 28 | routingKey: '#.new', 29 | handler: workerC 30 | }) 31 | .then(() => { 32 | return ponos.consume() 33 | }) 34 | }) 35 | } 36 | 37 | const server = new Ponos.Server({ 38 | exchanges: [{ 39 | exchange: 'hello-world', 40 | routingKey: '#.foo', 41 | handler: workerA 42 | }, { 43 | exchange: 'hello-world', 44 | routingKey: '#.bar', 45 | handler: workerB 46 | }] 47 | }) 48 | 49 | server.start() 50 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: 
node_js 2 | node_js: 3 | - '4' 4 | - '6' 5 | cache: 6 | directories: 7 | - node_modules 8 | sudo: false 9 | services: 10 | - rabbitmq 11 | - redis 12 | env: 13 | global: 14 | - LOG_LEVEL=error 15 | - RABBITMQ_USERNAME=guest 16 | - RABBITMQ_PASSWORD=guest 17 | - REDIS_HOST=localhost 18 | - REDIS_PORT=6379 19 | script: 20 | - npm run lint 21 | - npm run coverage 22 | - npm run functional 23 | - npm run integration 24 | - npm run build && npm run build:clean 25 | after_success: 26 | - npm run coveralls 27 | - npm run codeclimate 28 | notifications: 29 | email: false 30 | deploy: 31 | provider: npm 32 | email: bryan@bryankendall.com 33 | api_key: 34 | secure: nRD93Gz7r6FAsU5l4OC6LijvJS1f1NqrBGpAnO/vuy8+/gRNvVwMvRfIcvqEWkLZOGqfk674PAnK+D4MidVs+Ky9GJW0Lv2ZyW1JbJW9HU16l4gYwqW5uQdmV8ocguXbs2LshRI2E13ro70wFyOLl+W4Y78jJS3LSfioIY+wnIuOzLCXwCimX2YslHQu9OxiuZmdkbBplQ87t82rC42NOXR+6vRnG26pq4TkANtpu1TViaQA3Dw28rEcjff6tBwKpGQJfsWrt9LtBNHT6Fhz2WnSKNGyGI8COz9SoqzOc/M1lW5+F/+3N2+fN8/LPJvlj1OkjTiIEHI4U9sPjcVdtVXXWuzsHLBUS97orfpwPjD7lio3kPQ9t3+Jpj8b9JwvLa3Qy/ap2xvIKa7FZgo4gVvmgcxoSBTQJzWnuLUD+xazkbJhGL0HKgifFpFy9MKBivqKRf1aT9d0ocrWqY/jNXxUfE3R6d/ct+ovVGvKa+VocKxEQvffHUJRP5v2Ei+3pFZE3SO235jArUVAhVgFk6loczpCofUaL8usUuQOLm1DhwqwFv7Pz0x3RoqCJOP7lLtSmmpo5riVXsq6edgN7Ph5GyvOSro8TvLQ/nVV1O3gIfuwIFNk2jxzzhNCC9pwmYGrThK3X54/i0+A6ad8TZKk2Tf82R3A75Vyt2G334M= 35 | on: 36 | tags: true 37 | node: '4' 38 | -------------------------------------------------------------------------------- /interfaces/modules/101.js.flow: -------------------------------------------------------------------------------- 1 | declare module '101/assign' { 2 | declare var exports: { 3 | (value: Object, values: Object): Object; 4 | } 5 | } 6 | 7 | declare module '101/clone' { 8 | declare var exports: { 9 | (value: Object): Object; 10 | } 11 | } 12 | 13 | declare module '101/defaults' { 14 | declare var exports: { 15 | (value: ?Object, defaults: Object): Object; 16 | } 17 | } 18 | 19 | declare module '101/exists' { 20 | declare var exports: { 21 | (value: any): boolean; 22 | } 23 | } 24 | 25 | declare module '101/is-function' { 26 | declare var exports: { 27 | (value: any): boolean; 28 | } 29 | } 30 | 31 | declare module '101/is-number' { 32 | declare var exports: { 33 | (value: any): boolean; 34 | } 35 | } 36 | 37 | declare module '101/is-object' { 38 | declare var exports: { 39 | (value: any): boolean; 40 | } 41 | } 42 | 43 | declare module '101/is-string' { 44 | declare var exports: { 45 | (value: any): boolean; 46 | } 47 | } 48 | 49 | declare module '101/pick' { 50 | declare var exports: { 51 | (value: Object, keypaths: string | Array): Object; 52 | } 53 | } 54 | 55 | declare module '101/put' { 56 | declare var exports: { 57 | (value: Object, values: Object): Object; 58 | } 59 | } 60 | 61 | declare module '101/has-keypaths' { 62 | declare var exports: { 63 | (value: Object, values: Object): Object; 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /examples/basic-worker.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const getNamespace = require('continuation-local-storage').getNamespace 4 | const Promise = require('bluebird') 5 | const WorkerStopError = require('error-cat/errors/worker-stop-error') 6 | 7 | const Ponos = require('../') 8 | 9 | /** 10 | * A simple worker that will publish a message to a queue. 11 | * @param {object} job Object describing the job. 
12 | * @param {string} job.queue Queue on which the message will be published. 13 | * @returns {promise} Resolved when the message is put on the queue. 14 | */ 15 | function basicWorker (job) { 16 | return Promise.try(() => { 17 | const tid = getNamespace('ponos').get('tid') 18 | if (!job.message) { 19 | throw new WorkerStopError('message is required', { tid: tid }) 20 | } 21 | console.log(`hello world: ${job.message}. tid: ${tid}`) 22 | }) 23 | } 24 | 25 | const server = new Ponos.Server({ 26 | tasks: { 27 | 'basic-queue-worker': basicWorker 28 | }, 29 | events: { 30 | 'basic-event-worker': basicWorker 31 | } 32 | }) 33 | 34 | server.start() 35 | .then(() => { console.log('server started') }) 36 | .catch((err) => { console.error('server error:', err.stack || err.message || err) }) 37 | 38 | process.on('SIGINT', () => { 39 | server.stop() 40 | .then(() => { console.log('server stopped') }) 41 | .catch((err) => { console.error('server error:', err.stack || err.message || err) }) 42 | }) 43 | -------------------------------------------------------------------------------- /test/functional/fixtures/timeout-worker.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const EventEmitter = require('events') 4 | const Promise = require('bluebird') 5 | const WorkerStopError = require('error-cat/errors/worker-stop-error') 6 | 7 | /** 8 | * A worker that will publish a message to an event emitter. This worker also 9 | * will take a long time to do so (causing timeouts). It has a _timeout value 10 | * below that is halved every run to provide a hastening task. 11 | * @param {object} job Object describing the job. 12 | * @param {string} job.queue Queue on which the message will be published. 13 | * @returns {promise} Resolved when the message is put on the queue. 
14 | */ 15 | module.exports = (job) => { 16 | return Promise.resolve() 17 | .then(() => { 18 | if (!job.eventName) { 19 | throw new WorkerStopError('eventName is required') 20 | } 21 | if (!job.message) { 22 | throw new WorkerStopError('message is required') 23 | } 24 | }) 25 | .then(() => { 26 | const timeout = module.exports._timeout 27 | // every time this worker is run, it will halve it's delay (run) time 28 | module.exports._timeout = module.exports._timeout / 2 29 | return Promise.resolve() 30 | .delay(timeout) 31 | .then(() => { 32 | module.exports.emitter.emit(job.eventName, { data: job.message }) 33 | return true 34 | }) 35 | }) 36 | } 37 | 38 | // this _timeout value is the starting value for how long this worker will take 39 | module.exports._timeout = 3000 40 | module.exports.emitter = new EventEmitter() 41 | -------------------------------------------------------------------------------- /test/functional/rate-limiting.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | const chai = require('chai') 3 | const Promise = require('bluebird') 4 | 5 | const ponos = require('../../src') 6 | const RabbitMQ = require('../../src/rabbitmq') 7 | 8 | const assert = chai.assert 9 | 10 | describe('Basic Example', () => { 11 | let server 12 | let rabbitmq 13 | let count = 0 14 | const testQueue = 'ponos-test:rate-limit' 15 | before(() => { 16 | const tasks = {} 17 | tasks[testQueue] = { 18 | task: () => { 19 | return Promise.try(() => { 20 | count++ 21 | }) 22 | }, 23 | durationMs: 2000, 24 | maxOperations: 10 25 | } 26 | rabbitmq = new RabbitMQ({ 27 | tasks: Object.keys(tasks) 28 | }) 29 | server = new ponos.Server({ 30 | tasks: tasks, 31 | redisRateLimiter: {} 32 | }) 33 | return rabbitmq.connect() 34 | .then(() => { 35 | return server.start() 36 | }) 37 | }) 38 | 39 | after(() => { 40 | return server.stop() 41 | .then(() => { 42 | return rabbitmq.disconnect() 43 | }) 44 | }) 45 | 46 | it('should queue a task that triggers an event', () => { 47 | const job = { 48 | eventName: 'task', 49 | message: 'hello world' 50 | } 51 | 52 | return Promise.try(() => { 53 | for (var i = 0; i < 20; i++) { 54 | rabbitmq.publishTask(testQueue, job) 55 | } 56 | }) 57 | .delay(1000) 58 | .then(() => { 59 | assert.equal(10, count) 60 | }) 61 | .delay(2000) 62 | .then(() => { 63 | assert.equal(20, count) 64 | }) 65 | }) 66 | }) 67 | 68 | // set rate 69 | // set timeout for half, ensure half ran 70 | // set timeout for all, ensure all ran 71 | -------------------------------------------------------------------------------- /interfaces/modules/amqplib.js.flow: -------------------------------------------------------------------------------- 1 | type ExchangeType = 'direct' | 'fanout' | 'topic'; 2 | 3 | declare class RabbitMQConnection { 4 | on(event: string, handler: Function): void; 5 | close(): Bluebird$Promise; 6 | createChannel(): RabbitMQChannel; 7 | createConfirmChannel(): RabbitMQConfirmChannel; 8 | } 9 | 10 | declare class RabbitMQChannel { 11 | ack(message: Object): void; 12 | assertExchange(exchange: string, type: string, opts: ?Object): Bluebird$Promise; 13 | assertQueue(queue: string, opts: ?Object): Bluebird$Promise; 14 | bindQueue(queue: string, exchange: string, routingKey: string): Bluebird$Promise; 15 | cancel(consumerTag: string): Bluebird$Promise; 16 | consume(queue: string, handler: Function): void; 17 | on(event: string, handler: Function): void; 18 | prefetch(count: Number, global?: Boolean): Bluebird$Promise; 19 | publish(exchange: string, 
routingKey: string, content: Buffer, opts: ?Object): Bluebird$Promise; 20 | sendToQueue(queue: string, content: Buffer, opts: ?Object): Bluebird$Promise; 21 | } 22 | 23 | declare class RabbitMQConfirmChannel extends RabbitMQChannel { 24 | waitForConfirms(): Bluebird$Promise; 25 | } 26 | 27 | type QueueObject = { 28 | queue: string 29 | } 30 | 31 | declare class RabbitMQ { 32 | static connect(url: string, opts: ?Object): RabbitMQConnection; 33 | } 34 | 35 | declare module 'amqplib' { 36 | declare var exports: typeof RabbitMQ; 37 | } 38 | 39 | type SubscribeObject = { 40 | exchange: string, 41 | exchangeOptions?: Object, 42 | handler: Function, 43 | queueOptions?: Object, 44 | routingKey?: string, 45 | type: ExchangeType 46 | } 47 | 48 | type RabbitMQOptions = { 49 | queueOptions?: Object, 50 | exchangeOptions?: Object 51 | } 52 | -------------------------------------------------------------------------------- /test/functional/retry-limit.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | const sinon = require('sinon') 3 | const Promise = require('bluebird') 4 | 5 | // Ponos Tooling 6 | const ponos = require('../../src') 7 | const RabbitMQ = require('../../src/rabbitmq') 8 | 9 | /* 10 | * In this example, we are going to have a job handler that times out at 11 | * decreasing intervals, throwing TimeoutErrors, until it passes. 12 | */ 13 | describe('Retry limit task', function () { 14 | let server 15 | let rabbitmq 16 | const testRecoverStub = sinon.stub().resolves() 17 | const taskStub = sinon.stub().rejects(new Error('death to all')) 18 | before(() => { 19 | const tasks = { 20 | 'ponos-test:one': { 21 | task: taskStub, 22 | finalRetryFn: testRecoverStub, 23 | maxNumRetries: 3 24 | } 25 | } 26 | rabbitmq = new RabbitMQ({ 27 | tasks: Object.keys(tasks) 28 | }) 29 | server = new ponos.Server({ tasks: tasks }) 30 | return rabbitmq.connect() 31 | .then(() => { 32 | return server.start() 33 | }) 34 | }) 35 | 36 | after(() => { 37 | return server.stop() 38 | .then(() => { 39 | return rabbitmq.disconnect() 40 | }) 41 | }) 42 | 43 | const job = { 44 | message: 'hello world', 45 | tid: 'test-tid' 46 | } 47 | 48 | describe('with maxNumRetries', function () { 49 | it('should fail 3 times and run recovery function', () => { 50 | rabbitmq.publishTask('ponos-test:one', job) 51 | 52 | return Promise.try(function loop () { 53 | if (taskStub.callCount !== 3) { 54 | return Promise.delay(3).then(loop) 55 | } 56 | }) 57 | .then(() => { 58 | sinon.assert.calledOnce(testRecoverStub) 59 | sinon.assert.calledWith(testRecoverStub, job) 60 | sinon.assert.callCount(taskStub, 3) 61 | sinon.assert.alwaysCalledWithExactly(taskStub, job, sinon.match.object) 62 | }) 63 | }) 64 | }) 65 | }) 66 | -------------------------------------------------------------------------------- /test/integration/rate-limiters/redis.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | const chai = require('chai') 3 | const Promise = require('bluebird') 4 | const redis = require('redis') 5 | const sinon = require('sinon') 6 | 7 | const logger = require('../../../src/logger') 8 | const RedisRateLimiter = require('../../../src/rate-limiters/redis') 9 | 10 | const assert = chai.assert 11 | 12 | describe('rate-limiters/redis integration test', () => { 13 | let redisRateLimiter 14 | const testClient = redis.createClient('6379', 'localhost') 15 | 16 | beforeEach((done) => { 17 | redisRateLimiter = new RedisRateLimiter({ 18 | log: logger.child({ 
module: 'ponos:test' }) 19 | }) 20 | sinon.spy(Promise, 'delay') 21 | redisRateLimiter.connect() 22 | .then(() => { 23 | testClient.flushall(done) 24 | }) 25 | }) 26 | 27 | afterEach(() => { 28 | Promise.delay.restore() 29 | }) 30 | 31 | it('should immediately resolve', () => { 32 | return assert.isFulfilled(redisRateLimiter.limit('test', {})) 33 | }) 34 | 35 | it('should only call 1 in rate limit period', () => { 36 | let count = 0 37 | const tasks = [] 38 | for (var i = 0; i < 5; i++) { 39 | tasks.push(redisRateLimiter.limit('test', { 40 | maxOperations: 1, 41 | durationMs: 500 42 | }).then(() => { count++ })) 43 | } 44 | return Promise.all(tasks) 45 | .timeout(50) 46 | .catch(Promise.TimeoutError, () => { 47 | assert.equal(count, 1) 48 | }) 49 | }) 50 | 51 | it('should limit to 5 during period', () => { 52 | let count = 0 53 | const tasks = [] 54 | for (var i = 0; i < 10; i++) { 55 | tasks.push(redisRateLimiter.limit('test', { 56 | maxOperations: 5, 57 | durationMs: 500 58 | }).then(() => { count++ })) 59 | } 60 | return Promise.all(tasks) 61 | .timeout(800) 62 | .catch(Promise.TimeoutError, () => { 63 | assert.equal(count, 5) 64 | }) 65 | }) 66 | }) // end rate-limiters/redis integration test 67 | -------------------------------------------------------------------------------- /test/functional/basic.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const chai = require('chai') 4 | 5 | const assert = chai.assert 6 | 7 | // Ponos Tooling 8 | const ponos = require('../../src') 9 | const RabbitMQ = require('../../src/rabbitmq') 10 | const basicWorker = require('./fixtures/worker') 11 | const basicWorkerTwoEmitter = basicWorker.emitter 12 | const testWorkerOne = require('./fixtures/worker-one') 13 | const testWorkerTwo = require('./fixtures/worker-two') 14 | const testWorkerTwoEmitter = testWorkerTwo.emitter 15 | 16 | describe('Basic Example', () => { 17 | let server 18 | let rabbitmq 19 | const testQueueBasic = 'ponos-test:zero' 20 | const testQueueOne = 'ponos-test:one' 21 | const testQueueTwo = 'ponos-test:two' 22 | before(() => { 23 | const tasks = {} 24 | rabbitmq = new RabbitMQ({ 25 | name: 'ponos.test', 26 | tasks: [ testQueueBasic, testQueueOne, testQueueTwo ] 27 | }) 28 | tasks[testQueueBasic] = basicWorker 29 | tasks[testQueueOne] = testWorkerOne 30 | tasks[testQueueTwo] = testWorkerTwo 31 | server = new ponos.Server({ name: 'ponos.test', tasks: tasks }) 32 | return rabbitmq.connect() 33 | .then(() => { 34 | testWorkerOne.setPublisher(rabbitmq) 35 | return server.start() 36 | }) 37 | }) 38 | 39 | after(() => { 40 | return server.stop() 41 | .then(() => { 42 | return rabbitmq.disconnect() 43 | }) 44 | }) 45 | 46 | it('should queue a task that triggers an event', (done) => { 47 | basicWorkerTwoEmitter.on('task', function (job, jobMeta) { 48 | assert.equal(job.message, 'hello world') 49 | done() 50 | }) 51 | const job = { 52 | eventName: 'task', 53 | message: 'hello world' 54 | } 55 | rabbitmq.publishTask(testQueueBasic, job) 56 | }) 57 | 58 | it('should trigger series of events', (done) => { 59 | testWorkerTwoEmitter.on('task', function (job, jobMeta) { 60 | assert.equal(jobMeta.headers.publisherWorkerName, testQueueOne) 61 | assert.equal(job.message, 'hello world2') 62 | done() 63 | }) 64 | const job = { 65 | eventName: 'task', 66 | message: 'hello world' 67 | } 68 | rabbitmq.publishTask(testQueueOne, job) 69 | }) 70 | }) 71 | -------------------------------------------------------------------------------- 
/test/functional/failing.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const chai = require('chai') 4 | const Promise = require('bluebird') 5 | const sinon = require('sinon') 6 | const WorkerStopError = require('error-cat/errors/worker-stop-error') 7 | const ErrorCat = require('error-cat') 8 | const assert = chai.assert 9 | 10 | // Ponos Tooling 11 | const ponos = require('../../src') 12 | const RabbitMQ = require('../../src/rabbitmq') 13 | const testWorker = require('./fixtures/worker') 14 | const testWorkerEmitter = testWorker.emitter 15 | 16 | // require the Worker class so we can verify the task is running 17 | const _Worker = require('../../src/worker') 18 | 19 | /* 20 | * In this example, we are going to pass an invalid job to the worker that will 21 | * throw a WorkerStopError, acknowledge the job, and not run it a second time. 22 | */ 23 | describe('Basic Failing Task', () => { 24 | let server 25 | let rabbitmq 26 | 27 | before(() => { 28 | sinon.spy(_Worker.prototype, 'run') 29 | sinon.spy(ErrorCat, 'report') 30 | const tasks = { 31 | 'ponos-test:one': testWorker 32 | } 33 | rabbitmq = new RabbitMQ({ 34 | tasks: Object.keys(tasks) 35 | }) 36 | server = new ponos.Server({ 37 | tasks: tasks 38 | }) 39 | return rabbitmq.connect() 40 | .then(() => { 41 | return server.start() 42 | }) 43 | }) 44 | 45 | after(() => { 46 | _Worker.prototype.run.restore() 47 | ErrorCat.report.restore() 48 | return server.stop() 49 | .then(() => { 50 | return rabbitmq.disconnect() 51 | }) 52 | }) 53 | 54 | const job = { 55 | eventName: 'will-never-emit' 56 | } 57 | 58 | // Before we run the test, let's assert that our task fails with the job. 59 | // This should be _rejected_ with an error. 60 | before(() => { 61 | return assert.isRejected( 62 | testWorker(job), 63 | WorkerStopError, 64 | /message.+required/ 65 | ) 66 | }) 67 | 68 | it('should fail once and not be re-run', () => { 69 | testWorkerEmitter.on('will-never-emit', () => { 70 | throw new Error('failing worker should not have emitted') 71 | }) 72 | rabbitmq.publishTask('ponos-test:one', job) 73 | 74 | // wait until .run is called 75 | return Promise.try(function loop () { 76 | if (!_Worker.prototype.run.calledOnce) { 77 | return Promise.delay(5).then(loop) 78 | } 79 | }) 80 | .then(() => { 81 | assert.ok(_Worker.prototype.run.calledOnce, '.run called once') 82 | /* 83 | * We can get the promise and assure that it was fulfilled! 
84 | * This should be _fulfilled_ because it threw a WorkerStopError and 85 | * acknowledged that the task was completed (even though the task 86 | * rejected with an error) 87 | */ 88 | const workerRunPromise = _Worker.prototype.run.firstCall.returnValue 89 | assert.isFulfilled(workerRunPromise) 90 | assert.ok( 91 | ErrorCat.report.calledOnce, 92 | 'worker.report called once' 93 | ) 94 | const err = ErrorCat.report.firstCall.args[0] 95 | assert.instanceOf(err, WorkerStopError) 96 | assert.match(err, /fail test message is required/) 97 | 98 | return Promise.resolve() 99 | }) 100 | }) 101 | }) 102 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ponos", 3 | "version": "5.8.3", 4 | "description": "An opinionated queue based worker server for node.", 5 | "main": "lib/index.js", 6 | "engines": { 7 | "node": ">=4" 8 | }, 9 | "scripts": { 10 | "build": "babel --out-dir lib src", 11 | "build:clean": "rm -rf lib", 12 | "changelog": "github-changes -o Runnable -r ponos -a --only-pulls --use-commit-body --order-semver", 13 | "codeclimate": "npm install -g codeclimate-test-reporter && codeclimate-test-reporter < ./coverage/lcov.info", 14 | "coverage": "istanbul cover ./node_modules/.bin/_mocha -- $npm_package_options_mocha test/unit && npm run coverage-check", 15 | "coverage-check": "istanbul check-coverage && echo 'Coverage check successful!'", 16 | "coveralls": "cat ./coverage/lcov.info | coveralls", 17 | "docs": "npm run build && jsdoc --recurse --readme ./README.md lib/", 18 | "format": "standard --format", 19 | "functional": "mocha $npm_package_options_mocha test/functional", 20 | "integration": "mocha $npm_package_options_mocha test/integration", 21 | "lint": "npm run lint:format && npm run lint:type", 22 | "lint:format": "standard --verbose", 23 | "lint:type": "flow --timeout 30", 24 | "prepublish": "not-in-install && npm run build || in-install", 25 | "test": "npm run lint && npm run unit && npm run functional && npm run integration", 26 | "unit": "mocha $npm_package_options_mocha test/unit" 27 | }, 28 | "repository": { 29 | "type": "git", 30 | "url": "https://github.com/Runnable/ponos.git" 31 | }, 32 | "options": { 33 | "mocha": "--require resources/mocha-bootstrap --recursive --reporter spec --bail --timeout 5000 --compilers js:babel-register" 34 | }, 35 | "keywords": [ 36 | "worker", 37 | "queue", 38 | "rabbit", 39 | "job", 40 | "message" 41 | ], 42 | "author": "Ryan Sandor Richards ", 43 | "contributors": [ 44 | "Anandkumar Patel ", 45 | "Bryan Kendall ", 46 | "Anton Podviaznikov " 47 | ], 48 | "license": "MIT", 49 | "bugs": { 50 | "url": "https://github.com/Runnable/ponos/issues" 51 | }, 52 | "homepage": "https://github.com/Runnable/ponos", 53 | "standard": { 54 | "parser": "babel-eslint", 55 | "globals": [ 56 | "describe", 57 | "it", 58 | "before", 59 | "after", 60 | "beforeEach", 61 | "afterEach" 62 | ] 63 | }, 64 | "peerDependencies": { 65 | "error-cat": "^3.0.0" 66 | }, 67 | "dependencies": { 68 | "101": "^1.1.1", 69 | "@runnable/cls-bluebird": "^1.1.3", 70 | "amqplib": "^0.4.1", 71 | "bluebird": "^3.0.5", 72 | "bunyan": "^1.5.1", 73 | "continuation-local-storage": "^3.1.7", 74 | "error-cat": "^3.0.0", 75 | "immutable": "^3.8.1", 76 | "joi": "^9.0.4", 77 | "monitor-dog": "1.5.2", 78 | "ratelimiter": "^2.1.3", 79 | "redis": "^2.6.2", 80 | "uuid": "^2.0.2" 81 | }, 82 | "devDependencies": { 83 | "babel-cli": "^6.8.0", 84 | "babel-eslint": 
"^7.0.0", 85 | "babel-plugin-transform-class-properties": "^6.8.0", 86 | "babel-plugin-transform-flow-strip-types": "^6.8.0", 87 | "babel-register": "^6.8.0", 88 | "chai": "^3.3.0", 89 | "chai-as-promised": "^6.0.0", 90 | "coveralls": "^2.11.4", 91 | "eslint-config-standard": "^6.2.0", 92 | "flow-bin": "^0.33.0", 93 | "github-changes": "^1.0.0", 94 | "in-publish": "^2.0.0", 95 | "istanbul": "^1.0.0-alpha.2", 96 | "jsdoc": "^3.4.0", 97 | "mocha": "^3.0.1", 98 | "sinon": "^1.17.0", 99 | "sinon-as-promised": "^4.0.0", 100 | "standard": "8.1.0" 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /src/rate-limiters/redis.js: -------------------------------------------------------------------------------- 1 | /* @flow */ 2 | /* global Bluebird$Promise Logger RedisClient */ 3 | 'use strict' 4 | const joi = require('joi') 5 | const pick = require('101/pick') 6 | const RateLimiter = require('ratelimiter') 7 | const Promise = require('bluebird') 8 | const redis = require('redis') 9 | 10 | const optsSchema = joi.object({ 11 | durationMs: joi.number().integer().min(0).required(), 12 | host: joi.string().required(), 13 | log: joi.object().required(), 14 | port: joi.number().required() 15 | }) 16 | 17 | module.exports = class RedisRateLimiter { 18 | port: string; 19 | host: string; 20 | log: Logger; 21 | durationMs: number; 22 | client: RedisClient; 23 | 24 | /** 25 | * creates RedisRateLimiter object 26 | * @param {Object} opts redis connection options 27 | * @param {String} opts.port redis connection port 28 | * @param {String} opts.host redis connection host 29 | * @param {Logger} opts.log worker server logger 30 | * @return {RedisRateLimiter} 31 | */ 32 | constructor (opts: Object) { 33 | this.port = opts.port || 34 | process.env.REDIS_PORT || 35 | 6379 36 | 37 | this.host = opts.host || 38 | process.env.REDIS_HOST || 39 | 'localhost' 40 | 41 | this.durationMs = opts.durationMs || 42 | parseInt(process.env.RATE_LIMIT_DURATION, 10) || 43 | 1000 44 | 45 | this.log = opts.log 46 | 47 | joi.assert(this, optsSchema) 48 | } 49 | 50 | /** 51 | * Connect redis client to redis 52 | * @return {Promise} 53 | * @resolves {undefined} When connection is ready 54 | * @reject {Error} When there was an error connecting 55 | */ 56 | connect (): Bluebird$Promise<*> { 57 | return Promise.fromCallback((cb) => { 58 | this.log.trace('connecting to redis') 59 | this.client = redis.createClient(this.port, this.host) 60 | this.client.on('error', this._throwOnError.bind(this)) 61 | this.client.on('ready', cb) 62 | }) 63 | } 64 | 65 | /** 66 | * log error and throw 67 | * @param {Error} err error from redis 68 | * @throws {Error} always 69 | */ 70 | _throwOnError (err: Error) { 71 | this.log.fatal({ err: err }, 'redis error') 72 | throw err 73 | } 74 | 75 | /** 76 | * Ensure promise's get resolved at a given rate 77 | * @param {String} queueName queueName of task or event to limit 78 | * @param {Object} opts rate limiting options 79 | * @param {Number} opts.maxOperations max number of operations per duration 80 | * @param {Number} opts.durationMs time period to limit operations in milliseconds 81 | * @return {Promise} 82 | */ 83 | limit (queueName: string, opts: Object): Bluebird$Promise { 84 | const log = this.log.child({ 85 | queueName: queueName, 86 | opts: pick(opts, ['msTimeout', 'maxNumRetries', 'durationMs', 'maxOperations']) 87 | }) 88 | const durationMs = opts.durationMs || this.durationMs 89 | const limiter = new RateLimiter({ 90 | id: queueName, 91 | db: 
this.client, 92 | max: opts.maxOperations, 93 | duration: durationMs 94 | }) 95 | // is max operations not set, do not limit 96 | if (!opts.maxOperations) { 97 | return Promise.resolve() 98 | } 99 | log.trace('checking rate limit') 100 | return Promise.fromCallback((cb) => { 101 | limiter.get(cb) 102 | }) 103 | .then((limitProperties) => { 104 | if (!limitProperties.remaining) { 105 | const delayTimeMs = Math.floor(durationMs / 2) 106 | log.warn({ limitProperties: limitProperties, delayTimeMs: delayTimeMs }, 'over the limit, delaying') 107 | return Promise 108 | .delay(delayTimeMs) 109 | .then(() => { 110 | return this.limit(queueName, opts) 111 | }) 112 | } 113 | log.trace({ limitProperties: limitProperties }, 'under limit') 114 | }) 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /test/functional/timeout.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const chai = require('chai') 4 | const sinon = require('sinon') 5 | 6 | const assert = chai.assert 7 | 8 | // Ponos Tooling 9 | const ponos = require('../../src') 10 | const RabbitMQ = require('../../src/rabbitmq') 11 | const TimeoutError = require('bluebird').TimeoutError 12 | const testWorker = require('./fixtures/timeout-worker') 13 | const testWorkerEmitter = testWorker.emitter 14 | 15 | // require the Worker class so we can verify the task is running 16 | const _Worker = require('../../src/worker') 17 | // require the error module so we can see the error printed 18 | const _Bunyan = require('bunyan') 19 | 20 | /* 21 | * In this example, we are going to have a job handler that times out at 22 | * decreasing intervals, throwing TimeoutErrors, until it passes. 23 | */ 24 | describe('Basic Timeout Task', function () { 25 | let server 26 | let rabbitmq 27 | 28 | before(() => { 29 | sinon.spy(_Worker.prototype, 'run') 30 | sinon.spy(_Bunyan.prototype, 'warn') 31 | const tasks = { 32 | 'ponos-test:one': testWorker 33 | } 34 | rabbitmq = new RabbitMQ({ 35 | tasks: Object.keys(tasks) 36 | }) 37 | server = new ponos.Server({ tasks: tasks }) 38 | return rabbitmq.connect() 39 | .then(() => { 40 | return server.start() 41 | }) 42 | }) 43 | after(() => { 44 | _Worker.prototype.run.restore() 45 | _Bunyan.prototype.warn.restore() 46 | return server.stop() 47 | .then(() => { 48 | return rabbitmq.disconnect() 49 | }) 50 | }) 51 | 52 | const job = { 53 | eventName: 'did-not-time-out', 54 | message: 'hello world' 55 | } 56 | 57 | describe('with a timeout', function () { 58 | this.timeout(3500) 59 | let prevTimeout 60 | 61 | before(() => { 62 | prevTimeout = process.env.WORKER_TIMEOUT 63 | process.env.WORKER_TIMEOUT = 1000 64 | }) 65 | 66 | after(() => { 67 | process.env.WORKER_TIMEOUT = prevTimeout 68 | }) 69 | 70 | it('should fail twice and pass the third time', (done) => { 71 | testWorkerEmitter.on('did-not-time-out', () => { 72 | // process.nextTick so the worker can resolve 73 | // NOTE(bryan): I found nextTick to be more consistant than setTimeout 74 | process.nextTick(() => { 75 | // this signals to us that we are done! 76 | assert.ok(_Worker.prototype.run.calledThrice, '.run called thrice') 77 | /* 78 | * We can get the promise and assure that it was fulfilled! 79 | * It was run three times and all three should be fulfilled. 
80 |            */
81 |           ;[
82 |             _Worker.prototype.run.firstCall.returnValue,
83 |             _Worker.prototype.run.secondCall.returnValue,
84 |             _Worker.prototype.run.thirdCall.returnValue
85 |           ].forEach(function (p) { assert.isFulfilled(p) })
86 |           /*
87 |            * and, make sure the error module has logged the TimeoutError twice
88 |            * have to do a bit of weird filtering, but this is correct. Long
89 |            * story short, log.warn is called a couple times, but we just want to
90 |            * make sure the 'task timed out' message is just twice (the number of
91 |            * times this worker failed).
92 |            */
93 |           const bunyanCalls = _Bunyan.prototype.warn.args
94 |           const errors = bunyanCalls.reduce(function (memo, args) {
95 |             const checkArgs = args.filter(function (arg) {
96 |               return /task timed out/i.test(arg)
97 |             })
98 |             if (checkArgs.length) { memo.push(args.shift().err) }
99 |             return memo
100 |           }, [])
101 |           errors.forEach(function (err) {
102 |             assert.instanceOf(err, TimeoutError)
103 |           })
104 |           done()
105 |         })
106 |       })
107 | 
108 |       rabbitmq.publishTask('ponos-test:one', job)
109 |     })
110 |   })
111 | })
112 | 
--------------------------------------------------------------------------------
/docs/Guides-Migration-v3.0.0.md:
--------------------------------------------------------------------------------
1 | # Migration Guide v3.0.0
2 | 
3 | ## Breaking Changes
4 | 
5 | #### `runnable-hermes` has been removed
6 | 
7 | `runnable-hermes` has been removed from the project. This means that the Ponos server constructor no longer accepts a `hermes` option.
8 | 
9 | #### `TaskError` and `TaskFatalError` have been removed
10 | 
11 | Runnable has developed a new error library called [`ErrorCat`](https://github.com/Runnable/error-cat). `ErrorCat` provides a hierarchy of errors that form a better abstraction. `TaskError` and `TaskFatalError` have been replaced with the `WorkerError` and `WorkerStopError`, respectively, from `ErrorCat`.
12 | 
13 | #### RabbitMQ Default Authentication Cleared
14 | 
15 | The authentication parameters for RabbitMQ `username` and `password` no longer default to `guest` and `guest`. Previously, there was no way to eliminate authentication entirely. Now, `username` and `password` are undefined by default, and authentication is not included when connecting to RabbitMQ.
16 | 
17 | #### Server Constructor Changed
18 | 
19 | The Ponos server constructor has been changed. First, as mentioned before, `hermes` is no longer accepted as an option: Ponos manages its own RabbitMQ library with connection controlled via a `rabbitmq` object in the server options or `RABBITMQ_*` environment variables.
20 | 
21 | Second, `queues` is no longer a required option when _not_ providing a `hermes` client. Tasks and events can be set in the constructor or after the server is created. This is detailed below.
22 | 
23 | ## Migrating from v2 to v3
24 | 
25 | ### Ponos Server Constructor
26 | 
27 | The server constructor has been changed to reflect the lack of `hermes` and the ability to set `task` and `event` handlers up front.
28 | 
29 | #### RabbitMQ Connection
30 | 
31 | Since a `hermes` option cannot be passed into the constructor, a `rabbitmq` option is available to set connections if that is desired. These options can also be set using `RABBITMQ_*` environment variables.
32 | 
33 | ```javascript
34 | const server = new ponos.Server({
35 |   rabbitmq: {
36 |     hostname: 'rabbitmq.host', // defaults to 'localhost'
37 |     port: 5566, // defaults to 5672
38 |     username: 'myusername', // defaults to undefined
39 |     password: 'mypassword' // defaults to undefined
40 |   }
41 | })
42 | ```
43 | 
44 | #### Named Clients
45 | 
46 | `hermes` allowed the user to namespace the queues that were created when attached to a fanout exchange. To do this now, pass a `name` parameter to the server constructor and it will be passed along to RabbitMQ.
47 | 
48 | ```javascript
49 | const server = new ponos.Server({
50 |   name: 'my-unique-name'
51 | })
52 | ```
53 | 
54 | #### Publishing from Workers
55 | 
56 | Projects that previously used `hermes` typically also used it to publish to queues from other workers. Ponos's RabbitMQ model is available to be required and used in a very similar way, if you wish.
57 | 
58 | ```javascript
59 | const RabbitMQ = require('ponos/lib/rabbitmq')
60 | const rabbit = new RabbitMQ({
61 |   // this takes the same options as the `rabbitmq` option for the server.
62 |   // you can also set `name` here if you like, to name the client.
63 | })
64 | 
65 | // be sure to connect to the RabbitMQ server
66 | rabbit.connect()
67 |   .then(() => {
68 |     // publish directly to a queue (returns a promise)
69 |     rabbit.publishToQueue('task.queue', { hello: 'world' })
70 | 
71 |     // or publish to an exchange (returns a promise)
72 |     rabbit.publishToExchange('event.queue', 'routing-key', { hello: 'world' })
73 |   })
74 | ```
75 | 
76 | #### Queues Pre-definition
77 | 
78 | A `queues` list is no longer required up front when creating a server. Setting tasks and events through `setTask`, `setAllTasks`, `setEvent`, and `setAllEvents` is still available, but `tasks` and `events` can now also be set in the constructor.
79 | 
80 | ```javascript
81 | const server = new ponos.Server({
82 |   tasks: {
83 |     'task.queue': require('./my-task-worker')
84 |   },
85 |   events: {
86 |     'event.queue': require('./queue-worker')
87 |   }
88 | })
89 | ```
90 | 
91 | ### Ponos Workers
92 | 
93 | #### Task (Fatal) Errors
94 | 
95 | `TaskError` and `TaskFatalError` have been removed from Ponos, in favor of errors from `ErrorCat`: `WorkerError` and `WorkerStopError`. The constructors for the errors are:
96 | 
97 | ```javascript
98 | // previously TaskError
99 | new WorkerError('message about error', { optional: 'data' })
100 | 
101 | // previously TaskFatalError
102 | new WorkerStopError('message about error', { optional: 'data' })
103 | ```
104 | 
105 | These constructors are almost identical to the old ones, but do not require the queue name as the first parameter. Ponos's worker will decorate the error with the queue name and the job for you, so you have to pass less data to these errors up front.
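For example, a task handler migrated to the new errors might distinguish a retryable failure from a permanent one as shown below. This is an illustrative sketch rather than code shipped with Ponos: the `error-cat/errors/worker-error` require path mirrors the `worker-stop-error` path used above, and `sendEmail` is a hypothetical stand-in for whatever work your task performs.

```javascript
'use strict'

const Promise = require('bluebird')
const WorkerError = require('error-cat/errors/worker-error')
const WorkerStopError = require('error-cat/errors/worker-stop-error')

// Hypothetical stand-in for the real work the task would do.
function sendEmail (address) {
  return Promise.resolve(`sent to ${address}`)
}

module.exports = function sendWelcomeEmail (job) {
  return Promise.try(() => {
    if (!job.email) {
      // The job itself is invalid: stop, acknowledge, and never retry.
      // (Previously: throw new TaskFatalError('queue.name', 'email is required'))
      throw new WorkerStopError('email is required')
    }
    return sendEmail(job.email)
      .catch((err) => {
        // A transient failure: reject so the worker retries the job.
        // (Previously: throw new TaskError('queue.name', 'smtp unavailable'))
        throw new WorkerError('smtp unavailable', { originalError: err.message })
      })
  })
}
```

The data object remains optional in both cases; because the worker attaches the queue name and job before reporting, a handler only needs to pass along context that is not already on the job.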
106 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## Change Log 2 | 3 | ### v4.2.1 (2016/06/28 21:41 +00:00) 4 | - [#58](https://github.com/Runnable/ponos/pull/58) strengthening types (@bkendall) 5 | 6 | ### v4.2.0 (2016/06/28 00:00 +00:00) 7 | - [#57](https://github.com/Runnable/ponos/pull/57) add cls and TID (@bkendall, @anandkumarpatel) 8 | 9 | ### v4.1.0-0 (2016/06/21 22:26 +00:00) 10 | - [#56](https://github.com/Runnable/ponos/pull/56) codeclimate: report coverage (@bkendall) 11 | - [#55](https://github.com/Runnable/ponos/pull/55) codeclimate: ignore supporting folders (@bkendall) 12 | 13 | ### v4.0.0 (2016/06/15 18:30 +00:00) 14 | - [#54](https://github.com/Runnable/ponos/pull/54) Update ErrorCat@3.0.0 (@bkendall, @podviaznikov) 15 | - [#52](https://github.com/Runnable/ponos/pull/52) Update flow-bin to version 0.27.0 🚀 (@greenkeeperio-bot) 16 | 17 | ### v3.2.0 (2016/06/01 23:11 +00:00) 18 | - [#51](https://github.com/Runnable/ponos/pull/51) make publish methods more clear (@bkendall) 19 | - [#50](https://github.com/Runnable/ponos/pull/50) Update flow-bin to version 0.26.0 🚀 (@greenkeeperio-bot) 20 | - [#49](https://github.com/Runnable/ponos/pull/49) add codeclimate config (@bkendall) 21 | 22 | ### v3.1.0 (2016/05/24 22:18 +00:00) 23 | - [#48](https://github.com/Runnable/ponos/pull/48) Prefetch (@bkendall) 24 | - [#47](https://github.com/Runnable/ponos/pull/47) mocha@2.5.1 update (@bkendall, @greenkeeperio-bot) 25 | 26 | ### v3.0.2 (2016/05/19 19:23 +00:00) 27 | - [#46](https://github.com/Runnable/ponos/pull/46) Confirm Publish (@bkendall) 28 | - [#44](https://github.com/Runnable/ponos/pull/44) Update flow-bin to version 0.25.0 🚀 (@greenkeeperio-bot) 29 | 30 | ### v3.0.0 (2016/05/14 00:06 +00:00) 31 | - [#43](https://github.com/Runnable/ponos/pull/43) Ponos 3.0.0 (@bkendall) 32 | - [#42](https://github.com/Runnable/ponos/pull/42) Publish methods (@bkendall) 33 | - [#41](https://github.com/Runnable/ponos/pull/41) Name RabbitMQ Client & Queues (@bkendall) 34 | - [#40](https://github.com/Runnable/ponos/pull/40) clear channel and connection on disconnect (@bkendall) 35 | - [#35](https://github.com/Runnable/ponos/pull/35) New RabbitMQ Client; Flowtype (@bkendall) 36 | - [#33](https://github.com/Runnable/ponos/pull/33) Update ErrorCat (@bkendall) 37 | - [#36](https://github.com/Runnable/ponos/pull/36) Travis Badge: use master (@bkendall) 38 | - [#32](https://github.com/Runnable/ponos/pull/32) bump standard - Travis updates (@bkendall) 39 | - [#24](https://github.com/Runnable/ponos/pull/24) add code-climate badge (@bkendall, @podviaznikov) 40 | 41 | ### v2.0.0 (2016/03/03 22:16 +00:00) 42 | - [#30](https://github.com/Runnable/ponos/pull/30) package.json engine (@bkendall) 43 | - [#29](https://github.com/Runnable/ponos/pull/29) auto deploy to NPM (@bkendall) 44 | - [#28](https://github.com/Runnable/ponos/pull/28) ES6 (@bkendall) 45 | - [#26](https://github.com/Runnable/ponos/pull/26) remove npm publish (@bkendall) 46 | - [#27](https://github.com/Runnable/ponos/pull/27) fix allowed failures entry (@bkendall) 47 | 48 | ### v1.3.0 (2016/01/21 21:32 +00:00) 49 | - [#25](https://github.com/Runnable/ponos/pull/25) Add monitor-dog (@podviaznikov) 50 | - [#23](https://github.com/Runnable/ponos/pull/23) use travis to deploy to npm (@bkendall) 51 | 52 | ### v1.2.1 (2016/01/12 00:14 +00:00) 53 | - [#22](https://github.com/Runnable/ponos/pull/22) Change throw to warn 
log on queue checks (@cflynn07, @bkendall) 54 | - [#21](https://github.com/Runnable/ponos/pull/21) Maintenance Update (@bkendall) 55 | 56 | ### v1.2.0 (2016/01/11 21:08 +00:00) 57 | - [#20](https://github.com/Runnable/ponos/pull/20) Replace throw with warn log (@bkendall, @cflynn07) 58 | 59 | ### v1.1.1 (2015/11/12 02:23 +00:00) 60 | - [#18](https://github.com/Runnable/ponos/pull/18) Hermes Update (@bkendall) 61 | - [#15](https://github.com/Runnable/ponos/pull/15) StandardJS Formatting (@bkendall) 62 | 63 | ### v1.1.0 (2015/11/06 01:02 +00:00) 64 | - [#12](https://github.com/Runnable/ponos/pull/12) unsubscribe and unsubscribeAll (@tjmehta) 65 | - [#14](https://github.com/Runnable/ponos/pull/14) Updated Bluebird, Istanbul (@bkendall) 66 | - [#17](https://github.com/Runnable/ponos/pull/17) Mocha Bootstrap (@bkendall) 67 | 68 | ### v1.0.1 (2015/10/22 01:54 +00:00) 69 | - [#11](https://github.com/Runnable/ponos/pull/11) Fix Worker Options (@rsandor) 70 | 71 | ### v1.0.0 (2015/10/15 22:03 +00:00) 72 | - [#10](https://github.com/Runnable/ponos/pull/10) Changed `setTask` and `setAllTasks` to be synchronous. (@rsandor) 73 | 74 | ### v0.11.1 (2015/10/14 20:06 +00:00) 75 | - [#9](https://github.com/Runnable/ponos/pull/9) runnable-hermes@6.2.1 Update (@cflynn07, @bkendall) 76 | 77 | ### v0.11.0 (2015/10/07 23:26 +00:00) 78 | - [#8](https://github.com/Runnable/ponos/pull/8) Various Enhancements (@rsandor) 79 | 80 | ### v0.10.0 (2015/10/01 07:01 +00:00) 81 | - [#7](https://github.com/Runnable/ponos/pull/7) worker timeouts (@bkendall) 82 | 83 | ### v0.9.1 (2015/09/29 21:38 +00:00) 84 | - [#6](https://github.com/Runnable/ponos/pull/6) bunyan standard serializers (@bkendall) 85 | - [#5](https://github.com/Runnable/ponos/pull/5) Fix David Dependencies Badge (@bkendall) 86 | - [#4](https://github.com/Runnable/ponos/pull/4) Consistant Badges (@bkendall) 87 | - [#3](https://github.com/Runnable/ponos/pull/3) Fix Badges (@bkendall) 88 | - [#2](https://github.com/Runnable/ponos/pull/2) Badges (@bkendall) 89 | 90 | ### v0.9.0 (2015/09/24 18:54 +00:00) 91 | - [#1](https://github.com/Runnable/ponos/pull/1) Initial Implementation (@bkendall) -------------------------------------------------------------------------------- /interfaces/modules/bunyan.js.flow: -------------------------------------------------------------------------------- 1 | declare type Stream = { 2 | type?: string; 3 | level?: number | string; 4 | path?: string; 5 | stream: stream$Writable | Stream; 6 | closeOnExit?: boolean; 7 | period?: string; 8 | count?: number; 9 | } 10 | 11 | declare class Logger extends events$EventEmitter { 12 | constructor(options: LoggerOptions): any; 13 | addStream(stream: Stream): void; 14 | addSerializers(serializers: Serializers): void; 15 | child(opts: LoggerOptions, simple?: boolean): Logger; 16 | reopenFileStreams(): void; 17 | level(): string | number; 18 | level(value: number | string): void; 19 | levels(name: number | string, value: number | string): void; 20 | trace(...params: Array): boolean; 21 | trace(error: Error, format?: any, ...params: Array): void; 22 | trace(buffer: Buffer, format?: any, ...params: Array): void; 23 | trace(obj: Object, format?: any, ...params: Array): void; 24 | trace(format: string, ...params: Array): void; 25 | debug(...params: Array): boolean; 26 | debug(error: Error, format?: any, ...params: Array): void; 27 | debug(buffer: Buffer, format?: any, ...params: Array): void; 28 | debug(obj: Object, format?: any, ...params: Array): void; 29 | debug(format: string, ...params: Array): void; 
30 | info(...params: Array): boolean; 31 | info(error: Error, format?: any, ...params: Array): void; 32 | info(buffer: Buffer, format?: any, ...params: Array): void; 33 | info(obj: Object, format?: any, ...params: Array): void; 34 | info(format: string, ...params: Array): void; 35 | warn(...params: Array): boolean; 36 | warn(error: Error, format?: any, ...params: Array): void; 37 | warn(buffer: Buffer, format?: any, ...params: Array): void; 38 | warn(obj: Object, format?: any, ...params: Array): void; 39 | warn(format: string, ...params: Array): void; 40 | error(...params: Array): boolean; 41 | error(error: Error, format?: any, ...params: Array): void; 42 | error(buffer: Buffer, format?: any, ...params: Array): void; 43 | error(obj: Object, format?: any, ...params: Array): void; 44 | error(format: string, ...params: Array): void; 45 | fatal(...params: Array): boolean; 46 | fatal(error: Error, format?: any, ...params: Array): void; 47 | fatal(buffer: Buffer, format?: any, ...params: Array): void; 48 | fatal(obj: Object, format?: any, ...params: Array): void; 49 | fatal(format: string, ...params: Array): void; 50 | static stdSerializers: { 51 | req: (req: http$ClientRequest) => { 52 | method: string, 53 | url: string, 54 | headers: mixed, 55 | remoteAddress: string, 56 | remotePort: number 57 | }, 58 | res: (res: http$IncomingMessage) => { statusCode: number, header: string }, 59 | err: (err: Error) => { 60 | message: string, 61 | name: string, 62 | stack: string, 63 | code: string, 64 | signal: string 65 | } 66 | } 67 | } 68 | 69 | declare module 'bunyan' { 70 | declare var TRACE: 10; 71 | declare var DEBUG: 20; 72 | declare var INFO: 30; 73 | declare var WARN: 40; 74 | declare var ERROR: 50; 75 | declare var FATAL: 60; 76 | 77 | declare type BunyanLogLevels = 78 | 60 | // fatal 79 | 50 | // error 80 | 40 | // warn 81 | 30 | // info 82 | 20 | // debug 83 | 10; // info 84 | declare type BunyanRecord = { 85 | v: number, 86 | level: BunyanLogLevels, 87 | name: string, 88 | hostname: string, 89 | pid: string, 90 | time: Date, 91 | msg: string, 92 | src: string, 93 | err?: { 94 | message: string, 95 | name: string, 96 | code: any, 97 | signal: any, 98 | stack: string, 99 | }, 100 | [key: string]: any 101 | }; 102 | declare interface LoggerOptions { 103 | name?: string; 104 | streams?: Array; 105 | level?: BunyanLogLevels | string, 106 | stream?: stream$Writable; 107 | serializers?: Serializers; 108 | src?: boolean; 109 | } 110 | declare interface Serializers { 111 | [key: string]: (input: any) => string; 112 | } 113 | declare var stdSerializers: Serializers; 114 | declare function resolveLevel(value: number | string): number; 115 | declare function createLogger(options: LoggerOptions): Logger; 116 | declare class RingBuffer extends events$EventEmitter { 117 | constructor(options: RingBufferOptions): any; 118 | writable: boolean; 119 | records: Array; 120 | write(record: BunyanRecord): void; 121 | end(record?: any): void; 122 | destroy(): void; 123 | destroySoon(): void; 124 | } 125 | declare interface RingBufferOptions { 126 | limit: number; 127 | } 128 | declare function safeCycles(): (key: string, value: any) => any; 129 | declare class ConsoleRawStream { 130 | write(rec: BunyanRecord): void; 131 | } 132 | declare var levelFromName: { 133 | 'trace': typeof TRACE, 134 | 'debug': typeof DEBUG, 135 | 'info': typeof INFO, 136 | 'warn': typeof WARN, 137 | 'error': typeof ERROR, 138 | 'fatal': typeof FATAL 139 | }; 140 | declare var nameFromLevel: { 141 | [key: BunyanLogLevels]: string 142 | }; 143 
| declare var VERSION: string; 144 | declare var LOG_VERSION: string; 145 | } 146 | 147 | export { 148 | Logger 149 | } 150 | -------------------------------------------------------------------------------- /interfaces/modules/bluebird.js.flow: -------------------------------------------------------------------------------- 1 | type Bluebird$RangeError = Error; 2 | type Bluebird$CancellationErrors = Error; 3 | type Bluebird$TimeoutError = Error; 4 | type Bluebird$RejectionError = Error; 5 | type Bluebird$OperationalError = Error; 6 | 7 | type Bluebird$ConcurrencyOption = { 8 | concurrency: number, 9 | }; 10 | type Bluebird$SpreadOption = { 11 | spread: boolean; 12 | }; 13 | type Bluebird$BluebirdConfig = { 14 | warnings?: boolean, 15 | longStackTraces?: boolean, 16 | cancellation?: boolean, 17 | monitoring?: boolean 18 | }; 19 | 20 | declare class Bluebird$PromiseInspection { 21 | isCancelled(): bool; 22 | isFulfilled(): bool; 23 | isRejected(): bool; 24 | pending(): bool; 25 | reason(): any; 26 | value(): T; 27 | } 28 | 29 | declare class Bluebird$Promise { 30 | static Defer: Class; 31 | static PromiseInspection: Class>; 32 | static TimeoutError: typeof Error; 33 | 34 | static all | T>(Promises: Array): Bluebird$Promise>; 35 | static props(input: Object|Map<*,*>|Bluebird$Promise>): Bluebird$Promise<*>; 36 | static any | T>(Promises: Array): Bluebird$Promise; 37 | static race | T>(Promises: Array): Bluebird$Promise; 38 | static reject(error?: any): Bluebird$Promise; 39 | static resolve(object?: Bluebird$Promise | T): Bluebird$Promise; 40 | static some | T>(Promises: Array, count: number): Bluebird$Promise>; 41 | static join | T>(...Promises: Array): Bluebird$Promise>; 42 | static map | T>( 43 | Promises: Array | IndexedIterable, 44 | mapper: (item: T, index: number, arrayLength: number) => U, 45 | options?: Bluebird$ConcurrencyOption 46 | ): Bluebird$Promise>; 47 | static mapSeries | T>( 48 | Promises: Array, 49 | mapper: (item: T, index: number, arrayLength: number) => U 50 | ): Bluebird$Promise>; 51 | static reduce | T>( 52 | Promises: Array, 53 | reducer: (total: U, current: T, index: number, arrayLength: number) => U, 54 | initialValue?: U 55 | ): Bluebird$Promise; 56 | static filter | T>( 57 | Promises: Array, 58 | filterer: (item: T, index: number, arrayLength: number) => Bluebird$Promise|bool, 59 | option?: Bluebird$ConcurrencyOption 60 | ): Bluebird$Promise>; 61 | static each | T>( 62 | Promises: Array, 63 | iterator: (item: T, index: number, arrayLength: number) => Bluebird$Promise|mixed, 64 | ): Bluebird$Promise>; 65 | static try(fn: () => T|Bluebird$Promise, args: ?Array, ctx: ?any): Bluebird$Promise; 66 | static attempt(fn: () => T|Bluebird$Promise, args: ?Array, ctx: ?any): Bluebird$Promise; 67 | static delay(value: T|Bluebird$Promise, ms: number): Bluebird$Promise; 68 | static delay(ms: number): Bluebird$Promise; 69 | static config(config: Bluebird$BluebirdConfig): void; 70 | static defer(): Bluebird$Defer; 71 | static setScheduler(scheduler: (callback: (...args: Array) => void) => void): void; 72 | static method(fn: (...args: any) => T): Bluebird$Promise; 73 | static cast(value: T|Bluebird$Promise): Bluebird$Promise; 74 | static bind(ctx: any): Bluebird$Promise; 75 | static is(value: any): boolean; 76 | static longStackTraces(): void; 77 | static onPossiblyUnhandledRejection(handler: (reason: any) => any): void; 78 | static fromCallback(fn: (fn: (error: ?Error, value?: T) => any) => any): Bluebird$Promise; 79 | 80 | constructor(callback: ( 81 | resolve: (result?: 
Bluebird$Promise | R) => void, 82 | reject: (error?: any) => void 83 | ) => mixed): void; 84 | then(onFulfill?: (value: R) => Bluebird$Promise | U, onReject?: (error: any) => Bluebird$Promise | U): Bluebird$Promise; 85 | catch(onReject?: (error: any) => ?Bluebird$Promise | U): Bluebird$Promise; 86 | caught(onReject?: (error: any) => ?Bluebird$Promise | U): Bluebird$Promise; 87 | error(onReject?: (error: any) => ?Bluebird$Promise | U): Bluebird$Promise; 88 | done(onFulfill?: (value: R) => mixed, onReject?: (error: any) => mixed): void; 89 | finally(onDone?: (value: R) => mixed): Bluebird$Promise; 90 | lastly(onDone?: (value: R) => mixed): Bluebird$Promise; 91 | tap(onDone?: (value: R) => mixed): Bluebird$Promise; 92 | delay(ms: number): Bluebird$Promise; 93 | timeout(ms: number, message?: string): Bluebird$Promise; 94 | cancel(): void; 95 | bind(ctx: any): Bluebird$Promise; 96 | call(propertyName: string, ...args: Array): Bluebird$Promise; 97 | throw(reason: Error): Bluebird$Promise; 98 | thenThrow(reason: Error): Bluebird$Promise; 99 | all(): Bluebird$Promise>; 100 | any(): Bluebird$Promise; 101 | some(count: number): Bluebird$Promise>; 102 | race(): Bluebird$Promise; 103 | map(mapper: (item: T, index: number, arrayLength: number) => Bluebird$Promise | U, options?: Bluebird$ConcurrencyOption): Bluebird$Promise>; 104 | mapSeries(mapper: (item: T, index: number, arrayLength: number) => Bluebird$Promise | U): Bluebird$Promise>; 105 | reduce( 106 | reducer: (total: T, item: U, index: number, arrayLength: number) => Bluebird$Promise | T, 107 | initialValue?: T 108 | ): Bluebird$Promise; 109 | filter(filterer: (item: T, index: number, arrayLength: number) => Bluebird$Promise | bool, options?: Bluebird$ConcurrencyOption): Bluebird$Promise>; 110 | each(iterator: (item: T, index: number, arrayLength: number) => Bluebird$Promise | U): Bluebird$Promise>; 111 | reflect(): Bluebird$Promise>; 112 | isFulfilled(): bool; 113 | isRejected(): bool; 114 | isPending(): bool; 115 | isResolved(): bool; 116 | value(): R; 117 | reason(): any; 118 | asCallback(fn: (error: ?Error, value?: T) => any): void; 119 | return(value: T): Bluebird$Promise; 120 | } 121 | 122 | declare class Bluebird$Defer { 123 | promise: Bluebird$Promise<*>; 124 | resolve: (value: any) => any; 125 | reject: (value: any) => any; 126 | } 127 | 128 | declare module 'bluebird' { 129 | declare var exports: typeof Bluebird$Promise; 130 | } 131 | -------------------------------------------------------------------------------- /test/unit/rate-limiters/redis.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | const chai = require('chai') 3 | const Promise = require('bluebird') 4 | const put = require('101/put') 5 | const RateLimiter = require('ratelimiter') 6 | const redis = require('redis') 7 | const sinon = require('sinon') 8 | 9 | const RedisRateLimiter = require('../../../src/rate-limiters/redis') 10 | const logger = require('../../../src/logger') 11 | 12 | const assert = chai.assert 13 | 14 | describe('redis', () => { 15 | let testOpts 16 | let testRedisRateLimiter 17 | 18 | beforeEach(() => { 19 | delete process.env.REDIS_PORT 20 | delete process.env.REDIS_HOST 21 | delete process.env.RATE_LIMIT_DURATION 22 | 23 | testOpts = { 24 | port: '1', 25 | host: 'remotehost', 26 | log: logger 27 | } 28 | testRedisRateLimiter = new RedisRateLimiter(testOpts) 29 | }) 30 | 31 | describe('constructor', () => { 32 | it('should use passed port', () => { 33 | const out = new RedisRateLimiter(testOpts) 34 | 
assert.equal(out.port, testOpts.port) 35 | }) 36 | 37 | it('should use env port', () => { 38 | delete testOpts.port 39 | process.env.REDIS_PORT = '1234' 40 | const out = new RedisRateLimiter(testOpts) 41 | assert.equal(out.port, process.env.REDIS_PORT) 42 | }) 43 | 44 | it('should use default port', () => { 45 | delete testOpts.port 46 | const out = new RedisRateLimiter(testOpts) 47 | assert.equal(out.port, '6379') 48 | }) 49 | 50 | it('should use passed host', () => { 51 | const out = new RedisRateLimiter(testOpts) 52 | assert.equal(out.host, testOpts.host) 53 | }) 54 | 55 | it('should use env host', () => { 56 | delete testOpts.host 57 | process.env.REDIS_HOST = 'moonhost' 58 | const out = new RedisRateLimiter(testOpts) 59 | assert.equal(out.host, process.env.REDIS_HOST) 60 | }) 61 | 62 | it('should use default host', () => { 63 | delete testOpts.host 64 | const out = new RedisRateLimiter(testOpts) 65 | assert.equal(out.host, 'localhost') 66 | }) 67 | 68 | it('should default durationMs to 1000', () => { 69 | const out = new RedisRateLimiter(testOpts) 70 | assert.equal(out.durationMs, 1000) 71 | }) 72 | 73 | it('should use env durationMs', () => { 74 | process.env.RATE_LIMIT_DURATION = '5678' 75 | const out = new RedisRateLimiter(testOpts) 76 | assert.equal(out.durationMs, process.env.RATE_LIMIT_DURATION) 77 | }) 78 | 79 | it('should use passed durationMs', () => { 80 | const out = new RedisRateLimiter(put({ 81 | durationMs: 1738 82 | }, testOpts)) 83 | assert.equal(out.durationMs, 1738) 84 | }) 85 | }) // constructor 86 | 87 | describe('_throwOnError', function () { 88 | it('should throw on error', () => { 89 | const testError = new Error('lost connection') 90 | return assert.throws(() => { 91 | testRedisRateLimiter._throwOnError(testError) 92 | }, /lost connection/) 93 | }) 94 | }) // end _throwOnError 95 | 96 | describe('connect', () => { 97 | let onStub 98 | beforeEach(() => { 99 | onStub = sinon.stub() 100 | sinon.stub(testRedisRateLimiter, '_throwOnError') 101 | sinon.stub(redis, 'createClient').returns({ 102 | on: onStub 103 | }) 104 | }) 105 | 106 | afterEach(() => { 107 | redis.createClient.restore() 108 | }) 109 | 110 | it('should create redis client', () => { 111 | onStub.onSecondCall().yieldsAsync() 112 | return assert.isFulfilled(testRedisRateLimiter.connect()) 113 | .then(() => { 114 | sinon.assert.calledOnce(redis.createClient) 115 | sinon.assert.calledWith(redis.createClient, testOpts.port, testOpts.host) 116 | }) 117 | }) 118 | 119 | it('should attach error handler', () => { 120 | onStub.onSecondCall().yieldsAsync() 121 | return assert.isFulfilled(testRedisRateLimiter.connect()) 122 | .then(() => { 123 | sinon.assert.calledTwice(onStub) 124 | sinon.assert.calledWith(onStub, 'ready', sinon.match.func) 125 | }) 126 | }) 127 | 128 | it('should attach error handler', () => { 129 | const testError = new Error('found') 130 | onStub.onFirstCall().yields(testError) 131 | onStub.onSecondCall().yieldsAsync() 132 | return assert.isFulfilled(testRedisRateLimiter.connect()) 133 | .then(() => { 134 | sinon.assert.calledOnce(testRedisRateLimiter._throwOnError) 135 | sinon.assert.calledWith(testRedisRateLimiter._throwOnError, testError) 136 | }) 137 | }) 138 | }) // end connect 139 | 140 | describe('limit', () => { 141 | const testLimitOpts = { 142 | durationMs: 2000, 143 | maxOperations: 3 144 | } 145 | 146 | const testName = 'Shiva' 147 | let stubTime 148 | beforeEach(() => { 149 | sinon.spy(Promise, 'delay') 150 | stubTime = sinon.useFakeTimers() 151 | 
sinon.stub(RateLimiter.prototype, 'get') 152 | sinon.spy(testRedisRateLimiter, 'limit') 153 | testRedisRateLimiter.client = {} 154 | }) 155 | 156 | afterEach(() => { 157 | Promise.delay.restore() 158 | stubTime.restore() 159 | RateLimiter.prototype.get.restore() 160 | }) 161 | 162 | it('should resolve if maxOperations not defined', () => { 163 | return assert.isFulfilled(testRedisRateLimiter.limit(testName, {})) 164 | .then(() => { 165 | sinon.assert.notCalled(RateLimiter.prototype.get) 166 | }) 167 | }) 168 | 169 | it('should resolve if items remaining', () => { 170 | RateLimiter.prototype.get.yieldsAsync(null, { 171 | remaining: 5 172 | }) 173 | return assert.isFulfilled(testRedisRateLimiter.limit(testName, testLimitOpts)) 174 | .then(() => { 175 | sinon.assert.calledOnce(RateLimiter.prototype.get) 176 | sinon.assert.calledOnce(testRedisRateLimiter.limit) 177 | }) 178 | }) 179 | 180 | it('should delay until there is space', () => { 181 | RateLimiter.prototype.get.onFirstCall().yieldsAsync(null, { 182 | remaining: 0 183 | }) 184 | RateLimiter.prototype.get.onSecondCall().yieldsAsync(null, { 185 | remaining: 0 186 | }) 187 | RateLimiter.prototype.get.onThirdCall().yieldsAsync(null, { 188 | remaining: 1 189 | }) 190 | 191 | return assert.isFulfilled(Promise.all([ 192 | testRedisRateLimiter.limit(testName, testLimitOpts) 193 | .then(() => { 194 | sinon.assert.calledThrice(RateLimiter.prototype.get) 195 | sinon.assert.calledThrice(testRedisRateLimiter.limit) 196 | sinon.assert.calledTwice(Promise.delay) 197 | sinon.assert.alwaysCalledWith(Promise.delay, testLimitOpts.durationMs / 2) 198 | }), 199 | Promise.try(function loop () { 200 | if (Promise.delay.callCount !== 1) { 201 | return Promise.fromCallback(process.nextTick).then(loop) 202 | } 203 | }) 204 | .then(() => { 205 | sinon.assert.calledOnce(RateLimiter.prototype.get) 206 | sinon.assert.calledOnce(testRedisRateLimiter.limit) 207 | sinon.assert.alwaysCalledWith(testRedisRateLimiter.limit, testName, testLimitOpts) 208 | sinon.assert.calledOnce(Promise.delay) 209 | sinon.assert.alwaysCalledWith(Promise.delay, testLimitOpts.durationMs / 2) 210 | stubTime.tick(testLimitOpts.durationMs) 211 | }) 212 | .then(function loop () { 213 | if (Promise.delay.callCount !== 2) { 214 | return Promise.fromCallback(process.nextTick).then(loop) 215 | } 216 | }) 217 | .then(() => { 218 | sinon.assert.calledTwice(RateLimiter.prototype.get) 219 | sinon.assert.calledTwice(testRedisRateLimiter.limit) 220 | sinon.assert.calledTwice(Promise.delay) 221 | sinon.assert.alwaysCalledWith(Promise.delay, testLimitOpts.durationMs / 2) 222 | stubTime.tick(testLimitOpts.durationMs) 223 | }) 224 | ])) 225 | }) 226 | }) // end limit 227 | }) 228 | -------------------------------------------------------------------------------- /test/functional/fixtures/worker-tid.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const EventEmitter = require('events') 4 | const Promise = require('bluebird') 5 | const getNamespace = require('continuation-local-storage').getNamespace 6 | 7 | const testClsData = (step) => { 8 | if (typeof getNamespace('ponos').get('tid') !== 'string') { 9 | throw new Error('tid not found after Promise.' + step) 10 | } 11 | if (getNamespace('ponos').get('currentWorkerName') !== 'ponos-test:one') { 12 | throw new Error('currentWorkerName not found after Promise.' + step) 13 | } 14 | } 15 | 16 | /** 17 | * A simple worker that will publish a message to a queue. 
18 | * @param {object} job Object describing the job. 19 | * @param {string} job.queue Queue on which the message will be published. 20 | * @returns {promise} Resolved when the message is put on the queue. 21 | */ 22 | module.exports = (job) => { 23 | return Promise 24 | .try(() => { 25 | testClsData('try') 26 | return Promise.try(() => { 27 | testClsData('try.try') 28 | }).then(() => { 29 | testClsData('try.then') 30 | }) 31 | .then(() => { 32 | return Promise.resolve() 33 | .then(() => { 34 | testClsData('try.then.resolve.then') 35 | }) 36 | }) 37 | }) 38 | .then(() => { 39 | testClsData('then') 40 | return [1, 2, 3] 41 | }) 42 | .spread((a, b, c) => { 43 | testClsData('spread') 44 | return [1, 2, 3] 45 | }) 46 | .then(() => { 47 | testClsData('then after spread') 48 | throw new Error('test') 49 | }) 50 | .catch(() => { 51 | testClsData('catch') 52 | }) 53 | .then(() => { 54 | testClsData('then after catch') 55 | }) 56 | .finally(() => { 57 | testClsData('finally') 58 | }) 59 | .then(() => { 60 | testClsData('then after finally') 61 | }) 62 | .then(() => { 63 | return Promise.resolve().then(() => { 64 | testClsData('then.resolve.then.bind') 65 | }) 66 | .bind(this) 67 | .then(() => { 68 | testClsData('then.resolve.then.bind.then') 69 | }) 70 | }) 71 | .then(() => { 72 | return Promise.reject(new Error('test')).catch(() => { 73 | testClsData('then.resolve.reject.catch') 74 | }) 75 | }) 76 | .then(() => { 77 | return Promise.all([ 78 | Promise.resolve().then(() => { testClsData('then.all.resolve.then') }), 79 | Promise.try(() => { testClsData('then.all.try') }) 80 | ]) 81 | .then(() => { 82 | testClsData('then after all') 83 | }) 84 | }) 85 | .then(() => { 86 | return Promise.props({ 87 | a: Promise.resolve().then(() => { testClsData('then.props.resolve.then') }), 88 | b: Promise.try(() => { testClsData('then.props.try') }) 89 | }) 90 | .then(() => { 91 | testClsData('then after props') 92 | }) 93 | }) 94 | .then(() => { 95 | return Promise.any([ 96 | Promise.resolve().then(() => { testClsData('then.any.resolve.then') }), 97 | Promise.try(() => { testClsData('then.any.try') }) 98 | ]) 99 | .then(() => { 100 | testClsData('then after any') 101 | }) 102 | }) 103 | .then(() => { 104 | return Promise.some([ 105 | Promise.resolve().then(() => { testClsData('then.some.resolve.then') }), 106 | Promise.try(() => { testClsData('then.some.try') }) 107 | ], 2) 108 | .then(() => { 109 | testClsData('then after some') 110 | }) 111 | }) 112 | .then(() => { 113 | return Promise.map([1, 2], () => { 114 | testClsData('then.map') 115 | }) 116 | .then(() => { 117 | testClsData('then after map') 118 | }) 119 | }) 120 | .then(() => { 121 | return Promise.reduce([1, 2], () => { 122 | testClsData('then.reduce') 123 | }) 124 | .then(() => { 125 | testClsData('then after reduce') 126 | }) 127 | }) 128 | .then(() => { 129 | return Promise.filter([1, 2], () => { 130 | testClsData('then.filter') 131 | }) 132 | .then(() => { 133 | testClsData('then after filter') 134 | }) 135 | }) 136 | .then(() => { 137 | return Promise.each([1, 2], () => { 138 | testClsData('then.each') 139 | }) 140 | .then(() => { 141 | testClsData('then after each') 142 | }) 143 | }) 144 | .then(() => { 145 | return Promise.mapSeries([1, 2], () => { 146 | testClsData('then.mapSeries') 147 | }) 148 | .then(() => { 149 | testClsData('then after mapSeries') 150 | }) 151 | }) 152 | .then(() => { 153 | return Promise.race([ 154 | Promise.resolve().then(() => { testClsData('then.race.resolve.then') }), 155 | Promise.try(() => { 
testClsData('then.race.try') }) 156 | ]) 157 | .then(() => { 158 | testClsData('then after race') 159 | }) 160 | }) 161 | .then(() => { 162 | return Promise.using(() => { 163 | return Promise.resolve().disposer(() => { 164 | testClsData('then.using.resolve.disposer') 165 | }) 166 | }, () => { 167 | return Promise.try(() => { testClsData('then.using.try') }) 168 | }) 169 | .then(() => { 170 | testClsData('then after using') 171 | }) 172 | }) 173 | .then(() => { 174 | const testFuncs = { 175 | sync: (cb) => { 176 | testClsData('promisify.sync') 177 | cb() 178 | }, 179 | cb: (cb) => { 180 | testClsData('promisify.cb') 181 | setTimeout(() => { 182 | testClsData('promisify.cb.setTimeout') 183 | cb() 184 | }) 185 | } 186 | } 187 | const syncA = Promise.promisify(testFuncs.sync) 188 | const cbA = Promise.promisify(testFuncs.cb) 189 | return Promise.all([syncA(), cbA()]) 190 | .then(() => { 191 | testClsData('then.promisify.all.then') 192 | }) 193 | }) 194 | .then(() => { 195 | const testFuncs = { 196 | sync: (cb) => { 197 | testClsData('promisifyAll.sync') 198 | cb() 199 | }, 200 | cb: (cb) => { 201 | testClsData('promisifyAll.cb') 202 | setTimeout(() => { 203 | testClsData('promisifyAll.cb.setTimeout') 204 | cb() 205 | }, 10) 206 | } 207 | } 208 | Promise.promisifyAll(testFuncs) 209 | return Promise.all([testFuncs.syncAsync(), testFuncs.cbAsync()]) 210 | .then(() => { 211 | testClsData('then.promisifyAll.all.then') 212 | }) 213 | }) 214 | .then(() => { 215 | const testFuncs = { 216 | sync: (cb) => { 217 | testClsData('fromCallback.sync') 218 | cb() 219 | }, 220 | cb: (cb) => { 221 | testClsData('fromCallback.cb') 222 | setTimeout(() => { 223 | testClsData('fromCallback.cb.setTimeout') 224 | cb() 225 | }) 226 | } 227 | } 228 | return Promise.all([Promise.fromCallback((cb) => { 229 | testClsData('then.all.fromCallback.sync') 230 | testFuncs.sync(cb) 231 | }), Promise.fromCallback((cb) => { 232 | testClsData('then.all.fromCallback.cb') 233 | testFuncs.cb(cb) 234 | })]) 235 | .then(() => { 236 | testClsData('then.fromCallback.all.then') 237 | }) 238 | }) 239 | .then(() => { 240 | return Promise.fromCallback((cb) => { 241 | testClsData('then.fromCallback') 242 | return Promise.resolve().asCallback(() => { 243 | testClsData('then.asCallback') 244 | cb() 245 | }) 246 | }) 247 | .then(() => { 248 | testClsData('then after asCallback') 249 | }) 250 | }) 251 | .then(() => { 252 | return Promise.delay(1) 253 | .then(() => { 254 | testClsData('then after delay') 255 | }) 256 | }) 257 | .then(() => { 258 | return Promise.delay(100).timeout(10).catch(Promise.TimeoutError, () => { 259 | testClsData('then.TimeoutError') 260 | }) 261 | .then(() => { 262 | testClsData('then after timeout') 263 | }) 264 | }) 265 | .tap(() => { 266 | testClsData('tap') 267 | }) 268 | .then(() => { 269 | module.exports.emitter.emit('passed') 270 | }) 271 | .catch((err) => { 272 | module.exports.emitter.emit('failed', err) 273 | }) 274 | } 275 | 276 | module.exports.emitter = new EventEmitter() 277 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ponos 2 | 3 | [![travis]](https://travis-ci.org/Runnable/ponos) 4 | [![coveralls]](https://coveralls.io/github/Runnable/ponos?branch=master) 5 | [![dependencies]](https://david-dm.org/Runnable/ponos) 6 | [![devdependencies]](https://david-dm.org/Runnable/ponos#info=devDependencies) 7 | [![codeclimate]](https://codeclimate.com/github/Runnable/ponos) 8 | 9 | 
Documentation is available at [runnable.github.io/ponos][documentation] 10 | 11 | A migration guide for v3.0.0 [is available](docs/Guides-Migration-v3.0.0.md)! 12 | 13 | An opinionated, queue-based worker server for node. 14 | 15 | For ease of use, we provide options to set the host, port, username, and password for the RabbitMQ server. If not present in the options, the server will attempt to use the following environment variables and final defaults: 16 | 17 | options | environment | default 18 | ------------------------------------|-----------------------------|-------------- 19 | `opts.rabbitmq.hostname` | `RABBITMQ_HOSTNAME` | `'localhost'` 20 | `opts.rabbitmq.port` | `RABBITMQ_PORT` | `'5672'` 21 | `opts.rabbitmq.username` | `RABBITMQ_USERNAME` | _none_ 22 | `opts.rabbitmq.password` | `RABBITMQ_PASSWORD` | _none_ 23 | `opts.redisRateLimiting.host` | `REDIS_HOST` | `'localhost'` 24 | `opts.redisRateLimiting.port` | `REDIS_PORT` | `'6379'` 25 | `opts.redisRateLimiting.durationMs` | `RATE_LIMIT_DURATION` | `1000` 26 | `opts.log` | _N/A_ | Basic [bunyan](https://github.com/trentm/node-bunyan) instance with `stdout` stream (for logging) 27 | `opts.errorCat` | _N/A_ | Basic [error-cat](https://github.com/runnable/error-cat) instance (for rollbar error reporting) 28 | 29 | Other options for Ponos are as follows: 30 | 31 | environment variable | default | description 32 | -------------------------|---------|------------ 33 | `WORKER_MAX_RETRY_DELAY` | `0` | The maximum time, in milliseconds, that the worker will wait before retrying a task. The delay will increase exponentially from `WORKER_MIN_RETRY_DELAY` up to `WORKER_MAX_RETRY_DELAY` if the latter is set higher than the former. If this value is not set, the worker will not back off exponentially. 34 | `WORKER_MIN_RETRY_DELAY` | `1` | The minimum time, in milliseconds, the worker will wait before retrying a task. 35 | `WORKER_TIMEOUT` | `0` | Timeout, in milliseconds, after which the worker task will be timed out and retried. 36 | `WORKER_MAX_NUM_RETRIES` | `0` | The maximum number of times a job will be retried due to failures. `0` means unlimited retries. 37 | 38 | ## Usage 39 | 40 | From a high level, Ponos is used to create a worker server that responds to jobs provided from RabbitMQ. The user defines handlers for each queue's jobs that are invoked by Ponos. 41 | 42 | Ponos has built-in support for retrying and catching specific errors, which are described below. 43 | 44 | ## Workers 45 | 46 | Workers need to be defined as a function that takes an Object `job` and returns a promise. For example: 47 | 48 | ```javascript 49 | function myWorker (job) { 50 | return Promise.resolve() 51 | .then(() => { 52 | return doSomeWork(job) 53 | }) 54 | } 55 | ``` 56 | 57 | This worker takes the `job`, does work with it, and returns the result. Since (in theory) this does not throw any errors, the worker will see this resolution and acknowledge the job. 58 | 59 | ## Tasks vs. Events 60 | 61 | Ponos currently provides two paradigms for doing work. The first is subscribing directly to queues in RabbitMQ using the `tasks` parameter in the constructor. The other is subscribing to a fanout exchange using the `events` parameter, which can make much better use of RabbitMQ's structure.
62 | 63 | ```javascript 64 | const ponos = require('ponos') 65 | const server = new ponos.Server({ 66 | tasks: { 67 | 'a-queue': (job) => { return Promise.resolve(job) } 68 | }, 69 | events: { 70 | 'an-exchange': (job) => { return Promise.resolve(job) } 71 | } 72 | }) 73 | ``` 74 | 75 | ### Worker Errors 76 | 77 | Ponos's worker is designed to retry any error that is not specifically a fatal error. Ponos has been designed to work well with our error library [`error-cat`](https://github.com/Runnable/error-cat). 78 | 79 | A fatal error is defined with the `WorkerStopError` class from `error-cat`. If a worker rejects with a `WorkerStopError`, the worker will automatically assume the job can _never_ be completed and will acknowledge the job. 80 | 81 | As an example, a `WorkerStopError` can be used to fail a task given an invalid job: 82 | 83 | ```javascript 84 | const WorkerStopError = require('error-cat/errors/worker-stop-error') 85 | function myWorker (job) { 86 | return Promise.resolve() 87 | .then(() => { 88 | if (!job.host) { 89 | throw new WorkerStopError('host is required', {}, 'my.queue', job) 90 | } 91 | }) 92 | .then(() => { 93 | return doSomethingWithHost(job) 94 | }) 95 | } 96 | ``` 97 | 98 | This worker will reject the promise with a `WorkerStopError`. Ponos will log the error itself, acknowledge the job to remove it from the queue, and continue with other jobs. You may catch and re-throw the error if you wish to do additional logging or reporting. 99 | 100 | Finally, as was mentioned before, Ponos will retry any other errors. `error-cat` provides a `WorkerError` class you may use, or you may throw normal `Error`s. If you do, the worker will catch these and retry according to the server's configuration (retry delay, back-off, max delay, etc.). 101 | 102 | ```javascript 103 | const WorkerError = require('error-cat/errors/worker-error') 104 | const WorkerStopError = require('error-cat/errors/worker-stop-error') 105 | function myWorker (job) { 106 | return Promise.resolve() 107 | .then(() => { 108 | return externalService.something(job) 109 | }) 110 | // Note: w/o this catch, the error would simply propagate to the worker and 111 | // be handled. 112 | .catch((err) => { 113 | logErrorToService(err) 114 | // If the error is 'recoverable' (e.g., network fluke, temporary outage), 115 | // we want to be able to retry. 116 | if (err.isRecoverable) { 117 | throw new Error('hit a momentary issue. try again.') 118 | } else { 119 | // maybe we know we can't recover from this 120 | throw new WorkerStopError( 121 | 'cannot recover. acknowledge and remove job', 122 | {}, 123 | 'this.queue', 124 | job 125 | ) 126 | } 127 | }) 128 | } 129 | ``` 130 | 131 | ## Worker Options 132 | 133 | Currently workers can be defined with a `msTimeout` option. This value defaults to `process.env.WORKER_TIMEOUT || 0`. One can set a specific millisecond timeout for a worker like so: 134 | 135 | ```js 136 | server.setTask('my-queue', workerFunction, { msTimeout: 1234 }) 137 | ``` 138 | 139 | Or one can set this option via `setAllTasks`: 140 | 141 | ```js 142 | server.setAllTasks({ 143 | // This will use the default timeout, maxNumRetries, ... 144 | 'queue-1': queueOneTaskFn, 145 | // This will use the specified timeout, maxNumRetries, ... 
146 | 'queue-2': { 147 | // worker function to run 148 | task: queueTwoTaskFn, 149 | 150 | // schema to validate the job against 151 | jobSchema: Joi.object({ tid: Joi.string() }), 152 | 153 | // time, in ms, before the job will throw a timeout error 154 | msTimeout: 1337, 155 | 156 | // number of times before the job will stop retrying on failure 157 | maxNumRetries: 7, 158 | 159 | // function to run when we hit max retries 160 | finalRetryFn: () => { return Promise.try(...) }, 161 | 162 | // number of jobs that we can perform in a given duration 163 | maxOperations: 5, 164 | 165 | // duration under which the rate limit is accounted for 166 | durationMs: 60000 167 | } 168 | }) 169 | ``` 170 | 171 | These options are also available for `setEvent` and `setAllEvents`. 172 | 173 | ## Worker Namespaces 174 | 175 | Each worker is wrapped in a [continuation-local-storage](https://github.com/othiym23/node-continuation-local-storage) namespace called `ponos`. 176 | 177 | Ponos adds a `tid` to the `ponos` namespace. This `tid` is unique per job. To access this `tid`: 178 | 179 | ```js 180 | const getNamespace = require('continuation-local-storage').getNamespace 181 | 182 | module.exports.worker = (job) => Promise.try(() => { 183 | const tid = getNamespace('ponos').get('tid') 184 | console.log(`hello world: tid: ${tid}`) 185 | }) 186 | ``` 187 | 188 | **NOTES:** 189 | * `Promise.resolve().then(() => {...})` breaks out of the Ponos namespace and `tid` will not be available 190 | * `getNamespace` must be called in the worker itself 191 | 192 | ## Full Example 193 | 194 | ```javascript 195 | const ponos = require('ponos') 196 | 197 | const tasks = { 198 | 'queue-1': (job) => { return Promise.resolve(job) }, 199 | 'queue-2': (job) => { return Promise.resolve(job) } 200 | } 201 | 202 | const events = { 203 | 'exchange-1': (job) => { return Promise.resolve(job) } 204 | } 205 | 206 | // Create the server 207 | var server = new ponos.Server({ 208 | events: events, 209 | tasks: tasks 210 | }) 211 | 212 | // If tasks were not provided in the constructor, set tasks for workers handling 213 | // jobs on each queue 214 | server.setAllTasks(tasks) 215 | // Similarly, you can set events. 216 | server.setAllEvents(events) 217 | 218 | // Start the server!
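// start() resolves once the server has connected to RabbitMQ and is consuming from all subscribed queues and exchanges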
219 | server.start() 220 | .then(() => { console.log('Server started!') }) 221 | .catch((err) => { console.error('Server failed', err) }) 222 | ``` 223 | 224 | ## License 225 | 226 | MIT 227 | 228 | [travis]: https://img.shields.io/travis/Runnable/ponos/master.svg?style=flat-square "Build Status" 229 | [coveralls]: https://img.shields.io/coveralls/Runnable/ponos/master.svg?style=flat-square "Coverage Status" 230 | [dependencies]: https://img.shields.io/david/Runnable/ponos.svg?style=flat-square "Dependency Status" 231 | [devdependencies]: https://img.shields.io/david/dev/Runnable/ponos.svg?style=flat-square "Dev Dependency Status" 232 | [documentation]: https://runnable.github.io/ponos "Ponos Documentation" 233 | [codeclimate]: https://img.shields.io/codeclimate/github/Runnable/ponos.svg?style=flat-square "Code Climate" 234 | -------------------------------------------------------------------------------- /src/worker.js: -------------------------------------------------------------------------------- 1 | /* @flow */ 2 | /* global Logger DDTimer */ 3 | 'use strict' 4 | 5 | const cls = require('continuation-local-storage').createNamespace('ponos') 6 | const clsBlueBird = require('@runnable/cls-bluebird') 7 | const defaults = require('101/defaults') 8 | const ErrorCat = require('error-cat') 9 | const isObject = require('101/is-object') 10 | const joi = require('joi') 11 | const merge = require('101/put') 12 | const monitor = require('monitor-dog') 13 | const Promise = require('bluebird') 14 | const RabbitMQ = require('./rabbitmq') 15 | const uuid = require('uuid') 16 | const WorkerStopError = require('error-cat/errors/worker-stop-error') 17 | 18 | const TimeoutError = Promise.TimeoutError 19 | clsBlueBird(cls) 20 | 21 | const optsSchema = joi.object({ 22 | attempt: joi.number().integer().min(0).required(), 23 | errorCat: joi.object(), 24 | errorPublisher: joi.object(), 25 | finalRetryFn: joi.func(), 26 | jobSchema: joi.object({ 27 | isJoi: joi.bool().valid(true) 28 | }).unknown(), 29 | job: joi.object().required(), 30 | jobMeta: joi.object().unknown(), 31 | log: joi.object().required(), 32 | maxNumRetries: joi.number().integer().min(0).required(), 33 | msTimeout: joi.number().integer().min(0).required(), 34 | queue: joi.string().required(), 35 | retryDelay: joi.number().integer().min(1).required(), 36 | maxRetryDelay: joi.number().integer().min(0).required(), 37 | task: joi.func().required() 38 | }).unknown() 39 | 40 | /** 41 | * Performs tasks for jobs on a given queue. 42 | * 43 | * @author Bryan Kendall 44 | * @author Ryan Sandor Richards 45 | * @param {Object} opts Options for the worker. 46 | * @param {Object} opts.job Data for the job to process. 47 | * @param {String} opts.queue Name of the queue for the job the worker is 48 | * processing. 49 | * @param {Function} opts.task A function to handle the tasks. 50 | * @param {ErrorCat} [opts.errorCat] An error-cat instance to use for the 51 | * worker. 52 | * @param {bunyan} [opts.log] The bunyan logger to use when logging messages 53 | * from the worker. 54 | * @param {number} [opts.msTimeout] A specific millisecond timeout for this 55 | * worker. 
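 * @param {Object} [opts.jobSchema] Joi schema the job is validated against
 *   before the task runs.
 * @param {Function} [opts.finalRetryFn] Handler invoked once the retry limit
 *   is reached, just before the job is finally stopped.
 * @param {number} [opts.maxNumRetries] Maximum number of attempts before the
 *   final retry handler runs. Defaults from WORKER_MAX_NUM_RETRIES.
 * @param {number} [opts.retryDelay] Initial delay, in milliseconds, between
 *   retries. Defaults from WORKER_MIN_RETRY_DELAY.
 * @param {number} [opts.maxRetryDelay] Upper bound for the exponentially
 *   increasing retry delay. Defaults from WORKER_MAX_RETRY_DELAY.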
56 | */ 57 | class Worker { 58 | attempt: number; 59 | errorCat: ErrorCat; 60 | errorPublisher: RabbitMQ; 61 | finalRetryFn: Function; 62 | jobSchema: Object; 63 | job: Object; 64 | jobMeta: Object; 65 | log: Logger; 66 | maxNumRetries: number; 67 | msTimeout: number; 68 | queue: String; 69 | retryDelay: number; 70 | maxRetryDelay: number 71 | task: Function; 72 | tid: String; 73 | 74 | constructor (opts: Object) { 75 | defaults(opts, { 76 | // default non-required user options 77 | errorCat: ErrorCat, 78 | // other options 79 | attempt: 0, 80 | finalRetryFn: () => { return Promise.resolve() }, 81 | maxNumRetries: parseInt(process.env.WORKER_MAX_NUM_RETRIES, 10) || Number.MAX_SAFE_INTEGER, 82 | msTimeout: parseInt(process.env.WORKER_TIMEOUT, 10) || 0, 83 | maxRetryDelay: parseInt(process.env.WORKER_MAX_RETRY_DELAY, 10) || Number.MAX_SAFE_INTEGER, 84 | retryDelay: parseInt(process.env.WORKER_MIN_RETRY_DELAY, 10) || 1 85 | }) 86 | // managed required fields 87 | joi.assert(opts, optsSchema) 88 | this.tid = opts.job.tid || uuid() 89 | opts.log = opts.log.child({ tid: this.tid, module: 'ponos:worker' }) 90 | // put all opts on this 91 | Object.assign(this, opts) 92 | } 93 | 94 | /** 95 | * Factory method for creating new workers. This method exists to make it 96 | * easier to unit test other modules that need to instantiate new workers. 97 | * 98 | * @see Worker 99 | * @param {Object} opts Options for the Worker. 100 | * @returns {Worker} New Worker. 101 | */ 102 | static create (opts: Object): Worker { 103 | return new Worker(opts) 104 | } 105 | 106 | /** 107 | * validate job against schema if passed 108 | * @return {Promise} 109 | * @rejects {WorkerStopError} when job does not match schema 110 | */ 111 | _validateJob (): Promise { 112 | return Promise.try(() => { 113 | if (this.jobSchema) { 114 | joi.assert(this.job, this.jobSchema) 115 | } 116 | }) 117 | .catch((err) => { 118 | if (!err.isJoi) { 119 | throw err 120 | } 121 | 122 | throw new WorkerStopError('Invalid job', { 123 | queue: this.queue, 124 | job: this.job, 125 | validationErr: err 126 | }) 127 | }) 128 | } 129 | /** 130 | * Wraps tasks with CLS and timeout 131 | * @returns {Promise} 132 | * @resolves {Object} when task is complete 133 | * @rejects {Error} if job errored 134 | */ 135 | _wrapTask (): Promise { 136 | return Promise.fromCallback((cb) => { 137 | cls.run(() => { 138 | cls.set('tid', this.tid) 139 | cls.set('currentWorkerName', this.queue) 140 | Promise.try(() => { 141 | this.log.info({ 142 | attempt: this.attempt++, 143 | timeout: this.msTimeout 144 | }, 'running task') 145 | let taskPromise = Promise.try(() => { 146 | return this.task(this.job, this.jobMeta) 147 | }) 148 | 149 | if (this.msTimeout) { 150 | taskPromise = taskPromise.timeout(this.msTimeout) 151 | } 152 | return taskPromise 153 | }).asCallback(cb) 154 | }) 155 | }) 156 | } 157 | 158 | /** 159 | * adds worker properties to error 160 | * @param {Error} err error to augment 161 | * @throws {Error} error with extra data 162 | */ 163 | _addWorkerDataToError (err: Object) { 164 | if (err.cause) { 165 | err = err.cause 166 | } 167 | if (!isObject(err.data)) { 168 | err.data = {} 169 | } 170 | if (!err.data.queue) { 171 | err.data.queue = this.queue 172 | } 173 | if (!err.data.job) { 174 | err.data.job = this.job 175 | } 176 | throw err 177 | } 178 | 179 | /** 180 | * retry task with delay function 181 | * @param {Error} err error that is causing retry 182 | * @return {Promise} 183 | * @resolves {Object} when task is resolved 184 | */ 185 | _retryWithDelay 
(err: Object) { 186 | this.log.warn({ 187 | err: err, 188 | nextAttemptDelay: this.retryDelay, 189 | attemptCount: this.attempt 190 | }, 'Task failed, retrying') 191 | this._incMonitor('ponos.finish-error', { result: 'task-error' }) 192 | 193 | // Try again after a delay 194 | return Promise.delay(this.retryDelay) 195 | .then(() => { 196 | // Exponentially increase the retry delay to max 197 | if (this.retryDelay < this.maxRetryDelay) { 198 | this.retryDelay *= 2 199 | } 200 | return this.run() 201 | }) 202 | } 203 | 204 | /** 205 | * throw Worker Stop Error if we reached retry limit 206 | * @param {Error} err error that worker threw 207 | * @return {Promise} 208 | * @resolves should never resolve 209 | * @rejects {Error} when attempt limit not reached 210 | * @rejects {WorkerStopErrpr} when attempt limit reached 211 | */ 212 | _enforceRetryLimit (err: Object) { 213 | if (this.attempt < this.maxNumRetries) { 214 | return Promise.reject(err) 215 | } 216 | 217 | this.log.error({ 218 | attempt: this.attempt, 219 | maxNumRetries: this.maxNumRetries 220 | }, 'retry limit reached, trying handler') 221 | 222 | return Promise.try(() => { 223 | return this.finalRetryFn(this.job) 224 | }) 225 | .catch((finalErr) => { 226 | this._incMonitor('ponos.finish-retry-fn-error', { result: 'retry-fn-error' }) 227 | this.log.warn({ err: finalErr }, 'final function errored') 228 | }) 229 | .finally(() => { 230 | this._incMonitor('ponos.finish-error', { result: 'retry-error' }) 231 | throw new WorkerStopError('final retry handler finished', { 232 | originalError: err, 233 | queue: this.queue, 234 | job: this.job, 235 | attempt: this.attempt 236 | }) 237 | }) 238 | } 239 | 240 | /** 241 | * Do not propagate error and log 242 | * @param {WorkerStopError} err error that caused worker to stop 243 | * @return {undefined} 244 | */ 245 | _handleWorkerStopError (err: Object) { 246 | this.log.error({ err: err }, 'Worker task fatally errored') 247 | this._incMonitor('ponos.finish-error', { result: 'fatal-error' }) 248 | this._incMonitor('ponos.finish', { result: 'fatal-error' }) 249 | if (this.errorPublisher) { 250 | const erroredJob = { 251 | originalJobPayload: this.job, 252 | originalJobMeta: this.jobMeta, 253 | originalWorkerName: this.queue, 254 | error: err 255 | } 256 | this.errorPublisher.publishEvent('worker.errored', erroredJob) 257 | } 258 | } 259 | 260 | /** 261 | * Propagate error and log 262 | * @param {TimeoutError} err error that caused worker to stop 263 | * @return {undefined} 264 | */ 265 | _handleTimeoutError (err: Object) { 266 | this.log.warn({ err: err }, 'Task timed out') 267 | this._incMonitor('ponos.finish-error', { result: 'timeout-error' }) 268 | // by throwing this type of error, we will retry :) 269 | throw err 270 | } 271 | 272 | /** 273 | * log task complete 274 | * @return {undefined} 275 | */ 276 | _handleTaskSuccess () { 277 | this.log.info('Task complete') 278 | this._incMonitor('ponos.finish', { result: 'success' }) 279 | } 280 | 281 | /** 282 | * Runs the worker. If the task for the job fails, then this method will retry 283 | * the task (with an exponential backoff) as set by the environment. 284 | * 285 | * @returns {Promise} Promise that is resolved once the task succeeds or 286 | * fails. 
287 | */ 288 | run (): Promise { 289 | this._incMonitor('ponos') 290 | const timer = this._createTimer() 291 | this.log = this.log.child({ 292 | method: 'run', 293 | queue: this.queue, 294 | job: this.job, 295 | jobMeta: this.jobMeta 296 | }) 297 | return this._validateJob() 298 | .bind(this) 299 | .then(this._wrapTask) 300 | .then(this._handleTaskSuccess) 301 | // If the type is TimeoutError, log and re-throw error 302 | .catch(TimeoutError, this._handleTimeoutError) 303 | .catch(this._enforceRetryLimit) 304 | .catch(this._addWorkerDataToError) 305 | .catch((err) => { 306 | this.errorCat.report(err) 307 | throw err 308 | }) 309 | // If it's a WorkerStopError, we stop this task by swallowing error 310 | .catch(WorkerStopError, this._handleWorkerStopError) 311 | // If we made it here we retry by calling run again (recursion) 312 | .catch(this._retryWithDelay) 313 | .finally(() => { 314 | if (timer) { 315 | timer.stop() 316 | } 317 | }) 318 | } 319 | 320 | /** 321 | * Helper function for creating monitor-dog events tags. `queue` is the only 322 | * mandatory tag. Few tags will be created depending on the queue name. If 323 | * queueName use `.` as delimiter e.x. `10.0.0.20.api.github.push` then the 324 | * following tags will be created: 325 | * { 326 | * token0: 'push' 327 | * token1: 'github.push' 328 | * token2: 'api.github.push' 329 | * token3: '10.0.0.20.api.github.push' 330 | * } 331 | * 332 | * @private 333 | * @returns {Object} tags as Object { queue: 'docker.event.publish' }. 334 | */ 335 | _eventTags (): Object { 336 | const tokens = this.queue.split('.').reverse() 337 | let lastToken = '' 338 | let tags = tokens.reduce((acc, currentValue, currentIndex) => { 339 | const key = 'token' + currentIndex 340 | const newToken = currentIndex === 0 341 | ? currentValue 342 | : currentValue + '.' + lastToken 343 | acc[key] = newToken 344 | lastToken = newToken 345 | return acc 346 | }, {}) 347 | tags.queue = this.queue 348 | return tags 349 | } 350 | 351 | /** 352 | * Helper function calling `monitor.increment`. Monitor won't be called if 353 | * `WORKER_MONITOR_DISABLED` is set. 354 | * 355 | * @private 356 | * @param {String} eventName Name to be reported into the datadog. 357 | * @param {Object} [extraTags] Extra tags to be send with the event. 358 | */ 359 | _incMonitor (eventName: string, extraTags?: Object): void { 360 | if (process.env.WORKER_MONITOR_DISABLED) { 361 | return 362 | } 363 | let tags = this._eventTags() 364 | if (extraTags) { 365 | tags = merge(tags, extraTags) 366 | } 367 | monitor.increment(eventName, tags) 368 | } 369 | 370 | /** 371 | * Helper function calling `monitor.timer`. Timer won't be created if 372 | * `WORKER_MONITOR_DISABLED` is set. 373 | * 374 | * @return {Object} New timer. 375 | * @private 376 | */ 377 | _createTimer (): ?DDTimer { 378 | const tags = this._eventTags() 379 | return !process.env.WORKER_MONITOR_DISABLED 380 | ? monitor.timer('ponos.timer', true, tags) 381 | : null 382 | } 383 | } 384 | 385 | /** 386 | * Worker class. 
387 | * @module ponos/lib/worker 388 | * @see Worker 389 | */ 390 | module.exports = Worker 391 | -------------------------------------------------------------------------------- /src/server.js: -------------------------------------------------------------------------------- 1 | /* @flow */ 2 | /* global Bluebird$Promise Logger */ 3 | 'use strict' 4 | const assign = require('101/assign') 5 | const clone = require('101/clone') 6 | const defaults = require('101/defaults') 7 | const ErrorCat = require('error-cat') 8 | const Immutable = require('immutable') 9 | const isFunction = require('101/is-function') 10 | const isObject = require('101/is-object') 11 | const joi = require('joi') 12 | const Promise = require('bluebird') 13 | const put = require('101/put') 14 | 15 | const logger = require('./logger') 16 | const RabbitMQ = require('./rabbitmq') 17 | const RedisRateLimiter = require('./rate-limiters/redis') 18 | const Worker = require('./worker') 19 | 20 | /** 21 | * Ponos server class. Given a queue adapter the worker server will 22 | * connect to RabbitMQ, subscribe to the given queues, and begin spawning 23 | * workers for incoming jobs. 24 | * 25 | * The only required option is `opts.queues` which should be a non-empty flat 26 | * list of strings. The server uses this list to subscribe to only queues you 27 | * have provided. 28 | * 29 | * @author Bryan Kendall 30 | * @author Ryan Sandor Richards 31 | * @param {Object} opts Options for the server. 32 | * @param {ErrorCat} [opts.errorCat] An error cat instance to use for the 33 | * server. 34 | * @param {Object} [opts.events] Mapping of event (fanout) 35 | * exchanges which to subscribe and handlers. 36 | * @param {bunyan} [opts.log] A bunyan logger to use for the server. 37 | * @param {String} [opts.name=ponos] A name to namespace the created exchange queues. 38 | * @param {Object} [opts.rabbitmq] RabbitMQ connection options. 39 | * @param {Object} [opts.rabbitmq.channel] RabbitMQ channel options. 40 | * @param {Object} [opts.rabbitmq.channel.prefetch] Set prefetch for each 41 | * consumer in a channel. 42 | * @param {String} [opts.rabbitmq.hostname=localhost] Hostname for RabbitMQ. Can 43 | * be set with environment variable RABBITMQ_HOSTNAME. 44 | * @param {Number} [opts.rabbitmq.port=5672] Port for RabbitMQ. Can be set with 45 | * environment variable RABBITMQ_PORT. 46 | * @param {String} [opts.rabbitmq.username] Username for RabbitMQ. Can be set 47 | * with environment variable RABBITMQ_USERNAME. 48 | * @param {String} [opts.rabbitmq.password] Username for Password. Can be set 49 | * with environment variable RABBITMQ_PASSWORD. 50 | * @param {Object} [opts.tasks] Mapping of queues to subscribe 51 | * directly with handlers. 52 | * @param {Object} [opts.redisRateLimiter] options for redis-rate-limiter. 
checkout 53 | * module for params 54 | * @param {Object} [opts.enableWorkerErrorEvents] option to enable global worker error events 55 | */ 56 | class Server { 57 | _events: Map; 58 | _opts: Object; 59 | _rabbitmq: RabbitMQ; 60 | _redisRateLimiter: RedisRateLimiter; 61 | _tasks: Map; 62 | _workerOptions: Object; 63 | _workQueues: Object; 64 | 65 | errorCat: ErrorCat; 66 | log: Logger; 67 | errorPublisher: RabbitMQ; 68 | 69 | constructor (opts: Object) { 70 | this._opts = assign({}, opts) 71 | this.log = this._opts.log || logger.child({ module: 'ponos:server' }) 72 | this._workerOptions = {} 73 | this._workQueues = {} 74 | 75 | this._tasks = new Immutable.Map() 76 | if (this._opts.tasks) { 77 | this.setAllTasks(this._opts.tasks) 78 | } 79 | this._events = new Immutable.Map() 80 | if (this._opts.events) { 81 | this.setAllEvents(this._opts.events) 82 | } 83 | 84 | this.errorCat = this._opts.errorCat || ErrorCat 85 | 86 | if (this._opts.redisRateLimiter) { 87 | this._redisRateLimiter = new RedisRateLimiter(put(this._opts.redisRateLimiter, { 88 | log: this.log 89 | })) 90 | } 91 | 92 | // add the name to RabbitMQ options 93 | const rabbitmqOpts = defaults( 94 | this._opts.rabbitmq || {}, 95 | { name: this._opts.name } 96 | ) 97 | this._rabbitmq = new RabbitMQ(rabbitmqOpts) 98 | if (this._opts.enableErrorEvents) { 99 | const errorPublisherAppName = this._opts.name ? this._opts.name + '.error.publisher' : 'ponos.error.publisher' 100 | const rabbitmqPublisherOpts = defaults( 101 | this._opts.rabbitmq || {}, 102 | { 103 | name: errorPublisherAppName, 104 | events: [ 105 | { 106 | name: 'worker.errored', 107 | jobSchema: joi.object({ 108 | originalJobPayload: joi.object().unknown().required(), 109 | originalJobMeta: joi.object().unknown().required(), 110 | originalWorkerName: joi.string().required(), 111 | error: joi.object().required() 112 | }).unknown() 113 | } 114 | ] 115 | } 116 | ) 117 | this.errorPublisher = new RabbitMQ(rabbitmqPublisherOpts) 118 | } 119 | } 120 | 121 | /** 122 | * Start consuming from the subscribed queues. This is called by `.start`. 123 | * This can be called after the server has been started to start consuming 124 | * from additional queues. 125 | * 126 | * @return {Promise} Promise resolved when consuming has started. 127 | */ 128 | consume (): Bluebird$Promise { 129 | return this._rabbitmq.consume().return() 130 | } 131 | 132 | /** 133 | * Starts the worker server, connects to RabbitMQ, subscribes and consumes 134 | * from all the provided queues and exchanges (tasks and events). 135 | * 136 | * @return {Promise} Promise that resolves once the server is listening. 137 | */ 138 | start (): Bluebird$Promise { 139 | this.log.trace('starting') 140 | return this._rabbitmq.connect() 141 | .then(() => { 142 | if (this.errorPublisher) { 143 | return this.errorPublisher.connect() 144 | } 145 | }) 146 | .then(() => { 147 | if (this._redisRateLimiter) { 148 | return this._redisRateLimiter.connect() 149 | } 150 | }) 151 | .then(() => { 152 | return this._subscribeAll() 153 | }) 154 | .then(() => { 155 | return this.consume() 156 | }) 157 | .then(() => { 158 | this.log.trace('started') 159 | }) 160 | .catch((err) => { 161 | this.errorCat.report(err) 162 | throw err 163 | }) 164 | } 165 | 166 | /** 167 | * Stops the worker server, unsubscribing and disconnecting from RabbitMQ. 168 | * 169 | * @return {Promise} A promise that resolves when the server is stopped. 
170 | */ 171 | stop (): Bluebird$Promise { 172 | this.log.trace('stopping') 173 | return this._rabbitmq.unsubscribe() 174 | .then(() => { 175 | return this._rabbitmq.disconnect() 176 | }) 177 | .then(() => { 178 | if (this.errorPublisher) { 179 | return this.errorPublisher.disconnect() 180 | } 181 | }) 182 | .then(() => { 183 | this.log.trace('stopped') 184 | }) 185 | .catch((err) => { 186 | this.errorCat.report(err) 187 | throw err 188 | }) 189 | } 190 | 191 | /** 192 | * Takes a map of queues and task handlers and sets them all. 193 | * 194 | * @param {Object} map A map of queue names and task 195 | * handlers. 196 | * @param {String} map.key Queue name. 197 | * @param {Object} map.value Object with a handler and additional options for 198 | * the worker (must have a `.task` handler function) 199 | * @param {Function} map.value Handler function to take a job. 200 | * @returns {Server} The server. 201 | */ 202 | setAllTasks (map: Object): Server { 203 | if (!isObject(map)) { 204 | throw new Error('ponos.server: setAllTasks must be called with an object') 205 | } 206 | Object.keys(map).forEach((key) => { 207 | this._workQueues[key] = [] 208 | const value = map[key] 209 | if (isObject(value)) { 210 | if (!isFunction(value.task)) { 211 | this.log.warn({ key: key }, 'no task function defined for key') 212 | return 213 | } 214 | this.setTask(key, value.task, value) 215 | } else { 216 | this.setTask(key, map[key]) 217 | } 218 | }) 219 | return this 220 | } 221 | 222 | /** 223 | * Takes a map of event exchanges and handlers and subscribes to them all. 224 | * 225 | * @param {Object} map A map of exchanges and task handlers. 226 | * @param {String} map.key Exchange name. 227 | * @param {Object} map.value Object with handler and additional options for 228 | * the worker (must have a `.task` handler function) 229 | * @param {Function} map.value Handler function to take a job. 230 | * @returns {Server} The server. 231 | */ 232 | setAllEvents (map: Object): Server { 233 | if (!isObject(map)) { 234 | throw new Error('ponos.server: setAllEvents must be called with an object') 235 | } 236 | Object.keys(map).forEach((key) => { 237 | this._workQueues[key] = [] 238 | const value = map[key] 239 | if (isObject(value)) { 240 | if (!isFunction(value.task)) { 241 | this.log.warn({ key: key }, 'no task function defined for key') 242 | return 243 | } 244 | this.setEvent(key, value.task, value) 245 | } else { 246 | this.setEvent(key, map[key]) 247 | } 248 | }) 249 | return this 250 | } 251 | 252 | /** 253 | * Assigns a task to a queue. 254 | * 255 | * @param {String} queueName Queue name. 256 | * @param {Function} task Function to take a job and return a promise. 257 | * @param {Object} [opts] Options for the worker that performs the task. 258 | * @returns {Server} The server. 259 | */ 260 | setTask (queueName: string, task: Function, opts?: Object): Server { 261 | this.log.trace({ 262 | method: 'setTask', 263 | queueName, 264 | opts 265 | }, 'setting task for queue') 266 | if (!isFunction(task)) { 267 | throw new Error('ponos.server: setTask task handler must be a function') 268 | } 269 | if (opts && !isObject(opts)) { 270 | throw new Error('ponos.server: setTask opts must be a object') 271 | } 272 | 273 | this._tasks = this._tasks.set(queueName, task) 274 | this._workerOptions[queueName] = opts || {} 275 | return this 276 | } 277 | 278 | /** 279 | * Assigns a task to an exchange. 280 | * 281 | * @param {String} exchangeName Exchange name. 
282 | * @param {Function} task Function to take a job and return a promise. 283 | * @param {Object} [opts] Options for the worker that performs the task. 284 | * @returns {Server} The server. 285 | */ 286 | setEvent (exchangeName: string, task: Function, opts?: Object): Server { 287 | this.log.trace({ 288 | method: 'setEvent', 289 | exchangeName, 290 | opts 291 | }, 'setting event for queue') 292 | if (!isFunction(task)) { 293 | throw new Error('ponos.server: setEvent task handler must be a function') 294 | } 295 | if (opts && !isObject(opts)) { 296 | throw new Error('ponos.server: setEvent opts must be a object') 297 | } 298 | 299 | this._events = this._events.set(exchangeName, task) 300 | this._workerOptions[exchangeName] = opts || {} 301 | return this 302 | } 303 | 304 | // Private Methods 305 | 306 | /** 307 | * Helper function to subscribe to all queues. 308 | * 309 | * @private 310 | * @return {Promise} Promise that resolves when queues are all subscribed. 311 | */ 312 | _subscribeAll (): Bluebird$Promise { 313 | this.log.trace('_subscribeAll') 314 | const tasks = this._tasks 315 | const events = this._events 316 | return Promise.map(tasks.keySeq(), (queue) => { 317 | return this._rabbitmq.subscribeToQueue( 318 | queue, 319 | (job, jobMeta, done) => { 320 | this._enqueue(queue, tasks.get(queue), job, jobMeta, done) 321 | } 322 | ) 323 | }) 324 | .then(() => { 325 | return Promise.map(events.keySeq(), (exchange) => { 326 | const worker = events.get(exchange) 327 | const options = this._workerOptions[exchange] || {} 328 | return this._rabbitmq.subscribeToFanoutExchange( 329 | exchange, 330 | (job, jobMeta, done) => { 331 | this._enqueue(exchange, worker, job, jobMeta, done) 332 | }, 333 | options 334 | ) 335 | }) 336 | }) 337 | .return() 338 | } 339 | 340 | /** 341 | * Adds worker to queue and starts work loop if there is work to do 342 | * @param {String} name name of queue 343 | * @param {Promise} worker worker promise to run 344 | * @param {Object} job job for worker 345 | * @param {Object} jobMeta job metadata 346 | * @param {Function} done worker callback 347 | * @return {undefined} 348 | */ 349 | _enqueue (name: string, worker: Promise<*>, job: Object, jobMeta: Object, done: Function) { 350 | this._workQueues[name].push(this._runWorker.bind(this, name, worker, job, jobMeta, done)) 351 | // we are already processing _workQueues 352 | if (this._workQueues[name].length === 1) { 353 | // this is first job in _workQueues, start the loop 354 | this._workLoop(name) 355 | } 356 | } 357 | 358 | /** 359 | * Loop which pops items off the run queue and executes them 360 | * this runs asynchronously to caller 361 | * @param {String} name name of task or event 362 | * @return {Promise} 363 | * @resolves {undefined} 364 | */ 365 | _workLoop (name: string) { 366 | return Promise.try(() => { 367 | if (this._redisRateLimiter) { 368 | return this._redisRateLimiter.limit(name, this._workerOptions[name]) 369 | } 370 | }) 371 | .catch((err) => { 372 | // ignore rate limiter errors, just continue 373 | this.errorCat.report(err) 374 | }) 375 | .then(() => { 376 | const worker = this._workQueues[name].pop() 377 | if (worker) { 378 | // run worker and start next task in parallel 379 | worker() 380 | this._workLoop(name) 381 | } 382 | }) 383 | } 384 | 385 | /** 386 | * Runs a worker for the given queue name, job, and acknowledgement callback. 387 | * 388 | * @private 389 | * @param {String} queueName Name of the queue. 390 | * @param {Function} handler Handler to perform the work. 
391 | * @param {Object} job Job for the worker to perform. 392 | * @param {Object} jobMeta Job's metadata with appId, timestamp and additonal headers. 393 | * @param {Function} done RabbitMQ acknowledgement callback. 394 | * @return {Promise} 395 | * @resolves {undefined} when worker is successful 396 | * @rejects {Error} when creating Worker fails 397 | */ 398 | _runWorker ( 399 | queueName: string, 400 | handler: Promise<*>, 401 | job: Object, 402 | jobMeta: Object, 403 | done: Function 404 | ): Promise<*> { 405 | return Promise.try(() => { 406 | this.log.info({ 407 | queue: queueName, 408 | job: job, 409 | jobMeta: jobMeta, 410 | method: '_runWorker' 411 | }, 'running worker') 412 | const opts = clone(this._workerOptions[queueName]) 413 | defaults(opts, { 414 | queue: queueName, 415 | job: job, 416 | jobMeta: jobMeta, 417 | task: handler, 418 | log: this.log, 419 | errorCat: this.errorCat, 420 | errorPublisher: this.errorPublisher 421 | }) 422 | const worker = Worker.create(opts) 423 | return worker.run().finally(() => { 424 | // done has to be called with no arguments 425 | done() 426 | }) 427 | }) 428 | } 429 | } 430 | 431 | /** 432 | * Server class. 433 | * @module ponos/lib/server 434 | * @see Server 435 | */ 436 | module.exports = Server 437 | -------------------------------------------------------------------------------- /interfaces/modules/immutable.js.flow: -------------------------------------------------------------------------------- 1 | /** 2 | * This file provides type definitions for use with the Flow type checker. 3 | * 4 | * An important caveat when using these definitions is that the types for 5 | * `Iterable.Keyed`, `Iterable.Indexed`, `Seq.Keyed`, and so on are stubs. 6 | * When referring to those types, you can get the proper definitions by 7 | * importing the types `KeyedIterable`, `IndexedIterable`, `KeyedSeq`, etc. 8 | * For example, 9 | * 10 | * import { Seq } from 'immutable' 11 | * import type { IndexedIterable, IndexedSeq } from 'immutable' 12 | * 13 | * const someSeq: IndexedSeq = Seq.Indexed.of(1, 2, 3) 14 | * 15 | * function takesASeq>(iter: TS): TS { 16 | * return iter.butLast() 17 | * } 18 | * 19 | * takesASeq(someSeq) 20 | * 21 | * @flow 22 | */ 23 | 24 | /* 25 | * Alias for ECMAScript `Iterable` type, declared in 26 | * https://github.com/facebook/flow/blob/master/lib/core.js 27 | * 28 | * Note that Immutable values implement the `ESIterable` interface. 
29 | */ 30 | type ESIterable = $Iterable; 31 | 32 | declare class Iterable extends _Iterable {} 33 | 34 | declare class _Iterable { 35 | static Keyed: KI; 36 | static Indexed: II; 37 | static Set: SI; 38 | 39 | static isIterable(maybeIterable: any): boolean; 40 | static isKeyed(maybeKeyed: any): boolean; 41 | static isIndexed(maybeIndexed: any): boolean; 42 | static isAssociative(maybeAssociative: any): boolean; 43 | static isOrdered(maybeOrdered: any): boolean; 44 | 45 | equals(other: Iterable): boolean; 46 | hashCode(): number; 47 | get(key: K): V; 48 | get(key: K, notSetValue: V_): V|V_; 49 | has(key: K): boolean; 50 | includes(value: V): boolean; 51 | contains(value: V): boolean; 52 | first(): V; 53 | last(): V; 54 | 55 | getIn(searchKeyPath: ESIterable, notSetValue: T): T; 56 | getIn(searchKeyPath: ESIterable): T; 57 | hasIn(searchKeyPath: ESIterable): boolean; 58 | 59 | toJS(): any; 60 | toArray(): V[]; 61 | toObject(): { [key: string]: V }; 62 | toMap(): Map; 63 | toOrderedMap(): Map; 64 | toSet(): Set; 65 | toOrderedSet(): Set; 66 | toList(): List; 67 | toStack(): Stack; 68 | toSeq(): Seq; 69 | toKeyedSeq(): KeyedSeq; 70 | toIndexedSeq(): IndexedSeq; 71 | toSetSeq(): SetSeq; 72 | 73 | keys(): Iterator; 74 | values(): Iterator; 75 | entries(): Iterator<[K,V]>; 76 | 77 | keySeq(): IndexedSeq; 78 | valueSeq(): IndexedSeq; 79 | entrySeq(): IndexedSeq<[K,V]>; 80 | 81 | reverse(): this; 82 | sort(comparator?: (valueA: V, valueB: V) => number): this; 83 | 84 | sortBy( 85 | comparatorValueMapper: (value: V, key: K, iter: this) => C, 86 | comparator?: (valueA: C, valueB: C) => number 87 | ): this; 88 | 89 | groupBy( 90 | grouper: (value: V, key: K, iter: this) => G, 91 | context?: any 92 | ): KeyedSeq; 93 | 94 | forEach( 95 | sideEffect: (value: V, key: K, iter: this) => any, 96 | context?: any 97 | ): number; 98 | 99 | slice(begin?: number, end?: number): this; 100 | rest(): this; 101 | butLast(): this; 102 | skip(amount: number): this; 103 | skipLast(amount: number): this; 104 | skipWhile(predicate: (value: V, key: K, iter: this) => mixed, context?: any): this; 105 | skipUntil(predicate: (value: V, key: K, iter: this) => mixed, context?: any): this; 106 | take(amount: number): this; 107 | takeLast(amount: number): this; 108 | takeWhile(predicate: (value: V, key: K, iter: this) => mixed, context?: any): this; 109 | takeUntil(predicate: (value: V, key: K, iter: this) => mixed, context?: any): this; 110 | flatten(depth?: number): /*this*/Iterable; 111 | flatten(shallow?: boolean): /*this*/Iterable; 112 | 113 | filter( 114 | predicate: (value: V, key: K, iter: this) => mixed, 115 | context?: any 116 | ): this; 117 | 118 | filterNot( 119 | predicate: (value: V, key: K, iter: this) => mixed, 120 | context?: any 121 | ): this; 122 | 123 | reduce( 124 | reducer: (reduction: R, value: V, key: K, iter: this) => R, 125 | initialReduction?: R, 126 | context?: any, 127 | ): R; 128 | 129 | reduceRight( 130 | reducer: (reduction: R, value: V, key: K, iter: this) => R, 131 | initialReduction?: R, 132 | context?: any, 133 | ): R; 134 | 135 | every(predicate: (value: V, key: K, iter: this) => mixed, context?: any): boolean; 136 | some(predicate: (value: V, key: K, iter: this) => mixed, context?: any): boolean; 137 | join(separator?: string): string; 138 | isEmpty(): boolean; 139 | count(predicate?: (value: V, key: K, iter: this) => mixed, context?: any): number; 140 | countBy(grouper: (value: V, key: K, iter: this) => G, context?: any): Map; 141 | 142 | find( 143 | predicate: (value: V, key: K, iter: this) 
=> mixed, 144 | context?: any, 145 | ): ?V; 146 | find( 147 | predicate: (value: V, key: K, iter: this) => mixed, 148 | context: any, 149 | notSetValue: V_ 150 | ): V|V_; 151 | 152 | findLast( 153 | predicate: (value: V, key: K, iter: this) => mixed, 154 | context?: any, 155 | ): ?V; 156 | findLast( 157 | predicate: (value: V, key: K, iter: this) => mixed, 158 | context: any, 159 | notSetValue: V_ 160 | ): V|V_; 161 | 162 | 163 | findEntry(predicate: (value: V, key: K, iter: this) => mixed): ?[K,V]; 164 | findLastEntry(predicate: (value: V, key: K, iter: this) => mixed): ?[K,V]; 165 | 166 | findKey(predicate: (value: V, key: K, iter: this) => mixed, context?: any): ?K; 167 | findLastKey(predicate: (value: V, key: K, iter: this) => mixed, context?: any): ?K; 168 | 169 | keyOf(searchValue: V): ?K; 170 | lastKeyOf(searchValue: V): ?K; 171 | 172 | max(comparator?: (valueA: V, valueB: V) => number): V; 173 | maxBy( 174 | comparatorValueMapper: (value: V, key: K, iter: this) => C, 175 | comparator?: (valueA: C, valueB: C) => number 176 | ): V; 177 | min(comparator?: (valueA: V, valueB: V) => number): V; 178 | minBy( 179 | comparatorValueMapper: (value: V, key: K, iter: this) => C, 180 | comparator?: (valueA: C, valueB: C) => number 181 | ): V; 182 | 183 | isSubset(iter: Iterable): boolean; 184 | isSubset(iter: ESIterable): boolean; 185 | isSuperset(iter: Iterable): boolean; 186 | isSuperset(iter: ESIterable): boolean; 187 | } 188 | 189 | declare class KeyedIterable extends Iterable { 190 | static (iter?: ESIterable<[K,V]>): KeyedIterable; 191 | static (obj?: { [key: K]: V }): KeyedIterable; 192 | 193 | @@iterator(): Iterator<[K,V]>; 194 | toSeq(): KeyedSeq; 195 | flip(): /*this*/KeyedIterable; 196 | 197 | mapKeys( 198 | mapper: (key: K, value: V, iter: this) => K_, 199 | context?: any 200 | ): /*this*/KeyedIterable; 201 | 202 | mapEntries( 203 | mapper: (entry: [K,V], index: number, iter: this) => [K_,V_], 204 | context?: any 205 | ): /*this*/KeyedIterable; 206 | 207 | concat(...iters: ESIterable<[K,V]>[]): this; 208 | 209 | map( 210 | mapper: (value: V, key: K, iter: this) => V_, 211 | context?: any 212 | ): /*this*/KeyedIterable; 213 | 214 | flatMap( 215 | mapper: (value: V, key: K, iter: this) => ESIterable<[K_,V_]>, 216 | context?: any 217 | ): /*this*/KeyedIterable; 218 | 219 | flatten(depth?: number): /*this*/KeyedIterable; 220 | flatten(shallow?: boolean): /*this*/KeyedIterable; 221 | } 222 | 223 | declare class IndexedIterable extends Iterable { 224 | static (iter?: ESIterable): IndexedIterable; 225 | 226 | @@iterator(): Iterator; 227 | toSeq(): IndexedSeq; 228 | fromEntrySeq(): KeyedSeq; 229 | interpose(separator: T): this; 230 | interleave(...iterables: ESIterable[]): this; 231 | splice( 232 | index: number, 233 | removeNum: number, 234 | ...values: T[] 235 | ): this; 236 | 237 | zip( 238 | a: ESIterable, 239 | $?: null 240 | ): IndexedIterable<[T,A]>; 241 | zip( 242 | a: ESIterable, 243 | b: ESIterable, 244 | $?: null 245 | ): IndexedIterable<[T,A,B]>; 246 | zip( 247 | a: ESIterable, 248 | b: ESIterable, 249 | c: ESIterable, 250 | $?: null 251 | ): IndexedIterable<[T,A,B,C]>; 252 | zip( 253 | a: ESIterable, 254 | b: ESIterable, 255 | c: ESIterable, 256 | d: ESIterable, 257 | $?: null 258 | ): IndexedIterable<[T,A,B,C,D]>; 259 | zip( 260 | a: ESIterable, 261 | b: ESIterable, 262 | c: ESIterable, 263 | d: ESIterable, 264 | e: ESIterable, 265 | $?: null 266 | ): IndexedIterable<[T,A,B,C,D,E]>; 267 | 268 | zipWith( 269 | zipper: (value: T, a: A) => R, 270 | a: ESIterable, 271 | $?: null 
272 | ): IndexedIterable; 273 | zipWith( 274 | zipper: (value: T, a: A, b: B) => R, 275 | a: ESIterable, 276 | b: ESIterable, 277 | $?: null 278 | ): IndexedIterable; 279 | zipWith( 280 | zipper: (value: T, a: A, b: B, c: C) => R, 281 | a: ESIterable, 282 | b: ESIterable, 283 | c: ESIterable, 284 | $?: null 285 | ): IndexedIterable; 286 | zipWith( 287 | zipper: (value: T, a: A, b: B, c: C, d: D) => R, 288 | a: ESIterable, 289 | b: ESIterable, 290 | c: ESIterable, 291 | d: ESIterable, 292 | $?: null 293 | ): IndexedIterable; 294 | zipWith( 295 | zipper: (value: T, a: A, b: B, c: C, d: D, e: E) => R, 296 | a: ESIterable, 297 | b: ESIterable, 298 | c: ESIterable, 299 | d: ESIterable, 300 | e: ESIterable, 301 | $?: null 302 | ): IndexedIterable; 303 | 304 | indexOf(searchValue: T): number; 305 | lastIndexOf(searchValue: T): number; 306 | findIndex( 307 | predicate: (value: T, index: number, iter: this) => mixed, 308 | context?: any 309 | ): number; 310 | findLastIndex( 311 | predicate: (value: T, index: number, iter: this) => mixed, 312 | context?: any 313 | ): number; 314 | 315 | concat(...iters: ESIterable[]): this; 316 | 317 | map( 318 | mapper: (value: T, index: number, iter: this) => U, 319 | context?: any 320 | ): /*this*/IndexedIterable; 321 | 322 | flatMap( 323 | mapper: (value: T, index: number, iter: this) => ESIterable, 324 | context?: any 325 | ): /*this*/IndexedIterable; 326 | 327 | flatten(depth?: number): /*this*/IndexedIterable; 328 | flatten(shallow?: boolean): /*this*/IndexedIterable; 329 | } 330 | 331 | declare class SetIterable extends Iterable { 332 | static (iter?: ESIterable): SetIterable; 333 | 334 | @@iterator(): Iterator; 335 | toSeq(): SetSeq; 336 | 337 | concat(...iters: ESIterable[]): this; 338 | 339 | // `map` and `flatMap` cannot be defined further up the hiearchy, because the 340 | // implementation for `KeyedIterable` allows the value type to change without 341 | // constraining the key type. That does not work for `SetIterable` - the value 342 | // and key types *must* match. 
343 | map( 344 | mapper: (value: T, value: T, iter: this) => U, 345 | context?: any 346 | ): /*this*/SetIterable; 347 | 348 | flatMap( 349 | mapper: (value: T, value: T, iter: this) => ESIterable, 350 | context?: any 351 | ): /*this*/SetIterable; 352 | 353 | flatten(depth?: number): /*this*/SetIterable; 354 | flatten(shallow?: boolean): /*this*/SetIterable; 355 | } 356 | 357 | declare class Collection extends _Iterable { 358 | size: number; 359 | } 360 | 361 | declare class KeyedCollection extends Collection mixins KeyedIterable { 362 | toSeq(): KeyedSeq; 363 | } 364 | 365 | declare class IndexedCollection extends Collection mixins IndexedIterable { 366 | toSeq(): IndexedSeq; 367 | } 368 | 369 | declare class SetCollection extends Collection mixins SetIterable { 370 | toSeq(): SetSeq; 371 | } 372 | 373 | declare class Seq extends _Iterable { 374 | static (iter: KeyedSeq): KeyedSeq; 375 | static (iter: SetSeq): SetSeq; 376 | static (iter?: ESIterable): IndexedSeq; 377 | static (iter: { [key: K]: V }): KeyedSeq; 378 | 379 | static isSeq(maybeSeq: any): boolean; 380 | static of(...values: T[]): IndexedSeq; 381 | 382 | size: ?number; 383 | cacheResult(): this; 384 | toSeq(): this; 385 | } 386 | 387 | declare class KeyedSeq extends Seq mixins KeyedIterable { 388 | static (iter?: ESIterable<[K,V]>): KeyedSeq; 389 | static (iter?: { [key: K]: V }): KeyedSeq; 390 | } 391 | 392 | declare class IndexedSeq extends Seq mixins IndexedIterable { 393 | static (iter?: ESIterable): IndexedSeq; 394 | static of(...values: T[]): IndexedSeq; 395 | } 396 | 397 | declare class SetSeq extends Seq mixins SetIterable { 398 | static (iter?: ESIterable): IndexedSeq; 399 | static of(...values: T[]): SetSeq; 400 | } 401 | 402 | declare class List extends IndexedCollection { 403 | static (iterable?: ESIterable): List; 404 | 405 | static isList(maybeList: any): boolean; 406 | static of(...values: T[]): List; 407 | 408 | set(index: number, value: U): List; 409 | delete(index: number): this; 410 | remove(index: number): this; 411 | insert(index: number, value: U): List; 412 | clear(): this; 413 | push(...values: U[]): List; 414 | pop(): this; 415 | unshift(...values: U[]): List; 416 | shift(): this; 417 | 418 | update(updater: (value: this) => List): List; 419 | update(index: number, updater: (value: T) => U): List; 420 | update(index: number, notSetValue: U, updater: (value: T) => U): List; 421 | 422 | merge(...iterables: ESIterable[]): List; 423 | 424 | mergeWith( 425 | merger: (previous: T, next: U, key: number) => V, 426 | ...iterables: ESIterable[] 427 | ): List; 428 | 429 | mergeDeep(...iterables: ESIterable[]): List; 430 | 431 | mergeDeepWith( 432 | merger: (previous: T, next: U, key: number) => V, 433 | ...iterables: ESIterable[] 434 | ): List; 435 | 436 | setSize(size: number): List; 437 | setIn(keyPath: ESIterable, value: any): List; 438 | deleteIn(keyPath: ESIterable, value: any): this; 439 | removeIn(keyPath: ESIterable, value: any): this; 440 | 441 | updateIn(keyPath: ESIterable, notSetValue: any, value: any): List; 442 | updateIn(keyPath: ESIterable, value: any): List; 443 | 444 | mergeIn(keyPath: ESIterable, ...iterables: ESIterable[]): List; 445 | mergeDeepIn(keyPath: ESIterable, ...iterables: ESIterable[]): List; 446 | 447 | withMutations(mutator: (mutable: this) => any): this; 448 | asMutable(): this; 449 | asImmutable(): this; 450 | 451 | // Overrides that specialize return types 452 | map( 453 | mapper: (value: T, index: number, iter: this) => M, 454 | context?: any 455 | ): List; 456 | 457 | flatMap( 
458 | mapper: (value: T, index: number, iter: this) => ESIterable, 459 | context?: any 460 | ): List; 461 | 462 | flatten(depth?: number): /*this*/List; 463 | flatten(shallow?: boolean): /*this*/List; 464 | } 465 | 466 | declare class Map extends KeyedCollection { 467 | static (): Map; 468 | static (obj?: {[key: string]: V}): Map; 469 | static (iterable?: ESIterable<[K,V]>): Map; 470 | 471 | static isMap(maybeMap: any): boolean; 472 | 473 | set(key: K_, value: V_): Map; 474 | delete(key: K): this; 475 | remove(key: K): this; 476 | clear(): this; 477 | 478 | update(updater: (value: this) => Map): Map; 479 | update(key: K, updater: (value: V) => V_): Map; 480 | update(key: K, notSetValue: V_, updater: (value: V) => V_): Map; 481 | 482 | merge( 483 | ...iterables: (ESIterable<[K_,V_]> | { [key: K_]: V_ })[] 484 | ): Map; 485 | 486 | mergeWith( 487 | merger: (previous: V, next: W, key: number) => X, 488 | ...iterables: ESIterable[] 489 | ): Map; 490 | 491 | mergeDeep( 492 | ...iterables: (ESIterable<[K_,V_]> | { [key: K_]: V_ })[] 493 | ): Map; 494 | 495 | mergeDeepWith( 496 | merger: (previous: V, next: W, key: number) => X, 497 | ...iterables: ESIterable[] 498 | ): Map; 499 | 500 | setIn(keyPath: ESIterable, value: any): Map; 501 | deleteIn(keyPath: ESIterable, value: any): this; 502 | removeIn(keyPath: ESIterable, value: any): this; 503 | 504 | updateIn(keyPath: ESIterable, notSetValue: any, value: any): Map; 505 | updateIn(keyPath: ESIterable, value: any): Map; 506 | 507 | mergeIn(keyPath: ESIterable, ...iterables: ESIterable[]): Map; 508 | mergeDeepIn(keyPath: ESIterable, ...iterables: ESIterable[]): Map; 509 | 510 | withMutations(mutator: (mutable: this) => any): this; 511 | asMutable(): this; 512 | asImmutable(): this; 513 | 514 | // Overrides that specialize return types 515 | 516 | map( 517 | mapper: (value: V, key: K, iter: this) => V_, 518 | context?: any 519 | ): Map; 520 | 521 | flatMap( 522 | mapper: (value: V, key: K, iter: this) => ESIterable<[K_,V_]>, 523 | context?: any 524 | ): Map; 525 | 526 | flip(): Map; 527 | 528 | mapKeys( 529 | mapper: (key: K, value: V, iter: this) => K_, 530 | context?: any 531 | ): Map; 532 | 533 | flatten(depth?: number): /*this*/Map; 534 | flatten(shallow?: boolean): /*this*/Map; 535 | } 536 | 537 | // OrderedMaps have nothing that Maps do not have. 
We do not need to override constructor & other statics 538 | declare class OrderedMap extends Map { 539 | static isOrderedMap(maybeOrderedMap: any): bool; 540 | } 541 | 542 | declare class Set extends SetCollection { 543 | static (iterable?: ESIterable): Set; 544 | 545 | static isSet(maybeSet: any): boolean; 546 | static of(...values: T[]): Set; 547 | static fromKeys(iter: ESIterable<[T,any]>): Set; 548 | static fromKeys(iter: { [key: string]: any }): Set; 549 | 550 | add(value: U): Set; 551 | delete(value: T): this; 552 | remove(value: T): this; 553 | clear(): this; 554 | union(...iterables: ESIterable[]): Set; 555 | merge(...iterables: ESIterable[]): Set; 556 | intersect(...iterables: ESIterable[]): Set; 557 | subtract(...iterables: ESIterable[]): Set; 558 | 559 | withMutations(mutator: (mutable: this) => any): this; 560 | asMutable(): this; 561 | asImmutable(): this; 562 | 563 | // Overrides that specialize return types 564 | 565 | map( 566 | mapper: (value: T, value: T, iter: this) => M, 567 | context?: any 568 | ): Set; 569 | 570 | flatMap( 571 | mapper: (value: T, value: T, iter: this) => ESIterable, 572 | context?: any 573 | ): Set; 574 | 575 | flatten(depth?: number): /*this*/Set; 576 | flatten(shallow?: boolean): /*this*/Set; 577 | } 578 | 579 | // OrderedSets have nothing that Sets do not have. We do not need to override constructor & other statics 580 | declare class OrderedSet extends Set { 581 | static isOrderedSet(maybeOrderedSet: any): bool; 582 | } 583 | 584 | declare class Stack extends IndexedCollection { 585 | static (iterable?: ESIterable): Stack; 586 | 587 | static isStack(maybeStack: any): boolean; 588 | static of(...values: T[]): Stack; 589 | 590 | peek(): T; 591 | clear(): this; 592 | unshift(...values: U[]): Stack; 593 | unshiftAll(iter: ESIterable): Stack; 594 | shift(): this; 595 | push(...values: U[]): Stack; 596 | pushAll(iter: ESIterable): Stack; 597 | pop(): this; 598 | 599 | withMutations(mutator: (mutable: this) => any): this; 600 | asMutable(): this; 601 | asImmutable(): this; 602 | 603 | // Overrides that specialize return types 604 | 605 | map( 606 | mapper: (value: T, index: number, iter: this) => U, 607 | context?: any 608 | ): Stack; 609 | 610 | flatMap( 611 | mapper: (value: T, index: number, iter: this) => ESIterable, 612 | context?: any 613 | ): Stack; 614 | 615 | flatten(depth?: number): /*this*/Stack; 616 | flatten(shallow?: boolean): /*this*/Stack; 617 | } 618 | 619 | declare function Range(start?: number, end?: number, step?: number): IndexedSeq; 620 | declare function Repeat(value: T, times?: number): IndexedSeq; 621 | 622 | //TODO: Once flow can extend normal Objects we can change this back to actually reflect Record behavior. 623 | // For now fallback to any to not break existing Code 624 | declare class Record { 625 | static (spec: T, name?: string): /*T & Record*/any; 626 | get(key: $Keys): A; 627 | set(key: $Keys, value: A): /*T & Record*/this; 628 | remove(key: $Keys): /*T & Record*/this; 629 | } 630 | 631 | declare function fromJS(json: any, reviver?: (k: any, v: Iterable) => any): any; 632 | declare function is(first: any, second: any): boolean; 633 | 634 | declare module 'immutable' { 635 | declare var exports: { 636 | Map: Class>, 637 | Set: Class> 638 | } 639 | } 640 | 641 | export { 642 | Iterable, 643 | Collection, 644 | Seq, 645 | 646 | // These classes do not actually exist under these names. But it is useful to 647 | // have the types available. 
648 | KeyedIterable, 649 | IndexedIterable, 650 | SetIterable, 651 | KeyedCollection, 652 | IndexedCollection, 653 | SetCollection, 654 | KeyedSeq, 655 | IndexedSeq, 656 | SetSeq, 657 | 658 | List, 659 | Map, 660 | OrderedMap, 661 | OrderedSet, 662 | Range, 663 | Repeat, 664 | Record, 665 | Set, 666 | Stack, 667 | 668 | fromJS, 669 | is, 670 | } 671 | -------------------------------------------------------------------------------- /src/rabbitmq.js: -------------------------------------------------------------------------------- 1 | /* @flow */ 2 | /* global Bluebird$Promise RabbitMQChannel RabbitMQConfirmChannel RabbitMQConnection SubscribeObject RabbitMQOptions QueueObject */ 3 | 'use strict' 4 | 5 | const amqplib = require('amqplib') 6 | const defaults = require('101/defaults') 7 | const getNamespace = require('continuation-local-storage').getNamespace 8 | const hasKeypaths = require('101/has-keypaths') 9 | const Immutable = require('immutable') 10 | const isFunction = require('101/is-function') 11 | const isObject = require('101/is-object') 12 | const isString = require('101/is-string') 13 | const joi = require('joi') 14 | const monitor = require('monitor-dog') 15 | const Promise = require('bluebird') 16 | const uuid = require('uuid') 17 | 18 | const logger = require('./logger') 19 | 20 | const tasksSchema = joi.object({ 21 | name: joi.string().required(), 22 | exclusive: joi.bool(), 23 | durable: joi.bool(), 24 | autoDelete: joi.bool(), 25 | jobSchema: joi.object({ 26 | isJoi: joi.bool().valid(true) 27 | }).unknown() 28 | }) 29 | 30 | const eventsSchema = joi.object({ 31 | name: joi.string().required(), 32 | internal: joi.bool(), 33 | durable: joi.bool(), 34 | autoDelete: joi.bool(), 35 | alternateExchange: joi.bool(), 36 | jobSchema: joi.object({ 37 | isJoi: joi.bool().valid(true) 38 | }).unknown() 39 | }) 40 | 41 | const optsSchema = joi.object({ 42 | name: joi.string(), 43 | hostname: joi.string(), 44 | port: joi.number(), 45 | username: joi.string(), 46 | password: joi.string(), 47 | log: joi.object().type(logger.constructor, 'Bunyan Logger'), 48 | channelOpts: joi.object(), 49 | tasks: joi.array().items(tasksSchema), 50 | events: joi.array().items(eventsSchema) 51 | }).or('tasks', 'events').required() 52 | 53 | /** 54 | * RabbitMQ model. Can be used independently for publishing or other uses. 55 | * 56 | * @author Bryan Kendall 57 | * @param {Object} [opts] RabbitMQ connection options. 58 | * @param {Object} [opts.channel] RabbitMQ channel options. 59 | * @param {Object} [opts.channel.prefetch] Set prefetch for each consumer in a 60 | * channel. 61 | * @param {String} [opts.hostname=localhost] Hostname for RabbitMQ. Can be set 62 | * with environment variable RABBITMQ_HOSTNAME. 63 | * @param {Number} [opts.port=5672] Port for RabbitMQ. Can be set with 64 | * environment variable RABBITMQ_PORT. 65 | * @param {String} [opts.username] Username for RabbitMQ. Can be set with 66 | * environment variable RABBITMQ_USERNAME. 67 | * @param {String} [opts.password] Username for Password. Can be set with 68 | * environment variable RABBITMQ_PASSWORD. 
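 *
 * @example
 * // Minimal standalone publisher sketch. The task/event names below are
 * // hypothetical; jobs can only be published if they were declared in the
 * // constructor's `tasks`/`events`, and `.connect()` must resolve first.
 * const rabbitmq = new RabbitMQ({
 *   name: 'my-app',
 *   tasks: ['some.task.run'],
 *   events: ['some.event.happened']
 * })
 * rabbitmq.connect()
 *   .then(() => rabbitmq.publishTask('some.task.run', { foo: 'bar' }))
 *   .then(() => rabbitmq.publishEvent('some.event.happened', { foo: 'bar' }))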
69 | */ 70 | class RabbitMQ { 71 | static AMQPLIB_QUEUE_DEFAULTS: Object; 72 | static AMQPLIB_EXCHANGE_DEFAULTS: Object; 73 | 74 | channel: RabbitMQChannel; 75 | channelOpts: Object; 76 | connection: RabbitMQConnection; 77 | consuming: Map; 78 | events: Array; 79 | hostname: string; 80 | log: Object; 81 | name: string; 82 | password: string; 83 | port: number; 84 | publishChannel: RabbitMQConfirmChannel; 85 | subscribed: Set; 86 | subscriptions: Map; 87 | tasks: Array; 88 | username: string; 89 | 90 | constructor (opts: Object) { 91 | this.name = opts.name || 'ponos' 92 | this.hostname = opts.hostname || process.env.RABBITMQ_HOSTNAME || 'localhost' 93 | this.port = opts.port || parseInt(process.env.RABBITMQ_PORT, 10) || 5672 94 | this.username = opts.username || process.env.RABBITMQ_USERNAME 95 | this.password = opts.password || process.env.RABBITMQ_PASSWORD 96 | this.log = opts.log || logger.child({ module: 'ponos:rabbitmq' }) 97 | this.channelOpts = opts.channel || {} 98 | if (!this.username || !this.password) { 99 | this.log.warn( 100 | 'RabbitMQ username and password not found. See Ponos Server ' + 101 | 'constructor documentation.' 102 | ) 103 | } 104 | this.tasks = opts.tasks || [] 105 | this.tasks = this.tasks.map(RabbitMQ._formatJobs) 106 | this.events = opts.events || [] 107 | this.events = this.events.map(RabbitMQ._formatJobs) 108 | this.log.trace({ opts: opts }, 'RabbitMQ constructor') 109 | joi.assert(this, optsSchema) 110 | this._setCleanState() 111 | } 112 | 113 | /** 114 | * formats events and tasks to consistent format. 115 | * add TID validation if not already there 116 | * @param {Object|String} item task/job item from map 117 | * @return {Object} formated job type 118 | */ 119 | static _formatJobs (item: string|Object): Object { 120 | if (typeof item === 'string') { 121 | return { name: item } 122 | } 123 | if (item.jobSchema) { 124 | item.jobSchema = item.jobSchema.concat(joi.object({ tid: joi.string() })) 125 | } 126 | return item 127 | } 128 | 129 | /** 130 | * Connect to the RabbitMQ server. 131 | * 132 | * @return {Promise} Promise that resolves once connection is established. 
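 *
 * @example
 * // Sketch, assuming `rabbitmq` was constructed as documented above (with
 * // `some.task.run` declared in `tasks`) and that RABBITMQ_HOSTNAME and
 * // RABBITMQ_PORT (plus RABBITMQ_USERNAME/RABBITMQ_PASSWORD if needed) are
 * // exported: the constructor reads those variables and connect() builds
 * // the amqp:// URL from them.
 * rabbitmq.connect()
 *   .then(() => rabbitmq.publishTask('some.task.run', { foo: 'bar' }))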
133 | */ 134 | connect (): Bluebird$Promise { 135 | if (this._isPartlyConnected() || this._isConnected()) { 136 | return Promise.reject(new Error('cannot call connect twice')) 137 | } 138 | let authString = '' 139 | if (this.username && this.password) { 140 | authString = `${this.username}:${this.password}@` 141 | } 142 | const url = `amqp://${authString}${this.hostname}:${this.port}` 143 | this.log.info({ url: url }, 'connecting') 144 | return Promise 145 | .resolve(amqplib.connect(url, {})) 146 | .catch((err) => { 147 | this.log.fatal({ err: err }, 'an error occured while connecting') 148 | throw err 149 | }) 150 | .then((conn) => { 151 | this.connection = conn 152 | this.log.info('connected') 153 | this.connection.on('error', this._connectionErrorHandler.bind(this)) 154 | 155 | this.log.info('creating channel') 156 | return Promise.resolve(this.connection.createChannel()) 157 | .catch((err) => { 158 | this.log.fatal({ err: err }, 'an error occured creating channel') 159 | throw err 160 | }) 161 | }) 162 | .then((channel) => { 163 | if (this.channelOpts.prefetch) { 164 | this.log.info('setting prefetch on channel') 165 | return Promise.resolve(channel.prefetch(this.channelOpts.prefetch)) 166 | .return((channel)) 167 | } 168 | return channel 169 | }) 170 | .then((channel) => { 171 | this.log.info('created channel') 172 | this.channel = channel 173 | this.channel.on('error', this._channelErrorHandler.bind(this)) 174 | 175 | this.log.info('creating publishing channel') 176 | return Promise.resolve(this.connection.createConfirmChannel()) 177 | .catch((err) => { 178 | this.log.fatal({ err: err }, 'errored creating confirm channel') 179 | throw err 180 | }) 181 | }) 182 | .then((channel) => { 183 | this.log.info('created confirm channel') 184 | this.publishChannel = channel 185 | this.publishChannel.on('error', this._channelErrorHandler.bind(this)) 186 | }) 187 | .then(() => { 188 | return this._assertQueuesAndExchanges() 189 | }) 190 | } 191 | 192 | /** 193 | * Asserts all passed queues and exchanges on channel 194 | * @return {Promise} Promise resolved when everything is asserted 195 | */ 196 | _assertQueuesAndExchanges (): Bluebird$Promise { 197 | return Promise.each(this.events, (event) => { 198 | if (typeof event === 'string') { 199 | return this._assertExchange(event, 'fanout') 200 | } 201 | 202 | return this._assertExchange(event.name, 'fanout', event) 203 | }) 204 | .then(() => { 205 | return Promise.each(this.tasks, (task) => { 206 | if (typeof task === 'string') { 207 | return this._assertQueue(`${this.name}.${task}`) 208 | } 209 | 210 | return this._assertQueue(`${this.name}.${task.name}`, task) 211 | }) 212 | }) 213 | .return() 214 | } 215 | /** 216 | * Takes an object representing a message and sends it to a queue. 217 | * 218 | * @deprecated 219 | * @param {String} queue Queue to receive the message. 220 | * @param {Object} content Content to send. 221 | * @return {Promise} Promise resolved when message is sent to queue. 222 | */ 223 | publishToQueue (queue: string, content: Object): Bluebird$Promise { 224 | return Promise.try(() => { 225 | this.log.warn({ 226 | method: 'publishToQueue', 227 | queue 228 | }, 'rabbitmq.publishToQueue is deprecated. use `publishTask`.') 229 | return this.publishTask(queue, content) 230 | }) 231 | .return() 232 | } 233 | 234 | /** 235 | * Takes an object representing a message and sends it to an exchange using 236 | * a provided routing key. 
237 | * 238 | * Note: Providing an empty string as the routing key is functionally the same 239 | * as sending the message directly to a named queue. The function 240 | * {@link RabbitMQ#publishToQueue} is preferred in this case. 241 | * 242 | * @deprecated 243 | * @param {String} exchange Exchange to receive the message. 244 | * @param {String} routingKey Routing Key for the exchange. 245 | * @param {Object} content Content to send. 246 | * @return {Promise} Promise resolved when message is sent to the exchange. 247 | */ 248 | publishToExchange ( 249 | exchange: string, 250 | routingKey: string, 251 | content: Object 252 | ): Bluebird$Promise { 253 | return Promise.try(() => { 254 | this.log.warn({ 255 | method: 'publishToExchange', 256 | exchange 257 | }, 'rabbitmq.publishToExchange is deprecated. use `publishEvent`.') 258 | return this.publishEvent(exchange, content) 259 | }) 260 | .return() 261 | } 262 | 263 | /** 264 | * Takes an object representing a message and sends it to a task queue. 265 | * The configured app name is prepended to the queue name. 266 | * @param {String} queue Task queue to receive the message. 267 | * @param {Object} content Job to send. 268 | * @param {Object} opts extra options for message. 269 | * @return {Promise} Promise resolved when message is sent to queue. 270 | */ 271 | publishTask (queue: string, content: Object, opts?: Object): Bluebird$Promise { 272 | return Promise.try(() => { 273 | const queueName = `${this.name}.${queue}` 274 | this._validatePublish(queue, content, 'tasks') 275 | const payload = RabbitMQ.buildJobPayload(content) 276 | const meta = RabbitMQ.buildJobMeta(this.name, opts) 277 | this.log.info({ queue: queueName, job: content, jobMeta: meta }, 'Publishing task') 278 | this._incMonitor('task', queueName) 279 | return Promise.resolve( 280 | this.publishChannel.sendToQueue(queueName, payload, meta) 281 | ) 282 | }) 283 | } 284 | 285 | /** 286 | * Sends an object representing a message to an exchange for the specified 287 | * event. 288 | * 289 | * @param {String} exchange Exchange to receive the message. 290 | * @param {Object} content Content to send. 291 | * @param {Object} opts extra options for message. 292 | * @return {Promise} Promise resolved when message is sent to the exchange. 293 | */ 294 | publishEvent (exchange: string, content: Object, opts?: Object): Bluebird$Promise { 295 | return Promise.try(() => { 296 | this._validatePublish(exchange, content, 'events') 297 | const payload = RabbitMQ.buildJobPayload(content) 298 | const meta = RabbitMQ.buildJobMeta(this.name, opts) 299 | this.log.info({ event: exchange, job: content, jobMeta: meta }, 'Publishing event') 300 | // events do not need a routing key (so we send '') 301 | this._incMonitor('event', exchange) 302 | return Promise.resolve( 303 | this.publishChannel.publish(exchange, '', payload, meta) 304 | ) 305 | }) 306 | } 307 | 308 | /** 309 | * Helper function calling `monitor.increment`. Monitor won't be called if 310 | * `WORKER_MONITOR_DISABLED` is set. 311 | * 312 | * @private 313 | * @param {String} type either event or task 314 | * @param {String} name name of the event or task 315 | */ 316 | _incMonitor (type: string, name: string): void { 317 | if (process.env.WORKER_MONITOR_DISABLED) { 318 | return 319 | } 320 | const tags = { 321 | type: type, 322 | app_id: this.name, 323 | name: name 324 | } 325 | monitor.increment('ponos.publish', tags) 326 | } 327 | /** 328 | * Asserts exchanges on the channel.
329 | * 330 | * @param {String} name Exchange Name 331 | * @param {String} type Type of exchange [topic|fanout] 332 | * @param {Object} opts extra options for exchange 333 | * @return {Promise} Promise resolved when exchange is created. 334 | * @resolves {QueueObject} asserted exchange 335 | */ 336 | _assertExchange (name: string, type: string, opts?: Object): Bluebird$Promise { 337 | return Promise.resolve( 338 | this.channel.assertExchange( 339 | name, 340 | type, 341 | defaults(opts, RabbitMQ.AMQPLIB_EXCHANGE_DEFAULTS) 342 | ) 343 | ) 344 | } 345 | 346 | /** 347 | * Asserts queue on the channel. 348 | * 349 | * @param {String} name Queue Name 350 | * @param {Object} opts extra options for queue 351 | * @return {Promise} Promise resolved when queue is created. 352 | * @resolves {QueueObject} asserted queue 353 | */ 354 | _assertQueue (name: string, opts?: Object): Bluebird$Promise { 355 | return Promise.resolve( 356 | this.channel.assertQueue( 357 | name, 358 | defaults(opts, RabbitMQ.AMQPLIB_QUEUE_DEFAULTS) 359 | ) 360 | ) 361 | } 362 | 363 | /** 364 | * Subscribe to a specific direct queue. 365 | * 366 | * @private 367 | * @param {String} queue Queue name. 368 | * @param {Function} handler Handler for jobs. 369 | * @param {Object} [queueOptions] Options for the queue. 370 | * @see RabbitMQ.AMQPLIB_QUEUE_DEFAULTS 371 | * @return {Promise} Promise that is resolved once queue is subscribed. 372 | */ 373 | subscribeToQueue ( 374 | queue: string, 375 | handler: Function, 376 | queueOptions?: Object 377 | ): Bluebird$Promise { 378 | const queueName = `${this.name}.${queue}` 379 | const log = this.log.child({ 380 | method: 'subscribeToQueue', 381 | queue: queueName 382 | }) 383 | log.info('subscribing to queue') 384 | if (!this._isConnected()) { 385 | return Promise.reject(new Error('you must .connect() before subscribing')) 386 | } 387 | if (!isFunction(handler)) { 388 | log.error('handler must be a function') 389 | return Promise.reject( 390 | new Error(`handler for ${queueName} must be a function`) 391 | ) 392 | } 393 | return Promise.try(() => { 394 | log.trace('binding to queue') 395 | this.subscriptions = this.subscriptions.set(queueName, handler) 396 | this.subscribed = this.subscribed.add(`queue:::${queueName}`) 397 | }) 398 | } 399 | 400 | /** 401 | * Subcribe to fanout exchange. 402 | * 403 | * @private 404 | * @param {String} exchange Name of fanout exchange. 405 | * @param {Function} handler Handler for jobs. 406 | * @param {Object} [rabbitMQOptions] Options for the queues and exchanges. 407 | * @param {Object} [rabbitMQOptions.queueOptions] Options for the queue. 408 | * @see RabbitMQ.AMQPLIB_QUEUE_DEFAULTS 409 | * @param {Object} [rabbitMQOptions.exchangeOptions] Options for the exchange. 410 | * @see RabbitMQ.AMQPLIB_EXCHANGE_DEFAULTS 411 | * @return {Promise} Promise resolved once subscribed. 
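 *
 * @example
 * // Sketch of how a handler is wired (this is what the ponos Server does
 * // internally via _subscribeAll). The exchange name is hypothetical and
 * // `.connect()` is assumed to have resolved. The handler receives
 * // (job, jobMeta, done) and must call done() so the consume() wrapper can
 * // ack the message; delivery starts once consume() is called.
 * rabbitmq.subscribeToFanoutExchange('some.event.happened', (job, jobMeta, done) => {
 *   handleJob(job) // hypothetical application logic
 *   done()
 * })
 *   .then(() => rabbitmq.consume())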
412 | */ 413 | subscribeToFanoutExchange ( 414 | exchange: string, 415 | handler: Function, 416 | rabbitMQOptions?: RabbitMQOptions 417 | ): Bluebird$Promise { 418 | const log = this.log.child({ 419 | method: 'subscribeToFanoutExchange', 420 | exchange, 421 | rabbitMQOptions 422 | }) 423 | log.info('subscribing to exchange') 424 | const opts = { 425 | exchange: exchange, 426 | type: 'fanout', 427 | handler: handler, 428 | queueOptions: {}, 429 | exchangeOptions: {} 430 | } 431 | if (rabbitMQOptions && rabbitMQOptions.queueOptions) { 432 | opts.queueOptions = rabbitMQOptions.queueOptions 433 | } 434 | if (rabbitMQOptions && rabbitMQOptions.exchangeOptions) { 435 | opts.exchangeOptions = rabbitMQOptions.exchangeOptions 436 | } 437 | return this._subscribeToExchange(opts) 438 | } 439 | 440 | /** 441 | * Subscribe to topic exchange. 442 | * 443 | * @private 444 | * @param {String} exchange Name of topic exchange. 445 | * @param {String} routingKey Routing key for topic exchange. 446 | * @param {Function} handler Handler for jobs. 447 | * @param {Object} [rabbitMQOptions] Options for the queues and exchanges. 448 | * @param {Object} [rabbitMQOptions.exchangeOptions] Options for the exchange. 449 | * @see RabbitMQ.AMQPLIB_EXCHANGE_DEFAULTS 450 | * @param {Object} [rabbitMQOptions.queueOptions] Options for the queue. 451 | * @see RabbitMQ.AMQPLIB_QUEUE_DEFAULTS 452 | * @return {Promise} Promise resolved once subscribed. 453 | */ 454 | subscribeToTopicExchange ( 455 | exchange: string, 456 | routingKey: string, 457 | handler: Function, 458 | rabbitMQOptions?: RabbitMQOptions 459 | ): Bluebird$Promise { 460 | const log = this.log.child({ 461 | method: 'subscribeToTopicExchange', 462 | exchange, 463 | rabbitMQOptions 464 | }) 465 | log.info('subscribing to exchange') 466 | const opts = { 467 | exchange: exchange, 468 | type: 'topic', 469 | routingKey: routingKey, 470 | handler: handler, 471 | queueOptions: {}, 472 | exchangeOptions: {} 473 | } 474 | if (rabbitMQOptions && rabbitMQOptions.queueOptions) { 475 | opts.queueOptions = rabbitMQOptions.queueOptions 476 | } 477 | if (rabbitMQOptions && rabbitMQOptions.exchangeOptions) { 478 | opts.exchangeOptions = rabbitMQOptions.exchangeOptions 479 | } 480 | return this._subscribeToExchange(opts) 481 | } 482 | 483 | /** 484 | * Start consuming from subscribed queues. 485 | * 486 | * @private 487 | * @return {Promise} Promise resolved when all queues consuming. 488 | */ 489 | consume (): Bluebird$Promise { 490 | const log = this.log.child({ method: 'consume' }) 491 | log.info('starting to consume') 492 | if (!this._isConnected()) { 493 | return Promise.reject(new Error('you must .connect() before consuming')) 494 | } 495 | const subscriptions = this.subscriptions 496 | this.subscriptions = new Immutable.Map() 497 | const channel = this.channel 498 | return Promise.map(subscriptions.keySeq(), (queue) => { 499 | const handler = subscriptions.get(queue) 500 | log.info({ queue: queue }, 'consuming on queue') 501 | // XXX(bryan): is this valid? should I not be checking _this_.consuming? 
502 | if (this.consuming.has(queue)) { 503 | log.warn({ queue: queue }, 'already consuming queue') 504 | return 505 | } 506 | function wrapper (msg) { 507 | let job 508 | const jobMeta = msg.properties || {} 509 | try { 510 | job = JSON.parse(msg.content) 511 | } catch (err) { 512 | // relatively safe stringifying - could be buffer, could be invalid 513 | log.error({ job: '' + msg.content }, 'content not valid JSON') 514 | return channel.ack(msg) 515 | } 516 | handler(job, jobMeta, () => { 517 | channel.ack(msg) 518 | }) 519 | } 520 | return Promise.resolve(this.channel.consume(queue, wrapper)) 521 | .then((consumeInfo) => { 522 | this.consuming = this.consuming.set(queue, consumeInfo.consumerTag) 523 | }) 524 | }) 525 | .return() 526 | } 527 | 528 | /** 529 | * Unsubscribe and stop consuming from all queues. 530 | * 531 | * @private 532 | * @return {Promise} Promise resolved when all queues canceled. 533 | */ 534 | unsubscribe (): Bluebird$Promise { 535 | const consuming = this.consuming 536 | return Promise.map(consuming.keySeq(), (queue) => { 537 | const consumerTag = consuming.get(queue) 538 | return Promise.resolve(this.channel.cancel(consumerTag)) 539 | .then(() => { 540 | this.consuming = this.consuming.delete(queue) 541 | }) 542 | }) 543 | .return() 544 | } 545 | 546 | /** 547 | * Disconnect from RabbitMQ. 548 | * 549 | * @return {Promise} Promise resolved when disconnected from RabbitMQ. 550 | */ 551 | disconnect (): Bluebird$Promise { 552 | if (!this._isPartlyConnected()) { 553 | return Promise.reject(new Error('not connected. cannot disconnect.')) 554 | } 555 | return Promise.resolve(this.publishChannel.waitForConfirms()) 556 | .then(() => (Promise.resolve(this.connection.close()))) 557 | .then(() => (this._setCleanState())) 558 | } 559 | 560 | // Private Methods 561 | 562 | /** 563 | * Helper method to re-set the state of the model to be 'clean'. 564 | * 565 | * @private 566 | */ 567 | _setCleanState (): void { 568 | delete this.channel 569 | delete this.connection 570 | this.subscriptions = new Immutable.Map() 571 | this.subscribed = new Immutable.Set() 572 | this.consuming = new Immutable.Map() 573 | } 574 | 575 | /** 576 | * Error handler for the RabbitMQ connection. 577 | * 578 | * @private 579 | * @throws {Error} 580 | * @param {object} err Error object from event. 581 | */ 582 | _connectionErrorHandler (err: Error) { 583 | this.log.fatal({ err: err }, 'connection has caused an error') 584 | throw err 585 | } 586 | 587 | /** 588 | * Error handler for the RabbitMQ channel. 589 | * 590 | * @private 591 | * @throws {Error} 592 | * @param {object} err Error object from event. 593 | */ 594 | _channelErrorHandler (err: Error) { 595 | this.log.fatal({ err: err }, 'channel has caused an error') 596 | throw err 597 | } 598 | 599 | /** 600 | * Check to see if model is connected. 601 | * 602 | * @private 603 | * @return {Boolean} True if model is connected and channel is established. 604 | */ 605 | _isConnected (): boolean { 606 | return !!(this._isPartlyConnected() && this.channel && this.publishChannel) 607 | } 608 | 609 | /** 610 | * Check to see if model is _partially_ connected. This means that the 611 | * connection was established, but the channel was not. 612 | * 613 | * @private 614 | * @return {Boolean} True if connection is established. 615 | */ 616 | _isPartlyConnected (): boolean { 617 | return !!(this.connection) 618 | } 619 | 620 | /** 621 | * Helper function to consolidate logic for subscribing to queues. 
Stores 622 | * information about what is subscribed and is responsible for asserting 623 | * exchanges and queues into existence. 624 | * 625 | * @private 626 | * @param {Object} opts Object describing the exchange connection. 627 | * @param {String} opts.exchange Name of exchange. 628 | * @param {Function} opts.handler Handler of jobs. 629 | * @param {String} opts.type Type of exchange: 'fanout' or 'topic'. 630 | * @param {Object} [opts.exchangeOptions] Options for the exchange. 631 | * @see RabbitMQ.AMQPLIB_EXCHANGE_DEFAULTS 632 | * @param {Object} [opts.queueOptions] Options for the queue. 633 | * @see RabbitMQ.AMQPLIB_QUEUE_DEFAULTS 634 | * @param {String} [opts.routingKey] Routing key for a topic exchange. 635 | * @return {Promise} Promise resolved when subscribed to exchange. 636 | */ 637 | _subscribeToExchange (opts: SubscribeObject): Bluebird$Promise { 638 | const log = this.log.child({ 639 | method: '_subscribeToExchange', 640 | opts: opts 641 | }) 642 | log.info('subscribing to exchange') 643 | if (!this._isConnected()) { 644 | return Promise.reject(new Error('must .connect() before subscribing')) 645 | } 646 | if (opts.type === 'topic' && !opts.routingKey) { 647 | return Promise.reject(new Error('routingKey required for topic exchange')) 648 | } 649 | let subscribedKey = `${opts.type}:::${opts.exchange}` 650 | if (opts.type === 'topic' && opts.routingKey) { 651 | subscribedKey = `${subscribedKey}:::${opts.routingKey}` 652 | } 653 | if (this.subscribed.has(subscribedKey)) { 654 | log.warn(`already subscribed to ${opts.type} exchange`) 655 | return Promise.resolve() 656 | } 657 | 658 | log.trace('asserting queue for exchange') 659 | let queueName = `${this.name}.${opts.exchange}` 660 | if (opts.type === 'topic' && opts.routingKey) { 661 | queueName = `${queueName}.${opts.routingKey}` 662 | } 663 | 664 | return this._assertQueue(queueName, opts.queueOptions) 665 | .then((queueInfo) => { 666 | const queue = queueInfo.queue 667 | log.info({ queue: queue }, 'queue asserted') 668 | log.info('binding queue') 669 | if (!opts.routingKey) { 670 | opts.routingKey = '' 671 | } 672 | return Promise 673 | .resolve( 674 | this.channel.bindQueue(queue, opts.exchange, opts.routingKey) 675 | ) 676 | .return(queue) 677 | }) 678 | .then((queue) => { 679 | log.info('bound queue') 680 | this.subscriptions = this.subscriptions.set(queue, opts.handler) 681 | this.subscribed = this.subscribed.add(subscribedKey) 682 | }) 683 | } 684 | 685 | static getKeyFromClsNamespace (key) { 686 | const ns = getNamespace('ponos') 687 | return ns && ns.get(key) 688 | } 689 | static buildJobPayload (content: Object) { 690 | if (!content.tid) { 691 | const tid = RabbitMQ.getKeyFromClsNamespace('tid') 692 | content.tid = tid || uuid() 693 | } 694 | const stringContent = JSON.stringify(content) 695 | return new Buffer(stringContent) 696 | } 697 | static buildJobMeta (name, opts) { 698 | const jobMeta = defaults({ 699 | appId: name, 700 | timestamp: Date.now() 701 | }, opts || {}) 702 | if (jobMeta.headers == null) { 703 | jobMeta.headers = {} 704 | } 705 | jobMeta.headers.publisherWorkerName = RabbitMQ.getKeyFromClsNamespace('currentWorkerName') 706 | return jobMeta 707 | } 708 | /** 709 | * Validate publish params. Adds a TID to the job if it does not already have 710 | * one. 711 | * @private 712 | * @param {String} name Name of queue or exchange. 713 | * @param {Object} content Content to send. 714 | * @param {String} type type of job to validate (tasks|events). 715 | * @throws {Error} Must be connected to RabbitMQ.
716 | * @throws {Error} Name must be a non-empty string. 717 | * @throws {Error} Object must be an Object. 718 | * @throws {Error} Joi validation error if jobSchema is provided and job is invalid 719 | */ 720 | _validatePublish (name: string, content: Object, type: string): void { 721 | if (!this._isConnected()) { 722 | throw new Error('you must call .connect() before publishing') 723 | } 724 | // flowtype does not prevent users from using this function incorrectly. 725 | if (!isString(name) || name === '') { 726 | throw new Error('name must be a string') 727 | } 728 | if (!isObject(content)) { 729 | throw new Error('content must be an object') 730 | } 731 | // $FlowIgnore: flow does not understand dynamic keys 732 | const job = this[type].find(hasKeypaths({ name: name })) 733 | if (!job) { 734 | throw new Error(`${type}: "${name}" not defined in constructor`) 735 | } 736 | 737 | if (job.jobSchema) { 738 | joi.assert(content, job.jobSchema) 739 | } 740 | } 741 | } 742 | 743 | /** 744 | * Default options provided for asserted queues. 745 | * 746 | * Reference the [amqplib docs]{@link 747 | * http://www.squaremobius.net/amqp.node/channel_api.html#channel_assertQueue} 748 | * for more information. 749 | * 750 | * @typedef AMQPLIB_QUEUE_DEFAULTS 751 | * @const {Object} 752 | * @property {Boolean} autoDelete=false Delete queue when it has 0 consumers. 753 | * @property {Boolean} durable=true Queue survives broker restarts. 754 | * @property {Boolean} exclusive=false Scopes the queue to the connection. 755 | */ 756 | RabbitMQ.AMQPLIB_QUEUE_DEFAULTS = { 757 | exclusive: false, 758 | durable: true, 759 | autoDelete: false 760 | } 761 | 762 | /** 763 | * Default options provided for asserted exchanges. 764 | * 765 | * Reference the [amqplib docs]{@link 766 | * http://www.squaremobius.net/amqp.node/channel_api.html#channel_assertExchange} 767 | * for more information. 768 | * 769 | * @typedef AMQPLIB_EXCHANGE_DEFAULTS 770 | * @const {Object} 771 | * @property {Boolean} autoDelete=false Delete exchange when it has 0 bindings. 772 | * @property {Boolean} durable=true Queue survives broker restarts. 773 | * @property {Boolean} internal=false Messages cannot be published directly to 774 | * the exchange. 775 | */ 776 | RabbitMQ.AMQPLIB_EXCHANGE_DEFAULTS = { 777 | durable: true, 778 | internal: false, 779 | autoDelete: false 780 | } 781 | 782 | /** 783 | * RabbitMQ model. 
784 | * 785 | * @module ponos/lib/rabbitmq 786 | * @see RabbitMQ 787 | */ 788 | module.exports = RabbitMQ 789 | -------------------------------------------------------------------------------- /test/unit/worker.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const chai = require('chai') 4 | const joi = require('joi') 5 | const monitor = require('monitor-dog') 6 | const noop = require('101/noop') 7 | const omit = require('101/omit') 8 | const Promise = require('bluebird') 9 | const sinon = require('sinon') 10 | const RabbitMQ = require('../../src/rabbitmq') 11 | const WorkerStopError = require('error-cat/errors/worker-stop-error') 12 | const assert = chai.assert 13 | const TimeoutError = Promise.TimeoutError 14 | 15 | const Worker = require('../../src/worker') 16 | const logger = require('../../src/logger') 17 | 18 | describe('Worker', () => { 19 | let opts 20 | beforeEach(() => { 21 | opts = { 22 | queue: 'do.something.command', 23 | task: (data) => { return Promise.resolve(data) }, 24 | job: { message: 'hello world' }, 25 | log: logger.child({ module: 'ponos:test' }) 26 | } 27 | }) 28 | 29 | describe('Constructor', () => { 30 | beforeEach(() => { sinon.stub(Worker.prototype, 'run') }) 31 | 32 | afterEach(() => { Worker.prototype.run.restore() }) 33 | 34 | it('should enforce default opts', () => { 35 | const testOpts = omit(opts, 'job') 36 | assert.throws(() => { 37 | Worker.create(testOpts) 38 | }, /"job" is required/) 39 | }) 40 | 41 | it('should enforce default opts', () => { 42 | const testOpts = omit(opts, 'queue') 43 | assert.throws(() => { 44 | Worker.create(testOpts) 45 | }, /"queue" is required/) 46 | }) 47 | 48 | it('should enforce default opts', () => { 49 | const testOpts = omit(opts, 'task') 50 | assert.throws(() => { 51 | Worker.create(testOpts) 52 | }, /"task" is required/) 53 | }) 54 | 55 | it('should enforce default opts', () => { 56 | const testOpts = omit(opts, 'log') 57 | assert.throws(() => { 58 | Worker.create(testOpts) 59 | }, /"log" is required/) 60 | }) 61 | 62 | it('should throw when jobSchema is not object', () => { 63 | opts.jobSchema = 'no schema' 64 | assert.throws(() => { 65 | Worker.create(opts) 66 | }, /"jobSchema" must be an object/) 67 | }) 68 | 69 | it('should throw when jobSchema is not joi schema', () => { 70 | opts.jobSchema = { 71 | isJoi: false 72 | } 73 | assert.throws(() => { 74 | Worker.create(opts) 75 | }, /"isJoi" must be one of \[true\]/) 76 | }) 77 | 78 | it('should use the given logger', () => { 79 | const testLogger = { 80 | info: noop 81 | } 82 | const log = { 83 | child: () => { return testLogger } 84 | } 85 | opts.log = log 86 | const w = Worker.create(opts) 87 | assert.equal(w.log, testLogger) 88 | }) 89 | 90 | it('should use the given errorCat', () => { 91 | opts.errorCat = { mew: 2 } 92 | const w = Worker.create(opts) 93 | assert.deepEqual(w.errorCat, { mew: 2 }) 94 | }) 95 | 96 | it('should use the given errorPublisher', () => { 97 | opts.errorPublisher = { mew: 2 } 98 | const w = Worker.create(opts) 99 | assert.deepEqual(w.errorPublisher, { mew: 2 }) 100 | }) 101 | 102 | describe('finalErrorFn', function () { 103 | it('should use passed for function to resolve', () => { 104 | opts.finalRetryFn = sinon.stub().rejects(new Error('Glorfindel')) 105 | const w = Worker.create(opts) 106 | return assert.isRejected(w.finalRetryFn(), Error, /Glorfindel/) 107 | }) 108 | 109 | it('should default to resolve', () => { 110 | const w = Worker.create(opts) 111 | return 
assert.isFulfilled(w.finalRetryFn()) 112 | }) 113 | }) // end finalErrorFn 114 | describe('maxNumRetries', function () { 115 | beforeEach(() => { 116 | delete process.env.WORKER_MAX_NUM_RETRIES 117 | }) 118 | 119 | it('should use passed maxNumRetries', () => { 120 | opts.maxNumRetries = 1 121 | const w = Worker.create(opts) 122 | assert.equal(w.maxNumRetries, 1, 'set the maxNumRetries correctly') 123 | }) 124 | 125 | it('should use ENV for maxNumRetries', () => { 126 | process.env.WORKER_MAX_NUM_RETRIES = 2 127 | const w = Worker.create(opts) 128 | assert.equal(w.maxNumRetries, 2, 'set the maxNumRetries correctly') 129 | }) 130 | 131 | it('should default the maxNumRetries to max int', () => { 132 | const w = Worker.create(opts) 133 | assert.equal(w.maxNumRetries, Number.MAX_SAFE_INTEGER, 'set the maxNumRetries correctly') 134 | }) 135 | }) // end maxNumRetries 136 | 137 | describe('msTimeout', function () { 138 | beforeEach(() => { 139 | delete process.env.WORKER_TIMEOUT 140 | }) 141 | 142 | it('should use passed msTimeout', () => { 143 | opts.msTimeout = 1 144 | const w = Worker.create(opts) 145 | assert.equal(w.msTimeout, 1, 'set the msTimeout correctly') 146 | }) 147 | 148 | it('should use ENV for msTimeout', () => { 149 | process.env.WORKER_TIMEOUT = 2 150 | const w = Worker.create(opts) 151 | assert.equal(w.msTimeout, 2, 'set the msTimeout correctly') 152 | }) 153 | 154 | it('should default the msTimeout to 0', () => { 155 | const w = Worker.create(opts) 156 | assert.equal(w.msTimeout, 0, 'set the msTimeout correctly') 157 | }) 158 | }) // end msTimeout 159 | 160 | describe('maxRetryDelay', function () { 161 | beforeEach(() => { 162 | delete process.env.WORKER_MAX_RETRY_DELAY 163 | }) 164 | 165 | it('should use passed maxRetryDelay', () => { 166 | opts.maxRetryDelay = 1 167 | const w = Worker.create(opts) 168 | assert.equal(w.maxRetryDelay, 1, 'set the maxRetryDelay correctly') 169 | }) 170 | 171 | it('should use ENV for maxRetryDelay', () => { 172 | process.env.WORKER_MAX_RETRY_DELAY = 2 173 | const w = Worker.create(opts) 174 | assert.equal(w.maxRetryDelay, 2, 'set the maxRetryDelay correctly') 175 | }) 176 | 177 | it('should default the maxRetryDelay to max int', () => { 178 | const w = Worker.create(opts) 179 | assert.equal(w.maxRetryDelay, Number.MAX_SAFE_INTEGER, 'set the maxRetryDelay correctly') 180 | }) 181 | }) // end maxRetryDelay 182 | 183 | describe('retryDelay', function () { 184 | beforeEach(() => { 185 | delete process.env.WORKER_MIN_RETRY_DELAY 186 | }) 187 | 188 | it('should use passed retryDelay', () => { 189 | opts.retryDelay = 3 190 | const w = Worker.create(opts) 191 | assert.equal(w.retryDelay, 3, 'set the retryDelay correctly') 192 | }) 193 | 194 | it('should use ENV for retryDelay', () => { 195 | process.env.WORKER_MIN_RETRY_DELAY = 2 196 | const w = Worker.create(opts) 197 | assert.equal(w.retryDelay, 2, 'set the retryDelay correctly') 198 | }) 199 | 200 | it('should default the retryDelay to 1', () => { 201 | const w = Worker.create(opts) 202 | assert.equal(w.retryDelay, 1, 'set the retryDelay correctly') 203 | }) 204 | }) // end retryDelay 205 | }) 206 | 207 | describe('prototype methods', () => { 208 | let worker 209 | 210 | beforeEach(() => { 211 | worker = Worker.create(opts) 212 | }) 213 | 214 | describe('_eventTags', () => { 215 | let worker 216 | const queue = 'some.queue.name' 217 | 218 | beforeEach(() => { 219 | worker = Worker.create(opts) 220 | worker.queue = queue 221 | }) 222 | 223 | it('should generate tags for new style queues', () => { 224 |
const tags = worker._eventTags() 225 | assert.isObject(tags) 226 | assert.equal(Object.keys(tags).length, 4) 227 | assert.deepEqual(tags, { 228 | queue: queue, 229 | token0: 'name', 230 | token1: 'queue.name', 231 | token2: 'some.queue.name' 232 | }) 233 | }) 234 | 235 | it('should generate tags for old style queues', () => { 236 | const queue = 'some-queue-name' 237 | worker.queue = queue 238 | const tags = worker._eventTags() 239 | assert.isObject(tags) 240 | assert.equal(Object.keys(tags).length, 2) 241 | assert.deepEqual(tags, { 242 | queue: queue, 243 | token0: 'some-queue-name' 244 | }) 245 | }) 246 | }) 247 | 248 | describe('_incMonitor', () => { 249 | let worker 250 | const queue = 'do.something.command' 251 | 252 | beforeEach(() => { 253 | sinon.stub(monitor, 'increment') 254 | worker = Worker.create(opts) 255 | worker.queue = queue 256 | }) 257 | 258 | afterEach(() => { 259 | monitor.increment.restore() 260 | }) 261 | 262 | it('should call monitor increment for event without result tag', () => { 263 | worker._incMonitor('ponos') 264 | sinon.assert.calledOnce(monitor.increment) 265 | sinon.assert.calledWith(monitor.increment, 'ponos', { 266 | token0: 'command', 267 | token1: 'something.command', 268 | token2: 'do.something.command', 269 | queue: 'do.something.command' 270 | }) 271 | }) 272 | 273 | it('should call monitor increment for event with extra tags', () => { 274 | worker._incMonitor('ponos.finish', { result: 'success' }) 275 | sinon.assert.calledOnce(monitor.increment) 276 | sinon.assert.calledWith(monitor.increment, 'ponos.finish', { 277 | token0: 'command', 278 | token1: 'something.command', 279 | token2: 'do.something.command', 280 | queue: 'do.something.command', 281 | result: 'success' 282 | }) 283 | }) 284 | 285 | describe('with disabled monitoring', () => { 286 | beforeEach(() => { 287 | process.env.WORKER_MONITOR_DISABLED = 'true' 288 | }) 289 | 290 | afterEach(() => { 291 | delete process.env.WORKER_MONITOR_DISABLED 292 | }) 293 | 294 | it('should not call monitor increment', () => { 295 | worker._incMonitor('ponos.finish', { result: 'success' }) 296 | sinon.assert.notCalled(monitor.increment) 297 | }) 298 | }) 299 | }) 300 | 301 | describe('_createTimer', () => { 302 | let worker 303 | const queue = 'do.something.command' 304 | 305 | beforeEach(() => { 306 | sinon.stub(monitor, 'timer').returns({ stop: () => {} }) 307 | worker = Worker.create(opts) 308 | worker.queue = queue 309 | }) 310 | 311 | afterEach(() => { 312 | monitor.timer.restore() 313 | }) 314 | 315 | it('should call monitor.timer for event without result tag', () => { 316 | const timer = worker._createTimer() 317 | assert.isNotNull(timer) 318 | assert.isNotNull(timer.stop) 319 | sinon.assert.calledOnce(monitor.timer) 320 | sinon.assert.calledWith(monitor.timer, 'ponos.timer', true, { 321 | token0: 'command', 322 | token1: 'something.command', 323 | token2: 'do.something.command', 324 | queue: 'do.something.command' 325 | }) 326 | }) 327 | 328 | describe('with disabled monitoring', () => { 329 | beforeEach(() => { 330 | process.env.WORKER_MONITOR_DISABLED = 'true' 331 | }) 332 | 333 | afterEach(() => { 334 | delete process.env.WORKER_MONITOR_DISABLED 335 | }) 336 | 337 | it('should not call monitor.timer', () => { 338 | const timer = worker._createTimer() 339 | assert.isNull(timer) 340 | sinon.assert.notCalled(monitor.timer) 341 | }) 342 | }) 343 | }) 344 | 345 | describe('_wrapTask', () => { 346 | let clock 347 | beforeEach(() => { 348 | clock = sinon.useFakeTimers() 349 | sinon.stub(worker, 'task') 
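// worker.task is stubbed so each test below controls the promise the task returns (e.g. Promise.delay past or within msTimeout), and the fake clock installed above lets those tests advance time manually instead of waiting in real time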
350 | }) 351 | 352 | afterEach(() => { 353 | clock.restore() 354 | }) 355 | 356 | it('should timeout the job', () => { 357 | worker.msTimeout = 50 358 | worker.task.returns(() => { 359 | return Promise.delay(100) 360 | }) 361 | return Promise.join([ 362 | assert.isRejected(worker._wrapTask(), TimeoutError), 363 | Promise.try(() => { 364 | sinon.assert.calledOnce(worker.task) 365 | clock.tick(60) 366 | }) 367 | ]) 368 | }) 369 | 370 | it('should not timeout the job', () => { 371 | worker.msTimeout = 100 372 | worker.task.returns(() => { 373 | return Promise.delay(10) 374 | }) 375 | return Promise.join([ 376 | assert.isFulfilled(worker._wrapTask()), 377 | Promise.try(() => { 378 | sinon.assert.calledOnce(worker.task) 379 | clock.tick(20) 380 | }) 381 | ]) 382 | }) 383 | 384 | it('should run task', () => { 385 | const TestJob = { who: 'ami' } 386 | worker.timeout = null 387 | worker.job = TestJob 388 | worker.jobMeta = { 389 | appId: 'api', 390 | timestamp: Date.now() 391 | } 392 | return assert.isFulfilled(worker._wrapTask()) 393 | .then(() => { 394 | sinon.assert.calledOnce(worker.task) 395 | sinon.assert.calledWith(worker.task, TestJob, worker.jobMeta) 396 | }) 397 | }) 398 | }) // end _wrapTask 399 | 400 | describe('_validateJob', () => { 401 | it('should reject and not run if bad job', () => { 402 | worker.jobSchema = joi.string() 403 | worker.job = 123123 404 | return assert.isRejected(worker._validateJob(), WorkerStopError) 405 | }) 406 | 407 | it('should run if valid schema', () => { 408 | worker.jobSchema = joi.string() 409 | worker.job = '123123' 410 | return assert.isFulfilled(worker._validateJob()) 411 | }) 412 | 413 | describe('mocked joi', () => { 414 | beforeEach(() => { 415 | sinon.stub(joi, 'assert') 416 | }) 417 | 418 | afterEach(() => { 419 | joi.assert.restore() 420 | }) 421 | 422 | it('should throw original error if not joi', () => { 423 | const testError = new Error('Adrahil') 424 | worker.jobSchema = joi.string() 425 | joi.assert.throws(testError) 426 | 427 | return assert.isRejected(worker._validateJob(), Error, /Adrahil/) 428 | }) 429 | }) // end mocked joi 430 | }) // end _validateJob 431 | 432 | describe('_addWorkerDataToError', () => { 433 | it('should make err cause if it has a cause', () => { 434 | const testError = { 435 | cause: new Error('Frodo') 436 | } 437 | try { 438 | worker._addWorkerDataToError(testError) 439 | } catch (err) { 440 | assert.deepEqual(err, testError.cause) 441 | } 442 | }) 443 | 444 | it('should use passed error', () => { 445 | const testError = new Error('Gandalf') 446 | try { 447 | worker._addWorkerDataToError(testError) 448 | } catch (err) { 449 | assert.deepEqual(err, testError) 450 | } 451 | }) 452 | 453 | it('should convert data to object', () => { 454 | const testError = new Error('Samwise') 455 | testError.data = 'string' 456 | try { 457 | worker._addWorkerDataToError(testError) 458 | } catch (err) { 459 | assert.isObject(err.data) 460 | } 461 | }) 462 | 463 | it('should leave data alone', () => { 464 | const testError = new Error('Meriadoc') 465 | testError.data = { 466 | Merry: 'Brandybuck' 467 | } 468 | try { 469 | worker._addWorkerDataToError(testError) 470 | } catch (err) { 471 | assert.deepEqual(err, testError) 472 | } 473 | }) 474 | 475 | it('should add queue', () => { 476 | const testError = new Error('Peregrin') 477 | worker.queue = 'Pippin' 478 | try { 479 | worker._addWorkerDataToError(testError) 480 | } catch (err) { 481 | assert.equal(err.data.queue, worker.queue) 482 | } 483 | }) 484 | 485 | it('should leave queue 
alone', () => { 486 | const testError = new Error('Aragorn') 487 | worker.queue = 'Isildur' 488 | testError.data = { 489 | queue: 'Gondor' 490 | } 491 | try { 492 | worker._addWorkerDataToError(testError) 493 | } catch (err) { 494 | assert.equal(err.data.queue, testError.data.queue) 495 | } 496 | }) 497 | 498 | it('should add job', () => { 499 | const testError = new Error('Peregrin') 500 | worker.job = 'Pippin' 501 | try { 502 | worker._addWorkerDataToError(testError) 503 | } catch (err) { 504 | assert.equal(err.data.job, worker.job) 505 | } 506 | }) 507 | 508 | it('should leave job alone', () => { 509 | const testError = new Error('Aragorn') 510 | worker.job = 'Isildur' 511 | testError.data = { 512 | job: 'Gondor' 513 | } 514 | try { 515 | worker._addWorkerDataToError(testError) 516 | } catch (err) { 517 | assert.equal(err.data.job, testError.data.job) 518 | } 519 | }) 520 | }) // end _addWorkerDataToError 521 | 522 | describe('_retryWithDelay', () => { 523 | beforeEach(() => { 524 | sinon.stub(worker, '_incMonitor').returns() 525 | sinon.stub(worker, 'run').resolves() 526 | }) 527 | 528 | it('should _incMonitor', () => { 529 | return assert.isFulfilled(worker._retryWithDelay()) 530 | .then(() => { 531 | sinon.assert.calledOnce(worker._incMonitor) 532 | sinon.assert.calledWith(worker._incMonitor, 'ponos.finish-error', { 533 | result: 'task-error' 534 | }) 535 | }) 536 | }) 537 | 538 | it('should call after delay', () => { 539 | const clock = sinon.useFakeTimers() 540 | worker.retryDelay = 100 541 | return Promise.join([ 542 | assert.isFulfilled(worker._retryWithDelay()), 543 | Promise.try(() => { 544 | sinon.assert.notCalled(worker.run) 545 | clock.tick(50) 546 | sinon.assert.notCalled(worker.run) 547 | clock.tick(50) 548 | sinon.assert.calledOnce(worker.run) 549 | clock.restore() 550 | }) 551 | ]) 552 | }) 553 | 554 | it('should double delay', () => { 555 | worker.retryDelay = 1 556 | return assert.isFulfilled(worker._retryWithDelay()) 557 | .then(() => { 558 | assert.equal(worker.retryDelay, 2) 559 | }) 560 | }) 561 | 562 | it('should not exceed max', () => { 563 | worker.retryDelay = 2 564 | worker.maxRetryDelay = 4 565 | return assert.isFulfilled(worker._retryWithDelay()) 566 | .then(() => { 567 | return worker._retryWithDelay() 568 | }) 569 | .then(() => { 570 | return worker._retryWithDelay() 571 | }) 572 | .then(() => { 573 | assert.equal(worker.retryDelay, 4) 574 | }) 575 | }) 576 | }) // end _retryWithDelay 577 | 578 | describe('_enforceRetryLimit', () => { 579 | beforeEach(() => { 580 | sinon.stub(worker, '_incMonitor').returns() 581 | sinon.stub(worker, 'finalRetryFn').resolves() 582 | }) 583 | 584 | it('should throw original error if limit not reached', () => { 585 | worker.attempt = 0 586 | worker.maxNumRetries = 5 587 | const testError = new Error('Legolas') 588 | return assert.isRejected(worker._enforceRetryLimit(testError), Error, /Legolas/) 589 | .then(() => { 590 | sinon.assert.notCalled(worker._incMonitor) 591 | }) 592 | }) 593 | 594 | it('should throw WorkerStopError error if limit reached', () => { 595 | worker.attempt = 10 596 | worker.maxNumRetries = 5 597 | const testError = new Error('Thranduil') 598 | return assert.isRejected(worker._enforceRetryLimit(testError), WorkerStopError, /final retry handler finished/) 599 | .then(() => { 600 | sinon.assert.calledOnce(worker._incMonitor) 601 | sinon.assert.calledWith(worker._incMonitor, 'ponos.finish-error', { result: 'retry-error' }) 602 | }) 603 | }) 604 | 605 | it('should throw WorkerStopError error if finalRetryFn 
rejected', () => { 606 | worker.attempt = 10 607 | worker.maxNumRetries = 5 608 | const testError = new Error('Gimli') 609 | const retryError = new Error('Glóin') 610 | worker.finalRetryFn.rejects(retryError) 611 | return assert.isRejected(worker._enforceRetryLimit(testError), WorkerStopError, /final retry handler finished/) 612 | .then(() => { 613 | sinon.assert.calledTwice(worker._incMonitor) 614 | sinon.assert.calledWith(worker._incMonitor, 'ponos.finish-retry-fn-error', { result: 'retry-fn-error' }) 615 | sinon.assert.calledWith(worker._incMonitor, 'ponos.finish-error', { result: 'retry-error' }) 616 | }) 617 | }) 618 | 619 | it('should throw WorkerStopError error if finalRetryFn throws', () => { 620 | worker.attempt = 10 621 | worker.maxNumRetries = 5 622 | const testError = new Error('Boromir') 623 | const retryError = new Error('Denethor') 624 | worker.finalRetryFn.throws(retryError) 625 | return assert.isRejected(worker._enforceRetryLimit(testError), WorkerStopError, /final retry handler finished/) 626 | .then(() => { 627 | sinon.assert.calledTwice(worker._incMonitor) 628 | sinon.assert.calledWith(worker._incMonitor, 'ponos.finish-retry-fn-error', { result: 'retry-fn-error' }) 629 | sinon.assert.calledWith(worker._incMonitor, 'ponos.finish-error', { result: 'retry-error' }) 630 | }) 631 | }) 632 | }) // end _enforceRetryLimit 633 | 634 | describe('_handleWorkerStopError', () => { 635 | beforeEach(() => { 636 | sinon.stub(worker, '_incMonitor') 637 | sinon.stub(RabbitMQ.prototype, 'publishEvent') 638 | }) 639 | afterEach(() => { 640 | RabbitMQ.prototype.publishEvent.restore() 641 | }) 642 | 643 | it('should monitor error', () => { 644 | worker._handleWorkerStopError() 645 | sinon.assert.calledTwice(worker._incMonitor) 646 | sinon.assert.calledWith(worker._incMonitor, 'ponos.finish-error', { result: 'fatal-error' }) 647 | sinon.assert.calledWith(worker._incMonitor, 'ponos.finish', { result: 'fatal-error' }) 648 | sinon.assert.notCalled(RabbitMQ.prototype.publishEvent) 649 | }) 650 | 651 | it('should call errorPublisher.publishEvent', () => { 652 | worker.errorPublisher = new RabbitMQ({}) 653 | const error = new Error('Failed') 654 | worker._handleWorkerStopError(error) 655 | sinon.assert.calledOnce(RabbitMQ.prototype.publishEvent) 656 | const erroredJob = { 657 | originalJobPayload: worker.job, 658 | originalJobMeta: worker.jobMeta, 659 | originalWorkerName: worker.queue, 660 | error: error 661 | } 662 | sinon.assert.calledWith(RabbitMQ.prototype.publishEvent, 'worker.errored', erroredJob) 663 | }) 664 | }) // end _handleWorkerStopError 665 | 666 | describe('_handleTimeoutError', () => { 667 | beforeEach(() => { 668 | sinon.stub(worker, '_incMonitor') 669 | }) 670 | 671 | it('should propagate and monitor error', () => { 672 | const testError = new Error('Sauron') 673 | assert.throws(() => { 674 | worker._handleTimeoutError(testError) 675 | }, Error, /Sauron/) 676 | sinon.assert.calledOnce(worker._incMonitor) 677 | sinon.assert.calledWith(worker._incMonitor, 'ponos.finish-error', { result: 'timeout-error' }) 678 | }) 679 | }) // end _handleTimeoutError 680 | 681 | describe('_handleTaskSuccess', () => { 682 | beforeEach(() => { 683 | sinon.stub(worker, '_incMonitor') 684 | }) 685 | 686 | it('should monitor success', () => { 687 | worker._handleTaskSuccess() 688 | sinon.assert.calledOnce(worker._incMonitor) 689 | sinon.assert.calledWith(worker._incMonitor, 'ponos.finish', { result: 'success' }) 690 | }) 691 | }) // end _handleTaskSuccess 692 | 693 | describe('run', () => { 694 | let 
timerStub 695 | beforeEach(() => { 696 | timerStub = sinon.stub() 697 | sinon.stub(worker, '_createTimer').returns({ 698 | stop: timerStub 699 | }) 700 | sinon.stub(worker, '_wrapTask').resolves() 701 | sinon.stub(worker, '_handleTaskSuccess').resolves() 702 | sinon.stub(worker, '_addWorkerDataToError').resolves() 703 | sinon.stub(worker, '_handleTimeoutError').resolves() 704 | sinon.stub(worker, '_enforceRetryLimit').resolves() 705 | sinon.stub(worker.errorCat, 'report').resolves() 706 | sinon.stub(worker, '_handleWorkerStopError').resolves() 707 | sinon.stub(worker, '_retryWithDelay').resolves() 708 | }) 709 | 710 | afterEach(() => { 711 | worker.errorCat.report.restore() 712 | }) 713 | 714 | it('should not call error handlers on success', () => { 715 | return assert.isFulfilled(worker.run()) 716 | .then(() => { 717 | sinon.assert.calledOnce(worker._createTimer) 718 | sinon.assert.calledOnce(worker._wrapTask) 719 | sinon.assert.calledOnce(worker._handleTaskSuccess) 720 | sinon.assert.notCalled(worker._addWorkerDataToError) 721 | sinon.assert.notCalled(worker._handleTimeoutError) 722 | sinon.assert.notCalled(worker._enforceRetryLimit) 723 | sinon.assert.notCalled(worker.errorCat.report) 724 | sinon.assert.notCalled(worker._handleWorkerStopError) 725 | sinon.assert.notCalled(worker._retryWithDelay) 726 | sinon.assert.calledOnce(timerStub) 727 | }) 728 | }) 729 | 730 | it('should not stop null timer', () => { 731 | worker._createTimer.returns(null) 732 | return assert.isFulfilled(worker.run()) 733 | .then(() => { 734 | sinon.assert.calledOnce(worker._createTimer) 735 | }) 736 | }) 737 | 738 | it('should call correct timeout handlers', () => { 739 | const timeoutError = new TimeoutError('Nazgûl') 740 | worker._wrapTask.rejects(timeoutError) 741 | worker._addWorkerDataToError.rejects(timeoutError) 742 | worker._handleTimeoutError.rejects(timeoutError) 743 | worker._enforceRetryLimit.rejects(timeoutError) 744 | worker.errorCat.report.rejects(timeoutError) 745 | 746 | return assert.isFulfilled(worker.run()) 747 | .then(() => { 748 | sinon.assert.calledOnce(worker._createTimer) 749 | sinon.assert.calledOnce(worker._wrapTask) 750 | sinon.assert.notCalled(worker._handleTaskSuccess) 751 | sinon.assert.calledOnce(worker._addWorkerDataToError) 752 | sinon.assert.calledOnce(worker._handleTimeoutError) 753 | sinon.assert.calledOnce(worker._enforceRetryLimit) 754 | sinon.assert.calledOnce(worker.errorCat.report) 755 | sinon.assert.notCalled(worker._handleWorkerStopError) 756 | sinon.assert.calledOnce(worker._retryWithDelay) 757 | sinon.assert.calledOnce(timerStub) 758 | }) 759 | }) 760 | 761 | it('should call correct worker stop handlers', () => { 762 | const workerStopError = new WorkerStopError('Gollum') 763 | worker._wrapTask.rejects(workerStopError) 764 | worker._addWorkerDataToError.rejects(workerStopError) 765 | worker._handleTimeoutError.rejects(workerStopError) 766 | worker._enforceRetryLimit.rejects(workerStopError) 767 | worker.errorCat.report.rejects(workerStopError) 768 | 769 | return assert.isFulfilled(worker.run()) 770 | .then(() => { 771 | sinon.assert.calledOnce(worker._createTimer) 772 | sinon.assert.calledOnce(worker._wrapTask) 773 | sinon.assert.notCalled(worker._handleTaskSuccess) 774 | sinon.assert.calledOnce(worker._addWorkerDataToError) 775 | sinon.assert.notCalled(worker._handleTimeoutError) 776 | sinon.assert.calledOnce(worker._enforceRetryLimit) 777 | sinon.assert.calledOnce(worker.errorCat.report) 778 | sinon.assert.calledOnce(worker._handleWorkerStopError) 779 | 
sinon.assert.notCalled(worker._retryWithDelay) 780 | sinon.assert.calledOnce(timerStub) 781 | }) 782 | }) 783 | 784 | it('should call correct error handlers', () => { 785 | const normalErr = new Error('Bilbo') 786 | worker._wrapTask.rejects(normalErr) 787 | worker._addWorkerDataToError.rejects(normalErr) 788 | worker._handleTimeoutError.rejects(normalErr) 789 | worker._enforceRetryLimit.rejects(normalErr) 790 | worker.errorCat.report.rejects(normalErr) 791 | 792 | return assert.isFulfilled(worker.run()) 793 | .then(() => { 794 | sinon.assert.calledOnce(worker._createTimer) 795 | sinon.assert.calledOnce(worker._wrapTask) 796 | sinon.assert.notCalled(worker._handleTaskSuccess) 797 | sinon.assert.calledOnce(worker._addWorkerDataToError) 798 | sinon.assert.notCalled(worker._handleTimeoutError) 799 | sinon.assert.calledOnce(worker._enforceRetryLimit) 800 | sinon.assert.calledOnce(worker.errorCat.report) 801 | sinon.assert.notCalled(worker._handleWorkerStopError) 802 | sinon.assert.calledOnce(worker._retryWithDelay) 803 | sinon.assert.calledOnce(timerStub) 804 | }) 805 | }) 806 | }) // end run 807 | }) 808 | }) 809 | --------------------------------------------------------------------------------