├── .gitignore ├── .npmignore ├── .travis.yml ├── Makefile ├── README.md ├── client.js ├── dist ├── client.js └── client.min.js ├── index.js ├── package.json ├── server.js ├── src ├── client.coffee ├── common.coffee ├── server.coffee ├── task.coffee └── worker.coffee └── test ├── coverage.js └── index.coffee /.gitignore: -------------------------------------------------------------------------------- 1 | coverage/ 2 | node_modules/ 3 | lib/ 4 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | coverage/ 2 | node_modules/ 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "7" 4 | - "6" 5 | - "5" 6 | - "4" 7 | after_success: 8 | - "cat ./coverage/lcov.info | ./node_modules/.bin/coveralls" 9 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | PATH := node_modules/.bin:$(PATH) 3 | SHELL := /bin/bash 4 | 5 | .PHONY: test 6 | test: node_modules 7 | @mocha --compilers coffee:coffee-script/register --bail test 8 | 9 | .PHONY: coverage 10 | coverage: node_modules 11 | @mocha --compilers coffee:coffee-script/register --require test/coverage.js test/*.coffee 12 | @istanbul report html 13 | @open coverage/index.html 14 | 15 | .PHONY: client 16 | client: node_modules 17 | browserify -t coffeeify --extension .coffee -s wsq -o dist/client.js src/client.coffee 18 | cat ./dist/client.js | uglifyjs > ./dist/client.min.js 19 | 20 | node_modules: 21 | npm install 22 | 23 | .PHONY: clean 24 | clean: 25 | rm -rf node_modules 26 | rm -rf lib 27 | rm -rf coverage 28 | 29 | .PHONY: publish 30 | publish: 31 | @set -e ;\ 32 | current_version=$$(node -e 
'console.log(require("./package").version)') ;\ 33 | read -r -p "New version (current $$current_version): " version ;\ 34 | node -e "p=require('./package');p.version='$$version';console.log(JSON.stringify(p, null, 2))" > package_tmp.json ;\ 35 | make client ;\ 36 | while [ -z "$$really" ]; do \ 37 | read -r -p "Publish as version $$version? [y/N]: " really ;\ 38 | done ;\ 39 | [ $$really = "y" ] || [ $$really = "Y" ] || (echo "Nevermind then."; exit 1;) ;\ 40 | mv package_tmp.json package.json ;\ 41 | git add dist/ package.json ;\ 42 | git commit -m $$version ;\ 43 | git tag $$version ;\ 44 | npm publish 45 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # wsq [![Build Status](https://travis-ci.org/jnordberg/wsq.svg)](https://travis-ci.org/jnordberg/wsq) [![Coverage Status](https://coveralls.io/repos/jnordberg/wsq/badge.svg?branch=master&service=github)](https://coveralls.io/github/jnordberg/wsq?branch=master) [![Package Version](https://img.shields.io/npm/v/wsq.svg)](https://www.npmjs.com/package/wsq) ![License](https://img.shields.io/npm/l/wsq.svg) 3 | 4 | Websocket task queue 5 | 6 | 7 | What is it? 8 | ----------- 9 | 10 | An easy to use task queue that handles streaming data, also works in the browser. 
11 | 12 | 13 | Example 14 | ------- 15 | 16 | Video encoding 17 | 18 | server.js (see [wsq-server](https://github.com/jnordberg/wsq-server) for a standalone server with logging) 19 | ```javascript 20 | var Server = require('wsq/server') 21 | var leveldown = require('leveldown') 22 | var BlobStore = require('fs-blob-store') 23 | 24 | new Server({ 25 | socketOptions: {port: 4242}, 26 | dbLocation: '/var/leveldb/wsq', 27 | dbOptions: {db: leveldown}, // db can be any 'abstract-leveldown' compatible instance 28 | blobStore: new BlobStore('/var/storage/wsq') // same here any 'abstract-blob-store' will do 29 | }) 30 | ``` 31 | 32 | add.js: 33 | ```javascript 34 | // usage: node add.js 35 | 36 | var Client = require('wsq/client') 37 | var fs = require('fs') 38 | 39 | var client = new Client('ws://localhost:4242') 40 | var queue = client.queue('ffmpeg') 41 | 42 | var data = { 43 | video: fs.createReadStream(process.argv[2]), 44 | args: process.argv.slice(3) 45 | } 46 | 47 | var task = queue.add(data, function(error){ 48 | if (error) { 49 | console.log('Error queueing video: ' + error.message) 50 | process.exit(1) 51 | } else { 52 | console.log('Video queued for processing.') 53 | process.exit() 54 | } 55 | }) 56 | 57 | ``` 58 | 59 | worker.js: 60 | ```javascript 61 | var Client = require('wsq/client') 62 | var fs = require('fs') 63 | var os = require('os') 64 | var path = require('path') 65 | 66 | var client = new Client('ws://localhost:4242') 67 | 68 | var videoQueue = client.queue('ffmpeg') 69 | var resultQueue = client.queue('ffmpeg-results') 70 | 71 | videoQueue.process(function(task, callback) { 72 | var encoder = new VideoEncoder(task.data.args) 73 | 74 | encoder.on('progress', function(percent) { 75 | // update task progress, this will also reset the task timeout (default 60 seconds) 76 | // useful for long running tasks like this one 77 | task.updateProgress(percent) 78 | }) 79 | 80 | // start encoding 81 | task.data.video.pipe(encoder) 82 | 83 | // start
streaming the encoded video to the result queue, if the stream emits an error 84 | // the result task will not be created and any partial data streamed is discarded 85 | resultQueue.add({video: encoder}, function(error){ 86 | if (error) { 87 | console.log('Encoding failed: ' + error.message) 88 | callback(error) // task is marked as failed, and possibly re-queued based on its options 89 | } else { 90 | // all good, ready to accept next task 91 | callback() 92 | } 93 | }) 94 | }) 95 | ``` 96 | 97 | 98 | Documentation 99 | ------------- 100 | 101 | ### Class: Client 102 | 103 | This class is a wsq client. It is an `EventEmitter`. 104 | 105 | #### new Client(address, [options]) 106 | 107 | * `address` String 108 | * `options` Object 109 | * `backoff` Function 110 | 111 | Construct a new client object. 112 | 113 | #### address 114 | 115 | Address to wsq server, e.g. `'ws://localhost:1324'` 116 | 117 | #### options.backoff 118 | 119 | Function with the signature `function(tries){}` that should return number of milliseconds 120 | to wait until next connection attempt. 121 | 122 | The default function looks like: 123 | 124 | ```javascript 125 | function(tries){ 126 | return Math.min(Math.pow(tries * 10, 2), 60 * 1000) 127 | } 128 | ``` 129 | 130 | #### client.queue(name) 131 | 132 | Return a `ClientQueue` instance. Will be created if nonexistent. 133 | 134 | #### client.listQueues() 135 | 136 | Return an array of active `ClientQueue` instances. 137 | 138 | #### client.getEventStream() 139 | 140 | Return an object stream that writes all the events as they come in from the server. 141 | 142 | ```json 143 | { 144 | "event": "", 145 | "args": [..] 146 | } 147 | ``` 148 | 149 | #### Event: 'error' 150 | 151 | `function(error){}` 152 | 153 | #### Event: 'connect' 154 | 155 | `function(){}` 156 | 157 | Connected to server. 158 | 159 | #### Event: 'disconnect' 160 | 161 | `function(){}` 162 | 163 | Connection was lost.
164 | 165 | 166 | ### Class: ClientQueue 167 | 168 | This class is the client's representation of a queue. It is an `EventEmitter`. 169 | 170 | #### queue.add(data, [options], [callback]) 171 | 172 | Add a task to the queue. The optional callback is called when the task is successfully added to the queue 173 | or with an `Error` object on failure. 174 | 175 | * `options` Object 176 | * `timeout` Number - Default `60 * 1000` 177 | * `retries` Number - Default `0` 178 | * `autoremove` Boolean - Default `false` 179 | 180 | #### options.timeout 181 | 182 | How long to wait for the task to complete without hearing from the worker in milliseconds. 183 | Set to -1 to disable timeout (not recommended, use progress updates for long running tasks instead) 184 | 185 | #### options.retries 186 | 187 | How many times the task should be re-queued on failure. A value of zero 188 | means no retries before the task has to be re-queued or removed explicitly. 189 | Can also be set to -1 to retry forever. 190 | 191 | #### options.autoremove 192 | 193 | Whether to remove the task and any associated streams that were buffered on completion. 194 | Note that failed tasks will always have to be handled explicitly. 195 | 196 | #### queue.process(workerFn) 197 | 198 | Add a worker to the queue. `workerFn` has the signature `function(task, callback){}`. 199 | 200 | The callback should be called when the worker has completed processing the 201 | task, or with an `Error` object on failure. 202 | 203 | #### queue.all(callback) 204 | 205 | Callback with a list of all `Task` instances in the queue. 206 | 207 | #### queue.waiting(callback) 208 | 209 | Callback with a list of all waiting `Task` instances in the queue. 210 | 211 | #### queue.active(callback) 212 | 213 | Callback with a list of all active `Task` instances in the queue. 214 | 215 | #### queue.completed(callback) 216 | 217 | Callback with a list of all completed `Task` instances in the queue.
218 | 219 | #### queue.failed(callback) 220 | 221 | Callback with a list of all failed `Task` instances in the queue. 222 | 223 | #### Event: 'worker added' 224 | 225 | `function(worker){}` 226 | 227 | Worker was added to the queue. 228 | 229 | #### Event: 'worker removed' 230 | 231 | `function(worker){}` 232 | 233 | Worker was removed from the queue. 234 | 235 | #### Event: 'worker started' 236 | 237 | `function(worker, task){}` 238 | 239 | Worker started processing task. 240 | 241 | #### Event: 'task ' 242 | 243 | See `Task` events. 244 | 245 | 246 | ### Class: Task 247 | 248 | This class represents a task. It is an `EventEmitter`. 249 | 250 | 251 | #### task.updateProgress(percentage) 252 | 253 | Update the progress of the task. Percentage is a fraction between 0 and 1. 254 | Calling this resets the task timeout timer. 255 | 256 | #### task.touch() 257 | 258 | Reset the task timeout. Useful if your task process does not have any useful progress information 259 | but you still want to keep long living tasks running. 260 | 261 | #### task.remove(callback) 262 | 263 | Remove the task from the system. Do not call this from inside a worker. 264 | 265 | #### task.retry(callback) 266 | 267 | Reschedule a failed task. Do not call this from inside a worker. 268 | 269 | #### task.getData(callback) 270 | 271 | Return task data with streams resolved. Note that `task.data` will already be resolved for tasks 272 | passed to a worker. 273 | 274 | #### Event: 'added' 275 | 276 | `function(task){}` 277 | 278 | Added to queue. 279 | 280 | #### Event: 'queued' 281 | 282 | `function(task){}` 283 | 284 | Queued for processing. 285 | 286 | #### Event: 'started' 287 | 288 | `function(task){}` 289 | 290 | Started processing. 291 | 292 | #### Event: 'progress' 293 | 294 | `function(task, percentage){}` 295 | 296 | Progress updated. Percentage is a fraction between 0 and 1. 297 | 298 | #### Event: 'completed' 299 | 300 | `function(task){}` 301 | 302 | Successfully processed. 
303 | 304 | #### Event: 'failed' 305 | 306 | `function(task, willRetry){}` 307 | 308 | Task failed, `task.error` will contain the failure message. `willRetry` is true if the task will be retried. 309 | 310 | #### Event: 'deleted' 311 | 312 | `function(task){}` 313 | 314 | Task and associated streams was removed from the queue. 315 | 316 | 317 | License 318 | ------- 319 | 320 | MIT 321 | -------------------------------------------------------------------------------- /client.js: -------------------------------------------------------------------------------- 1 | module.exports = require('./lib/client').Client -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | Server: require('./lib/server').Server, 3 | Client: require('./lib/client').Client, 4 | } 5 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "wsq", 3 | "version": "0.2.0", 4 | "description": "Task queue on top of websockets", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "mocha --compilers coffee:coffee-script/register --require test/coverage.js -R tap test && istanbul report text-summary lcov", 8 | "prepublish": "coffee -b -c -o lib src/*.coffee" 9 | }, 10 | "author": "Johan Nordberg ", 11 | "repository": { 12 | "type": "git", 13 | "url": "https://github.com/jnordberg/wsq.git" 14 | }, 15 | "license": "MIT", 16 | "dependencies": { 17 | "async": "^2.1.4", 18 | "dnode": "^1.2.2", 19 | "levelup": "^1.3.3", 20 | "multiplex": "^6.7.0", 21 | "pump": "^1.0.1", 22 | "websocket-stream": "^3.3.3" 23 | }, 24 | "devDependencies": { 25 | "abstract-blob-store": "^3.2.0", 26 | "browserify": "^13.1.1", 27 | "coffee-coverage": "^1.0.1", 28 | "coffee-script": "^1.12.1", 29 | "coffeeify": "^2.1.0", 30 | "coveralls": "^2.11.15", 31 
| "istanbul": "^0.4.5", 32 | "memdown": "^1.2.4", 33 | "mocha": "^3.2.0", 34 | "through": "^2.3.8", 35 | "uglifyjs": "^2.4.10" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /server.js: -------------------------------------------------------------------------------- 1 | module.exports = require('./lib/server').Server -------------------------------------------------------------------------------- /src/client.coffee: -------------------------------------------------------------------------------- 1 | ### Client implementation. ### 2 | 3 | async = require 'async' 4 | dnode = require 'dnode' 5 | multiplex = require 'multiplex' 6 | WebSocket = require 'websocket-stream' 7 | {PassThrough} = require 'stream' 8 | 9 | {EventEmitter} = require 'events' 10 | {Worker} = require './worker' 11 | {Task} = require './task' 12 | {Stream} = require 'stream' 13 | {randomString} = require './common' 14 | 15 | # workaround for browsers where not all streams will inherit from browserifys Stream class 16 | # TODO: more robust stream detection 17 | isStream = (value) -> value instanceof Stream or value.readable is true 18 | 19 | class Client extends EventEmitter 20 | 21 | defaults = 22 | backoff: (tries) -> Math.min (tries * 10) ** 2, 60 * 1000 23 | 24 | constructor: (@address, options={}) -> 25 | @ready = false 26 | @workers = [] 27 | @queues = {} 28 | @activeStreams = {} 29 | 30 | @options = {} 31 | for key of defaults 32 | @options[key] = options[key] ? defaults[key] 33 | 34 | @subscribed = false 35 | 36 | @setMaxListeners Infinity 37 | @connectionTries = 0 38 | @connect() 39 | 40 | connect: => 41 | @_closed = false 42 | return if @socket? 43 | clearTimeout @_connectTimer 44 | @socket = new WebSocket @address 45 | @socket.on 'close', => 46 | wasConnected = @remote? 
47 | @socket = null 48 | @remote = null 49 | unless @_closed 50 | delay = @options.backoff @connectionTries++ 51 | @_connectTimer = setTimeout @connect, delay 52 | @onDisconnect() if wasConnected 53 | @socket.on 'error', (error) => @emit 'error', error 54 | @rpc = dnode null, {weak: false} 55 | @rpc.on 'remote', (remote) => 56 | @remote = remote 57 | @connectionTries = 0 58 | @onConnect() 59 | @multiplex = multiplex() 60 | @socket.pipe(@multiplex).pipe(@socket) 61 | rpcStream = @multiplex.createSharedStream 'rpc' 62 | rpcStream.pipe(@rpc).pipe(rpcStream) 63 | 64 | close: -> 65 | @_closed = true 66 | clearTimeout @_connectTimer 67 | return unless @socket? 68 | @socket.end() 69 | @remote = null 70 | @onDisconnect() 71 | 72 | onConnect: -> 73 | async.forEach @getFreeWorkers(), (worker, callback) => 74 | @remote.registerWorker worker.toRPC(), callback 75 | , @errorCallback 76 | @setupEvents() if @subscribed 77 | @emit 'connect' 78 | 79 | onDisconnect: -> 80 | for id, stream of @activeStreams 81 | stream.emit 'error', new Error 'Lost connection.' 82 | @activeStreams = {} 83 | @emit 'disconnect' 84 | 85 | setupEvents: -> 86 | @eventStream = @multiplex.createStream 'events' 87 | @eventStream.on 'data', (data) => 88 | try 89 | event = JSON.parse data 90 | catch error 91 | error.message = "Unable to parse event stream: #{ error.message }" 92 | @emit 'error', error 93 | return 94 | if @listenerCount(event.event) > 0 95 | [type] = event.event.split ' ' 96 | if type is 'task' and @listenerCount event.event 97 | [task, extra...] = event.args 98 | task = Task.fromRPC task 99 | task.client = this 100 | @emit event.event, task, extra... 101 | else 102 | @emit event.event, event.args... 103 | if @eventProxy? 104 | @eventProxy.write event 105 | return 106 | 107 | getEventStream: -> 108 | unless @eventProxy? 109 | @eventProxy = new PassThrough {objectMode: true} 110 | unless @subscribed 111 | @subscribed = true 112 | @setupEvents() if @remote? 
113 | return @eventProxy 114 | 115 | onError: (error) => @emit 'error', error 116 | 117 | errorCallback: (error) => @onError error if error? 118 | 119 | on: (event, handler) -> 120 | if event[...4] is 'task' and not @subscribed 121 | @subscribed = true 122 | @setupEvents() if @remote? 123 | super event, handler 124 | 125 | getQueue: (name) -> 126 | unless @queues[name]? 127 | @queues[name] = new ClientQueue name, this 128 | return @queues[name] 129 | 130 | addTask: (task, callback=@errorCallback) -> 131 | task.client = this 132 | unless @remote? 133 | @once 'connect', => @addTask task, callback 134 | return 135 | streams = @encodeStreams task.data 136 | async.forEach streams, (stream, callback) => 137 | callbackOnce = (error) => 138 | delete @activeStreams[stream.id] 139 | if callback? 140 | callback error 141 | callback = null 142 | destination = @multiplex.createStream 'write:' + stream.id, {chunked: true, halfOpen: true} 143 | @activeStreams[stream.id] = stream.value 144 | stream.value.on 'error', callbackOnce 145 | stream.value.on 'end', callbackOnce 146 | stream.value.resume?() 147 | stream.value.pipe destination 148 | , (error) => 149 | if error? 150 | callback error 151 | else 152 | @remote.addTask task.toRPC(true), callback 153 | 154 | removeTask: (task, callback=@errorCallback) -> 155 | unless @remote? 156 | @once 'connect', => @removeTask task, callback 157 | return 158 | @remote.removeTask task.toRPC(), callback 159 | 160 | retryTask: (task, callback=@errorCallback) -> 161 | unless @remote? 162 | @once 'connect', => @retryTask task, callback 163 | return 164 | @remote.retryTask task.toRPC(), callback 165 | 166 | getTaskData: (task, callback) -> 167 | unless @remote? 168 | @once 'connect', => @getTaskData task, callback 169 | return 170 | if task.data? 171 | @resolveStreams task.data # TODO: don't mutate data 172 | callback null, task.data 173 | else 174 | @remote.getTaskData task.toRPC(), (error, data) => 175 | unless error? 
176 | @resolveStreams data 177 | callback error, data 178 | 179 | resolveStreams: (data) -> 180 | streams = [] 181 | walk = (d) => 182 | for key, value of d 183 | if value?.__stream? 184 | id = value.__stream 185 | stream = @multiplex.createStream 'read:' + id, {chunked: true, halfOpen: true} 186 | @activeStreams[id] = stream 187 | stream.on 'error', => delete @activeStreams[id] 188 | stream.on 'end', => delete @activeStreams[id] 189 | d[key] = stream 190 | streams.push stream 191 | else if typeof value is 'object' 192 | walk value 193 | return 194 | walk data 195 | return streams 196 | 197 | encodeStreams: (data) -> 198 | streams = [] 199 | do walk = (data) -> 200 | for key, value of data 201 | if isStream value 202 | id = randomString 24 203 | data[key] = {__stream: id} 204 | streams.push {id, value} 205 | else if typeof value is 'object' 206 | walk value 207 | return 208 | return streams 209 | 210 | addWorker: (worker) -> 211 | unless @remote? 212 | @once 'connect', => @addWorker worker 213 | return 214 | worker.client = this 215 | @workers.push worker 216 | do register = => @remote?.registerWorker worker.toRPC() 217 | worker.on 'start', (task) => 218 | task.on 'local-progress', (percent) => 219 | @remote?.taskProgress task.toRPC(), percent 220 | task.once 'local-success', => 221 | @taskSuccessful task 222 | task.once 'local-failure', (error) => 223 | @taskFailure task, error 224 | worker.on 'finish', register 225 | 226 | taskSuccessful: (task, callback=@errorCallback) -> 227 | unless @remote? 228 | @once 'connect', => @taskSuccessful task, callback 229 | return 230 | @remote.taskSuccessful task.toRPC(), callback 231 | 232 | taskFailure: (task, error, callback=@errorCallback) -> 233 | unless @remote? 
234 | @once 'connect', => @taskFailure task, error 235 | return 236 | @remote.taskFailure task.toRPC(), error, callback 237 | 238 | getFreeWorkers: -> @workers.filter (worker) -> worker.isFree() 239 | 240 | queue: (name) -> @getQueue name 241 | 242 | listTasks: (queue, filter, callback) -> 243 | unless @remote? 244 | @once 'connect', => @listTasks queue, filter, callback 245 | return 246 | @remote.listTasks queue, filter, (error, tasks) => 247 | unless error? 248 | tasks = tasks.map (task) => 249 | rv = Task.fromRPC task 250 | rv.client = this 251 | return rv 252 | callback error, tasks 253 | 254 | listQueues: (callback) -> 255 | unless @remote? 256 | @once 'connect', => @listQueues callback 257 | return 258 | @remote.listQueues (error, result) => 259 | unless error? 260 | queues = result.map (name) => @getQueue name 261 | callback error, queues 262 | 263 | class ClientQueue 264 | ### Convenience. ### 265 | 266 | constructor: (@name, @client) -> 267 | 268 | all: (callback) -> @client.listTasks @name, 'all', callback 269 | 270 | waiting: (callback) -> @client.listTasks @name, 'waiting', callback 271 | 272 | active: (callback) -> @client.listTasks @name, 'active', callback 273 | 274 | completed: (callback) -> @client.listTasks @name, 'completed', callback 275 | 276 | failed: (callback) -> @client.listTasks @name, 'failed', callback 277 | 278 | on: (event, handler) -> 279 | @client.on event, (task, args...) => 280 | if task.queue is @name 281 | handler task, args... 282 | 283 | once: (event, handler) -> 284 | @client.once event, (task, args...) => 285 | if task.queue is @name 286 | handler task, args... 
287 | 288 | process: (processFn) -> 289 | worker = Worker.create @name, processFn 290 | @client.addWorker worker 291 | return worker 292 | 293 | add: (data, options, callback) -> 294 | if arguments.length is 2 and typeof options is 'function' 295 | callback = options 296 | options = null 297 | options ?= {} 298 | task = Task.create @name, options, data 299 | @client.addTask task, callback 300 | return task 301 | 302 | 303 | module.exports = {Client} 304 | -------------------------------------------------------------------------------- /src/common.coffee: -------------------------------------------------------------------------------- 1 | ### Common utils shared between client and server. ### 2 | 3 | crypto = require 'crypto' 4 | 5 | urlsafeCharset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_' 6 | 7 | randomString = (length=32, charset=urlsafeCharset) -> 8 | rv = '' 9 | clen = charset.length 10 | for byte in crypto.randomBytes length 11 | rv += charset.charAt byte % clen 12 | return rv 13 | 14 | module.exports = { 15 | randomString 16 | urlsafeCharset 17 | } 18 | -------------------------------------------------------------------------------- /src/server.coffee: -------------------------------------------------------------------------------- 1 | ### Server implementation. ### 2 | 3 | async = require 'async' 4 | dnode = require 'dnode' 5 | levelup = require 'levelup' 6 | multiplex = require 'multiplex' 7 | pump = require 'pump' 8 | WebSocket = require 'websocket-stream' 9 | {EventEmitter} = require 'events' 10 | {PassThrough} = require 'stream' 11 | {randomString} = require './common' 12 | {Task} = require './task' 13 | 14 | hrnow = -> 15 | [s, ns] = process.hrtime hrnow.start.hr 16 | return hrnow.start.ts + s * 1e3 + ns / 1e6 17 | hrnow.start = {ts: Date.now(), hr: process.hrtime()} 18 | 19 | resolveStreamIds = (data) -> 20 | rv = [] 21 | do walk = (data) => 22 | for key, value of data 23 | if value.__stream? 
24 | rv.push value.__stream 25 | else if typeof value is 'object' 26 | walk value 27 | return 28 | return rv 29 | 30 | class Server extends EventEmitter 31 | 32 | defaults = 33 | 34 | workerTimeout: 1000 # 1 second 35 | # How long to wait for workers to respond when assigning a new task. 36 | 37 | # dbLocation - string, required 38 | # Location of database on disk, passed to levelup. 39 | 40 | dbOptions: {} 41 | # Database options given to levelup. See https://github.com/Level/levelup#options 42 | # Note that keyEncoding and valueEncoding will be overwritten if present. 43 | 44 | # socketOptions - object, required 45 | # Options passed to ws, see: 46 | # https://github.com/websockets/ws/blob/master/doc/ws.md#new-wsserveroptions-callback 47 | 48 | # blobStore - blob store instance, required 49 | # Blob store instance complying to the "abstract-blob-store" interface, see: 50 | # https://github.com/maxogden/abstract-blob-store 51 | 52 | heartbeatInterval: 5000 # 5 seconds 53 | # How often to ping clients to keep the connection alive. Set to zero to disable. 54 | 55 | 56 | requiredOptions = ['dbLocation', 'socketOptions', 'blobStore'] 57 | 58 | constructor: (options) -> 59 | ### Create new queue server with *options*. ### 60 | 61 | unless options? 62 | throw new Error 'Missing options' 63 | 64 | for key in requiredOptions 65 | unless options[key]? 66 | throw new Error "Missing options.#{ key }" 67 | 68 | @options = {} 69 | for key of defaults 70 | @options[key] = options[key] ? 
defaults[key] 71 | 72 | @options.dbOptions.keyEncoding = 'utf8' 73 | @options.dbOptions.valueEncoding = 'json' 74 | 75 | @store = options.blobStore 76 | @database = levelup options.dbLocation, @options.dbOptions 77 | 78 | @queues = {} 79 | @connections = {} 80 | 81 | @eventStream = new PassThrough {autoDestroy: false} 82 | 83 | @restoreQueues => 84 | @socketServer = new WebSocket.Server options.socketOptions 85 | @socketServer.on 'stream', @handleConnection 86 | @emit 'ready' 87 | 88 | close: (callback) -> 89 | @socketServer?.close callback 90 | 91 | broadcastEvent: (event, args...) -> 92 | @emit event, args... 93 | @eventStream.write JSON.stringify {event, args} 94 | 95 | handleConnection: (stream) => 96 | connection = new Connection stream, this 97 | connection.id = randomString 24 98 | @connections[connection.id] = connection 99 | connection.on 'close', => delete @connections[connection.id] 100 | @emit 'connection', connection 101 | 102 | getQueue: (name) -> 103 | unless @queues[name]? 104 | @queues[name] = new Queue name, this 105 | return @queues[name] 106 | 107 | restoreQueues: (callback) -> 108 | restoreTask = (data) => 109 | task = Task.fromDB data.value 110 | queue = @getQueue task.queue 111 | switch task.state 112 | when 'waiting' 113 | queue.waiting.push task 114 | when 'active' 115 | queue.active[task.id] = task 116 | queue.taskTimer task 117 | when 'failed' 118 | queue.failed.push task 119 | when 'completed' 120 | queue.completed.push task 121 | else 122 | @emit 'error', new Error "Encountered task with invalid state '#{ task.state }' in database." 
123 | stream = @database.createReadStream() 124 | stream.on 'data', restoreTask 125 | stream.on 'error', (error) => @emit 'error', error 126 | stream.on 'end', => 127 | for name, queue of @queues 128 | for state in ['waiting', 'completed', 'failed'] 129 | queue[state].sort (a, b) -> a.queueTime - b.queueTime 130 | callback() 131 | 132 | class Queue extends EventEmitter 133 | 134 | constructor: (@name, @server) -> 135 | @workers = [] 136 | @waiting = [] 137 | @completed = [] 138 | @failed = [] 139 | @active = {} 140 | @timers = {} 141 | 142 | addWorker: (worker) -> 143 | @workers.push worker 144 | @server.broadcastEvent 'worker added', 145 | id: worker.id, 146 | connection: worker.connection 147 | queue: @name 148 | setImmediate @process 149 | 150 | removeWorker: (workerId) -> 151 | @workers = @workers.filter (worker) => 152 | if worker.id is workerId 153 | @server.broadcastEvent 'worker removed', 154 | id: worker.id 155 | connection: worker.connection 156 | queue: @name 157 | return false 158 | return true 159 | 160 | removeTask: (task, callback) -> 161 | if @active[task.id]? 162 | clearTimeout @timers[task.id] 163 | task = @active[task.id] 164 | delete @active[task.id] 165 | else 166 | matchFilter = (t) -> 167 | if t.id is task.id 168 | task = t 169 | return false 170 | return true 171 | @failed = @failed.filter matchFilter 172 | @completed = @completed.filter matchFilter 173 | @waiting = @waiting.filter matchFilter 174 | @delTask task, callback 175 | 176 | retryTask: (task, callback) -> 177 | idx = @failed.findIndex (t) -> t.id is task.id 178 | if idx is -1 179 | callback new Error "Task #{ task.id } not in failed list." 180 | return 181 | 182 | task = @failed[idx] 183 | @failed.splice idx, 1 184 | 185 | task.retries = 0 186 | task.error = undefined 187 | @addTask task, callback 188 | 189 | emitError: (error) -> 190 | if error? 
191 | @emit 'error', error 192 | 193 | getActive: -> 194 | tasks = [] 195 | for id, task of @active 196 | tasks.push task 197 | return tasks 198 | 199 | getWaiting: -> @waiting 200 | 201 | getFailed: -> @failed 202 | 203 | getCompleted: -> @completed 204 | 205 | getAll: -> 206 | tasks = @getActive() 207 | tasks = tasks.concat @getWaiting() 208 | tasks = tasks.concat @getFailed() 209 | tasks = tasks.concat @getCompleted() 210 | return tasks 211 | 212 | findTask: (taskId) -> 213 | return @active[taskId] if @active[taskId]? 214 | task = null 215 | for type in ['waiting', 'failed', 'completed'] 216 | task = @[type].find (t) -> t.id is taskId 217 | break if task? 218 | return task 219 | 220 | getTaskData: (task) -> 221 | task = @findTask task.id 222 | unless task? 223 | throw new Error "No task with id #{ task.id } in queue #{ @name }" 224 | return task.data 225 | 226 | putTask: (task, callback=@emitError) -> 227 | @server.database.put task.id, task.toDB(), callback 228 | 229 | delTask: (task, callback=@emitError) -> 230 | removeBlob = (key, callback) => @server.store.remove {key}, callback 231 | removeAllBlobs = (callback) => 232 | ids = resolveStreamIds task.data 233 | async.forEach ids, removeBlob, callback 234 | deleteTask = (callback) => @server.database.del task.id, callback 235 | broadcast = (callback) => 236 | @server.broadcastEvent 'task deleted', task.toRPC() 237 | callback() 238 | async.series [removeAllBlobs, deleteTask, broadcast], callback 239 | 240 | addTask: (task, callback=@emitError) -> 241 | task.state = 'waiting' 242 | task.queueTime = hrnow() 243 | @putTask task, (error) => 244 | if error? 245 | @emit 'error', error 246 | else 247 | @waiting.push task 248 | @server.broadcastEvent 'task added', task.toRPC() 249 | @server.broadcastEvent 'task queued', task.toRPC() 250 | setImmediate => @process() 251 | callback error 252 | 253 | sanityCheck: (method, task) -> 254 | unless @active[task.id]? 
255 | @server.emit 'error', new Error "#{ method } - Task #{ task.id } not active" 256 | return false 257 | activeWorker = @active[task.id].workerId 258 | if activeWorker isnt task.workerId 259 | @server.emit 'error', new Error "#{ method } - Wrong worker, got #{ task.workerId } expected #{ activeWorker }" 260 | return false 261 | return true 262 | 263 | taskComplete: (task) -> 264 | return unless @sanityCheck 'complete', task 265 | task = @active[task.id] 266 | clearTimeout @timers[task.id] 267 | delete @active[task.id] 268 | task.state = 'completed' 269 | @server.broadcastEvent 'task completed', task.toRPC() 270 | if task.options.autoremove 271 | @delTask task 272 | else 273 | task.queueTime = hrnow() 274 | @putTask task 275 | @completed.push task 276 | 277 | taskFailure: (task, error) -> 278 | return unless @sanityCheck 'failure', task 279 | task = @active[task.id] 280 | clearTimeout @timers[task.id] 281 | delete @active[task.id] 282 | task.state = 'failed' 283 | task.error = error.message 284 | task.queueTime = hrnow() 285 | willRetry = !(++task.retries > task.options.retries and task.options.retries isnt -1) 286 | @server.broadcastEvent 'task failed', task.toRPC(), willRetry 287 | if willRetry 288 | task.state = 'waiting' 289 | task.error = null 290 | @server.broadcastEvent 'task queued', task.toRPC() 291 | @waiting.push task 292 | setImmediate => @process() 293 | else 294 | @failed.push task 295 | @putTask task 296 | 297 | taskProgress: (task, percent) -> 298 | return unless @sanityCheck 'progress', task 299 | @active[task.id].progress = percent 300 | @putTask @active[task.id] 301 | @server.broadcastEvent 'task progress', task.toRPC(), percent 302 | @taskTimer task 303 | 304 | taskTimer: (task) -> 305 | unless task = @active[task.id] 306 | @emit 'error', new Error 'Task not active.' 307 | return 308 | if task.options.timeout isnt -1 309 | timeout = => 310 | error = new Error 'Timed out.' 
311 | task._worker?.abort?() 312 | @taskFailure task, error 313 | clearTimeout @timers[task.id] 314 | @timers[task.id] = setTimeout timeout, task.options.timeout 315 | 316 | process: => 317 | canProcess = => @workers.length > 0 and @waiting.length > 0 318 | startTask = (callback) => 319 | worker = @workers.shift() 320 | task = @waiting.shift() 321 | 322 | timedOut = => 323 | error = new Error "Worker #{ worker.id } didn't respond." 324 | @taskFailure task, error 325 | callback() 326 | callback = null # ignore late responses by worker 327 | 328 | task.workerId = worker.id 329 | task.state = 'active' 330 | task.progress = 0 331 | task._worker = worker 332 | @active[task.id] = task 333 | @server.broadcastEvent 'task started', task.toRPC() 334 | 335 | @putTask task, (error) => 336 | return callback error if error? 337 | timer = setTimeout timedOut, @server.options.workerTimeout 338 | worker.start task.toRPC(true), (error) => 339 | return unless callback? 340 | workerEvent = 341 | id: worker.id, 342 | connection: worker.connection 343 | queue: @name 344 | @server.broadcastEvent 'worker started', workerEvent, task.toRPC() 345 | clearTimeout timer 346 | if error? 347 | @taskFailure task, error 348 | else 349 | @taskTimer task 350 | callback() 351 | 352 | async.whilst canProcess, startTask, @emitError 353 | 354 | 355 | class Connection extends EventEmitter 356 | 357 | RPC_METHODS = [ 358 | 'addTask' 359 | 'getTaskData' 360 | 'listQueues' 361 | 'listTasks' 362 | 'registerWorker' 363 | 'removeTask' 364 | 'retryTask' 365 | 'taskFailure' 366 | 'taskProgress' 367 | 'taskSuccessful' 368 | ] 369 | 370 | constructor: (@stream, @server) -> 371 | @seenWorkers = {} 372 | # setup multiplex stream 373 | @multiplex = multiplex @handleStream.bind(this) 374 | pump @stream, @multiplex, @stream, (error) => 375 | if error? 
and error.message isnt 'premature close' 376 | @onError error 377 | else 378 | @onEnd() 379 | rpcStream = @multiplex.createSharedStream 'rpc' 380 | @rpc = dnode @getRpcMethods(), {weak: false} 381 | pump @rpc, rpcStream, @rpc 382 | if @server.options.heartbeatInterval > 0 383 | @pingCounter = 0 384 | @stream.socket.on 'pong', => @pingCounter = 0 385 | @heartbeatTimer = setInterval @heartbeat, @server.options.heartbeatInterval 386 | @heartbeat() 387 | 388 | onError: (error) => 389 | @cleanup() 390 | @emit 'error', error 391 | @emit 'close' 392 | 393 | onEnd: => 394 | @cleanup() 395 | @emit 'close' 396 | 397 | cleanup: -> 398 | clearTimeout @heartbeatTimer 399 | for workerId, queueName of @seenWorkers 400 | @server.getQueue(queueName).removeWorker(workerId) 401 | if @_eventStream? 402 | @server.eventStream.unpipe @_eventStream 403 | @_eventStream = null 404 | 405 | heartbeat: => 406 | if @pingCounter >= 3 407 | @stream.destroy new Error 'Ping timeout' 408 | else 409 | @stream.socket.ping null, null, true 410 | @pingCounter++ 411 | 412 | getRpcMethods: -> 413 | rv = {} 414 | for methodName in RPC_METHODS then do (methodName) => 415 | rv[methodName] = (args...) 
=> 416 | @pingCounter = 0 # TODO: refactor heartbeat to reset on any stream activity 417 | this[methodName].apply this, args 418 | return rv 419 | 420 | handleStream: (stream, id) => 421 | [type, id] = id.split ':' 422 | switch type 423 | when 'write' 424 | destination = @server.store.createWriteStream id 425 | stream.pipe destination 426 | when 'read' 427 | source = @server.store.createReadStream id 428 | stream.on 'error', -> # discard, handled by client 429 | source.on 'error', (error) -> stream.destroy error 430 | source.pipe stream 431 | when 'events' 432 | @_eventStream = stream 433 | @server.eventStream.pipe stream 434 | else 435 | @emit 'error', new Error "Can't handle stream type #{ type }" 436 | 437 | registerWorker: (worker) -> 438 | @seenWorkers[worker.id] = worker.queue 439 | queue = @server.getQueue worker.queue 440 | worker.connection = @id 441 | queue.addWorker worker 442 | 443 | taskSuccessful: (task) -> 444 | task = Task.fromRPC task 445 | queue = @server.getQueue task.queue 446 | queue.taskComplete task 447 | 448 | taskProgress: (task, percent) -> 449 | task = Task.fromRPC task 450 | queue = @server.getQueue task.queue 451 | queue.taskProgress task, percent 452 | 453 | taskFailure: (task, error) -> 454 | task = Task.fromRPC task 455 | queue = @server.getQueue task.queue 456 | queue.taskFailure task, error 457 | 458 | addTask: (task, callback) -> 459 | task = Task.fromRPC task 460 | queue = @server.getQueue task.queue 461 | queue.addTask task, callback 462 | 463 | removeTask: (task, callback) -> 464 | task = Task.fromRPC task 465 | queue = @server.getQueue task.queue 466 | queue.removeTask task, callback 467 | 468 | retryTask: (task, callback) -> 469 | task = Task.fromRPC task 470 | queue = @server.getQueue task.queue 471 | queue.retryTask task, callback 472 | 473 | getTaskData: (task, callback) -> 474 | queue = @server.getQueue task.queue 475 | try 476 | data = queue.getTaskData {id: task.id} 477 | catch error 478 | callback error, data 479 | 480 
| listTasks: (queue, filter, callback) -> 481 | queue = @server.getQueue queue 482 | switch filter 483 | when 'all', null 484 | tasks = queue.getAll() 485 | when 'failed' 486 | tasks = queue.getFailed() 487 | when 'completed' 488 | tasks = queue.getCompleted() 489 | when 'waiting' 490 | tasks = queue.getWaiting() 491 | when 'active' 492 | tasks = queue.getActive() 493 | else 494 | callback new Error "Unknown filter: #{ filter }" 495 | return 496 | callback null, tasks.map (task) -> task.toRPC() 497 | 498 | listQueues: (callback) -> 499 | callback null, Object.keys @server.queues 500 | 501 | 502 | module.exports = {Server} -------------------------------------------------------------------------------- /src/task.coffee: -------------------------------------------------------------------------------- 1 | ### Shared task implementation. ### 2 | 3 | assert = require 'assert' 4 | {EventEmitter} = require 'events' 5 | {randomString} = require './common' 6 | 7 | DB_KEYS = [ 8 | 'data', 'id', 'options', 'progress', 'queue', 9 | 'retries', 'state', 'workerId', 'error', 'queueTime' 10 | ] 11 | 12 | class Task extends EventEmitter 13 | 14 | defaults = 15 | 16 | timeout: 60 * 1000 # 1 minute 17 | # How long to wait for the task to complete without hearing from the worker in milliseconds. 18 | # Set to -1 to disable timeout (not recommended, use progress updates for long running tasks instead) 19 | 20 | retries: 0 21 | # How many times the task should be re-queued on failure. A value of zero 22 | # means no retries before the task has to be re-queued or removed explicitly. 23 | # Can also be set to -1 to retry forever. 24 | 25 | autoremove: false 26 | # Whether to remove the task and any associated streams that were buffered on completion. 27 | # Note that failed tasks will always have to be handled explicitly. 28 | 29 | constructor: (@id, @queue, @data, options={}) -> 30 | ### Create new task with *@id* on *@queue* containing *@data*, see defaults for available *options*.
### 31 | @options = {} 32 | @retries = 0 33 | @progress = 0 34 | @state = 'unknown' 35 | @aborted = false 36 | @_listeners = {} 37 | for key of defaults 38 | @options[key] = options[key] ? defaults[key] 39 | 40 | updateProgress: (percent) -> 41 | ### Set task progress to *percent* expressed as a fraction between 0 and 1. ### 42 | @progress = percent 43 | @emit 'local-progress', percent 44 | 45 | touch: -> 46 | ### Send a progress update to server, refreshing the task timeout. Useful if you can't 47 | measure progress but need to keep a long running task alive. ### 48 | @emit 'local-progress', @progress 49 | 50 | _isLocal: (event) -> event[...5] is 'local' or event is 'abort' 51 | 52 | _setupListener: (event) -> 53 | return if @_listeners[event]? or @_isLocal event 54 | assert @client?, 'No client assigned' 55 | @_listeners[event] = (task, extra...) => 56 | if task.id is @id 57 | @emit event, task, extra... 58 | return 59 | @client.on "task #{ event }", @_listeners[event] 60 | 61 | on: (event, handler) -> 62 | @_setupListener event 63 | super event, handler 64 | 65 | once: (event, handler) -> 66 | @_setupListener event 67 | super event, handler 68 | 69 | remove: (callback) -> 70 | @client.removeTask this, callback 71 | 72 | retry: (callback) -> 73 | @client.retryTask this, callback 74 | 75 | getData: (callback) -> 76 | @client.getTaskData this, callback 77 | 78 | toRPC: (includeData=false) -> 79 | ### Private, used to serialize the task before it is sent over the wire. ### 80 | rv = {@id, @queue, @options, @retries, @state} 81 | rv.workerId = @workerId if @workerId? 82 | rv.error = @error if @error? 83 | rv.data = @data if includeData 84 | return rv 85 | 86 | toDB: -> 87 | ### Private, used to serialize the task before storing in database. ### 88 | rv = {} 89 | for key in DB_KEYS 90 | rv[key] = this[key] 91 | return rv 92 | 93 | Task.create = (queue, options, data) -> 94 | ### Create new task on *queue* with *options* and *data*.
### 95 | if arguments.length is 2 96 | data = options 97 | options = {} 98 | id = randomString 24 99 | return new Task id, queue, data, options 100 | 101 | Task.fromRPC = (data) -> 102 | ### Private, deserialize task coming from rpc. ### 103 | task = new Task data.id, data.queue, data.data, data.options 104 | task.workerId = data.workerId if data.workerId? 105 | task.retries = data.retries if data.retries? 106 | task.error = data.error if data.error? 107 | task.state = data.state if data.state? 108 | return task 109 | 110 | Task.fromDB = (data) -> 111 | ### Private, deserialize task coming from database. ### 112 | task = new Task 113 | for key in DB_KEYS 114 | task[key] = data[key] 115 | return task 116 | 117 | module.exports = {Task} -------------------------------------------------------------------------------- /src/worker.coffee: -------------------------------------------------------------------------------- 1 | ### Client worker implementation. ### 2 | 3 | {EventEmitter} = require 'events' 4 | {Task} = require './task' 5 | {randomString} = require './common' 6 | 7 | unless setImmediate? 8 | setImmediate = process.nextTick 9 | 10 | class Worker extends EventEmitter 11 | 12 | constructor: (@id, @queue, @process) -> 13 | @activeTask = null 14 | 15 | start: (task, callback) => 16 | ### Start processing *task*. This method is called by the server. ### 17 | if task not instanceof Task 18 | task = Task.fromRPC task 19 | if @activeTask? 20 | callback new Error 'Already running.' 21 | return 22 | if task.queue isnt @queue 23 | callback new Error 'Task queue mismatch.' 24 | return 25 | @activeTask = task 26 | task.workerId = @id 27 | @emit 'start', task 28 | setImmediate => 29 | @client.resolveStreams task.data 30 | called = false 31 | @process task, (error) => 32 | return if task.aborted 33 | throw new Error 'Worker callback called multiple times.' if called 34 | called = true 35 | @activeTask = null 36 | if error?
37 | task.emit 'local-failure', error 38 | else 39 | task.emit 'local-success' 40 | @emit 'finish', task 41 | callback() 42 | 43 | abort: => 44 | ### Abort running task. Called by server if task has timed out or is manually aborted. ### 45 | return unless @activeTask? 46 | @activeTask.aborted = true 47 | @activeTask.state = 'failed' 48 | @activeTask.emit 'abort' 49 | @emit 'finish', @activeTask 50 | @activeTask = null 51 | 52 | isFree: -> not @activeTask? 53 | 54 | toRPC: -> {@id, @queue, @start, @abort} 55 | 56 | 57 | Worker.create = (queue, processFn) -> 58 | new Worker randomString(24), queue, processFn 59 | 60 | 61 | module.exports = {Worker} -------------------------------------------------------------------------------- /test/coverage.js: -------------------------------------------------------------------------------- 1 | var path = require('path') 2 | var coffeeCoverage = require('coffee-coverage') 3 | 4 | coffeeCoverage.register({ 5 | instrumentor: 'istanbul', 6 | basePath: path.resolve(__dirname, '..'), 7 | exclude: ['/test', '/node_modules', '/.git', '/examples', '/dist'], 8 | writeOnExit: 'coverage/coverage-coffee.json' 9 | }) -------------------------------------------------------------------------------- /test/index.coffee: -------------------------------------------------------------------------------- 1 | 2 | db = require 'memdown' 3 | store = require 'abstract-blob-store' 4 | 5 | {Client} = require './../src/client' 6 | {Server} = require './../src/server' 7 | {Worker} = require './../src/worker' 8 | 9 | async = require 'async' 10 | assert = require 'assert' 11 | through = require 'through' 12 | 13 | server = null 14 | client = null 15 | 16 | serverOpts = 17 | blobStore: new store() 18 | dbLocation: 'nowhere' 19 | dbOptions: {db} 20 | socketOptions: {} 21 | heartbeatInterval: 50 22 | workerTimeout: 100 23 | 24 | makeServer = (port=4242) -> 25 | serverOpts.socketOptions.port = port 26 | new Server serverOpts 27 | 28 | describe 'server', -> 29 | 30 | 
it 'throws without options', -> 31 | assert.throws -> server = new Server 32 | 33 | it 'throws with missing options', -> 34 | assert.throws -> server = new Server {} 35 | 36 | it 'starts', -> 37 | server = makeServer() 38 | server.on 'error', (error) -> assert.ifError error 39 | 40 | it 'should close stream if client does not respond to ping', (done) -> 41 | @slow 800 42 | sawError = false 43 | server.once 'connection', (conn) -> 44 | conn.on 'error', (error) -> 45 | assert error?, 'should have error' 46 | assert.equal error.message, 'Ping timeout' 47 | sawError = true 48 | cli = new Client 'ws://localhost:4242' 49 | cli.on 'connect', -> 50 | pongs = 0 51 | cli.socket.socket.pong = -> pongs++ 52 | cli.on 'disconnect', -> 53 | assert.equal pongs, 3 54 | assert sawError, 'should have seen error' 55 | cli.close() 56 | done() 57 | 58 | describe 'client', -> 59 | 60 | it 'connects', (done) -> 61 | client = new Client 'ws://localhost:4242' 62 | client.once 'connect', -> done() 63 | 64 | it 'reconnects', (done) -> 65 | server.close() 66 | client.once 'connect', -> done() 67 | server = makeServer() 68 | 69 | it 'closes', (done) -> 70 | client.once 'disconnect', -> done() 71 | client.close() 72 | 73 | it 'connects again', (done) -> 74 | client.connect() 75 | client.once 'connect', -> done() 76 | 77 | it 'should list active queues', (done) -> 78 | client.queue('listme').add {foo: 1}, (error) -> 79 | assert.ifError error 80 | client.listQueues (error, queues) -> 81 | assert.ifError error 82 | assert.equal queues.length, 1 83 | assert.equal queues[0].name, 'listme' 84 | done() 85 | 86 | it 'should receive event stream', (done) -> 87 | c = new Client 'ws://localhost:4242' 88 | q = c.queue 'evtstrm' 89 | s = c.getEventStream() 90 | sawData = false 91 | s.on 'data', (data) -> 92 | return unless data.args[0].queue is 'evtstrm' 93 | assert.equal data.args[0].id, t.id 94 | sawData = true 95 | t = q.add {foo: 'bark'}, -> 96 | assert sawData, 'should have seen data' 97 | c.socket.on 
'end', -> 98 | client.queue('evtstrm').add {foo: 'nark'}, done 99 | c.close() 100 | 101 | describe 'queue', -> 102 | 103 | it 'adds a task', (done) -> 104 | queue = client.queue 'test1' 105 | queue.add {foo: 'bar'}, (error) -> 106 | assert.ifError error 107 | done() 108 | 109 | it 'shows added task as waiting', (done) -> 110 | queue = client.queue 'test1' 111 | queue.waiting (error, tasks) -> 112 | assert.ifError error 113 | assert.equal tasks.length, 1 114 | done() 115 | 116 | it 'runs task when worker is added', (done) -> 117 | queue = client.queue 'test1' 118 | queue.process (task, callback) -> 119 | assert.deepEqual task.data, {foo: 'bar'} 120 | callback() 121 | done() 122 | 123 | it 'lists completed task', (done) -> 124 | queue = client.queue 'test1' 125 | queue.completed (error, tasks) -> 126 | assert.ifError error 127 | assert.equal tasks.length, 1 128 | assert.equal tasks[0].state, 'completed' 129 | done() 130 | 131 | it 'adds another task with different options', (done) -> 132 | queue = client.queue 'test2' 133 | queue.add {foo: 'bar'}, {timeout: 1234, retries: 1, autoremove: true}, (error) -> 134 | assert.ifError error 135 | done() 136 | 137 | it 'retries task on failure', (done) -> 138 | queue = client.queue 'test2' 139 | run = 0 140 | queue.process (task, callback) -> 141 | if ++run < 2 142 | error = new Error 'Task failed' 143 | callback error 144 | if run is 2 145 | done() 146 | 147 | it 'removes completed tasks with autoremove option set', (done) -> 148 | queue = client.queue 'test2' 149 | queue.completed (error, tasks) -> 150 | assert.ifError error 151 | assert.equal tasks.length, 0 152 | done() 153 | 154 | it 'adds another task with streams', (done) -> 155 | queue = client.queue 'test3' 156 | stream = through() 157 | setImmediate -> 158 | stream.write 'foobarz' 159 | stream.end() 160 | queue.add {foo: {stream}}, (error) -> 161 | assert.ifError error 162 | done() 163 | 164 | it 'runs streaming tasks', (done) -> 165 | queue = client.queue 'test3' 166 
| queue.process (task, callback) -> 167 | assert task.data.foo.stream? 168 | task.data.foo.stream.on 'data', (data) -> 169 | assert.equal data.toString(), 'foobarz' 170 | task.data.foo.stream.on 'end', -> 171 | callback() 172 | done() 173 | 174 | it 'removes stored stream along with task', (done) -> 175 | queue = client.queue 'test3' 176 | queue.completed (error, tasks) -> 177 | assert.ifError error 178 | assert.equal tasks.length, 1 179 | task = tasks[0] 180 | task.remove (error) -> 181 | assert.ifError error 182 | queue.completed (error, tasks) -> 183 | assert.ifError error 184 | assert.equal tasks.length, 0 185 | assert.equal Object.keys(server.store.data).length, 0 186 | done() 187 | 188 | it 'does not add task if stream emits error', (done) -> 189 | queue = client.queue 'test3' 190 | stream = through() 191 | setImmediate -> 192 | stream.emit 'error', new Error 'Stream error' 193 | queue.add {some: {stream}}, (error) -> 194 | assert.ok error? 195 | assert.equal error.message, 'Stream error' 196 | queue.all (error, tasks) -> 197 | assert.ifError error 198 | assert.equal tasks.length, 0 199 | done() 200 | 201 | it 'adds task after reconnecting', (done) -> 202 | @slow 200 203 | queue = client.queue 'test4' 204 | client.close() 205 | queue.add {foo: 'bar'}, (error) -> 206 | assert.ifError error 207 | done() 208 | setImmediate -> client.connect() 209 | 210 | it 'fails added task', (done) -> 211 | queue = client.queue 'test4' 212 | queue.process (task, callback) -> 213 | assert.deepEqual task.data, {foo: 'bar'} 214 | callback new Error 'Fail' 215 | done() 216 | 217 | it 'lists failed task', (done) -> 218 | queue = client.queue 'test4' 219 | queue.failed (error, tasks) -> 220 | assert.ifError error 221 | assert.equal tasks.length, 1 222 | assert.equal tasks[0].state, 'failed' 223 | assert.equal tasks[0].error, 'Fail' 224 | done() 225 | 226 | it 'emits broadcast events', (done) -> 227 | queue = client.queue 'test5' 228 | queue.on 'task added', (task) -> 229 | 
assert.equal task.id, outerTask.id 230 | done() 231 | outerTask = queue.add {a: [1]} 232 | 233 | it 'handles disconnections when adding streams', (done) -> 234 | @slow 500 235 | srv2 = makeServer 5252 236 | cli2 = new Client 'ws://localhost:5252' 237 | cli2.on 'error', (error) -> 238 | assert error.code is 'ECONNREFUSED' or error.code is 'ECONNRESET' 239 | cli2.on 'connect', -> 240 | queue = cli2.queue 'test6' 241 | stream = through() 242 | t = setInterval (-> stream.write 'foo'), 2 243 | setTimeout (-> srv2.close()), 100 244 | queue.add {a: {b: stream}}, (error) -> 245 | assert.equal error.message, 'Lost connection.' 246 | clearTimeout t 247 | srv2.close() 248 | done() 249 | 250 | it 'handles disconnections when receiving streams', (done) -> 251 | @slow 300 252 | srv3 = makeServer 6262 253 | cli3 = new Client 'ws://localhost:6262' 254 | cli3.on 'error', (error) -> 255 | assert.equal error.code, 'ECONNREFUSED' 256 | 257 | stream = through() 258 | stream.pause() 259 | stream.queue 'foobar' 260 | stream.queue null 261 | 262 | queue = cli3.queue 'test6' 263 | task = queue.add {stream}, (error) -> assert.ifError error 264 | queue.process (task, callback) -> 265 | srv3.close() 266 | task.data.stream.on 'error', (error) -> 267 | assert.equal error.message, 'Lost connection.' 
268 | cli3.close() 269 | done() 270 | 271 | it 'lists active tasks', (done) -> 272 | @slow 300 273 | cli = new Client 'ws://localhost:4242' 274 | queue = cli.queue 'test7' 275 | queue.add {foo: 'bar'} 276 | queue.process (task, callback) -> 277 | queue.active (error, tasks) -> 278 | assert.ifError error 279 | assert.equal tasks.length, 1 280 | callback() 281 | setTimeout -> 282 | cli.close() 283 | done() 284 | , 100 285 | 286 | it 'lists all tasks', (done) -> 287 | queue = client.queue 'test7' 288 | queue.add {foo: 'bar'} 289 | queue.all (error, tasks) -> 290 | assert.ifError error 291 | assert.equal tasks.length, 2 292 | done() 293 | 294 | it 'stops workers on timeout', (done) -> 295 | @slow 300 296 | queue = client.queue 'test8' 297 | task = queue.add {foo: 'bar'}, {timeout: 100} 298 | abortFired = false 299 | queue.process (task, callback) -> 300 | task.on 'abort', -> abortFired = true 301 | setTimeout callback, 200 302 | task.on 'failed', (task) -> 303 | assert.equal abortFired, true 304 | assert.equal task.error, 'Timed out.' 305 | done() 306 | 307 | it 'fails task if worker does not respond', (done) -> 308 | @slow 400 309 | queue = client.queue 'noresponse' 310 | worker = Worker.create 'noresponse', -> assert false, 'should not start processing' 311 | worker.start = -> 312 | client.addWorker worker 313 | task = queue.add {foo: 1}, (error) -> assert.ifError error 314 | task.on 'failed', (task) -> 315 | assert.equal task.error, "Worker #{ worker.id } didn't respond." 
316 | done() 317 | 318 | describe 'server persitence', -> 319 | 320 | it 'perists tasks', (done) -> 321 | server2 = makeServer 5252 322 | server2.once 'ready', -> 323 | queue = server.getQueue 'test1' 324 | assert.equal queue.getAll().length, 1 325 | server2.close done 326 | 327 | it 'perists and resumes active tasks', (done) -> 328 | @slow 200 329 | server2 = makeServer 5253 330 | client2 = new Client 'ws://localhost:5253', {backoff: -> 10} 331 | 332 | sawError = false 333 | client2.on 'error', (error) -> sawError = true 334 | 335 | numConnects = 0 336 | client2.on 'connect', -> numConnects++ 337 | 338 | queue = client2.queue 'persist2' 339 | queue.once 'worker started', -> 340 | server2.close -> 341 | server2 = makeServer 5253 342 | queue.process (task, callback) -> 343 | setTimeout callback, 50 344 | 345 | task = queue.add {foo: 'bar'} 346 | task.on 'completed', -> 347 | assert sawError, 'should see connection errors on client' 348 | assert.equal numConnects, 2 349 | do done 350 | 351 | it 'keeps the order of tasks', (done) -> 352 | queue = client.queue 'persist1' 353 | async.map [1..10], ((i, cb) -> queue.add {i}, cb), (error) -> 354 | assert.ifError error 355 | queue.all (error, tasks1) -> 356 | assert.ifError error 357 | assert.equal tasks1.length, 10 358 | server2 = makeServer 5566 359 | server2.once 'ready', -> 360 | client2 = new Client 'ws://localhost:5566' 361 | queue2 = client2.queue 'persist1' 362 | queue2.all (error, tasks2) -> 363 | assert.ifError error 364 | id1 = tasks1.map (t) -> t.id 365 | id2 = tasks2.map (t) -> t.id 366 | assert.deepEqual id1, id2 367 | done() 368 | 369 | describe 'task', -> 370 | 371 | it 'emits events', (done) -> 372 | queue = client.queue 'test9' 373 | task = queue.add {a: 1} 374 | task.on 'queued', -> done() 375 | 376 | it 'updates progress', (done) -> 377 | @slow 300 378 | queue = client.queue 'test9' 379 | sawProgress = false 380 | queue.waiting (error, tasks) -> 381 | assert.ifError error 382 | assert tasks.length, 1 383 
| task = tasks[0] 384 | task.on 'progress', (task, progress) -> 385 | assert.equal progress, 0.5 386 | sawProgress = true 387 | cb = -> 388 | assert.ok sawProgress, 'should have seen progress' 389 | done() 390 | queue.process (task, callback) -> 391 | task.updateProgress 0.5 392 | setTimeout cb, 100 393 | setTimeout callback, 100 394 | 395 | it 'should retry', (done) -> 396 | @slow 200 397 | queue = client.queue 'test10' 398 | numCalls = 0 399 | queue.process (task, callback) -> 400 | if ++numCalls < 3 401 | error = new Error "Fail #{ numCalls }" 402 | callback error 403 | 404 | task = queue.add {foo: 1}, {retries: 1}, (error) -> assert.ifError error 405 | 406 | task.on 'completed', (task) -> 407 | assert.equal numCalls, 3 408 | assert.equal task.error, undefined 409 | done() 410 | 411 | task.on 'failed', (task, willRetry) -> 412 | if numCalls is 1 413 | assert willRetry, 'should retry' 414 | assert.equal task.retries, 1 415 | else if numCalls is 2 416 | assert !willRetry, 'should not retry' 417 | assert.equal task.retries, 2 418 | task.retry (error) -> assert.ifError error 419 | else 420 | assert false, 'unexpected failed event' 421 | 422 | it 'should retrieve data', (done) -> 423 | queue = client.queue 'test10' 424 | queue.completed (error, tasks) -> 425 | assert.ifError error 426 | assert.equal tasks.length, 1 427 | [task] = tasks 428 | assert !task.data? 429 | task.getData (error, data) -> 430 | assert.ifError error 431 | assert.deepEqual data, {foo: 1} 432 | done() 433 | --------------------------------------------------------------------------------