├── .babelrc.es5 ├── .babelrc.lib ├── .env ├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── bottleneck.d.ts ├── bottleneck.d.ts.ejs ├── bottleneck_types.ejs ├── bower.json ├── es5.js ├── lib ├── Batcher.js ├── Bottleneck.js ├── BottleneckError.js ├── DLList.js ├── Events.js ├── Group.js ├── IORedisConnection.js ├── Job.js ├── LocalDatastore.js ├── Queues.js ├── RedisConnection.js ├── RedisDatastore.js ├── Scripts.js ├── States.js ├── Sync.js ├── es5.js ├── index.js ├── lua.json ├── parser.js └── version.json ├── light.d.ts ├── light.d.ts.ejs ├── light.js ├── package-lock.json ├── package.json ├── rollup.config.es5.js ├── rollup.config.light.js ├── scripts ├── assemble_lua.js ├── build.sh ├── test_all.sh └── version.js ├── src ├── Batcher.coffee ├── Bottleneck.coffee ├── BottleneckError.coffee ├── DLList.coffee ├── Events.coffee ├── Group.coffee ├── IORedisConnection.coffee ├── Job.coffee ├── LocalDatastore.coffee ├── Queues.coffee ├── RedisConnection.coffee ├── RedisDatastore.coffee ├── Scripts.coffee ├── States.coffee ├── Sync.coffee ├── es5.coffee ├── index.coffee ├── parser.coffee └── redis │ ├── blacklist_client.lua │ ├── check.lua │ ├── conditions_check.lua │ ├── current_reservoir.lua │ ├── done.lua │ ├── free.lua │ ├── get_time.lua │ ├── group_check.lua │ ├── heartbeat.lua │ ├── increment_reservoir.lua │ ├── init.lua │ ├── process_tick.lua │ ├── queued.lua │ ├── refresh_expiration.lua │ ├── refs.lua │ ├── register.lua │ ├── register_client.lua │ ├── running.lua │ ├── submit.lua │ ├── update_settings.lua │ ├── validate_client.lua │ └── validate_keys.lua ├── test.ts └── test ├── DLList.js ├── batcher.js ├── bottleneck.js ├── cluster.js ├── context.js ├── general.js ├── group.js ├── ioredis.js ├── node_redis.js ├── priority.js ├── promises.js ├── retries.js ├── spawn ├── increaseKeepAlive.js └── refreshKeepAlive.js ├── states.js └── stop.js /.babelrc.es5: -------------------------------------------------------------------------------- 1 | { 2 | 
"presets": [ 3 | ["@babel/preset-env", {}] 4 | ] 5 | } -------------------------------------------------------------------------------- /.babelrc.lib: -------------------------------------------------------------------------------- 1 | { 2 | "presets": [ 3 | ["@babel/preset-env", { 4 | "targets": { 5 | "node": "6.0" 6 | } 7 | }] 8 | ] 9 | } -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | REDIS_HOST=127.0.0.1 2 | REDIS_PORT=6379 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | test.js 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - 8 4 | branches: 5 | only: 6 | - master 7 | - next 8 | services: 9 | - redis-server 10 | env: 11 | global: 12 | - "REDIS_HOST=127.0.0.1" 13 | - "REDIS_PORT=6379" 14 | cache: 15 | directories: 16 | - $HOME/.npm 17 | install: 18 | - npm i 19 | sudo: required 20 | after_success: npx codecov --file=./coverage/lcov.info 21 | script: npm run test-all 22 | 23 | before_install: 24 | - npm i -g npm@5.10 25 | - npm --version -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Simon Grondin 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, 
and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /bottleneck.d.ts.ejs: -------------------------------------------------------------------------------- 1 | declare module "bottleneck" { 2 | <%- include('bottleneck_types') %> 3 | } 4 | -------------------------------------------------------------------------------- /bower.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bottleneck", 3 | "main": "bottleneck.js", 4 | "version": "2.19.5", 5 | "homepage": "https://github.com/SGrondin/bottleneck", 6 | "authors": [ 7 | "SGrondin " 8 | ], 9 | "description": "Distributed task scheduler and rate limiter", 10 | "moduleType": [ 11 | "globals", 12 | "node" 13 | ], 14 | "keywords": [ 15 | "async", 16 | "rate", 17 | "limiter", 18 | "limiting", 19 | "throttle", 20 | "throttling", 21 | "load", 22 | "ddos" 23 | ], 24 | "license": "MIT", 25 | "ignore": [ 26 | "**/.*", 27 | "node_modules", 28 | "bower_components" 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /lib/Batcher.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var Batcher, Events, parser; 
4 | parser = require("./parser"); 5 | Events = require("./Events"); 6 | 7 | Batcher = function () { 8 | class Batcher { 9 | constructor(options = {}) { 10 | this.options = options; 11 | parser.load(this.options, this.defaults, this); 12 | this.Events = new Events(this); 13 | this._arr = []; 14 | 15 | this._resetPromise(); 16 | 17 | this._lastFlush = Date.now(); 18 | } 19 | 20 | _resetPromise() { 21 | return this._promise = new this.Promise((res, rej) => { 22 | return this._resolve = res; 23 | }); 24 | } 25 | 26 | _flush() { 27 | clearTimeout(this._timeout); 28 | this._lastFlush = Date.now(); 29 | 30 | this._resolve(); 31 | 32 | this.Events.trigger("batch", this._arr); 33 | this._arr = []; 34 | return this._resetPromise(); 35 | } 36 | 37 | add(data) { 38 | var ret; 39 | 40 | this._arr.push(data); 41 | 42 | ret = this._promise; 43 | 44 | if (this._arr.length === this.maxSize) { 45 | this._flush(); 46 | } else if (this.maxTime != null && this._arr.length === 1) { 47 | this._timeout = setTimeout(() => { 48 | return this._flush(); 49 | }, this.maxTime); 50 | } 51 | 52 | return ret; 53 | } 54 | 55 | } 56 | 57 | ; 58 | Batcher.prototype.defaults = { 59 | maxTime: null, 60 | maxSize: null, 61 | Promise: Promise 62 | }; 63 | return Batcher; 64 | }.call(void 0); 65 | 66 | module.exports = Batcher; -------------------------------------------------------------------------------- /lib/BottleneckError.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var BottleneckError; 4 | BottleneckError = class BottleneckError extends Error {}; 5 | module.exports = BottleneckError; -------------------------------------------------------------------------------- /lib/DLList.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var DLList; 4 | DLList = class DLList { 5 | constructor(incr, decr) { 6 | this.incr = incr; 7 | this.decr = decr; 8 | this._first = null; 9 | this._last 
= null; 10 | this.length = 0; 11 | } 12 | 13 | push(value) { 14 | var node; 15 | this.length++; 16 | 17 | if (typeof this.incr === "function") { 18 | this.incr(); 19 | } 20 | 21 | node = { 22 | value, 23 | prev: this._last, 24 | next: null 25 | }; 26 | 27 | if (this._last != null) { 28 | this._last.next = node; 29 | this._last = node; 30 | } else { 31 | this._first = this._last = node; 32 | } 33 | 34 | return void 0; 35 | } 36 | 37 | shift() { 38 | var value; 39 | 40 | if (this._first == null) { 41 | return; 42 | } else { 43 | this.length--; 44 | 45 | if (typeof this.decr === "function") { 46 | this.decr(); 47 | } 48 | } 49 | 50 | value = this._first.value; 51 | 52 | if ((this._first = this._first.next) != null) { 53 | this._first.prev = null; 54 | } else { 55 | this._last = null; 56 | } 57 | 58 | return value; 59 | } 60 | 61 | first() { 62 | if (this._first != null) { 63 | return this._first.value; 64 | } 65 | } 66 | 67 | getArray() { 68 | var node, ref, results; 69 | node = this._first; 70 | results = []; 71 | 72 | while (node != null) { 73 | results.push((ref = node, node = node.next, ref.value)); 74 | } 75 | 76 | return results; 77 | } 78 | 79 | forEachShift(cb) { 80 | var node; 81 | node = this.shift(); 82 | 83 | while (node != null) { 84 | cb(node), node = this.shift(); 85 | } 86 | 87 | return void 0; 88 | } 89 | 90 | debug() { 91 | var node, ref, ref1, ref2, results; 92 | node = this._first; 93 | results = []; 94 | 95 | while (node != null) { 96 | results.push((ref = node, node = node.next, { 97 | value: ref.value, 98 | prev: (ref1 = ref.prev) != null ? ref1.value : void 0, 99 | next: (ref2 = ref.next) != null ? 
ref2.value : void 0 100 | })); 101 | } 102 | 103 | return results; 104 | } 105 | 106 | }; 107 | module.exports = DLList; -------------------------------------------------------------------------------- /lib/Events.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } 4 | 5 | function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } 6 | 7 | var Events; 8 | Events = class Events { 9 | constructor(instance) { 10 | this.instance = instance; 11 | this._events = {}; 12 | 13 | if (this.instance.on != null || this.instance.once != null || this.instance.removeAllListeners != null) { 14 | throw new Error("An Emitter already exists for this object"); 15 | } 16 | 17 | this.instance.on = (name, cb) => { 18 | return this._addListener(name, "many", cb); 19 | }; 20 | 21 | this.instance.once = (name, cb) => { 22 | return this._addListener(name, "once", cb); 23 | }; 24 | 25 | this.instance.removeAllListeners = (name = null) => { 26 | if (name != null) { 27 | return delete this._events[name]; 28 | } else { 29 | return this._events = {}; 30 | } 31 | }; 32 | } 33 | 34 | _addListener(name, status, cb) { 35 | var base; 36 | 37 | if ((base = this._events)[name] == null) { 38 | base[name] = []; 39 | } 40 | 41 | this._events[name].push({ 42 | cb, 43 | status 44 | }); 45 | 46 | return this.instance; 47 | } 48 | 49 | listenerCount(name) { 50 | if 
(this._events[name] != null) { 51 | return this._events[name].length; 52 | } else { 53 | return 0; 54 | } 55 | } 56 | 57 | trigger(name, ...args) { 58 | var _this = this; 59 | 60 | return _asyncToGenerator(function* () { 61 | var e, promises; 62 | 63 | try { 64 | if (name !== "debug") { 65 | _this.trigger("debug", `Event triggered: ${name}`, args); 66 | } 67 | 68 | if (_this._events[name] == null) { 69 | return; 70 | } 71 | 72 | _this._events[name] = _this._events[name].filter(function (listener) { 73 | return listener.status !== "none"; 74 | }); 75 | promises = _this._events[name].map( 76 | /*#__PURE__*/ 77 | function () { 78 | var _ref = _asyncToGenerator(function* (listener) { 79 | var e, returned; 80 | 81 | if (listener.status === "none") { 82 | return; 83 | } 84 | 85 | if (listener.status === "once") { 86 | listener.status = "none"; 87 | } 88 | 89 | try { 90 | returned = typeof listener.cb === "function" ? listener.cb(...args) : void 0; 91 | 92 | if (typeof (returned != null ? returned.then : void 0) === "function") { 93 | return yield returned; 94 | } else { 95 | return returned; 96 | } 97 | } catch (error) { 98 | e = error; 99 | 100 | if ("name" !== "error") { 101 | _this.trigger("error", e); 102 | } 103 | 104 | return null; 105 | } 106 | }); 107 | 108 | return function (_x) { 109 | return _ref.apply(this, arguments); 110 | }; 111 | }()); 112 | return (yield Promise.all(promises)).find(function (x) { 113 | return x != null; 114 | }); 115 | } catch (error) { 116 | e = error; 117 | 118 | if ("name" !== "error") { 119 | _this.trigger("error", e); 120 | } 121 | 122 | return null; 123 | } 124 | })(); 125 | } 126 | 127 | }; 128 | module.exports = Events; -------------------------------------------------------------------------------- /lib/Group.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || 
_nonIterableRest(); } 4 | 5 | function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } 6 | 7 | function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } 8 | 9 | function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; } 10 | 11 | function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } 12 | 13 | function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } 14 | 15 | var Events, Group, IORedisConnection, RedisConnection, Scripts, parser; 16 | parser = require("./parser"); 17 | Events = require("./Events"); 18 | RedisConnection = require("./RedisConnection"); 19 | IORedisConnection = require("./IORedisConnection"); 20 | Scripts = require("./Scripts"); 21 | 22 | Group = function () { 23 | class Group { 24 | constructor(limiterOptions = {}) { 25 | this.deleteKey = this.deleteKey.bind(this); 26 | this.limiterOptions = limiterOptions; 27 | parser.load(this.limiterOptions, this.defaults, this); 28 | this.Events = new Events(this); 29 | this.instances = {}; 30 | this.Bottleneck = require("./Bottleneck"); 31 | 32 | this._startAutoCleanup(); 
33 | 34 | this.sharedConnection = this.connection != null; 35 | 36 | if (this.connection == null) { 37 | if (this.limiterOptions.datastore === "redis") { 38 | this.connection = new RedisConnection(Object.assign({}, this.limiterOptions, { 39 | Events: this.Events 40 | })); 41 | } else if (this.limiterOptions.datastore === "ioredis") { 42 | this.connection = new IORedisConnection(Object.assign({}, this.limiterOptions, { 43 | Events: this.Events 44 | })); 45 | } 46 | } 47 | } 48 | 49 | key(key = "") { 50 | var ref; 51 | return (ref = this.instances[key]) != null ? ref : (() => { 52 | var limiter; 53 | limiter = this.instances[key] = new this.Bottleneck(Object.assign(this.limiterOptions, { 54 | id: `${this.id}-${key}`, 55 | timeout: this.timeout, 56 | connection: this.connection 57 | })); 58 | this.Events.trigger("created", limiter, key); 59 | return limiter; 60 | })(); 61 | } 62 | 63 | deleteKey(key = "") { 64 | var _this = this; 65 | 66 | return _asyncToGenerator(function* () { 67 | var deleted, instance; 68 | instance = _this.instances[key]; 69 | 70 | if (_this.connection) { 71 | deleted = yield _this.connection.__runCommand__(['del', ...Scripts.allKeys(`${_this.id}-${key}`)]); 72 | } 73 | 74 | if (instance != null) { 75 | delete _this.instances[key]; 76 | yield instance.disconnect(); 77 | } 78 | 79 | return instance != null || deleted > 0; 80 | })(); 81 | } 82 | 83 | limiters() { 84 | var k, ref, results, v; 85 | ref = this.instances; 86 | results = []; 87 | 88 | for (k in ref) { 89 | v = ref[k]; 90 | results.push({ 91 | key: k, 92 | limiter: v 93 | }); 94 | } 95 | 96 | return results; 97 | } 98 | 99 | keys() { 100 | return Object.keys(this.instances); 101 | } 102 | 103 | clusterKeys() { 104 | var _this2 = this; 105 | 106 | return _asyncToGenerator(function* () { 107 | var cursor, end, found, i, k, keys, len, next, start; 108 | 109 | if (_this2.connection == null) { 110 | return _this2.Promise.resolve(_this2.keys()); 111 | } 112 | 113 | keys = []; 114 | cursor = 
null; 115 | start = `b_${_this2.id}-`.length; 116 | end = "_settings".length; 117 | 118 | while (cursor !== 0) { 119 | var _ref = yield _this2.connection.__runCommand__(["scan", cursor != null ? cursor : 0, "match", `b_${_this2.id}-*_settings`, "count", 10000]); 120 | 121 | var _ref2 = _slicedToArray(_ref, 2); 122 | 123 | next = _ref2[0]; 124 | found = _ref2[1]; 125 | cursor = ~~next; 126 | 127 | for (i = 0, len = found.length; i < len; i++) { 128 | k = found[i]; 129 | keys.push(k.slice(start, -end)); 130 | } 131 | } 132 | 133 | return keys; 134 | })(); 135 | } 136 | 137 | _startAutoCleanup() { 138 | var _this3 = this; 139 | 140 | var base; 141 | clearInterval(this.interval); 142 | return typeof (base = this.interval = setInterval( 143 | /*#__PURE__*/ 144 | _asyncToGenerator(function* () { 145 | var e, k, ref, results, time, v; 146 | time = Date.now(); 147 | ref = _this3.instances; 148 | results = []; 149 | 150 | for (k in ref) { 151 | v = ref[k]; 152 | 153 | try { 154 | if (yield v._store.__groupCheck__(time)) { 155 | results.push(_this3.deleteKey(k)); 156 | } else { 157 | results.push(void 0); 158 | } 159 | } catch (error) { 160 | e = error; 161 | results.push(v.Events.trigger("error", e)); 162 | } 163 | } 164 | 165 | return results; 166 | }), this.timeout / 2)).unref === "function" ? base.unref() : void 0; 167 | } 168 | 169 | updateSettings(options = {}) { 170 | parser.overwrite(options, this.defaults, this); 171 | parser.overwrite(options, options, this.limiterOptions); 172 | 173 | if (options.timeout != null) { 174 | return this._startAutoCleanup(); 175 | } 176 | } 177 | 178 | disconnect(flush = true) { 179 | var ref; 180 | 181 | if (!this.sharedConnection) { 182 | return (ref = this.connection) != null ? 
ref.disconnect(flush) : void 0; 183 | } 184 | } 185 | 186 | } 187 | 188 | ; 189 | Group.prototype.defaults = { 190 | timeout: 1000 * 60 * 5, 191 | connection: null, 192 | Promise: Promise, 193 | id: "group-key" 194 | }; 195 | return Group; 196 | }.call(void 0); 197 | 198 | module.exports = Group; -------------------------------------------------------------------------------- /lib/IORedisConnection.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); } 4 | 5 | function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } 6 | 7 | function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } 8 | 9 | function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; } 10 | 11 | function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } 12 | 13 | function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } 14 | 15 | var Events, IORedisConnection, Scripts, parser; 16 | parser = 
require("./parser"); 17 | Events = require("./Events"); 18 | Scripts = require("./Scripts"); 19 | 20 | IORedisConnection = function () { 21 | class IORedisConnection { 22 | constructor(options = {}) { 23 | parser.load(options, this.defaults, this); 24 | 25 | if (this.Redis == null) { 26 | this.Redis = eval("require")("ioredis"); // Obfuscated or else Webpack/Angular will try to inline the optional ioredis module. To override this behavior: pass the ioredis module to Bottleneck as the 'Redis' option. 27 | } 28 | 29 | if (this.Events == null) { 30 | this.Events = new Events(this); 31 | } 32 | 33 | this.terminated = false; 34 | 35 | if (this.clusterNodes != null) { 36 | this.client = new this.Redis.Cluster(this.clusterNodes, this.clientOptions); 37 | this.subscriber = new this.Redis.Cluster(this.clusterNodes, this.clientOptions); 38 | } else if (this.client != null && this.client.duplicate == null) { 39 | this.subscriber = new this.Redis.Cluster(this.client.startupNodes, this.client.options); 40 | } else { 41 | if (this.client == null) { 42 | this.client = new this.Redis(this.clientOptions); 43 | } 44 | 45 | this.subscriber = this.client.duplicate(); 46 | } 47 | 48 | this.limiters = {}; 49 | this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(() => { 50 | this._loadScripts(); 51 | 52 | return { 53 | client: this.client, 54 | subscriber: this.subscriber 55 | }; 56 | }); 57 | } 58 | 59 | _setup(client, sub) { 60 | client.setMaxListeners(0); 61 | return new this.Promise((resolve, reject) => { 62 | client.on("error", e => { 63 | return this.Events.trigger("error", e); 64 | }); 65 | 66 | if (sub) { 67 | client.on("message", (channel, message) => { 68 | var ref; 69 | return (ref = this.limiters[channel]) != null ? 
ref._store.onMessage(channel, message) : void 0; 70 | }); 71 | } 72 | 73 | if (client.status === "ready") { 74 | return resolve(); 75 | } else { 76 | return client.once("ready", resolve); 77 | } 78 | }); 79 | } 80 | 81 | _loadScripts() { 82 | return Scripts.names.forEach(name => { 83 | return this.client.defineCommand(name, { 84 | lua: Scripts.payload(name) 85 | }); 86 | }); 87 | } 88 | 89 | __runCommand__(cmd) { 90 | var _this = this; 91 | 92 | return _asyncToGenerator(function* () { 93 | var _, deleted; 94 | 95 | yield _this.ready; 96 | 97 | var _ref = yield _this.client.pipeline([cmd]).exec(); 98 | 99 | var _ref2 = _slicedToArray(_ref, 1); 100 | 101 | var _ref2$ = _slicedToArray(_ref2[0], 2); 102 | 103 | _ = _ref2$[0]; 104 | deleted = _ref2$[1]; 105 | return deleted; 106 | })(); 107 | } 108 | 109 | __addLimiter__(instance) { 110 | return this.Promise.all([instance.channel(), instance.channel_client()].map(channel => { 111 | return new this.Promise((resolve, reject) => { 112 | return this.subscriber.subscribe(channel, () => { 113 | this.limiters[channel] = instance; 114 | return resolve(); 115 | }); 116 | }); 117 | })); 118 | } 119 | 120 | __removeLimiter__(instance) { 121 | var _this2 = this; 122 | 123 | return [instance.channel(), instance.channel_client()].forEach( 124 | /*#__PURE__*/ 125 | function () { 126 | var _ref3 = _asyncToGenerator(function* (channel) { 127 | if (!_this2.terminated) { 128 | yield _this2.subscriber.unsubscribe(channel); 129 | } 130 | 131 | return delete _this2.limiters[channel]; 132 | }); 133 | 134 | return function (_x) { 135 | return _ref3.apply(this, arguments); 136 | }; 137 | }()); 138 | } 139 | 140 | __scriptArgs__(name, id, args, cb) { 141 | var keys; 142 | keys = Scripts.keys(name, id); 143 | return [keys.length].concat(keys, args, cb); 144 | } 145 | 146 | __scriptFn__(name) { 147 | return this.client[name].bind(this.client); 148 | } 149 | 150 | disconnect(flush = true) { 151 | var i, k, len, ref; 152 | ref = 
Object.keys(this.limiters); 153 | 154 | for (i = 0, len = ref.length; i < len; i++) { 155 | k = ref[i]; 156 | clearInterval(this.limiters[k]._store.heartbeat); 157 | } 158 | 159 | this.limiters = {}; 160 | this.terminated = true; 161 | 162 | if (flush) { 163 | return this.Promise.all([this.client.quit(), this.subscriber.quit()]); 164 | } else { 165 | this.client.disconnect(); 166 | this.subscriber.disconnect(); 167 | return this.Promise.resolve(); 168 | } 169 | } 170 | 171 | } 172 | 173 | ; 174 | IORedisConnection.prototype.datastore = "ioredis"; 175 | IORedisConnection.prototype.defaults = { 176 | Redis: null, 177 | clientOptions: {}, 178 | clusterNodes: null, 179 | client: null, 180 | Promise: Promise, 181 | Events: null 182 | }; 183 | return IORedisConnection; 184 | }.call(void 0); 185 | 186 | module.exports = IORedisConnection; -------------------------------------------------------------------------------- /lib/Job.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } 4 | 5 | function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } 6 | 7 | var BottleneckError, DEFAULT_PRIORITY, Job, NUM_PRIORITIES, parser; 8 | NUM_PRIORITIES = 10; 9 | DEFAULT_PRIORITY = 5; 10 | parser = require("./parser"); 11 | BottleneckError = require("./BottleneckError"); 12 | Job = class Job { 13 | constructor(task, args, options, jobDefaults, 
rejectOnDrop, Events, _states, Promise) { 14 | this.task = task; 15 | this.args = args; 16 | this.rejectOnDrop = rejectOnDrop; 17 | this.Events = Events; 18 | this._states = _states; 19 | this.Promise = Promise; 20 | this.options = parser.load(options, jobDefaults); 21 | this.options.priority = this._sanitizePriority(this.options.priority); 22 | 23 | if (this.options.id === jobDefaults.id) { 24 | this.options.id = `${this.options.id}-${this._randomIndex()}`; 25 | } 26 | 27 | this.promise = new this.Promise((_resolve, _reject) => { 28 | this._resolve = _resolve; 29 | this._reject = _reject; 30 | }); 31 | this.retryCount = 0; 32 | } 33 | 34 | _sanitizePriority(priority) { 35 | var sProperty; 36 | sProperty = ~~priority !== priority ? DEFAULT_PRIORITY : priority; 37 | 38 | if (sProperty < 0) { 39 | return 0; 40 | } else if (sProperty > NUM_PRIORITIES - 1) { 41 | return NUM_PRIORITIES - 1; 42 | } else { 43 | return sProperty; 44 | } 45 | } 46 | 47 | _randomIndex() { 48 | return Math.random().toString(36).slice(2); 49 | } 50 | 51 | doDrop({ 52 | error, 53 | message = "This job has been dropped by Bottleneck" 54 | } = {}) { 55 | if (this._states.remove(this.options.id)) { 56 | if (this.rejectOnDrop) { 57 | this._reject(error != null ? error : new BottleneckError(message)); 58 | } 59 | 60 | this.Events.trigger("dropped", { 61 | args: this.args, 62 | options: this.options, 63 | task: this.task, 64 | promise: this.promise 65 | }); 66 | return true; 67 | } else { 68 | return false; 69 | } 70 | } 71 | 72 | _assertStatus(expected) { 73 | var status; 74 | status = this._states.jobStatus(this.options.id); 75 | 76 | if (!(status === expected || expected === "DONE" && status === null)) { 77 | throw new BottleneckError(`Invalid job status ${status}, expected ${expected}. 
Please open an issue at https://github.com/SGrondin/bottleneck/issues`); 78 | } 79 | } 80 | 81 | doReceive() { 82 | this._states.start(this.options.id); 83 | 84 | return this.Events.trigger("received", { 85 | args: this.args, 86 | options: this.options 87 | }); 88 | } 89 | 90 | doQueue(reachedHWM, blocked) { 91 | this._assertStatus("RECEIVED"); 92 | 93 | this._states.next(this.options.id); 94 | 95 | return this.Events.trigger("queued", { 96 | args: this.args, 97 | options: this.options, 98 | reachedHWM, 99 | blocked 100 | }); 101 | } 102 | 103 | doRun() { 104 | if (this.retryCount === 0) { 105 | this._assertStatus("QUEUED"); 106 | 107 | this._states.next(this.options.id); 108 | } else { 109 | this._assertStatus("EXECUTING"); 110 | } 111 | 112 | return this.Events.trigger("scheduled", { 113 | args: this.args, 114 | options: this.options 115 | }); 116 | } 117 | 118 | doExecute(chained, clearGlobalState, run, free) { 119 | var _this = this; 120 | 121 | return _asyncToGenerator(function* () { 122 | var error, eventInfo, passed; 123 | 124 | if (_this.retryCount === 0) { 125 | _this._assertStatus("RUNNING"); 126 | 127 | _this._states.next(_this.options.id); 128 | } else { 129 | _this._assertStatus("EXECUTING"); 130 | } 131 | 132 | eventInfo = { 133 | args: _this.args, 134 | options: _this.options, 135 | retryCount: _this.retryCount 136 | }; 137 | 138 | _this.Events.trigger("executing", eventInfo); 139 | 140 | try { 141 | passed = yield chained != null ? 
chained.schedule(_this.options, _this.task, ..._this.args) : _this.task(..._this.args); 142 | 143 | if (clearGlobalState()) { 144 | _this.doDone(eventInfo); 145 | 146 | yield free(_this.options, eventInfo); 147 | 148 | _this._assertStatus("DONE"); 149 | 150 | return _this._resolve(passed); 151 | } 152 | } catch (error1) { 153 | error = error1; 154 | return _this._onFailure(error, eventInfo, clearGlobalState, run, free); 155 | } 156 | })(); 157 | } 158 | 159 | doExpire(clearGlobalState, run, free) { 160 | var error, eventInfo; 161 | 162 | if (this._states.jobStatus(this.options.id === "RUNNING")) { 163 | this._states.next(this.options.id); 164 | } 165 | 166 | this._assertStatus("EXECUTING"); 167 | 168 | eventInfo = { 169 | args: this.args, 170 | options: this.options, 171 | retryCount: this.retryCount 172 | }; 173 | error = new BottleneckError(`This job timed out after ${this.options.expiration} ms.`); 174 | return this._onFailure(error, eventInfo, clearGlobalState, run, free); 175 | } 176 | 177 | _onFailure(error, eventInfo, clearGlobalState, run, free) { 178 | var _this2 = this; 179 | 180 | return _asyncToGenerator(function* () { 181 | var retry, retryAfter; 182 | 183 | if (clearGlobalState()) { 184 | retry = yield _this2.Events.trigger("failed", error, eventInfo); 185 | 186 | if (retry != null) { 187 | retryAfter = ~~retry; 188 | 189 | _this2.Events.trigger("retry", `Retrying ${_this2.options.id} after ${retryAfter} ms`, eventInfo); 190 | 191 | _this2.retryCount++; 192 | return run(retryAfter); 193 | } else { 194 | _this2.doDone(eventInfo); 195 | 196 | yield free(_this2.options, eventInfo); 197 | 198 | _this2._assertStatus("DONE"); 199 | 200 | return _this2._reject(error); 201 | } 202 | } 203 | })(); 204 | } 205 | 206 | doDone(eventInfo) { 207 | this._assertStatus("EXECUTING"); 208 | 209 | this._states.next(this.options.id); 210 | 211 | return this.Events.trigger("done", eventInfo); 212 | } 213 | 214 | }; 215 | module.exports = Job; 
-------------------------------------------------------------------------------- /lib/LocalDatastore.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } 4 | 5 | function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } 6 | 7 | var BottleneckError, LocalDatastore, parser; 8 | parser = require("./parser"); 9 | BottleneckError = require("./BottleneckError"); 10 | LocalDatastore = class LocalDatastore { 11 | constructor(instance, storeOptions, storeInstanceOptions) { 12 | this.instance = instance; 13 | this.storeOptions = storeOptions; 14 | this.clientId = this.instance._randomIndex(); 15 | parser.load(storeInstanceOptions, storeInstanceOptions, this); 16 | this._nextRequest = this._lastReservoirRefresh = this._lastReservoirIncrease = Date.now(); 17 | this._running = 0; 18 | this._done = 0; 19 | this._unblockTime = 0; 20 | this.ready = this.Promise.resolve(); 21 | this.clients = {}; 22 | 23 | this._startHeartbeat(); 24 | } 25 | 26 | _startHeartbeat() { 27 | var base; 28 | 29 | if (this.heartbeat == null && (this.storeOptions.reservoirRefreshInterval != null && this.storeOptions.reservoirRefreshAmount != null || this.storeOptions.reservoirIncreaseInterval != null && this.storeOptions.reservoirIncreaseAmount != null)) { 30 | return typeof (base = this.heartbeat = setInterval(() => { 31 | var amount, incr, maximum, now, 
reservoir; 32 | now = Date.now(); 33 | 34 | if (this.storeOptions.reservoirRefreshInterval != null && now >= this._lastReservoirRefresh + this.storeOptions.reservoirRefreshInterval) { 35 | this._lastReservoirRefresh = now; 36 | this.storeOptions.reservoir = this.storeOptions.reservoirRefreshAmount; 37 | 38 | this.instance._drainAll(this.computeCapacity()); 39 | } 40 | 41 | if (this.storeOptions.reservoirIncreaseInterval != null && now >= this._lastReservoirIncrease + this.storeOptions.reservoirIncreaseInterval) { 42 | var _this$storeOptions = this.storeOptions; 43 | amount = _this$storeOptions.reservoirIncreaseAmount; 44 | maximum = _this$storeOptions.reservoirIncreaseMaximum; 45 | reservoir = _this$storeOptions.reservoir; 46 | this._lastReservoirIncrease = now; 47 | incr = maximum != null ? Math.min(amount, maximum - reservoir) : amount; 48 | 49 | if (incr > 0) { 50 | this.storeOptions.reservoir += incr; 51 | return this.instance._drainAll(this.computeCapacity()); 52 | } 53 | } 54 | }, this.heartbeatInterval)).unref === "function" ? base.unref() : void 0; 55 | } else { 56 | return clearInterval(this.heartbeat); 57 | } 58 | } 59 | 60 | __publish__(message) { 61 | var _this = this; 62 | 63 | return _asyncToGenerator(function* () { 64 | yield _this.yieldLoop(); 65 | return _this.instance.Events.trigger("message", message.toString()); 66 | })(); 67 | } 68 | 69 | __disconnect__(flush) { 70 | var _this2 = this; 71 | 72 | return _asyncToGenerator(function* () { 73 | yield _this2.yieldLoop(); 74 | clearInterval(_this2.heartbeat); 75 | return _this2.Promise.resolve(); 76 | })(); 77 | } 78 | 79 | yieldLoop(t = 0) { 80 | return new this.Promise(function (resolve, reject) { 81 | return setTimeout(resolve, t); 82 | }); 83 | } 84 | 85 | computePenalty() { 86 | var ref; 87 | return (ref = this.storeOptions.penalty) != null ? 
ref : 15 * this.storeOptions.minTime || 5000; 88 | } 89 | 90 | __updateSettings__(options) { 91 | var _this3 = this; 92 | 93 | return _asyncToGenerator(function* () { 94 | yield _this3.yieldLoop(); 95 | parser.overwrite(options, options, _this3.storeOptions); 96 | 97 | _this3._startHeartbeat(); 98 | 99 | _this3.instance._drainAll(_this3.computeCapacity()); 100 | 101 | return true; 102 | })(); 103 | } 104 | 105 | __running__() { 106 | var _this4 = this; 107 | 108 | return _asyncToGenerator(function* () { 109 | yield _this4.yieldLoop(); 110 | return _this4._running; 111 | })(); 112 | } 113 | 114 | __queued__() { 115 | var _this5 = this; 116 | 117 | return _asyncToGenerator(function* () { 118 | yield _this5.yieldLoop(); 119 | return _this5.instance.queued(); 120 | })(); 121 | } 122 | 123 | __done__() { 124 | var _this6 = this; 125 | 126 | return _asyncToGenerator(function* () { 127 | yield _this6.yieldLoop(); 128 | return _this6._done; 129 | })(); 130 | } 131 | 132 | __groupCheck__(time) { 133 | var _this7 = this; 134 | 135 | return _asyncToGenerator(function* () { 136 | yield _this7.yieldLoop(); 137 | return _this7._nextRequest + _this7.timeout < time; 138 | })(); 139 | } 140 | 141 | computeCapacity() { 142 | var maxConcurrent, reservoir; 143 | var _this$storeOptions2 = this.storeOptions; 144 | maxConcurrent = _this$storeOptions2.maxConcurrent; 145 | reservoir = _this$storeOptions2.reservoir; 146 | 147 | if (maxConcurrent != null && reservoir != null) { 148 | return Math.min(maxConcurrent - this._running, reservoir); 149 | } else if (maxConcurrent != null) { 150 | return maxConcurrent - this._running; 151 | } else if (reservoir != null) { 152 | return reservoir; 153 | } else { 154 | return null; 155 | } 156 | } 157 | 158 | conditionsCheck(weight) { 159 | var capacity; 160 | capacity = this.computeCapacity(); 161 | return capacity == null || weight <= capacity; 162 | } 163 | 164 | __incrementReservoir__(incr) { 165 | var _this8 = this; 166 | 167 | return 
_asyncToGenerator(function* () { 168 | var reservoir; 169 | yield _this8.yieldLoop(); 170 | reservoir = _this8.storeOptions.reservoir += incr; 171 | 172 | _this8.instance._drainAll(_this8.computeCapacity()); 173 | 174 | return reservoir; 175 | })(); 176 | } 177 | 178 | __currentReservoir__() { 179 | var _this9 = this; 180 | 181 | return _asyncToGenerator(function* () { 182 | yield _this9.yieldLoop(); 183 | return _this9.storeOptions.reservoir; 184 | })(); 185 | } 186 | 187 | isBlocked(now) { 188 | return this._unblockTime >= now; 189 | } 190 | 191 | check(weight, now) { 192 | return this.conditionsCheck(weight) && this._nextRequest - now <= 0; 193 | } 194 | 195 | __check__(weight) { 196 | var _this10 = this; 197 | 198 | return _asyncToGenerator(function* () { 199 | var now; 200 | yield _this10.yieldLoop(); 201 | now = Date.now(); 202 | return _this10.check(weight, now); 203 | })(); 204 | } 205 | 206 | __register__(index, weight, expiration) { 207 | var _this11 = this; 208 | 209 | return _asyncToGenerator(function* () { 210 | var now, wait; 211 | yield _this11.yieldLoop(); 212 | now = Date.now(); 213 | 214 | if (_this11.conditionsCheck(weight)) { 215 | _this11._running += weight; 216 | 217 | if (_this11.storeOptions.reservoir != null) { 218 | _this11.storeOptions.reservoir -= weight; 219 | } 220 | 221 | wait = Math.max(_this11._nextRequest - now, 0); 222 | _this11._nextRequest = now + wait + _this11.storeOptions.minTime; 223 | return { 224 | success: true, 225 | wait, 226 | reservoir: _this11.storeOptions.reservoir 227 | }; 228 | } else { 229 | return { 230 | success: false 231 | }; 232 | } 233 | })(); 234 | } 235 | 236 | strategyIsBlock() { 237 | return this.storeOptions.strategy === 3; 238 | } 239 | 240 | __submit__(queueLength, weight) { 241 | var _this12 = this; 242 | 243 | return _asyncToGenerator(function* () { 244 | var blocked, now, reachedHWM; 245 | yield _this12.yieldLoop(); 246 | 247 | if (_this12.storeOptions.maxConcurrent != null && weight > 
_this12.storeOptions.maxConcurrent) { 248 | throw new BottleneckError(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${_this12.storeOptions.maxConcurrent}`); 249 | } 250 | 251 | now = Date.now(); 252 | reachedHWM = _this12.storeOptions.highWater != null && queueLength === _this12.storeOptions.highWater && !_this12.check(weight, now); 253 | blocked = _this12.strategyIsBlock() && (reachedHWM || _this12.isBlocked(now)); 254 | 255 | if (blocked) { 256 | _this12._unblockTime = now + _this12.computePenalty(); 257 | _this12._nextRequest = _this12._unblockTime + _this12.storeOptions.minTime; 258 | 259 | _this12.instance._dropAllQueued(); 260 | } 261 | 262 | return { 263 | reachedHWM, 264 | blocked, 265 | strategy: _this12.storeOptions.strategy 266 | }; 267 | })(); 268 | } 269 | 270 | __free__(index, weight) { 271 | var _this13 = this; 272 | 273 | return _asyncToGenerator(function* () { 274 | yield _this13.yieldLoop(); 275 | _this13._running -= weight; 276 | _this13._done += weight; 277 | 278 | _this13.instance._drainAll(_this13.computeCapacity()); 279 | 280 | return { 281 | running: _this13._running 282 | }; 283 | })(); 284 | } 285 | 286 | }; 287 | module.exports = LocalDatastore; -------------------------------------------------------------------------------- /lib/Queues.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var DLList, Events, Queues; 4 | DLList = require("./DLList"); 5 | Events = require("./Events"); 6 | Queues = class Queues { 7 | constructor(num_priorities) { 8 | var i; 9 | this.Events = new Events(this); 10 | this._length = 0; 11 | 12 | this._lists = function () { 13 | var j, ref, results; 14 | results = []; 15 | 16 | for (i = j = 1, ref = num_priorities; 1 <= ref ? j <= ref : j >= ref; i = 1 <= ref ? 
++j : --j) { 17 | results.push(new DLList(() => { 18 | return this.incr(); 19 | }, () => { 20 | return this.decr(); 21 | })); 22 | } 23 | 24 | return results; 25 | }.call(this); 26 | } 27 | 28 | incr() { 29 | if (this._length++ === 0) { 30 | return this.Events.trigger("leftzero"); 31 | } 32 | } 33 | 34 | decr() { 35 | if (--this._length === 0) { 36 | return this.Events.trigger("zero"); 37 | } 38 | } 39 | 40 | push(job) { 41 | return this._lists[job.options.priority].push(job); 42 | } 43 | 44 | queued(priority) { 45 | if (priority != null) { 46 | return this._lists[priority].length; 47 | } else { 48 | return this._length; 49 | } 50 | } 51 | 52 | shiftAll(fn) { 53 | return this._lists.forEach(function (list) { 54 | return list.forEachShift(fn); 55 | }); 56 | } 57 | 58 | getFirst(arr = this._lists) { 59 | var j, len, list; 60 | 61 | for (j = 0, len = arr.length; j < len; j++) { 62 | list = arr[j]; 63 | 64 | if (list.length > 0) { 65 | return list; 66 | } 67 | } 68 | 69 | return []; 70 | } 71 | 72 | shiftLastFrom(priority) { 73 | return this.getFirst(this._lists.slice(priority).reverse()).shift(); 74 | } 75 | 76 | }; 77 | module.exports = Queues; -------------------------------------------------------------------------------- /lib/RedisConnection.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } 4 | 5 | function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, 
"throw", err); } _next(undefined); }); }; } 6 | 7 | var Events, RedisConnection, Scripts, parser; 8 | parser = require("./parser"); 9 | Events = require("./Events"); 10 | Scripts = require("./Scripts"); 11 | 12 | RedisConnection = function () { 13 | class RedisConnection { 14 | constructor(options = {}) { 15 | parser.load(options, this.defaults, this); 16 | 17 | if (this.Redis == null) { 18 | this.Redis = eval("require")("redis"); // Obfuscated or else Webpack/Angular will try to inline the optional redis module. To override this behavior: pass the redis module to Bottleneck as the 'Redis' option. 19 | } 20 | 21 | if (this.Events == null) { 22 | this.Events = new Events(this); 23 | } 24 | 25 | this.terminated = false; 26 | 27 | if (this.client == null) { 28 | this.client = this.Redis.createClient(this.clientOptions); 29 | } 30 | 31 | this.subscriber = this.client.duplicate(); 32 | this.limiters = {}; 33 | this.shas = {}; 34 | this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(() => { 35 | return this._loadScripts(); 36 | }).then(() => { 37 | return { 38 | client: this.client, 39 | subscriber: this.subscriber 40 | }; 41 | }); 42 | } 43 | 44 | _setup(client, sub) { 45 | client.setMaxListeners(0); 46 | return new this.Promise((resolve, reject) => { 47 | client.on("error", e => { 48 | return this.Events.trigger("error", e); 49 | }); 50 | 51 | if (sub) { 52 | client.on("message", (channel, message) => { 53 | var ref; 54 | return (ref = this.limiters[channel]) != null ? 
ref._store.onMessage(channel, message) : void 0; 55 | }); 56 | } 57 | 58 | if (client.ready) { 59 | return resolve(); 60 | } else { 61 | return client.once("ready", resolve); 62 | } 63 | }); 64 | } 65 | 66 | _loadScript(name) { 67 | return new this.Promise((resolve, reject) => { 68 | var payload; 69 | payload = Scripts.payload(name); 70 | return this.client.multi([["script", "load", payload]]).exec((err, replies) => { 71 | if (err != null) { 72 | return reject(err); 73 | } 74 | 75 | this.shas[name] = replies[0]; 76 | return resolve(replies[0]); 77 | }); 78 | }); 79 | } 80 | 81 | _loadScripts() { 82 | return this.Promise.all(Scripts.names.map(k => { 83 | return this._loadScript(k); 84 | })); 85 | } 86 | 87 | __runCommand__(cmd) { 88 | var _this = this; 89 | 90 | return _asyncToGenerator(function* () { 91 | yield _this.ready; 92 | return new _this.Promise((resolve, reject) => { 93 | return _this.client.multi([cmd]).exec_atomic(function (err, replies) { 94 | if (err != null) { 95 | return reject(err); 96 | } else { 97 | return resolve(replies[0]); 98 | } 99 | }); 100 | }); 101 | })(); 102 | } 103 | 104 | __addLimiter__(instance) { 105 | return this.Promise.all([instance.channel(), instance.channel_client()].map(channel => { 106 | return new this.Promise((resolve, reject) => { 107 | var handler; 108 | 109 | handler = chan => { 110 | if (chan === channel) { 111 | this.subscriber.removeListener("subscribe", handler); 112 | this.limiters[channel] = instance; 113 | return resolve(); 114 | } 115 | }; 116 | 117 | this.subscriber.on("subscribe", handler); 118 | return this.subscriber.subscribe(channel); 119 | }); 120 | })); 121 | } 122 | 123 | __removeLimiter__(instance) { 124 | var _this2 = this; 125 | 126 | return this.Promise.all([instance.channel(), instance.channel_client()].map( 127 | /*#__PURE__*/ 128 | function () { 129 | var _ref = _asyncToGenerator(function* (channel) { 130 | if (!_this2.terminated) { 131 | yield new _this2.Promise((resolve, reject) => { 132 | 
return _this2.subscriber.unsubscribe(channel, function (err, chan) { 133 | if (err != null) { 134 | return reject(err); 135 | } 136 | 137 | if (chan === channel) { 138 | return resolve(); 139 | } 140 | }); 141 | }); 142 | } 143 | 144 | return delete _this2.limiters[channel]; 145 | }); 146 | 147 | return function (_x) { 148 | return _ref.apply(this, arguments); 149 | }; 150 | }())); 151 | } 152 | 153 | __scriptArgs__(name, id, args, cb) { 154 | var keys; 155 | keys = Scripts.keys(name, id); 156 | return [this.shas[name], keys.length].concat(keys, args, cb); 157 | } 158 | 159 | __scriptFn__(name) { 160 | return this.client.evalsha.bind(this.client); 161 | } 162 | 163 | disconnect(flush = true) { 164 | var i, k, len, ref; 165 | ref = Object.keys(this.limiters); 166 | 167 | for (i = 0, len = ref.length; i < len; i++) { 168 | k = ref[i]; 169 | clearInterval(this.limiters[k]._store.heartbeat); 170 | } 171 | 172 | this.limiters = {}; 173 | this.terminated = true; 174 | this.client.end(flush); 175 | this.subscriber.end(flush); 176 | return this.Promise.resolve(); 177 | } 178 | 179 | } 180 | 181 | ; 182 | RedisConnection.prototype.datastore = "redis"; 183 | RedisConnection.prototype.defaults = { 184 | Redis: null, 185 | clientOptions: {}, 186 | client: null, 187 | Promise: Promise, 188 | Events: null 189 | }; 190 | return RedisConnection; 191 | }.call(void 0); 192 | 193 | module.exports = RedisConnection; -------------------------------------------------------------------------------- /lib/RedisDatastore.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); } 4 | 5 | function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } 6 | 7 | function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var 
_i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } 8 | 9 | function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; } 10 | 11 | function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } 12 | 13 | function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } 14 | 15 | var BottleneckError, IORedisConnection, RedisConnection, RedisDatastore, parser; 16 | parser = require("./parser"); 17 | BottleneckError = require("./BottleneckError"); 18 | RedisConnection = require("./RedisConnection"); 19 | IORedisConnection = require("./IORedisConnection"); 20 | RedisDatastore = class RedisDatastore { 21 | constructor(instance, storeOptions, storeInstanceOptions) { 22 | this.instance = instance; 23 | this.storeOptions = storeOptions; 24 | this.originalId = this.instance.id; 25 | this.clientId = this.instance._randomIndex(); 26 | parser.load(storeInstanceOptions, storeInstanceOptions, this); 27 | this.clients = {}; 28 | this.capacityPriorityCounters = {}; 29 | this.sharedConnection = this.connection != null; 30 | 31 | if (this.connection == null) { 32 | this.connection = this.instance.datastore === "redis" ? 
new RedisConnection({ 33 | Redis: this.Redis, 34 | clientOptions: this.clientOptions, 35 | Promise: this.Promise, 36 | Events: this.instance.Events 37 | }) : this.instance.datastore === "ioredis" ? new IORedisConnection({ 38 | Redis: this.Redis, 39 | clientOptions: this.clientOptions, 40 | clusterNodes: this.clusterNodes, 41 | Promise: this.Promise, 42 | Events: this.instance.Events 43 | }) : void 0; 44 | } 45 | 46 | this.instance.connection = this.connection; 47 | this.instance.datastore = this.connection.datastore; 48 | this.ready = this.connection.ready.then(clients => { 49 | this.clients = clients; 50 | return this.runScript("init", this.prepareInitSettings(this.clearDatastore)); 51 | }).then(() => { 52 | return this.connection.__addLimiter__(this.instance); 53 | }).then(() => { 54 | return this.runScript("register_client", [this.instance.queued()]); 55 | }).then(() => { 56 | var base; 57 | 58 | if (typeof (base = this.heartbeat = setInterval(() => { 59 | return this.runScript("heartbeat", []).catch(e => { 60 | return this.instance.Events.trigger("error", e); 61 | }); 62 | }, this.heartbeatInterval)).unref === "function") { 63 | base.unref(); 64 | } 65 | 66 | return this.clients; 67 | }); 68 | } 69 | 70 | __publish__(message) { 71 | var _this = this; 72 | 73 | return _asyncToGenerator(function* () { 74 | var client; 75 | 76 | var _ref = yield _this.ready; 77 | 78 | client = _ref.client; 79 | return client.publish(_this.instance.channel(), `message:${message.toString()}`); 80 | })(); 81 | } 82 | 83 | onMessage(channel, message) { 84 | var _this2 = this; 85 | 86 | return _asyncToGenerator(function* () { 87 | var capacity, counter, data, drained, e, newCapacity, pos, priorityClient, rawCapacity, type; 88 | 89 | try { 90 | pos = message.indexOf(":"); 91 | var _ref2 = [message.slice(0, pos), message.slice(pos + 1)]; 92 | type = _ref2[0]; 93 | data = _ref2[1]; 94 | 95 | if (type === "capacity") { 96 | return yield _this2.instance._drainAll(data.length > 0 ? 
~~data : void 0); 97 | } else if (type === "capacity-priority") { 98 | var _data$split = data.split(":"); 99 | 100 | var _data$split2 = _slicedToArray(_data$split, 3); 101 | 102 | rawCapacity = _data$split2[0]; 103 | priorityClient = _data$split2[1]; 104 | counter = _data$split2[2]; 105 | capacity = rawCapacity.length > 0 ? ~~rawCapacity : void 0; 106 | 107 | if (priorityClient === _this2.clientId) { 108 | drained = yield _this2.instance._drainAll(capacity); 109 | newCapacity = capacity != null ? capacity - (drained || 0) : ""; 110 | return yield _this2.clients.client.publish(_this2.instance.channel(), `capacity-priority:${newCapacity}::${counter}`); 111 | } else if (priorityClient === "") { 112 | clearTimeout(_this2.capacityPriorityCounters[counter]); 113 | delete _this2.capacityPriorityCounters[counter]; 114 | return _this2.instance._drainAll(capacity); 115 | } else { 116 | return _this2.capacityPriorityCounters[counter] = setTimeout( 117 | /*#__PURE__*/ 118 | _asyncToGenerator(function* () { 119 | var e; 120 | 121 | try { 122 | delete _this2.capacityPriorityCounters[counter]; 123 | yield _this2.runScript("blacklist_client", [priorityClient]); 124 | return yield _this2.instance._drainAll(capacity); 125 | } catch (error) { 126 | e = error; 127 | return _this2.instance.Events.trigger("error", e); 128 | } 129 | }), 1000); 130 | } 131 | } else if (type === "message") { 132 | return _this2.instance.Events.trigger("message", data); 133 | } else if (type === "blocked") { 134 | return yield _this2.instance._dropAllQueued(); 135 | } 136 | } catch (error) { 137 | e = error; 138 | return _this2.instance.Events.trigger("error", e); 139 | } 140 | })(); 141 | } 142 | 143 | __disconnect__(flush) { 144 | clearInterval(this.heartbeat); 145 | 146 | if (this.sharedConnection) { 147 | return this.connection.__removeLimiter__(this.instance); 148 | } else { 149 | return this.connection.disconnect(flush); 150 | } 151 | } 152 | 153 | runScript(name, args) { 154 | var _this3 = this; 155 
| 156 | return _asyncToGenerator(function* () { 157 | if (!(name === "init" || name === "register_client")) { 158 | yield _this3.ready; 159 | } 160 | 161 | return new _this3.Promise((resolve, reject) => { 162 | var all_args, arr; 163 | all_args = [Date.now(), _this3.clientId].concat(args); 164 | 165 | _this3.instance.Events.trigger("debug", `Calling Redis script: ${name}.lua`, all_args); 166 | 167 | arr = _this3.connection.__scriptArgs__(name, _this3.originalId, all_args, function (err, replies) { 168 | if (err != null) { 169 | return reject(err); 170 | } 171 | 172 | return resolve(replies); 173 | }); 174 | return _this3.connection.__scriptFn__(name)(...arr); 175 | }).catch(e => { 176 | if (e.message === "SETTINGS_KEY_NOT_FOUND") { 177 | if (name === "heartbeat") { 178 | return _this3.Promise.resolve(); 179 | } else { 180 | return _this3.runScript("init", _this3.prepareInitSettings(false)).then(() => { 181 | return _this3.runScript(name, args); 182 | }); 183 | } 184 | } else if (e.message === "UNKNOWN_CLIENT") { 185 | return _this3.runScript("register_client", [_this3.instance.queued()]).then(() => { 186 | return _this3.runScript(name, args); 187 | }); 188 | } else { 189 | return _this3.Promise.reject(e); 190 | } 191 | }); 192 | })(); 193 | } 194 | 195 | prepareArray(arr) { 196 | var i, len, results, x; 197 | results = []; 198 | 199 | for (i = 0, len = arr.length; i < len; i++) { 200 | x = arr[i]; 201 | results.push(x != null ? x.toString() : ""); 202 | } 203 | 204 | return results; 205 | } 206 | 207 | prepareObject(obj) { 208 | var arr, k, v; 209 | arr = []; 210 | 211 | for (k in obj) { 212 | v = obj[k]; 213 | arr.push(k, v != null ? 
v.toString() : ""); 214 | } 215 | 216 | return arr; 217 | } 218 | 219 | prepareInitSettings(clear) { 220 | var args; 221 | args = this.prepareObject(Object.assign({}, this.storeOptions, { 222 | id: this.originalId, 223 | version: this.instance.version, 224 | groupTimeout: this.timeout, 225 | clientTimeout: this.clientTimeout 226 | })); 227 | args.unshift(clear ? 1 : 0, this.instance.version); 228 | return args; 229 | } 230 | 231 | convertBool(b) { 232 | return !!b; 233 | } 234 | 235 | __updateSettings__(options) { 236 | var _this4 = this; 237 | 238 | return _asyncToGenerator(function* () { 239 | yield _this4.runScript("update_settings", _this4.prepareObject(options)); 240 | return parser.overwrite(options, options, _this4.storeOptions); 241 | })(); 242 | } 243 | 244 | __running__() { 245 | return this.runScript("running", []); 246 | } 247 | 248 | __queued__() { 249 | return this.runScript("queued", []); 250 | } 251 | 252 | __done__() { 253 | return this.runScript("done", []); 254 | } 255 | 256 | __groupCheck__() { 257 | var _this5 = this; 258 | 259 | return _asyncToGenerator(function* () { 260 | return _this5.convertBool((yield _this5.runScript("group_check", []))); 261 | })(); 262 | } 263 | 264 | __incrementReservoir__(incr) { 265 | return this.runScript("increment_reservoir", [incr]); 266 | } 267 | 268 | __currentReservoir__() { 269 | return this.runScript("current_reservoir", []); 270 | } 271 | 272 | __check__(weight) { 273 | var _this6 = this; 274 | 275 | return _asyncToGenerator(function* () { 276 | return _this6.convertBool((yield _this6.runScript("check", _this6.prepareArray([weight])))); 277 | })(); 278 | } 279 | 280 | __register__(index, weight, expiration) { 281 | var _this7 = this; 282 | 283 | return _asyncToGenerator(function* () { 284 | var reservoir, success, wait; 285 | 286 | var _ref4 = yield _this7.runScript("register", _this7.prepareArray([index, weight, expiration])); 287 | 288 | var _ref5 = _slicedToArray(_ref4, 3); 289 | 290 | success = 
_ref5[0]; 291 | wait = _ref5[1]; 292 | reservoir = _ref5[2]; 293 | return { 294 | success: _this7.convertBool(success), 295 | wait, 296 | reservoir 297 | }; 298 | })(); 299 | } 300 | 301 | __submit__(queueLength, weight) { 302 | var _this8 = this; 303 | 304 | return _asyncToGenerator(function* () { 305 | var blocked, e, maxConcurrent, overweight, reachedHWM, strategy; 306 | 307 | try { 308 | var _ref6 = yield _this8.runScript("submit", _this8.prepareArray([queueLength, weight])); 309 | 310 | var _ref7 = _slicedToArray(_ref6, 3); 311 | 312 | reachedHWM = _ref7[0]; 313 | blocked = _ref7[1]; 314 | strategy = _ref7[2]; 315 | return { 316 | reachedHWM: _this8.convertBool(reachedHWM), 317 | blocked: _this8.convertBool(blocked), 318 | strategy 319 | }; 320 | } catch (error) { 321 | e = error; 322 | 323 | if (e.message.indexOf("OVERWEIGHT") === 0) { 324 | var _e$message$split = e.message.split(":"); 325 | 326 | var _e$message$split2 = _slicedToArray(_e$message$split, 3); 327 | 328 | overweight = _e$message$split2[0]; 329 | weight = _e$message$split2[1]; 330 | maxConcurrent = _e$message$split2[2]; 331 | throw new BottleneckError(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${maxConcurrent}`); 332 | } else { 333 | throw e; 334 | } 335 | } 336 | })(); 337 | } 338 | 339 | __free__(index, weight) { 340 | var _this9 = this; 341 | 342 | return _asyncToGenerator(function* () { 343 | var running; 344 | running = yield _this9.runScript("free", _this9.prepareArray([index])); 345 | return { 346 | running 347 | }; 348 | })(); 349 | } 350 | 351 | }; 352 | module.exports = RedisDatastore; -------------------------------------------------------------------------------- /lib/Scripts.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var headers, lua, templates; 4 | lua = require("./lua.json"); 5 | headers = { 6 | refs: lua["refs.lua"], 7 | validate_keys: 
lua["validate_keys.lua"], 8 | validate_client: lua["validate_client.lua"], 9 | refresh_expiration: lua["refresh_expiration.lua"], 10 | process_tick: lua["process_tick.lua"], 11 | conditions_check: lua["conditions_check.lua"], 12 | get_time: lua["get_time.lua"] 13 | }; 14 | 15 | exports.allKeys = function (id) { 16 | return [ 17 | /* 18 | HASH 19 | */ 20 | `b_${id}_settings`, 21 | /* 22 | HASH 23 | job index -> weight 24 | */ 25 | `b_${id}_job_weights`, 26 | /* 27 | ZSET 28 | job index -> expiration 29 | */ 30 | `b_${id}_job_expirations`, 31 | /* 32 | HASH 33 | job index -> client 34 | */ 35 | `b_${id}_job_clients`, 36 | /* 37 | ZSET 38 | client -> sum running 39 | */ 40 | `b_${id}_client_running`, 41 | /* 42 | HASH 43 | client -> num queued 44 | */ 45 | `b_${id}_client_num_queued`, 46 | /* 47 | ZSET 48 | client -> last job registered 49 | */ 50 | `b_${id}_client_last_registered`, 51 | /* 52 | ZSET 53 | client -> last seen 54 | */ 55 | `b_${id}_client_last_seen`]; 56 | }; 57 | 58 | templates = { 59 | init: { 60 | keys: exports.allKeys, 61 | headers: ["process_tick"], 62 | refresh_expiration: true, 63 | code: lua["init.lua"] 64 | }, 65 | group_check: { 66 | keys: exports.allKeys, 67 | headers: [], 68 | refresh_expiration: false, 69 | code: lua["group_check.lua"] 70 | }, 71 | register_client: { 72 | keys: exports.allKeys, 73 | headers: ["validate_keys"], 74 | refresh_expiration: false, 75 | code: lua["register_client.lua"] 76 | }, 77 | blacklist_client: { 78 | keys: exports.allKeys, 79 | headers: ["validate_keys", "validate_client"], 80 | refresh_expiration: false, 81 | code: lua["blacklist_client.lua"] 82 | }, 83 | heartbeat: { 84 | keys: exports.allKeys, 85 | headers: ["validate_keys", "validate_client", "process_tick"], 86 | refresh_expiration: false, 87 | code: lua["heartbeat.lua"] 88 | }, 89 | update_settings: { 90 | keys: exports.allKeys, 91 | headers: ["validate_keys", "validate_client", "process_tick"], 92 | refresh_expiration: true, 93 | code: 
lua["update_settings.lua"] 94 | }, 95 | running: { 96 | keys: exports.allKeys, 97 | headers: ["validate_keys", "validate_client", "process_tick"], 98 | refresh_expiration: false, 99 | code: lua["running.lua"] 100 | }, 101 | queued: { 102 | keys: exports.allKeys, 103 | headers: ["validate_keys", "validate_client"], 104 | refresh_expiration: false, 105 | code: lua["queued.lua"] 106 | }, 107 | done: { 108 | keys: exports.allKeys, 109 | headers: ["validate_keys", "validate_client", "process_tick"], 110 | refresh_expiration: false, 111 | code: lua["done.lua"] 112 | }, 113 | check: { 114 | keys: exports.allKeys, 115 | headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"], 116 | refresh_expiration: false, 117 | code: lua["check.lua"] 118 | }, 119 | submit: { 120 | keys: exports.allKeys, 121 | headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"], 122 | refresh_expiration: true, 123 | code: lua["submit.lua"] 124 | }, 125 | register: { 126 | keys: exports.allKeys, 127 | headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"], 128 | refresh_expiration: true, 129 | code: lua["register.lua"] 130 | }, 131 | free: { 132 | keys: exports.allKeys, 133 | headers: ["validate_keys", "validate_client", "process_tick"], 134 | refresh_expiration: true, 135 | code: lua["free.lua"] 136 | }, 137 | current_reservoir: { 138 | keys: exports.allKeys, 139 | headers: ["validate_keys", "validate_client", "process_tick"], 140 | refresh_expiration: false, 141 | code: lua["current_reservoir.lua"] 142 | }, 143 | increment_reservoir: { 144 | keys: exports.allKeys, 145 | headers: ["validate_keys", "validate_client", "process_tick"], 146 | refresh_expiration: true, 147 | code: lua["increment_reservoir.lua"] 148 | } 149 | }; 150 | exports.names = Object.keys(templates); 151 | 152 | exports.keys = function (name, id) { 153 | return templates[name].keys(id); 154 | }; 155 | 156 | exports.payload = function (name) { 157 | var 
template; 158 | template = templates[name]; 159 | return Array.prototype.concat(headers.refs, template.headers.map(function (h) { 160 | return headers[h]; 161 | }), template.refresh_expiration ? headers.refresh_expiration : "", template.code).join("\n"); 162 | }; -------------------------------------------------------------------------------- /lib/States.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var BottleneckError, States; 4 | BottleneckError = require("./BottleneckError"); 5 | States = class States { 6 | constructor(status1) { 7 | this.status = status1; 8 | this._jobs = {}; 9 | this.counts = this.status.map(function () { 10 | return 0; 11 | }); 12 | } 13 | 14 | next(id) { 15 | var current, next; 16 | current = this._jobs[id]; 17 | next = current + 1; 18 | 19 | if (current != null && next < this.status.length) { 20 | this.counts[current]--; 21 | this.counts[next]++; 22 | return this._jobs[id]++; 23 | } else if (current != null) { 24 | this.counts[current]--; 25 | return delete this._jobs[id]; 26 | } 27 | } 28 | 29 | start(id) { 30 | var initial; 31 | initial = 0; 32 | this._jobs[id] = initial; 33 | return this.counts[initial]++; 34 | } 35 | 36 | remove(id) { 37 | var current; 38 | current = this._jobs[id]; 39 | 40 | if (current != null) { 41 | this.counts[current]--; 42 | delete this._jobs[id]; 43 | } 44 | 45 | return current != null; 46 | } 47 | 48 | jobStatus(id) { 49 | var ref; 50 | return (ref = this.status[this._jobs[id]]) != null ? 
ref : null; 51 | } 52 | 53 | statusJobs(status) { 54 | var k, pos, ref, results, v; 55 | 56 | if (status != null) { 57 | pos = this.status.indexOf(status); 58 | 59 | if (pos < 0) { 60 | throw new BottleneckError(`status must be one of ${this.status.join(', ')}`); 61 | } 62 | 63 | ref = this._jobs; 64 | results = []; 65 | 66 | for (k in ref) { 67 | v = ref[k]; 68 | 69 | if (v === pos) { 70 | results.push(k); 71 | } 72 | } 73 | 74 | return results; 75 | } else { 76 | return Object.keys(this._jobs); 77 | } 78 | } 79 | 80 | statusCounts() { 81 | return this.counts.reduce((acc, v, i) => { 82 | acc[this.status[i]] = v; 83 | return acc; 84 | }, {}); 85 | } 86 | 87 | }; 88 | module.exports = States; -------------------------------------------------------------------------------- /lib/Sync.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } 4 | 5 | function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } 6 | 7 | var DLList, Sync; 8 | DLList = require("./DLList"); 9 | Sync = class Sync { 10 | constructor(name, Promise) { 11 | this.schedule = this.schedule.bind(this); 12 | this.name = name; 13 | this.Promise = Promise; 14 | this._running = 0; 15 | this._queue = new DLList(); 16 | } 17 | 18 | isEmpty() { 19 | return this._queue.length === 0; 20 | } 21 | 22 | _tryToRun() { 23 | var _this = this; 24 | 25 | return 
_asyncToGenerator(function* () { 26 | var args, cb, error, reject, resolve, returned, task; 27 | 28 | if (_this._running < 1 && _this._queue.length > 0) { 29 | _this._running++; 30 | 31 | var _this$_queue$shift = _this._queue.shift(); 32 | 33 | task = _this$_queue$shift.task; 34 | args = _this$_queue$shift.args; 35 | resolve = _this$_queue$shift.resolve; 36 | reject = _this$_queue$shift.reject; 37 | cb = yield _asyncToGenerator(function* () { 38 | try { 39 | returned = yield task(...args); 40 | return function () { 41 | return resolve(returned); 42 | }; 43 | } catch (error1) { 44 | error = error1; 45 | return function () { 46 | return reject(error); 47 | }; 48 | } 49 | })(); 50 | _this._running--; 51 | 52 | _this._tryToRun(); 53 | 54 | return cb(); 55 | } 56 | })(); 57 | } 58 | 59 | schedule(task, ...args) { 60 | var promise, reject, resolve; 61 | resolve = reject = null; 62 | promise = new this.Promise(function (_resolve, _reject) { 63 | resolve = _resolve; 64 | return reject = _reject; 65 | }); 66 | 67 | this._queue.push({ 68 | task, 69 | args, 70 | resolve, 71 | reject 72 | }); 73 | 74 | this._tryToRun(); 75 | 76 | return promise; 77 | } 78 | 79 | }; 80 | module.exports = Sync; -------------------------------------------------------------------------------- /lib/es5.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | require("regenerator-runtime/runtime"); 4 | 5 | module.exports = require("./Bottleneck"); -------------------------------------------------------------------------------- /lib/index.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | module.exports = require("./Bottleneck"); -------------------------------------------------------------------------------- /lib/parser.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | exports.load = function (received, defaults, onto = {}) { 4 
| var k, ref, v; 5 | 6 | for (k in defaults) { 7 | v = defaults[k]; 8 | onto[k] = (ref = received[k]) != null ? ref : v; 9 | } 10 | 11 | return onto; 12 | }; 13 | 14 | exports.overwrite = function (received, defaults, onto = {}) { 15 | var k, v; 16 | 17 | for (k in received) { 18 | v = received[k]; 19 | 20 | if (defaults[k] !== void 0) { 21 | onto[k] = v; 22 | } 23 | } 24 | 25 | return onto; 26 | }; -------------------------------------------------------------------------------- /lib/version.json: -------------------------------------------------------------------------------- 1 | {"version":"2.19.5"} 2 | -------------------------------------------------------------------------------- /light.d.ts.ejs: -------------------------------------------------------------------------------- 1 | declare module "bottleneck/light" { 2 | <%- include('bottleneck_types') -%> 3 | } 4 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bottleneck", 3 | "version": "2.19.5", 4 | "description": "Distributed task scheduler and rate limiter", 5 | "main": "lib/index.js", 6 | "typings": "bottleneck.d.ts", 7 | "scripts": { 8 | "test": "mocha test", 9 | "test-all": "./scripts/test_all.sh" 10 | }, 11 | "repository": { 12 | "type": "git", 13 | "url": "https://github.com/SGrondin/bottleneck" 14 | }, 15 | "keywords": [ 16 | "async rate limiter", 17 | "rate limiter", 18 | "rate limiting", 19 | "async", 20 | "rate", 21 | "limiting", 22 | "limiter", 23 | "throttle", 24 | "throttling", 25 | "throttler", 26 | "load", 27 | "clustering" 28 | ], 29 | "author": { 30 | "name": "Simon Grondin" 31 | }, 32 | "license": "MIT", 33 | "bugs": { 34 | "url": "https://github.com/SGrondin/bottleneck/issues" 35 | }, 36 | "devDependencies": { 37 | "@babel/core": "^7.5.0", 38 | "@babel/preset-env": "^7.5.0", 39 | "@types/es6-promise": "0.0.33", 40 | "assert": "^1.5.0", 41 | 
"coffeescript": "2.4.x", 42 | "ejs-cli": "github:SGrondin/ejs-cli#master", 43 | "ioredis": "^4.11.1", 44 | "leakage": "^0.4.0", 45 | "mocha": "^6.1.4", 46 | "redis": "^2.8.0", 47 | "regenerator-runtime": "^0.12.1", 48 | "rollup": "^0.66.6", 49 | "rollup-plugin-babel": "^4.3.3", 50 | "rollup-plugin-commonjs": "^9.3.4", 51 | "rollup-plugin-json": "^3.1.0", 52 | "rollup-plugin-node-resolve": "^3.4.0", 53 | "typescript": "^2.6.2" 54 | }, 55 | "dependencies": {} 56 | } 57 | -------------------------------------------------------------------------------- /rollup.config.es5.js: -------------------------------------------------------------------------------- 1 | import json from 'rollup-plugin-json'; 2 | import resolve from 'rollup-plugin-node-resolve'; 3 | import commonjs from 'rollup-plugin-commonjs'; 4 | import babel from 'rollup-plugin-babel'; 5 | 6 | const bannerLines = [ 7 | 'This file contains the full Bottleneck library (MIT) compiled to ES5.', 8 | 'https://github.com/SGrondin/bottleneck', 9 | 'It also contains the regenerator-runtime (MIT), necessary for Babel-generated ES5 code to execute promise and async/await code.', 10 | 'See the following link for Copyright and License information:', 11 | 'https://github.com/facebook/regenerator/blob/master/packages/regenerator-runtime/runtime.js', 12 | ].map(x => ` * ${x}`).join('\n'); 13 | const banner = `/**\n${bannerLines}\n */`; 14 | 15 | export default { 16 | input: 'lib/es5.js', 17 | output: { 18 | name: 'Bottleneck', 19 | file: 'es5.js', 20 | sourcemap: false, 21 | globals: {}, 22 | format: 'umd', 23 | banner 24 | }, 25 | external: [], 26 | plugins: [ 27 | json(), 28 | resolve(), 29 | commonjs(), 30 | babel({ 31 | exclude: 'node_modules/**' 32 | }) 33 | ] 34 | }; 35 | -------------------------------------------------------------------------------- /rollup.config.light.js: -------------------------------------------------------------------------------- 1 | import commonjs from 'rollup-plugin-commonjs'; 2 | import json 
from 'rollup-plugin-json'; 3 | import resolve from 'rollup-plugin-node-resolve'; 4 | 5 | const bannerLines = [ 6 | 'This file contains the Bottleneck library (MIT), compiled to ES2017, and without Clustering support.', 7 | 'https://github.com/SGrondin/bottleneck', 8 | ].map(x => ` * ${x}`).join('\n'); 9 | const banner = `/**\n${bannerLines}\n */`; 10 | 11 | const missing = `export default () => console.log('You must import the full version of Bottleneck in order to use this feature.');`; 12 | const exclude = [ 13 | 'RedisDatastore.js', 14 | 'RedisConnection.js', 15 | 'IORedisConnection.js', 16 | 'Scripts.js' 17 | ]; 18 | 19 | export default { 20 | input: 'lib/index.js', 21 | output: { 22 | name: 'Bottleneck', 23 | file: 'light.js', 24 | sourcemap: false, 25 | globals: {}, 26 | format: 'umd', 27 | banner 28 | }, 29 | external: [], 30 | plugins: [ 31 | json(), 32 | { 33 | load: id => { 34 | const chunks = id.split('/'); 35 | const file = chunks[chunks.length - 1]; 36 | if (exclude.indexOf(file) >= 0) { 37 | return missing 38 | } 39 | } 40 | }, 41 | resolve(), 42 | commonjs() 43 | ] 44 | }; 45 | -------------------------------------------------------------------------------- /scripts/assemble_lua.js: -------------------------------------------------------------------------------- 1 | var fs = require('fs') 2 | 3 | var input = __dirname + '/../src/redis' 4 | var loaded = {} 5 | 6 | var promises = fs.readdirSync(input).map(function (file) { 7 | return new Promise(function (resolve, reject) { 8 | fs.readFile(input + '/' + file, function (err, data) { 9 | if (err != null) { 10 | return reject(err) 11 | } 12 | loaded[file] = data.toString('utf8') 13 | return resolve() 14 | }) 15 | }) 16 | }) 17 | 18 | Promise.all(promises) 19 | .then(function () { 20 | console.log(JSON.stringify(loaded, Object.keys(loaded).sort(), 2)) 21 | }) 22 | .catch(function (err) { 23 | console.error(err) 24 | process.exit(1) 25 | }) 26 | 
-------------------------------------------------------------------------------- /scripts/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | if [ ! -d node_modules ]; then 6 | echo "[B] Run 'npm install' first" 7 | exit 1 8 | fi 9 | 10 | 11 | clean() { 12 | rm -f .babelrc 13 | rm -rf lib/* 14 | node scripts/version.js > lib/version.json 15 | node scripts/assemble_lua.js > lib/lua.json 16 | } 17 | 18 | makeLib10() { 19 | echo '[B] Compiling Bottleneck to Node 10+...' 20 | npx coffee --compile --bare --no-header src/*.coffee 21 | mv src/*.js lib/ 22 | } 23 | 24 | makeLib6() { 25 | echo '[B] Compiling Bottleneck to Node 6+...' 26 | ln -s .babelrc.lib .babelrc 27 | npx coffee --compile --bare --no-header --transpile src/*.coffee 28 | mv src/*.js lib/ 29 | } 30 | 31 | makeES5() { 32 | echo '[B] Compiling Bottleneck to ES5...' 33 | ln -s .babelrc.es5 .babelrc 34 | npx coffee --compile --bare --no-header src/*.coffee 35 | mv src/*.js lib/ 36 | 37 | echo '[B] Assembling ES5 bundle...' 38 | npx rollup -c rollup.config.es5.js 39 | } 40 | 41 | makeLight() { 42 | makeLib10 43 | 44 | echo '[B] Assembling light bundle...' 45 | npx rollup -c rollup.config.light.js 46 | } 47 | 48 | makeTypings() { 49 | echo '[B] Compiling and testing TS typings...' 50 | npx ejs-cli bottleneck.d.ts.ejs > bottleneck.d.ts 51 | npx ejs-cli light.d.ts.ejs > light.d.ts 52 | npx tsc --noEmit --strict test.ts 53 | } 54 | 55 | if [ "$1" = 'dev' ]; then 56 | clean 57 | makeLib10 58 | elif [ "$1" = 'bench' ]; then 59 | clean 60 | makeLib6 61 | elif [ "$1" = 'es5' ]; then 62 | clean 63 | makeES5 64 | elif [ "$1" = 'light' ]; then 65 | clean 66 | makeLight 67 | elif [ "$1" = 'typings' ]; then 68 | makeTypings 69 | else 70 | clean 71 | makeES5 72 | 73 | clean 74 | makeLight 75 | 76 | clean 77 | makeLib6 78 | makeTypings 79 | fi 80 | 81 | rm -f .babelrc 82 | 83 | echo '[B] Done!' 
84 | -------------------------------------------------------------------------------- /scripts/test_all.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source .env 6 | 7 | echo 'ioredis tests' 8 | DATASTORE=ioredis npm test 9 | 10 | echo 'NodeRedis tests' 11 | DATASTORE=redis npm test 12 | 13 | echo 'ES5 bundle tests' 14 | BUILD=es5 npm test 15 | 16 | echo 'Light bundle tests' 17 | BUILD=light npm test 18 | 19 | echo 'Local tests' 20 | npm test 21 | -------------------------------------------------------------------------------- /scripts/version.js: -------------------------------------------------------------------------------- 1 | const packagejson = require('../package.json') 2 | 3 | console.log(JSON.stringify({version: packagejson.version})) 4 | -------------------------------------------------------------------------------- /src/Batcher.coffee: -------------------------------------------------------------------------------- 1 | parser = require "./parser" 2 | Events = require "./Events" 3 | 4 | class Batcher 5 | defaults: 6 | maxTime: null 7 | maxSize: null 8 | Promise: Promise 9 | 10 | constructor: (@options={}) -> 11 | parser.load @options, @defaults, @ 12 | @Events = new Events @ 13 | @_arr = [] 14 | @_resetPromise() 15 | @_lastFlush = Date.now() 16 | 17 | _resetPromise: -> 18 | @_promise = new @Promise (res, rej) => @_resolve = res 19 | 20 | _flush: -> 21 | clearTimeout @_timeout 22 | @_lastFlush = Date.now() 23 | @_resolve() 24 | @Events.trigger "batch", @_arr 25 | @_arr = [] 26 | @_resetPromise() 27 | 28 | add: (data) -> 29 | @_arr.push data 30 | ret = @_promise 31 | if @_arr.length == @maxSize 32 | @_flush() 33 | else if @maxTime? 
and @_arr.length == 1 34 | @_timeout = setTimeout => 35 | @_flush() 36 | , @maxTime 37 | ret 38 | 39 | module.exports = Batcher 40 | -------------------------------------------------------------------------------- /src/Bottleneck.coffee: -------------------------------------------------------------------------------- 1 | NUM_PRIORITIES = 10 2 | DEFAULT_PRIORITY = 5 3 | 4 | parser = require "./parser" 5 | Queues = require "./Queues" 6 | Job = require "./Job" 7 | LocalDatastore = require "./LocalDatastore" 8 | RedisDatastore = require "./RedisDatastore" 9 | Events = require "./Events" 10 | States = require "./States" 11 | Sync = require "./Sync" 12 | 13 | class Bottleneck 14 | Bottleneck.default = Bottleneck 15 | Bottleneck.Events = Events 16 | Bottleneck.version = Bottleneck::version = require("./version.json").version 17 | Bottleneck.strategy = Bottleneck::strategy = { LEAK:1, OVERFLOW:2, OVERFLOW_PRIORITY:4, BLOCK:3 } 18 | Bottleneck.BottleneckError = Bottleneck::BottleneckError = require "./BottleneckError" 19 | Bottleneck.Group = Bottleneck::Group = require "./Group" 20 | Bottleneck.RedisConnection = Bottleneck::RedisConnection = require "./RedisConnection" 21 | Bottleneck.IORedisConnection = Bottleneck::IORedisConnection = require "./IORedisConnection" 22 | Bottleneck.Batcher = Bottleneck::Batcher = require "./Batcher" 23 | jobDefaults: 24 | priority: DEFAULT_PRIORITY 25 | weight: 1 26 | expiration: null 27 | id: "" 28 | storeDefaults: 29 | maxConcurrent: null 30 | minTime: 0 31 | highWater: null 32 | strategy: Bottleneck::strategy.LEAK 33 | penalty: null 34 | reservoir: null 35 | reservoirRefreshInterval: null 36 | reservoirRefreshAmount: null 37 | reservoirIncreaseInterval: null 38 | reservoirIncreaseAmount: null 39 | reservoirIncreaseMaximum: null 40 | localStoreDefaults: 41 | Promise: Promise 42 | timeout: null 43 | heartbeatInterval: 250 44 | redisStoreDefaults: 45 | Promise: Promise 46 | timeout: null 47 | heartbeatInterval: 5000 48 | clientTimeout: 10000 
49 | Redis: null 50 | clientOptions: {} 51 | clusterNodes: null 52 | clearDatastore: false 53 | connection: null 54 | instanceDefaults: 55 | datastore: "local" 56 | connection: null 57 | id: "" 58 | rejectOnDrop: true 59 | trackDoneStatus: false 60 | Promise: Promise 61 | stopDefaults: 62 | enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs." 63 | dropWaitingJobs: true 64 | dropErrorMessage: "This limiter has been stopped." 65 | 66 | constructor: (options={}, invalid...) -> 67 | @_validateOptions options, invalid 68 | parser.load options, @instanceDefaults, @ 69 | @_queues = new Queues NUM_PRIORITIES 70 | @_scheduled = {} 71 | @_states = new States ["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(if @trackDoneStatus then ["DONE"] else []) 72 | @_limiter = null 73 | @Events = new Events @ 74 | @_submitLock = new Sync "submit", @Promise 75 | @_registerLock = new Sync "register", @Promise 76 | storeOptions = parser.load options, @storeDefaults, {} 77 | 78 | @_store = if @datastore == "redis" or @datastore == "ioredis" or @connection? 79 | storeInstanceOptions = parser.load options, @redisStoreDefaults, {} 80 | new RedisDatastore @, storeOptions, storeInstanceOptions 81 | else if @datastore == "local" 82 | storeInstanceOptions = parser.load options, @localStoreDefaults, {} 83 | new LocalDatastore @, storeOptions, storeInstanceOptions 84 | else 85 | throw new Bottleneck::BottleneckError "Invalid datastore type: #{@datastore}" 86 | 87 | @_queues.on "leftzero", => @_store.heartbeat?.ref?() 88 | @_queues.on "zero", => @_store.heartbeat?.unref?() 89 | 90 | _validateOptions: (options, invalid) -> 91 | unless options? and typeof options == "object" and invalid.length == 0 92 | throw new Bottleneck::BottleneckError "Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1." 
93 | 94 | ready: -> @_store.ready 95 | 96 | clients: -> @_store.clients 97 | 98 | channel: -> "b_#{@id}" 99 | 100 | channel_client: -> "b_#{@id}_#{@_store.clientId}" 101 | 102 | publish: (message) -> @_store.__publish__ message 103 | 104 | disconnect: (flush=true) -> @_store.__disconnect__ flush 105 | 106 | chain: (@_limiter) -> @ 107 | 108 | queued: (priority) -> @_queues.queued priority 109 | 110 | clusterQueued: -> @_store.__queued__() 111 | 112 | empty: -> @queued() == 0 and @_submitLock.isEmpty() 113 | 114 | running: -> @_store.__running__() 115 | 116 | done: -> @_store.__done__() 117 | 118 | jobStatus: (id) -> @_states.jobStatus id 119 | 120 | jobs: (status) -> @_states.statusJobs status 121 | 122 | counts: -> @_states.statusCounts() 123 | 124 | _randomIndex: -> Math.random().toString(36).slice(2) 125 | 126 | check: (weight=1) -> @_store.__check__ weight 127 | 128 | _clearGlobalState: (index) -> 129 | if @_scheduled[index]? 130 | clearTimeout @_scheduled[index].expiration 131 | delete @_scheduled[index] 132 | true 133 | else false 134 | 135 | _free: (index, job, options, eventInfo) -> 136 | try 137 | { running } = await @_store.__free__ index, options.weight 138 | @Events.trigger "debug", "Freed #{options.id}", eventInfo 139 | if running == 0 and @empty() then @Events.trigger "idle" 140 | catch e 141 | @Events.trigger "error", e 142 | 143 | _run: (index, job, wait) -> 144 | job.doRun() 145 | clearGlobalState = @_clearGlobalState.bind @, index 146 | run = @_run.bind @, index, job 147 | free = @_free.bind @, index, job 148 | 149 | @_scheduled[index] = 150 | timeout: setTimeout => 151 | job.doExecute @_limiter, clearGlobalState, run, free 152 | , wait 153 | expiration: if job.options.expiration? 
then setTimeout -> 154 | job.doExpire clearGlobalState, run, free 155 | , wait + job.options.expiration 156 | job: job 157 | 158 | _drainOne: (capacity) -> 159 | @_registerLock.schedule => 160 | if @queued() == 0 then return @Promise.resolve null 161 | queue = @_queues.getFirst() 162 | { options, args } = next = queue.first() 163 | if capacity? and options.weight > capacity then return @Promise.resolve null 164 | @Events.trigger "debug", "Draining #{options.id}", { args, options } 165 | index = @_randomIndex() 166 | @_store.__register__ index, options.weight, options.expiration 167 | .then ({ success, wait, reservoir }) => 168 | @Events.trigger "debug", "Drained #{options.id}", { success, args, options } 169 | if success 170 | queue.shift() 171 | empty = @empty() 172 | if empty then @Events.trigger "empty" 173 | if reservoir == 0 then @Events.trigger "depleted", empty 174 | @_run index, next, wait 175 | @Promise.resolve options.weight 176 | else 177 | @Promise.resolve null 178 | 179 | _drainAll: (capacity, total=0) -> 180 | @_drainOne(capacity) 181 | .then (drained) => 182 | if drained? 183 | newCapacity = if capacity? 
then capacity - drained else capacity 184 | @_drainAll(newCapacity, total + drained) 185 | else @Promise.resolve total 186 | .catch (e) => @Events.trigger "error", e 187 | 188 | _dropAllQueued: (message) -> @_queues.shiftAll (job) -> job.doDrop { message } 189 | 190 | stop: (options={}) -> 191 | options = parser.load options, @stopDefaults 192 | waitForExecuting = (at) => 193 | finished = => 194 | counts = @_states.counts 195 | (counts[0] + counts[1] + counts[2] + counts[3]) == at 196 | new @Promise (resolve, reject) => 197 | if finished() then resolve() 198 | else 199 | @on "done", => 200 | if finished() 201 | @removeAllListeners "done" 202 | resolve() 203 | done = if options.dropWaitingJobs 204 | @_run = (index, next) -> next.doDrop { message: options.dropErrorMessage } 205 | @_drainOne = => @Promise.resolve null 206 | @_registerLock.schedule => @_submitLock.schedule => 207 | for k, v of @_scheduled 208 | if @jobStatus(v.job.options.id) == "RUNNING" 209 | clearTimeout v.timeout 210 | clearTimeout v.expiration 211 | v.job.doDrop { message: options.dropErrorMessage } 212 | @_dropAllQueued options.dropErrorMessage 213 | waitForExecuting(0) 214 | else 215 | @schedule { priority: NUM_PRIORITIES - 1, weight: 0 }, => waitForExecuting(1) 216 | @_receive = (job) -> job._reject new Bottleneck::BottleneckError options.enqueueErrorMessage 217 | @stop = => @Promise.reject new Bottleneck::BottleneckError "stop() has already been called" 218 | done 219 | 220 | _addToQueue: (job) => 221 | { args, options } = job 222 | try 223 | { reachedHWM, blocked, strategy } = await @_store.__submit__ @queued(), options.weight 224 | catch error 225 | @Events.trigger "debug", "Could not queue #{options.id}", { args, options, error } 226 | job.doDrop { error } 227 | return false 228 | 229 | if blocked 230 | job.doDrop() 231 | return true 232 | else if reachedHWM 233 | shifted = if strategy == Bottleneck::strategy.LEAK then @_queues.shiftLastFrom(options.priority) 234 | else if strategy == 
Bottleneck::strategy.OVERFLOW_PRIORITY then @_queues.shiftLastFrom(options.priority + 1) 235 | else if strategy == Bottleneck::strategy.OVERFLOW then job 236 | if shifted? then shifted.doDrop() 237 | if not shifted? or strategy == Bottleneck::strategy.OVERFLOW 238 | if not shifted? then job.doDrop() 239 | return reachedHWM 240 | 241 | job.doQueue reachedHWM, blocked 242 | @_queues.push job 243 | await @_drainAll() 244 | reachedHWM 245 | 246 | _receive: (job) -> 247 | if @_states.jobStatus(job.options.id)? 248 | job._reject new Bottleneck::BottleneckError "A job with the same id already exists (id=#{job.options.id})" 249 | false 250 | else 251 | job.doReceive() 252 | @_submitLock.schedule @_addToQueue, job 253 | 254 | submit: (args...) -> 255 | if typeof args[0] == "function" 256 | [fn, args..., cb] = args 257 | options = parser.load {}, @jobDefaults 258 | else 259 | [options, fn, args..., cb] = args 260 | options = parser.load options, @jobDefaults 261 | 262 | task = (args...) => 263 | new @Promise (resolve, reject) -> 264 | fn args..., (args...) -> 265 | (if args[0]? then reject else resolve) args 266 | 267 | job = new Job task, args, options, @jobDefaults, @rejectOnDrop, @Events, @_states, @Promise 268 | job.promise 269 | .then (args) -> cb? args... 270 | .catch (args) -> if Array.isArray args then cb? args... else cb? args 271 | @_receive job 272 | 273 | schedule: (args...) -> 274 | if typeof args[0] == "function" 275 | [task, args...] = args 276 | options = {} 277 | else 278 | [options, task, args...] = args 279 | job = new Job task, args, options, @jobDefaults, @rejectOnDrop, @Events, @_states, @Promise 280 | @_receive job 281 | job.promise 282 | 283 | wrap: (fn) -> 284 | schedule = @schedule.bind @ 285 | wrapped = (args...) -> schedule fn.bind(@), args... 286 | wrapped.withOptions = (options, args...) -> schedule options, fn, args... 
287 | wrapped 288 | 289 | updateSettings: (options={}) -> 290 | await @_store.__updateSettings__ parser.overwrite options, @storeDefaults 291 | parser.overwrite options, @instanceDefaults, @ 292 | @ 293 | 294 | currentReservoir: -> @_store.__currentReservoir__() 295 | 296 | incrementReservoir: (incr=0) -> @_store.__incrementReservoir__ incr 297 | 298 | module.exports = Bottleneck 299 | -------------------------------------------------------------------------------- /src/BottleneckError.coffee: -------------------------------------------------------------------------------- 1 | class BottleneckError extends Error 2 | 3 | module.exports = BottleneckError 4 | -------------------------------------------------------------------------------- /src/DLList.coffee: -------------------------------------------------------------------------------- 1 | class DLList 2 | constructor: (@incr, @decr) -> 3 | @_first = null 4 | @_last = null 5 | @length = 0 6 | push: (value) -> 7 | @length++ 8 | @incr?() 9 | node = { value, prev: @_last, next: null } 10 | if @_last? 11 | @_last.next = node 12 | @_last = node 13 | else @_first = @_last = node 14 | undefined 15 | shift: () -> 16 | if not @_first? then return 17 | else 18 | @length-- 19 | @decr?() 20 | value = @_first.value 21 | if (@_first = @_first.next)? 22 | @_first.prev = null 23 | else 24 | @_last = null 25 | value 26 | first: () -> if @_first? then @_first.value 27 | getArray: () -> 28 | node = @_first 29 | while node? then (ref = node; node = node.next; ref.value) 30 | forEachShift: (cb) -> 31 | node = @shift() 32 | while node? then (cb node; node = @shift()) 33 | undefined 34 | debug: () -> 35 | node = @_first 36 | while node? 
then (ref = node; node = node.next; { value: ref.value, prev: ref.prev?.value, next: ref.next?.value }) 37 | 38 | module.exports = DLList 39 | -------------------------------------------------------------------------------- /src/Events.coffee: -------------------------------------------------------------------------------- 1 | class Events 2 | constructor: (@instance) -> 3 | @_events = {} 4 | if @instance.on? or @instance.once? or @instance.removeAllListeners? 5 | throw new Error "An Emitter already exists for this object" 6 | @instance.on = (name, cb) => @_addListener name, "many", cb 7 | @instance.once = (name, cb) => @_addListener name, "once", cb 8 | @instance.removeAllListeners = (name=null) => 9 | if name? then delete @_events[name] else @_events = {} 10 | _addListener: (name, status, cb) -> 11 | @_events[name] ?= [] 12 | @_events[name].push {cb, status} 13 | @instance 14 | listenerCount: (name) -> 15 | if @_events[name]? then @_events[name].length else 0 16 | trigger: (name, args...) -> 17 | try 18 | if name != "debug" then @trigger "debug", "Event triggered: #{name}", args 19 | return unless @_events[name]? 20 | @_events[name] = @_events[name].filter (listener) -> listener.status != "none" 21 | promises = @_events[name].map (listener) => 22 | return if listener.status == "none" 23 | if listener.status == "once" then listener.status = "none" 24 | try 25 | returned = listener.cb?(args...) 26 | if typeof returned?.then == "function" 27 | await returned 28 | else 29 | returned 30 | catch e 31 | if "name" != "error" then @trigger "error", e 32 | null 33 | (await Promise.all promises).find (x) -> x? 
34 | catch e 35 | if "name" != "error" then @trigger "error", e 36 | null 37 | 38 | module.exports = Events 39 | -------------------------------------------------------------------------------- /src/Group.coffee: -------------------------------------------------------------------------------- 1 | parser = require "./parser" 2 | Events = require "./Events" 3 | RedisConnection = require "./RedisConnection" 4 | IORedisConnection = require "./IORedisConnection" 5 | Scripts = require "./Scripts" 6 | 7 | class Group 8 | defaults: 9 | timeout: 1000 * 60 * 5 10 | connection: null 11 | Promise: Promise 12 | id: "group-key" 13 | 14 | constructor: (@limiterOptions={}) -> 15 | parser.load @limiterOptions, @defaults, @ 16 | @Events = new Events @ 17 | @instances = {} 18 | @Bottleneck = require "./Bottleneck" 19 | @_startAutoCleanup() 20 | @sharedConnection = @connection? 21 | 22 | if !@connection? 23 | if @limiterOptions.datastore == "redis" 24 | @connection = new RedisConnection Object.assign {}, @limiterOptions, { @Events } 25 | else if @limiterOptions.datastore == "ioredis" 26 | @connection = new IORedisConnection Object.assign {}, @limiterOptions, { @Events } 27 | 28 | key: (key="") -> @instances[key] ? do => 29 | limiter = @instances[key] = new @Bottleneck Object.assign @limiterOptions, { 30 | id: "#{@id}-#{key}", 31 | @timeout, 32 | @connection 33 | } 34 | @Events.trigger "created", limiter, key 35 | limiter 36 | 37 | deleteKey: (key="") => 38 | instance = @instances[key] 39 | if @connection 40 | deleted = await @connection.__runCommand__ ['del', Scripts.allKeys("#{@id}-#{key}")...] 41 | if instance? 42 | delete @instances[key] 43 | await instance.disconnect() 44 | instance? or deleted > 0 45 | 46 | limiters: -> { key: k, limiter: v } for k, v of @instances 47 | 48 | keys: -> Object.keys @instances 49 | 50 | clusterKeys: -> 51 | if !@connection? 
then return @Promise.resolve @keys() 52 | keys = [] 53 | cursor = null 54 | start = "b_#{@id}-".length 55 | end = "_settings".length 56 | until cursor == 0 57 | [next, found] = await @connection.__runCommand__ ["scan", (cursor ? 0), "match", "b_#{@id}-*_settings", "count", 10000] 58 | cursor = ~~next 59 | keys.push(k.slice(start, -end)) for k in found 60 | keys 61 | 62 | _startAutoCleanup: -> 63 | clearInterval @interval 64 | (@interval = setInterval => 65 | time = Date.now() 66 | for k, v of @instances 67 | try if await v._store.__groupCheck__(time) then @deleteKey k 68 | catch e then v.Events.trigger "error", e 69 | , (@timeout / 2)).unref?() 70 | 71 | updateSettings: (options={}) -> 72 | parser.overwrite options, @defaults, @ 73 | parser.overwrite options, options, @limiterOptions 74 | @_startAutoCleanup() if options.timeout? 75 | 76 | disconnect: (flush=true) -> 77 | if !@sharedConnection 78 | @connection?.disconnect flush 79 | 80 | module.exports = Group 81 | -------------------------------------------------------------------------------- /src/IORedisConnection.coffee: -------------------------------------------------------------------------------- 1 | parser = require "./parser" 2 | Events = require "./Events" 3 | Scripts = require "./Scripts" 4 | 5 | class IORedisConnection 6 | datastore: "ioredis" 7 | defaults: 8 | Redis: null 9 | clientOptions: {} 10 | clusterNodes: null 11 | client: null 12 | Promise: Promise 13 | Events: null 14 | 15 | constructor: (options={}) -> 16 | parser.load options, @defaults, @ 17 | @Redis ?= eval("require")("ioredis") # Obfuscated or else Webpack/Angular will try to inline the optional ioredis module. To override this behavior: pass the ioredis module to Bottleneck as the 'Redis' option. 18 | @Events ?= new Events @ 19 | @terminated = false 20 | 21 | if @clusterNodes? 22 | @client = new @Redis.Cluster @clusterNodes, @clientOptions 23 | @subscriber = new @Redis.Cluster @clusterNodes, @clientOptions 24 | else if @client? 
and !@client.duplicate? 25 | @subscriber = new @Redis.Cluster @client.startupNodes, @client.options 26 | else 27 | @client ?= new @Redis @clientOptions 28 | @subscriber = @client.duplicate() 29 | @limiters = {} 30 | 31 | @ready = @Promise.all [@_setup(@client, false), @_setup(@subscriber, true)] 32 | .then => 33 | @_loadScripts() 34 | { @client, @subscriber } 35 | 36 | _setup: (client, sub) -> 37 | client.setMaxListeners 0 38 | new @Promise (resolve, reject) => 39 | client.on "error", (e) => @Events.trigger "error", e 40 | if sub 41 | client.on "message", (channel, message) => 42 | @limiters[channel]?._store.onMessage channel, message 43 | if client.status == "ready" then resolve() 44 | else client.once "ready", resolve 45 | 46 | _loadScripts: -> Scripts.names.forEach (name) => @client.defineCommand name, { lua: Scripts.payload(name) } 47 | 48 | __runCommand__: (cmd) -> 49 | await @ready 50 | [[_, deleted]] = await @client.pipeline([cmd]).exec() 51 | deleted 52 | 53 | __addLimiter__: (instance) -> 54 | @Promise.all [instance.channel(), instance.channel_client()].map (channel) => 55 | new @Promise (resolve, reject) => 56 | @subscriber.subscribe channel, => 57 | @limiters[channel] = instance 58 | resolve() 59 | 60 | __removeLimiter__: (instance) -> 61 | [instance.channel(), instance.channel_client()].forEach (channel) => 62 | await @subscriber.unsubscribe channel unless @terminated 63 | delete @limiters[channel] 64 | 65 | __scriptArgs__: (name, id, args, cb) -> 66 | keys = Scripts.keys name, id 67 | [keys.length].concat keys, args, cb 68 | 69 | __scriptFn__: (name) -> 70 | @client[name].bind(@client) 71 | 72 | disconnect: (flush=true) -> 73 | clearInterval(@limiters[k]._store.heartbeat) for k in Object.keys @limiters 74 | @limiters = {} 75 | @terminated = true 76 | 77 | if flush 78 | @Promise.all [@client.quit(), @subscriber.quit()] 79 | else 80 | @client.disconnect() 81 | @subscriber.disconnect() 82 | @Promise.resolve() 83 | 84 | module.exports = IORedisConnection 
85 | -------------------------------------------------------------------------------- /src/Job.coffee: -------------------------------------------------------------------------------- 1 | NUM_PRIORITIES = 10 2 | DEFAULT_PRIORITY = 5 3 | 4 | parser = require "./parser" 5 | BottleneckError = require "./BottleneckError" 6 | 7 | class Job 8 | constructor: (@task, @args, options, jobDefaults, @rejectOnDrop, @Events, @_states, @Promise) -> 9 | @options = parser.load options, jobDefaults 10 | @options.priority = @_sanitizePriority @options.priority 11 | if @options.id == jobDefaults.id then @options.id = "#{@options.id}-#{@_randomIndex()}" 12 | @promise = new @Promise (@_resolve, @_reject) => 13 | @retryCount = 0 14 | 15 | _sanitizePriority: (priority) -> 16 | sProperty = if ~~priority != priority then DEFAULT_PRIORITY else priority 17 | if sProperty < 0 then 0 else if sProperty > NUM_PRIORITIES-1 then NUM_PRIORITIES-1 else sProperty 18 | 19 | _randomIndex: -> Math.random().toString(36).slice(2) 20 | 21 | doDrop: ({ error, message="This job has been dropped by Bottleneck" } = {}) -> 22 | if @_states.remove @options.id 23 | if @rejectOnDrop then @_reject (error ? new BottleneckError message) 24 | @Events.trigger "dropped", { @args, @options, @task, @promise } 25 | true 26 | else 27 | false 28 | 29 | _assertStatus: (expected) -> 30 | status = @_states.jobStatus @options.id 31 | if not (status == expected or (expected == "DONE" and status == null)) 32 | throw new BottleneckError "Invalid job status #{status}, expected #{expected}. 
Please open an issue at https://github.com/SGrondin/bottleneck/issues" 33 | 34 | doReceive: () -> 35 | @_states.start @options.id 36 | @Events.trigger "received", { @args, @options } 37 | 38 | doQueue: (reachedHWM, blocked) -> 39 | @_assertStatus "RECEIVED" 40 | @_states.next @options.id 41 | @Events.trigger "queued", { @args, @options, reachedHWM, blocked } 42 | 43 | doRun: () -> 44 | if @retryCount == 0 45 | @_assertStatus "QUEUED" 46 | @_states.next @options.id 47 | else @_assertStatus "EXECUTING" 48 | @Events.trigger "scheduled", { @args, @options } 49 | 50 | doExecute: (chained, clearGlobalState, run, free) -> 51 | if @retryCount == 0 52 | @_assertStatus "RUNNING" 53 | @_states.next @options.id 54 | else @_assertStatus "EXECUTING" 55 | eventInfo = { @args, @options, @retryCount } 56 | @Events.trigger "executing", eventInfo 57 | 58 | try 59 | passed = await if chained? 60 | chained.schedule @options, @task, @args... 61 | else @task @args... 62 | 63 | if clearGlobalState() 64 | @doDone eventInfo 65 | await free @options, eventInfo 66 | @_assertStatus "DONE" 67 | @_resolve passed 68 | catch error 69 | @_onFailure error, eventInfo, clearGlobalState, run, free 70 | 71 | doExpire: (clearGlobalState, run, free) -> 72 | if (@_states.jobStatus @options.id) == "RUNNING" 73 | @_states.next @options.id 74 | @_assertStatus "EXECUTING" 75 | eventInfo = { @args, @options, @retryCount } 76 | error = new BottleneckError "This job timed out after #{@options.expiration} ms." 77 | @_onFailure error, eventInfo, clearGlobalState, run, free 78 | 79 | _onFailure: (error, eventInfo, clearGlobalState, run, free) -> 80 | if clearGlobalState() 81 | retry = await @Events.trigger "failed", error, eventInfo 82 | if retry?
83 | retryAfter = ~~retry 84 | @Events.trigger "retry", "Retrying #{@options.id} after #{retryAfter} ms", eventInfo 85 | @retryCount++ 86 | run retryAfter 87 | else 88 | @doDone eventInfo 89 | await free @options, eventInfo 90 | @_assertStatus "DONE" 91 | @_reject error 92 | 93 | doDone: (eventInfo) -> 94 | @_assertStatus "EXECUTING" 95 | @_states.next @options.id 96 | @Events.trigger "done", eventInfo 97 | 98 | module.exports = Job 99 | -------------------------------------------------------------------------------- /src/LocalDatastore.coffee: -------------------------------------------------------------------------------- 1 | parser = require "./parser" 2 | BottleneckError = require "./BottleneckError" 3 | 4 | class LocalDatastore 5 | constructor: (@instance, @storeOptions, storeInstanceOptions) -> 6 | @clientId = @instance._randomIndex() 7 | parser.load storeInstanceOptions, storeInstanceOptions, @ 8 | @_nextRequest = @_lastReservoirRefresh = @_lastReservoirIncrease = Date.now() 9 | @_running = 0 10 | @_done = 0 11 | @_unblockTime = 0 12 | @ready = @Promise.resolve() 13 | @clients = {} 14 | @_startHeartbeat() 15 | 16 | _startHeartbeat: -> 17 | if !@heartbeat? and (( 18 | @storeOptions.reservoirRefreshInterval? and @storeOptions.reservoirRefreshAmount? 19 | ) or ( 20 | @storeOptions.reservoirIncreaseInterval? and @storeOptions.reservoirIncreaseAmount? 21 | )) 22 | (@heartbeat = setInterval => 23 | now = Date.now() 24 | 25 | if @storeOptions.reservoirRefreshInterval? and now >= @_lastReservoirRefresh + @storeOptions.reservoirRefreshInterval 26 | @_lastReservoirRefresh = now 27 | @storeOptions.reservoir = @storeOptions.reservoirRefreshAmount 28 | @instance._drainAll @computeCapacity() 29 | 30 | if @storeOptions.reservoirIncreaseInterval? 
and now >= @_lastReservoirIncrease + @storeOptions.reservoirIncreaseInterval 31 | { reservoirIncreaseAmount: amount, reservoirIncreaseMaximum: maximum, reservoir } = @storeOptions 32 | @_lastReservoirIncrease = now 33 | incr = if maximum? then Math.min amount, maximum - reservoir else amount 34 | if incr > 0 35 | @storeOptions.reservoir += incr 36 | @instance._drainAll @computeCapacity() 37 | 38 | , @heartbeatInterval).unref?() 39 | else clearInterval @heartbeat 40 | 41 | __publish__: (message) -> 42 | await @yieldLoop() 43 | @instance.Events.trigger "message", message.toString() 44 | 45 | __disconnect__: (flush) -> 46 | await @yieldLoop() 47 | clearInterval @heartbeat 48 | @Promise.resolve() 49 | 50 | yieldLoop: (t=0) -> new @Promise (resolve, reject) -> setTimeout resolve, t 51 | 52 | computePenalty: -> @storeOptions.penalty ? ((15 * @storeOptions.minTime) or 5000) 53 | 54 | __updateSettings__: (options) -> 55 | await @yieldLoop() 56 | parser.overwrite options, options, @storeOptions 57 | @_startHeartbeat() 58 | @instance._drainAll @computeCapacity() 59 | true 60 | 61 | __running__: -> 62 | await @yieldLoop() 63 | @_running 64 | 65 | __queued__: -> 66 | await @yieldLoop() 67 | @instance.queued() 68 | 69 | __done__: -> 70 | await @yieldLoop() 71 | @_done 72 | 73 | __groupCheck__: (time) -> 74 | await @yieldLoop() 75 | (@_nextRequest + @timeout) < time 76 | 77 | computeCapacity: -> 78 | { maxConcurrent, reservoir } = @storeOptions 79 | if maxConcurrent? and reservoir? then Math.min((maxConcurrent - @_running), reservoir) 80 | else if maxConcurrent? then maxConcurrent - @_running 81 | else if reservoir? then reservoir 82 | else null 83 | 84 | conditionsCheck: (weight) -> 85 | capacity = @computeCapacity() 86 | not capacity? 
or weight <= capacity 87 | 88 | __incrementReservoir__: (incr) -> 89 | await @yieldLoop() 90 | reservoir = @storeOptions.reservoir += incr 91 | @instance._drainAll @computeCapacity() 92 | reservoir 93 | 94 | __currentReservoir__: -> 95 | await @yieldLoop() 96 | @storeOptions.reservoir 97 | 98 | isBlocked: (now) -> @_unblockTime >= now 99 | 100 | check: (weight, now) -> @conditionsCheck(weight) and (@_nextRequest - now) <= 0 101 | 102 | __check__: (weight) -> 103 | await @yieldLoop() 104 | now = Date.now() 105 | @check weight, now 106 | 107 | __register__: (index, weight, expiration) -> 108 | await @yieldLoop() 109 | now = Date.now() 110 | if @conditionsCheck weight 111 | @_running += weight 112 | if @storeOptions.reservoir? then @storeOptions.reservoir -= weight 113 | wait = Math.max @_nextRequest - now, 0 114 | @_nextRequest = now + wait + @storeOptions.minTime 115 | { success: true, wait, reservoir: @storeOptions.reservoir } 116 | else { success: false } 117 | 118 | strategyIsBlock: -> @storeOptions.strategy == 3 119 | 120 | __submit__: (queueLength, weight) -> 121 | await @yieldLoop() 122 | if @storeOptions.maxConcurrent? and weight > @storeOptions.maxConcurrent 123 | throw new BottleneckError("Impossible to add a job having a weight of #{weight} to a limiter having a maxConcurrent setting of #{@storeOptions.maxConcurrent}") 124 | now = Date.now() 125 | reachedHWM = @storeOptions.highWater? 
and queueLength == @storeOptions.highWater and not @check(weight, now) 126 | blocked = @strategyIsBlock() and (reachedHWM or @isBlocked now) 127 | if blocked 128 | @_unblockTime = now + @computePenalty() 129 | @_nextRequest = @_unblockTime + @storeOptions.minTime 130 | @instance._dropAllQueued() 131 | { reachedHWM, blocked, strategy: @storeOptions.strategy } 132 | 133 | __free__: (index, weight) -> 134 | await @yieldLoop() 135 | @_running -= weight 136 | @_done += weight 137 | @instance._drainAll @computeCapacity() 138 | { running: @_running } 139 | 140 | module.exports = LocalDatastore 141 | -------------------------------------------------------------------------------- /src/Queues.coffee: -------------------------------------------------------------------------------- 1 | DLList = require "./DLList" 2 | Events = require "./Events" 3 | 4 | class Queues 5 | 6 | constructor: (num_priorities) -> 7 | @Events = new Events @ 8 | @_length = 0 9 | @_lists = for i in [1..num_priorities] then new DLList (=> @incr()), (=> @decr()) 10 | 11 | incr: -> if @_length++ == 0 then @Events.trigger "leftzero" 12 | 13 | decr: -> if --@_length == 0 then @Events.trigger "zero" 14 | 15 | push: (job) -> @_lists[job.options.priority].push job 16 | 17 | queued: (priority) -> if priority? 
then @_lists[priority].length else @_length 18 | 19 | shiftAll: (fn) -> @_lists.forEach (list) -> list.forEachShift fn 20 | 21 | getFirst: (arr=@_lists) -> 22 | for list in arr 23 | return list if list.length > 0 24 | [] 25 | 26 | shiftLastFrom: (priority) -> @getFirst(@_lists[priority..].reverse()).shift() 27 | 28 | module.exports = Queues 29 | -------------------------------------------------------------------------------- /src/RedisConnection.coffee: -------------------------------------------------------------------------------- 1 | parser = require "./parser" 2 | Events = require "./Events" 3 | Scripts = require "./Scripts" 4 | 5 | class RedisConnection 6 | datastore: "redis" 7 | defaults: 8 | Redis: null 9 | clientOptions: {} 10 | client: null 11 | Promise: Promise 12 | Events: null 13 | 14 | constructor: (options={}) -> 15 | parser.load options, @defaults, @ 16 | @Redis ?= eval("require")("redis") # Obfuscated or else Webpack/Angular will try to inline the optional redis module. To override this behavior: pass the redis module to Bottleneck as the 'Redis' option. 17 | @Events ?= new Events @ 18 | @terminated = false 19 | 20 | @client ?= @Redis.createClient @clientOptions 21 | @subscriber = @client.duplicate() 22 | @limiters = {} 23 | @shas = {} 24 | 25 | @ready = @Promise.all [@_setup(@client, false), @_setup(@subscriber, true)] 26 | .then => @_loadScripts() 27 | .then => { @client, @subscriber } 28 | 29 | _setup: (client, sub) -> 30 | client.setMaxListeners 0 31 | new @Promise (resolve, reject) => 32 | client.on "error", (e) => @Events.trigger "error", e 33 | if sub 34 | client.on "message", (channel, message) => 35 | @limiters[channel]?._store.onMessage channel, message 36 | if client.ready then resolve() 37 | else client.once "ready", resolve 38 | 39 | _loadScript: (name) -> 40 | new @Promise (resolve, reject) => 41 | payload = Scripts.payload name 42 | @client.multi([["script", "load", payload]]).exec (err, replies) => 43 | if err? 
then return reject err 44 | @shas[name] = replies[0] 45 | resolve replies[0] 46 | 47 | _loadScripts: -> @Promise.all(Scripts.names.map (k) => @_loadScript k) 48 | 49 | __runCommand__: (cmd) -> 50 | await @ready 51 | new @Promise (resolve, reject) => 52 | @client.multi([cmd]).exec_atomic (err, replies) -> 53 | if err? then reject(err) else resolve(replies[0]) 54 | 55 | __addLimiter__: (instance) -> 56 | @Promise.all [instance.channel(), instance.channel_client()].map (channel) => 57 | new @Promise (resolve, reject) => 58 | handler = (chan) => 59 | if chan == channel 60 | @subscriber.removeListener "subscribe", handler 61 | @limiters[channel] = instance 62 | resolve() 63 | @subscriber.on "subscribe", handler 64 | @subscriber.subscribe channel 65 | 66 | __removeLimiter__: (instance) -> 67 | @Promise.all [instance.channel(), instance.channel_client()].map (channel) => 68 | unless @terminated 69 | await new @Promise (resolve, reject) => 70 | @subscriber.unsubscribe channel, (err, chan) -> 71 | if err? 
then return reject err 72 | if chan == channel then return resolve() 73 | delete @limiters[channel] 74 | 75 | __scriptArgs__: (name, id, args, cb) -> 76 | keys = Scripts.keys name, id 77 | [@shas[name], keys.length].concat keys, args, cb 78 | 79 | __scriptFn__: (name) -> 80 | @client.evalsha.bind(@client) 81 | 82 | disconnect: (flush=true) -> 83 | clearInterval(@limiters[k]._store.heartbeat) for k in Object.keys @limiters 84 | @limiters = {} 85 | @terminated = true 86 | 87 | @client.end flush 88 | @subscriber.end flush 89 | @Promise.resolve() 90 | 91 | module.exports = RedisConnection 92 | -------------------------------------------------------------------------------- /src/RedisDatastore.coffee: -------------------------------------------------------------------------------- 1 | parser = require "./parser" 2 | BottleneckError = require "./BottleneckError" 3 | RedisConnection = require "./RedisConnection" 4 | IORedisConnection = require "./IORedisConnection" 5 | 6 | class RedisDatastore 7 | constructor: (@instance, @storeOptions, storeInstanceOptions) -> 8 | @originalId = @instance.id 9 | @clientId = @instance._randomIndex() 10 | parser.load storeInstanceOptions, storeInstanceOptions, @ 11 | @clients = {} 12 | @capacityPriorityCounters = {} 13 | @sharedConnection = @connection? 
14 | 15 | @connection ?= if @instance.datastore == "redis" then new RedisConnection { @Redis, @clientOptions, @Promise, Events: @instance.Events } 16 | else if @instance.datastore == "ioredis" then new IORedisConnection { @Redis, @clientOptions, @clusterNodes, @Promise, Events: @instance.Events } 17 | 18 | @instance.connection = @connection 19 | @instance.datastore = @connection.datastore 20 | 21 | @ready = @connection.ready 22 | .then (@clients) => @runScript "init", @prepareInitSettings @clearDatastore 23 | .then => @connection.__addLimiter__ @instance 24 | .then => @runScript "register_client", [@instance.queued()] 25 | .then => 26 | (@heartbeat = setInterval => 27 | @runScript "heartbeat", [] 28 | .catch (e) => @instance.Events.trigger "error", e 29 | , @heartbeatInterval).unref?() 30 | @clients 31 | 32 | __publish__: (message) -> 33 | { client } = await @ready 34 | client.publish(@instance.channel(), "message:#{message.toString()}") 35 | 36 | onMessage: (channel, message) -> 37 | try 38 | pos = message.indexOf(":") 39 | [type, data] = [message.slice(0, pos), message.slice(pos+1)] 40 | if type == "capacity" 41 | await @instance._drainAll(if data.length > 0 then ~~data) 42 | else if type == "capacity-priority" 43 | [rawCapacity, priorityClient, counter] = data.split(":") 44 | capacity = if rawCapacity.length > 0 then ~~rawCapacity 45 | if priorityClient == @clientId 46 | drained = await @instance._drainAll(capacity) 47 | newCapacity = if capacity? 
then capacity - (drained or 0) else "" 48 | await @clients.client.publish(@instance.channel(), "capacity-priority:#{newCapacity}::#{counter}") 49 | else if priorityClient == "" 50 | clearTimeout @capacityPriorityCounters[counter] 51 | delete @capacityPriorityCounters[counter] 52 | @instance._drainAll(capacity) 53 | else 54 | @capacityPriorityCounters[counter] = setTimeout => 55 | try 56 | delete @capacityPriorityCounters[counter] 57 | await @runScript "blacklist_client", [priorityClient] 58 | await @instance._drainAll(capacity) 59 | catch e then @instance.Events.trigger "error", e 60 | , 1000 61 | else if type == "message" 62 | @instance.Events.trigger "message", data 63 | else if type == "blocked" 64 | await @instance._dropAllQueued() 65 | catch e then @instance.Events.trigger "error", e 66 | 67 | __disconnect__: (flush) -> 68 | clearInterval @heartbeat 69 | if @sharedConnection 70 | @connection.__removeLimiter__ @instance 71 | else 72 | @connection.disconnect flush 73 | 74 | runScript: (name, args) -> 75 | await @ready unless name == "init" or name == "register_client" 76 | new @Promise (resolve, reject) => 77 | all_args = [Date.now(), @clientId].concat args 78 | @instance.Events.trigger "debug", "Calling Redis script: #{name}.lua", all_args 79 | arr = @connection.__scriptArgs__ name, @originalId, all_args, (err, replies) -> 80 | if err? then return reject err 81 | return resolve replies 82 | @connection.__scriptFn__(name) arr... 83 | .catch (e) => 84 | if e.message == "SETTINGS_KEY_NOT_FOUND" 85 | if name == "heartbeat" then @Promise.resolve() 86 | else 87 | @runScript("init", @prepareInitSettings(false)) 88 | .then => @runScript(name, args) 89 | else if e.message == "UNKNOWN_CLIENT" 90 | @runScript("register_client", [@instance.queued()]) 91 | .then => @runScript(name, args) 92 | else @Promise.reject e 93 | 94 | prepareArray: (arr) -> (if x? 
then x.toString() else "") for x in arr 95 | 96 | prepareObject: (obj) -> 97 | arr = [] 98 | for k, v of obj then arr.push k, (if v? then v.toString() else "") 99 | arr 100 | 101 | prepareInitSettings: (clear) -> 102 | args = @prepareObject Object.assign({}, @storeOptions, { 103 | id: @originalId 104 | version: @instance.version 105 | groupTimeout: @timeout 106 | @clientTimeout 107 | }) 108 | args.unshift (if clear then 1 else 0), @instance.version 109 | args 110 | 111 | convertBool: (b) -> !!b 112 | 113 | __updateSettings__: (options) -> 114 | await @runScript "update_settings", @prepareObject options 115 | parser.overwrite options, options, @storeOptions 116 | 117 | __running__: -> @runScript "running", [] 118 | 119 | __queued__: -> @runScript "queued", [] 120 | 121 | __done__: -> @runScript "done", [] 122 | 123 | __groupCheck__: -> @convertBool await @runScript "group_check", [] 124 | 125 | __incrementReservoir__: (incr) -> @runScript "increment_reservoir", [incr] 126 | 127 | __currentReservoir__: -> @runScript "current_reservoir", [] 128 | 129 | __check__: (weight) -> @convertBool await @runScript "check", @prepareArray [weight] 130 | 131 | __register__: (index, weight, expiration) -> 132 | [success, wait, reservoir] = await @runScript "register", @prepareArray [index, weight, expiration] 133 | return { 134 | success: @convertBool(success), 135 | wait, 136 | reservoir 137 | } 138 | 139 | __submit__: (queueLength, weight) -> 140 | try 141 | [reachedHWM, blocked, strategy] = await @runScript "submit", @prepareArray [queueLength, weight] 142 | return { 143 | reachedHWM: @convertBool(reachedHWM), 144 | blocked: @convertBool(blocked), 145 | strategy 146 | } 147 | catch e 148 | if e.message.indexOf("OVERWEIGHT") == 0 149 | [overweight, weight, maxConcurrent] = e.message.split ":" 150 | throw new BottleneckError("Impossible to add a job having a weight of #{weight} to a limiter having a maxConcurrent setting of #{maxConcurrent}") 151 | else 152 | throw e 153 | 154 | 
__free__: (index, weight) -> 155 | running = await @runScript "free", @prepareArray [index] 156 | return { running } 157 | 158 | module.exports = RedisDatastore 159 | -------------------------------------------------------------------------------- /src/Scripts.coffee: -------------------------------------------------------------------------------- 1 | lua = require "./lua.json" 2 | 3 | headers = 4 | refs: lua["refs.lua"] 5 | validate_keys: lua["validate_keys.lua"] 6 | validate_client: lua["validate_client.lua"] 7 | refresh_expiration: lua["refresh_expiration.lua"] 8 | process_tick: lua["process_tick.lua"] 9 | conditions_check: lua["conditions_check.lua"] 10 | get_time: lua["get_time.lua"] 11 | 12 | exports.allKeys = (id) -> [ 13 | ### 14 | HASH 15 | ### 16 | "b_#{id}_settings" 17 | 18 | ### 19 | HASH 20 | job index -> weight 21 | ### 22 | "b_#{id}_job_weights" 23 | 24 | ### 25 | ZSET 26 | job index -> expiration 27 | ### 28 | "b_#{id}_job_expirations" 29 | 30 | ### 31 | HASH 32 | job index -> client 33 | ### 34 | "b_#{id}_job_clients" 35 | 36 | ### 37 | ZSET 38 | client -> sum running 39 | ### 40 | "b_#{id}_client_running" 41 | 42 | ### 43 | HASH 44 | client -> num queued 45 | ### 46 | "b_#{id}_client_num_queued" 47 | 48 | ### 49 | ZSET 50 | client -> last job registered 51 | ### 52 | "b_#{id}_client_last_registered" 53 | 54 | ### 55 | ZSET 56 | client -> last seen 57 | ### 58 | "b_#{id}_client_last_seen" 59 | ] 60 | 61 | templates = 62 | init: 63 | keys: exports.allKeys 64 | headers: ["process_tick"] 65 | refresh_expiration: true 66 | code: lua["init.lua"] 67 | group_check: 68 | keys: exports.allKeys 69 | headers: [] 70 | refresh_expiration: false 71 | code: lua["group_check.lua"] 72 | register_client: 73 | keys: exports.allKeys 74 | headers: ["validate_keys"] 75 | refresh_expiration: false 76 | code: lua["register_client.lua"] 77 | blacklist_client: 78 | keys: exports.allKeys 79 | headers: ["validate_keys", "validate_client"] 80 | refresh_expiration: false 81 | 
code: lua["blacklist_client.lua"] 82 | heartbeat: 83 | keys: exports.allKeys 84 | headers: ["validate_keys", "validate_client", "process_tick"] 85 | refresh_expiration: false 86 | code: lua["heartbeat.lua"] 87 | update_settings: 88 | keys: exports.allKeys 89 | headers: ["validate_keys", "validate_client", "process_tick"] 90 | refresh_expiration: true 91 | code: lua["update_settings.lua"] 92 | running: 93 | keys: exports.allKeys 94 | headers: ["validate_keys", "validate_client", "process_tick"] 95 | refresh_expiration: false 96 | code: lua["running.lua"] 97 | queued: 98 | keys: exports.allKeys 99 | headers: ["validate_keys", "validate_client"] 100 | refresh_expiration: false 101 | code: lua["queued.lua"] 102 | done: 103 | keys: exports.allKeys 104 | headers: ["validate_keys", "validate_client", "process_tick"] 105 | refresh_expiration: false 106 | code: lua["done.lua"] 107 | check: 108 | keys: exports.allKeys 109 | headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"] 110 | refresh_expiration: false 111 | code: lua["check.lua"] 112 | submit: 113 | keys: exports.allKeys 114 | headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"] 115 | refresh_expiration: true 116 | code: lua["submit.lua"] 117 | register: 118 | keys: exports.allKeys 119 | headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"] 120 | refresh_expiration: true 121 | code: lua["register.lua"] 122 | free: 123 | keys: exports.allKeys 124 | headers: ["validate_keys", "validate_client", "process_tick"] 125 | refresh_expiration: true 126 | code: lua["free.lua"] 127 | current_reservoir: 128 | keys: exports.allKeys 129 | headers: ["validate_keys", "validate_client", "process_tick"] 130 | refresh_expiration: false 131 | code: lua["current_reservoir.lua"] 132 | increment_reservoir: 133 | keys: exports.allKeys 134 | headers: ["validate_keys", "validate_client", "process_tick"] 135 | refresh_expiration: true 136 | code: 
lua["increment_reservoir.lua"] 137 | 138 | exports.names = Object.keys templates 139 | 140 | exports.keys = (name, id) -> 141 | templates[name].keys id 142 | 143 | exports.payload = (name) -> 144 | template = templates[name] 145 | Array::concat( 146 | headers.refs, 147 | template.headers.map((h) -> headers[h]), 148 | (if template.refresh_expiration then headers.refresh_expiration else ""), 149 | template.code 150 | ) 151 | .join("\n") 152 | -------------------------------------------------------------------------------- /src/States.coffee: -------------------------------------------------------------------------------- 1 | BottleneckError = require "./BottleneckError" 2 | class States 3 | constructor: (@status) -> 4 | @_jobs = {} 5 | @counts = @status.map(-> 0) 6 | 7 | next: (id) -> 8 | current = @_jobs[id] 9 | next = current + 1 10 | if current? and next < @status.length 11 | @counts[current]-- 12 | @counts[next]++ 13 | @_jobs[id]++ 14 | else if current? 15 | @counts[current]-- 16 | delete @_jobs[id] 17 | 18 | start: (id) -> 19 | initial = 0 20 | @_jobs[id] = initial 21 | @counts[initial]++ 22 | 23 | remove: (id) -> 24 | current = @_jobs[id] 25 | if current? 26 | @counts[current]-- 27 | delete @_jobs[id] 28 | current? 29 | 30 | jobStatus: (id) -> @status[@_jobs[id]] ? null 31 | 32 | statusJobs: (status) -> 33 | if status? 
34 | pos = @status.indexOf status 35 | if pos < 0 36 | throw new BottleneckError "status must be one of #{@status.join ', '}" 37 | k for k,v of @_jobs when v == pos 38 | else 39 | Object.keys @_jobs 40 | 41 | statusCounts: -> @counts.reduce(((acc, v, i) => acc[@status[i]] = v; acc), {}) 42 | 43 | module.exports = States 44 | -------------------------------------------------------------------------------- /src/Sync.coffee: -------------------------------------------------------------------------------- 1 | DLList = require "./DLList" 2 | class Sync 3 | constructor: (@name, @Promise) -> 4 | @_running = 0 5 | @_queue = new DLList() 6 | isEmpty: -> @_queue.length == 0 7 | _tryToRun: -> 8 | if (@_running < 1) and @_queue.length > 0 9 | @_running++ 10 | { task, args, resolve, reject } = @_queue.shift() 11 | cb = try 12 | returned = await task args... 13 | () -> resolve returned 14 | catch error 15 | () -> reject error 16 | @_running-- 17 | @_tryToRun() 18 | cb() 19 | schedule: (task, args...) => 20 | resolve = reject = null 21 | promise = new @Promise (_resolve, _reject) -> 22 | resolve = _resolve 23 | reject = _reject 24 | @_queue.push { task, args, resolve, reject } 25 | @_tryToRun() 26 | promise 27 | 28 | module.exports = Sync 29 | -------------------------------------------------------------------------------- /src/es5.coffee: -------------------------------------------------------------------------------- 1 | require("regenerator-runtime/runtime") 2 | 3 | module.exports = require "./Bottleneck" 4 | -------------------------------------------------------------------------------- /src/index.coffee: -------------------------------------------------------------------------------- 1 | module.exports = require "./Bottleneck" 2 | -------------------------------------------------------------------------------- /src/parser.coffee: -------------------------------------------------------------------------------- 1 | exports.load = (received, defaults, onto={}) -> 2 | for k, v 
of defaults 3 | onto[k] = received[k] ? v 4 | onto 5 | 6 | exports.overwrite = (received, defaults, onto={}) -> 7 | for k, v of received 8 | if defaults[k] != undefined 9 | onto[k] = v 10 | onto 11 | -------------------------------------------------------------------------------- /src/redis/blacklist_client.lua: -------------------------------------------------------------------------------- 1 | local blacklist = ARGV[num_static_argv + 1] 2 | 3 | if redis.call('zscore', client_last_seen_key, blacklist) then 4 | redis.call('zadd', client_last_seen_key, 0, blacklist) 5 | end 6 | 7 | 8 | return {} 9 | -------------------------------------------------------------------------------- /src/redis/check.lua: -------------------------------------------------------------------------------- 1 | local weight = tonumber(ARGV[num_static_argv + 1]) 2 | 3 | local capacity = process_tick(now, false)['capacity'] 4 | local nextRequest = tonumber(redis.call('hget', settings_key, 'nextRequest')) 5 | 6 | return conditions_check(capacity, weight) and nextRequest - now <= 0 7 | -------------------------------------------------------------------------------- /src/redis/conditions_check.lua: -------------------------------------------------------------------------------- 1 | local conditions_check = function (capacity, weight) 2 | return capacity == nil or weight <= capacity 3 | end 4 | -------------------------------------------------------------------------------- /src/redis/current_reservoir.lua: -------------------------------------------------------------------------------- 1 | return process_tick(now, false)['reservoir'] 2 | -------------------------------------------------------------------------------- /src/redis/done.lua: -------------------------------------------------------------------------------- 1 | process_tick(now, false) 2 | 3 | return tonumber(redis.call('hget', settings_key, 'done')) 4 | -------------------------------------------------------------------------------- 
/src/redis/free.lua: -------------------------------------------------------------------------------- 1 | local index = ARGV[num_static_argv + 1] 2 | 3 | redis.call('zadd', job_expirations_key, 0, index) 4 | 5 | return process_tick(now, false)['running'] 6 | -------------------------------------------------------------------------------- /src/redis/get_time.lua: -------------------------------------------------------------------------------- 1 | redis.replicate_commands() 2 | 3 | local get_time = function () 4 | local time = redis.call('time') 5 | 6 | return tonumber(time[1]..string.sub(time[2], 1, 3)) 7 | end 8 | -------------------------------------------------------------------------------- /src/redis/group_check.lua: -------------------------------------------------------------------------------- 1 | return not (redis.call('exists', settings_key) == 1) 2 | -------------------------------------------------------------------------------- /src/redis/heartbeat.lua: -------------------------------------------------------------------------------- 1 | process_tick(now, true) 2 | -------------------------------------------------------------------------------- /src/redis/increment_reservoir.lua: -------------------------------------------------------------------------------- 1 | local incr = tonumber(ARGV[num_static_argv + 1]) 2 | 3 | redis.call('hincrby', settings_key, 'reservoir', incr) 4 | 5 | local reservoir = process_tick(now, true)['reservoir'] 6 | 7 | local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout')) 8 | refresh_expiration(0, 0, groupTimeout) 9 | 10 | return reservoir 11 | -------------------------------------------------------------------------------- /src/redis/init.lua: -------------------------------------------------------------------------------- 1 | local clear = tonumber(ARGV[num_static_argv + 1]) 2 | local limiter_version = ARGV[num_static_argv + 2] 3 | local num_local_argv = num_static_argv + 2 4 | 5 | if clear == 1 then 6 
redis.call('del', unpack(KEYS))
end

if redis.call('exists', settings_key) == 0 then
  -- Create: store the caller-supplied settings, then the runtime counters.
  local args = {'hmset', settings_key}

  for i = num_local_argv + 1, #ARGV do
    table.insert(args, ARGV[i])
  end

  redis.call(unpack(args))
  redis.call('hmset', settings_key,
    'nextRequest', now,
    'lastReservoirRefresh', now,
    'lastReservoirIncrease', now,
    'running', 0,
    'done', 0,
    'unblockTime', 0,
    'capacityPriorityCounter', 0
  )

else
  -- Apply migrations
  local settings = redis.call('hmget', settings_key,
    'id',
    'version'
  )
  local id = settings[1]
  local current_version = settings[2]

  if current_version ~= limiter_version then
    -- Split the stored version string on '.' into numeric components.
    local version_digits = {}
    for k, v in string.gmatch(current_version, "([^.]+)") do
      table.insert(version_digits, tonumber(k))
    end

    -- 2.10.0
    if version_digits[2] < 10 then
      redis.call('hsetnx', settings_key, 'reservoirRefreshInterval', '')
      redis.call('hsetnx', settings_key, 'reservoirRefreshAmount', '')
      redis.call('hsetnx', settings_key, 'lastReservoirRefresh', '')
      redis.call('hsetnx', settings_key, 'done', 0)
      redis.call('hset', settings_key, 'version', '2.10.0')
    end

    -- 2.11.1
    if version_digits[2] < 11 or (version_digits[2] == 11 and version_digits[3] < 1) then
      if redis.call('hstrlen', settings_key, 'lastReservoirRefresh') == 0 then
        redis.call('hmset', settings_key,
          'lastReservoirRefresh', now,
          'version', '2.11.1'
        )
      end
    end

    -- 2.14.0: per-job hashes were renamed.
    if version_digits[2] < 14 then
      local old_running_key = 'b_'..id..'_running'
      local old_executing_key = 'b_'..id..'_executing'

      if redis.call('exists', old_running_key) == 1 then
        redis.call('rename', old_running_key, job_weights_key)
      end
      if redis.call('exists', old_executing_key) == 1 then
        redis.call('rename', old_executing_key, job_expirations_key)
      end
      redis.call('hset', settings_key, 'version', '2.14.0')
    end

    -- 2.15.2
    if version_digits[2] < 15 or (version_digits[2] == 15 and version_digits[3] < 2) then
      redis.call('hsetnx', settings_key, 'capacityPriorityCounter', 0)
      redis.call('hset', settings_key, 'version', '2.15.2')
    end

    -- 2.17.0
    if version_digits[2] < 17 then
      redis.call('hsetnx', settings_key, 'clientTimeout', 10000)
      redis.call('hset', settings_key, 'version', '2.17.0')
    end

    -- 2.18.0
    if version_digits[2] < 18 then
      redis.call('hsetnx', settings_key, 'reservoirIncreaseInterval', '')
      redis.call('hsetnx', settings_key, 'reservoirIncreaseAmount', '')
      redis.call('hsetnx', settings_key, 'reservoirIncreaseMaximum', '')
      redis.call('hsetnx', settings_key, 'lastReservoirIncrease', now)
      redis.call('hset', settings_key, 'version', '2.18.0')
    end

  end

  process_tick(now, false)
end

local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))
refresh_expiration(0, 0, groupTimeout)

return {}

-- /src/redis/process_tick.lua --------------------------------------------------
-- Core bookkeeping pass: reclaims expired jobs, refreshes/increases the
-- reservoir, drops unresponsive clients, and broadcasts capacity changes.
-- Returns { capacity, running, reservoir }.
local process_tick = function (now, always_publish)

  -- Capacity is the tightest of the concurrency headroom and the reservoir;
  -- nil means unlimited.
  local compute_capacity = function (maxConcurrent, running, reservoir)
    if maxConcurrent ~= nil and reservoir ~= nil then
      return math.min((maxConcurrent - running), reservoir)
    elseif maxConcurrent ~= nil then
      return maxConcurrent - running
    elseif reservoir ~= nil then
      return reservoir
    else
      return nil
    end
  end

  local settings = redis.call('hmget', settings_key,
    'id',
    'maxConcurrent',
    'running',
    'reservoir',
    'reservoirRefreshInterval',
    'reservoirRefreshAmount',
    'lastReservoirRefresh',
    'reservoirIncreaseInterval',
    'reservoirIncreaseAmount',
    'reservoirIncreaseMaximum',
    'lastReservoirIncrease',
    'capacityPriorityCounter',
    'clientTimeout'
  )
  local id = settings[1]
  local maxConcurrent = tonumber(settings[2])
  local running = tonumber(settings[3])
  local reservoir = tonumber(settings[4])
  local reservoirRefreshInterval = tonumber(settings[5])
  local reservoirRefreshAmount = tonumber(settings[6])
  local lastReservoirRefresh = tonumber(settings[7])
  local reservoirIncreaseInterval = tonumber(settings[8])
  local reservoirIncreaseAmount = tonumber(settings[9])
  local reservoirIncreaseMaximum = tonumber(settings[10])
  local lastReservoirIncrease = tonumber(settings[11])
  local capacityPriorityCounter = tonumber(settings[12])
  local clientTimeout = tonumber(settings[13])

  local initial_capacity = compute_capacity(maxConcurrent, running, reservoir)

  --
  -- Process 'running' changes
  --
  local expired = redis.call('zrangebyscore', job_expirations_key, '-inf', '('..now)

  if #expired > 0 then
    redis.call('zremrangebyscore', job_expirations_key, '-inf', '('..now)

    local flush_batch = function (batch, acc)
      local weights = redis.call('hmget', job_weights_key, unpack(batch))
              redis.call('hdel', job_weights_key, unpack(batch))
      local clients = redis.call('hmget', job_clients_key, unpack(batch))
              redis.call('hdel', job_clients_key, unpack(batch))

      -- Calculate sum of removed weights
      for i = 1, #weights do
        acc['total'] = acc['total'] + (tonumber(weights[i]) or 0)
      end

      -- Calculate sum of removed weights by client
      -- (fix: removed the unused `local client_weights = {}` dead local;
      -- the accumulator table in `acc` is the one actually used)
      for i = 1, #clients do
        local removed = tonumber(weights[i]) or 0
        if removed > 0 then
          acc['client_weights'][clients[i]] = (acc['client_weights'][clients[i]] or 0) + removed
        end
      end
    end

    local acc = {
      ['total'] = 0,
      ['client_weights'] = {}
    }
    local batch_size = 1000

    -- Flush expired jobs in batches, removing them from the job Hashes and
    -- accumulating the freed weights into `acc`.
    for i = 1, #expired, batch_size do
      local batch = {}
      for j = i, math.min(i + batch_size - 1, #expired) do
        table.insert(batch, expired[j])
      end

      flush_batch(batch, acc)
    end

    -- Apply the accumulated totals to the settings Hash and the per-client Zset.
    if acc['total'] > 0 then
      redis.call('hincrby', settings_key, 'done', acc['total'])
      running = tonumber(redis.call('hincrby', settings_key, 'running', -acc['total']))
    end

    for client, weight in pairs(acc['client_weights']) do
      redis.call('zincrby', client_running_key, -weight, client)
    end
  end

  --
  -- Process 'reservoir' changes
  --
  local reservoirRefreshActive = reservoirRefreshInterval ~= nil and reservoirRefreshAmount ~= nil
  if reservoirRefreshActive and now >= lastReservoirRefresh + reservoirRefreshInterval then
    reservoir = reservoirRefreshAmount
    redis.call('hmset', settings_key,
      'reservoir', reservoir,
      'lastReservoirRefresh', now
    )
  end

  local reservoirIncreaseActive = reservoirIncreaseInterval ~= nil and reservoirIncreaseAmount ~= nil
  if reservoirIncreaseActive and now >= lastReservoirIncrease + reservoirIncreaseInterval then
    -- Credit all fully-elapsed intervals at once, capped at the maximum.
    local num_intervals = math.floor((now - lastReservoirIncrease) / reservoirIncreaseInterval)
    local incr = reservoirIncreaseAmount * num_intervals
    if reservoirIncreaseMaximum ~= nil then
      incr = math.min(incr, reservoirIncreaseMaximum - (reservoir or 0))
    end
    if incr > 0 then
      reservoir = (reservoir or 0) + incr
    end
    redis.call('hmset', settings_key,
      'reservoir', reservoir,
      'lastReservoirIncrease', lastReservoirIncrease + (num_intervals * reservoirIncreaseInterval)
    )
  end

  --
  -- Clear unresponsive clients
  --
  local unresponsive = redis.call('zrangebyscore', client_last_seen_key, '-inf', (now - clientTimeout))
  local unresponsive_lookup = {}
  local terminated_clients = {}
  for i = 1, #unresponsive do
    unresponsive_lookup[unresponsive[i]] = true
    -- Only fully drop clients with no running jobs left.
    if tonumber(redis.call('zscore', client_running_key, unresponsive[i])) == 0 then
      table.insert(terminated_clients, unresponsive[i])
    end
  end
  if #terminated_clients > 0 then
    redis.call('zrem', client_running_key, unpack(terminated_clients))
    redis.call('hdel', client_num_queued_key, unpack(terminated_clients))
    redis.call('zrem', client_last_registered_key, unpack(terminated_clients))
    redis.call('zrem', client_last_seen_key, unpack(terminated_clients))
  end

  --
  -- Broadcast capacity changes
  --
  local final_capacity = compute_capacity(maxConcurrent, running, reservoir)

  if always_publish or (initial_capacity ~= nil and final_capacity == nil) then
    -- always_publish or was not unlimited, now unlimited
    redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))

  elseif initial_capacity ~= nil and final_capacity ~= nil and final_capacity > initial_capacity then
    -- capacity was increased
    -- send the capacity message to the limiter having the lowest number of running jobs
    -- the tiebreaker is the limiter having not registered a job in the longest time

    local lowest_concurrency_value = nil
    local lowest_concurrency_clients = {}
    local lowest_concurrency_last_registered = {}
    local client_concurrencies = redis.call('zrange', client_running_key, 0, -1, 'withscores')

    for i = 1, #client_concurrencies, 2 do
      local client = client_concurrencies[i]
      local concurrency = tonumber(client_concurrencies[i+1])

      if (
        lowest_concurrency_value == nil or lowest_concurrency_value == concurrency
      ) and (
        not unresponsive_lookup[client]
      ) and (
        tonumber(redis.call('hget', client_num_queued_key, client)) > 0
      ) then
        lowest_concurrency_value = concurrency
        table.insert(lowest_concurrency_clients, client)
        local last_registered = tonumber(redis.call('zscore', client_last_registered_key, client))
        table.insert(lowest_concurrency_last_registered, last_registered)
      end
    end

    if #lowest_concurrency_clients > 0 then
      local position = 1
      local earliest = lowest_concurrency_last_registered[1]

      for i,v in ipairs(lowest_concurrency_last_registered) do
        if v < earliest then
          position = i
          earliest = v
        end
      end

      local next_client = lowest_concurrency_clients[position]
      redis.call('publish', 'b_'..id,
        'capacity-priority:'..(final_capacity or '')..
        ':'..next_client..
        ':'..capacityPriorityCounter
      )
      redis.call('hincrby', settings_key, 'capacityPriorityCounter', '1')
    else
      redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))
    end
  end

  return {
    ['capacity'] = final_capacity,
    ['running'] = running,
    ['reservoir'] = reservoir
  }
end

-- /src/redis/queued.lua --------------------------------------------------------
-- Sum the queued-job counts of all clients seen within the client timeout.
local clientTimeout = tonumber(redis.call('hget', settings_key, 'clientTimeout'))
local valid_clients = redis.call('zrangebyscore', client_last_seen_key, (now - clientTimeout), 'inf')

-- Fix: HMGET requires at least one field; calling it with an empty unpack()
-- is a Redis error. No valid clients means nothing is queued.
if #valid_clients == 0 then
  return 0
end

local client_queued = redis.call('hmget', client_num_queued_key, unpack(valid_clients))

local sum = 0
for i = 1, #client_queued do
  -- Fix: guard against a missing hash field (HMGET returns nil for it).
  sum = sum + (tonumber(client_queued[i]) or 0)
end

return sum

-- /src/redis/refresh_expiration.lua:
--------------------------------------------------------------------------------
-- /src/redis/refresh_expiration.lua --------------------------------------------
-- When the limiter belongs to a Group (groupTimeout set), push the TTL of every
-- limiter key past the next scheduled request.
local refresh_expiration = function (now, nextRequest, groupTimeout)

  if groupTimeout ~= nil then
    local ttl = (nextRequest + groupTimeout) - now

    for i = 1, #KEYS do
      redis.call('pexpire', KEYS[i], ttl)
    end
  end

end

-- /src/redis/refs.lua ----------------------------------------------------------
-- Shared bindings prepended to every script: key layout and static arguments.
local settings_key = KEYS[1]
local job_weights_key = KEYS[2]
local job_expirations_key = KEYS[3]
local job_clients_key = KEYS[4]
local client_running_key = KEYS[5]
local client_num_queued_key = KEYS[6]
local client_last_registered_key = KEYS[7]
local client_last_seen_key = KEYS[8]

local now = tonumber(ARGV[1])
local client = ARGV[2]

local num_static_argv = 2

-- /src/redis/register.lua ------------------------------------------------------
-- Try to start a job: on success, account for its weight/expiration and return
-- {true, wait, reservoir}; otherwise return {false}.
local index = ARGV[num_static_argv + 1]
local weight = tonumber(ARGV[num_static_argv + 2])
local expiration = tonumber(ARGV[num_static_argv + 3])

local state = process_tick(now, false)
local capacity = state['capacity']
local reservoir = state['reservoir']

local settings = redis.call('hmget', settings_key,
  'nextRequest',
  'minTime',
  'groupTimeout'
)
local nextRequest = tonumber(settings[1])
local minTime = tonumber(settings[2])
local groupTimeout = tonumber(settings[3])

if conditions_check(capacity, weight) then

  redis.call('hincrby', settings_key, 'running', weight)
  redis.call('hset', job_weights_key, index, weight)
  if expiration ~= nil then
    redis.call('zadd', job_expirations_key, now + expiration, index)
  end
  redis.call('hset', job_clients_key, index, client)
  redis.call('zincrby', client_running_key, weight, client)
  redis.call('hincrby', client_num_queued_key, client, -1)
  redis.call('zadd', client_last_registered_key, now, client)

  local wait = math.max(nextRequest - now, 0)
  local newNextRequest = now + wait + minTime

  if reservoir == nil then
    redis.call('hset', settings_key,
      'nextRequest', newNextRequest
    )
  else
    reservoir = reservoir - weight
    redis.call('hmset', settings_key,
      'reservoir', reservoir,
      'nextRequest', newNextRequest
    )
  end

  refresh_expiration(now, newNextRequest, groupTimeout)

  return {true, wait, reservoir}

else
  return {false}
end

-- /src/redis/register_client.lua -----------------------------------------------
-- Record a client's existence and queue length; always bump its last-seen time.
local queued = tonumber(ARGV[num_static_argv + 1])

-- Could have been re-registered concurrently
if not redis.call('zscore', client_last_seen_key, client) then
  redis.call('zadd', client_running_key, 0, client)
  redis.call('hset', client_num_queued_key, client, queued)
  redis.call('zadd', client_last_registered_key, 0, client)
end

redis.call('zadd', client_last_seen_key, now, client)

return {}

-- /src/redis/running.lua -------------------------------------------------------
return process_tick(now, false)['running']

-- /src/redis/submit.lua --------------------------------------------------------
-- Decide whether a submission is accepted, dropped (high-water mark) or blocked
-- (strategy 3). Returns {reachedHWM, blocked, strategy}.
local queueLength = tonumber(ARGV[num_static_argv + 1])
local weight = tonumber(ARGV[num_static_argv + 2])

local capacity = process_tick(now, false)['capacity']

local settings = redis.call('hmget', settings_key,
  'id',
  'maxConcurrent',
  'highWater',
  'nextRequest',
  'strategy',
  'unblockTime',
  'penalty',
  'minTime',
  'groupTimeout'
)
local id = settings[1]
local maxConcurrent = tonumber(settings[2])
local highWater = tonumber(settings[3])
local nextRequest = tonumber(settings[4])
local strategy = tonumber(settings[5])
local unblockTime = tonumber(settings[6])
local penalty = tonumber(settings[7])
local minTime = tonumber(settings[8])
local groupTimeout = tonumber(settings[9])

if maxConcurrent ~= nil and weight > maxConcurrent then
  return redis.error_reply('OVERWEIGHT:'..weight..':'..maxConcurrent)
end

local reachedHWM = (highWater ~= nil and queueLength == highWater
  and not (
    conditions_check(capacity, weight)
    and nextRequest - now <= 0
  )
)

local blocked = strategy == 3 and (reachedHWM or unblockTime >= now)

if blocked then
  -- Default penalty: 15x minTime, or 5000ms when minTime is 0.
  local computedPenalty = penalty
  if computedPenalty == nil then
    if minTime == 0 then
      computedPenalty = 5000
    else
      computedPenalty = 15 * minTime
    end
  end

  local newNextRequest = now + computedPenalty + minTime

  redis.call('hmset', settings_key,
    'unblockTime', now + computedPenalty,
    'nextRequest', newNextRequest
  )

  -- Reset every client's queued count to 0.
  local clients_queued_reset = redis.call('hkeys', client_num_queued_key)
  local queued_reset = {}
  for i = 1, #clients_queued_reset do
    table.insert(queued_reset, clients_queued_reset[i])
    table.insert(queued_reset, 0)
  end
  -- Fix: HMSET with an empty field list is a Redis error; skip when there are
  -- no registered clients.
  if #queued_reset > 0 then
    redis.call('hmset', client_num_queued_key, unpack(queued_reset))
  end

  redis.call('publish', 'b_'..id, 'blocked:')

  refresh_expiration(now, newNextRequest, groupTimeout)
end

if not blocked and not reachedHWM then
  redis.call('hincrby', client_num_queued_key, client, 1)
end

return {reachedHWM, blocked, strategy}

-- /src/redis/update_settings.lua -----------------------------------------------
-- Overwrite settings fields from ARGV, tick (publishing), and refresh TTLs.
local args = {'hmset', settings_key}

for i = num_static_argv + 1, #ARGV do
  table.insert(args, ARGV[i])
end

redis.call(unpack(args))

process_tick(now, true)

local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))
refresh_expiration(0, 0, groupTimeout)

return {}

-- /src/redis/validate_client.lua -----------------------------------------------
if not redis.call('zscore', client_last_seen_key, client) then
  return redis.error_reply('UNKNOWN_CLIENT')
end

redis.call('zadd', client_last_seen_key, now, client)

-- /src/redis/validate_keys.lua -------------------------------------------------
if not (redis.call('exists', settings_key) == 1) then
  return redis.error_reply('SETTINGS_KEY_NOT_FOUND')
end

-- /test.ts ----------------------------------------------------------------------
/// <reference path="bottleneck.d.ts" />

import Bottleneck from "bottleneck";
// import * as assert from "assert";
function assert(b: boolean): void { }

/*
This file is run by scripts/build.sh.
It is used to validate the typings in bottleneck.d.ts.
The command is: tsc --noEmit --strictNullChecks test.ts
This file cannot be run directly.
In order to do that, you must comment out the first line,
and change "bottleneck" to "." on the third line.
*/

// Callback-style task: invokes cb with a string derived from `foo`.
function withCb(foo: number, bar: () => void, cb: (err: any, result: string) => void) {
  let s: string = `cb ${foo}`;
  cb(null, s);
}

console.log(Bottleneck);

let limiter = new Bottleneck({
  maxConcurrent: 5,
  minTime: 1000,
  highWater: 20,
  strategy: Bottleneck.strategy.LEAK,
  reservoirRefreshInterval: 1000 * 60,
  reservoirRefreshAmount: 10,
  reservoirIncreaseInterval: 1000 * 60,
  reservoirIncreaseAmount: 2,
  reservoirIncreaseMaximum: 15
});

limiter.ready().then(() => { console.log('Ready') });
limiter.clients().client;
limiter.disconnect();

limiter.currentReservoir().then(function (x) {
  if (x != null) {
    let i: number = x;
  }
});

limiter.incrementReservoir(5).then(function (x) {
  if (x != null) {
    let i: number = x;
  }
});

limiter.running().then(function (x) {
  let i: number = x;
});

limiter.clusterQueued().then(function (x) {
  let i: number = x;
});

limiter.done().then(function (x) {
  let i: number = x;
});

limiter.submit(withCb, 1, () => {}, (err, result) => {
  let s: string = result;
  console.log(s);
  assert(s == "cb 1");
});

// Promise-style task: resolves to a string derived from `foo`.
// (fix: restored the generic argument stripped during extraction)
function withPromise(foo: number, bar: () => void): PromiseLike<string> {
  let s: string = `promise ${foo}`;
  return Promise.resolve(s);
}

let foo: Promise<string> = limiter.schedule(withPromise, 1, () => {});
foo.then(function (result: string) {
  let s: string = result;
  console.log(s);
  assert(s == "promise 1");
});

limiter.on("message", (msg) => console.log(msg));

limiter.publish(JSON.stringify({ a: "abc", b: { c: 123 }}));

function checkEventInfo(info: Bottleneck.EventInfo) {
  const numArgs: number = info.args.length;
  const id: string = info.options.id;
}

limiter.on('dropped', (info) => {
  checkEventInfo(info)
  const task: Function = info.task;
  const promise: Promise<any> = info.promise;
})

limiter.on('received', (info) => {
  checkEventInfo(info)
})

limiter.on('queued', (info) => {
  checkEventInfo(info)
  const blocked: boolean = info.blocked;
  const reachedHWM: boolean = info.reachedHWM;
})

limiter.on('scheduled', (info) => {
  checkEventInfo(info)
})

limiter.on('executing', (info) => {
  checkEventInfo(info)
  const count: number = info.retryCount;
})

limiter.on('failed', (error, info) => {
  checkEventInfo(info)
  const message: string = error.message;
  const count: number = info.retryCount;
  return Promise.resolve(10)
})

limiter.on('failed', (error, info) => {
  checkEventInfo(info)
  const message: string = error.message;
  const count: number = info.retryCount;
  return Promise.resolve(null)
})

limiter.on('failed', (error, info) => {
  checkEventInfo(info)
  const message: string = error.message;
  const count: number = info.retryCount;
  return Promise.resolve()
})

limiter.on('failed', (error, info) => {
  checkEventInfo(info)
  const message: string = error.message;
  const count: number = info.retryCount;
  return 10
})

limiter.on('failed', (error, info) => {
  checkEventInfo(info)
  const message: string = error.message;
  const count: number = info.retryCount;
  return null
})

limiter.on('failed', (error, info) => {
  checkEventInfo(info)
  const message: string = error.message;
  const count: number = info.retryCount;
})

limiter.on('retry', (message: string, info) => {
  checkEventInfo(info)
  const count: number = info.retryCount;
})

limiter.on('done', (info) => {
  checkEventInfo(info)
const count: number = info.retryCount;
})

let group = new Bottleneck.Group({
  maxConcurrent: 5,
  minTime: 1000,
  highWater: 10,
  strategy: Bottleneck.strategy.LEAK,
  datastore: "ioredis",
  clearDatastore: true,
  clientOptions: {},
  clusterNodes: []
});

group.on('created', (limiter, key) => {
  assert(limiter.empty())
  assert(key.length > 0)
})

group.key("foo").submit(withCb, 2, () => {}, (err, result) => {
  let s: string = `${result} foo`;
  console.log(s);
  assert(s == "cb 2 foo");
});

group.key("bar").submit({ priority: 4 }, withCb, 3, () => {}, (err, result) => {
  let s: string = `${result} bar`;
  console.log(s);
  // fix: the string built above ends in " bar", not " foo"
  assert(s == "cb 3 bar");
});

// fix: restored Promise<string> generic arguments stripped during extraction
let f1: Promise<string> = group.key("pizza").schedule(withPromise, 2, () => {});
f1.then(function (result: string) {
  let s: string = result;
  console.log(s);
  assert(s == "promise 2");
});

let f2: Promise<string> = group.key("pie").schedule({ priority: 4 }, withPromise, 3, () => {});
f2.then(function (result: string) {
  let s: string = result;
  console.log(s);
  assert(s == "promise 3");
});

let wrapped = limiter.wrap((a: number, b: number) => {
  let s: string = `Total: ${a + b}`;
  return Promise.resolve(s);
});

wrapped(1, 2).then((x) => {
  let s: string = x;
  console.log(s);
  assert(s == "Total: 3");
});

wrapped.withOptions({ priority: 1, id: 'some-id' }, 9, 9).then((x) => {
  let s: string = x;
  console.log(s);
  assert(s == "Total: 18");
})

let counts = limiter.counts();
console.log(`${counts.EXECUTING + 2}`);
console.log(limiter.jobStatus('some-id'))
console.log(limiter.jobs());
console.log(limiter.jobs(Bottleneck.Status.RUNNING));


group.deleteKey("pizza")
.then(function (deleted: boolean) { 234 | console.log(deleted) 235 | }); 236 | group.updateSettings({ timeout: 5, maxConcurrent: null, reservoir: null }); 237 | 238 | let keys: string[] = group.keys(); 239 | assert(keys.length == 3); 240 | 241 | group.clusterKeys() 242 | .then(function (allKeys: string[]) { 243 | let count = allKeys.length; 244 | }) 245 | 246 | let queued: number = limiter.chain(group.key("pizza")).queued(); 247 | 248 | limiter.stop({ 249 | dropWaitingJobs: true, 250 | dropErrorMessage: "Begone!", 251 | enqueueErrorMessage: "Denied!" 252 | }).then(() => { 253 | console.log('All stopped.') 254 | }) 255 | 256 | wrapped(4, 5).catch((e) => { 257 | assert(e.message === "Denied!") 258 | }) 259 | 260 | const id: string = limiter.id; 261 | const datastore: string = limiter.datastore; 262 | const channel: string = limiter.channel(); 263 | 264 | const redisConnection = new Bottleneck.RedisConnection({ 265 | client: "NodeRedis client object", 266 | clientOptions: {} 267 | }) 268 | 269 | redisConnection.ready() 270 | .then(function (redisConnectionClients) { 271 | const client = redisConnectionClients.client; 272 | const subscriber = redisConnectionClients.subscriber; 273 | }) 274 | 275 | redisConnection.on("error", (err) => { 276 | console.log(err.message) 277 | }) 278 | 279 | const limiterWithConn = new Bottleneck({ 280 | connection: redisConnection 281 | }) 282 | 283 | const ioredisConnection = new Bottleneck.IORedisConnection({ 284 | client: "ioredis client object", 285 | clientOptions: {}, 286 | clusterNodes: [] 287 | }) 288 | 289 | ioredisConnection.ready() 290 | .then(function (ioredisConnectionClients) { 291 | const client = ioredisConnectionClients.client; 292 | const subscriber = ioredisConnectionClients.subscriber; 293 | }) 294 | 295 | ioredisConnection.on("error", (err: Bottleneck.BottleneckError) => { 296 | console.log(err.message) 297 | }) 298 | 299 | const groupWithConn = new Bottleneck.Group({ 300 | connection: ioredisConnection 301 | }) 302 | 
303 | const limiterWithConnFromGroup = new Bottleneck({ 304 | connection: groupWithConn.connection 305 | }) 306 | 307 | const groupWithConnFromLimiter = new Bottleneck.Group({ 308 | connection: limiterWithConn.connection 309 | }) 310 | 311 | 312 | const batcher = new Bottleneck.Batcher({ 313 | maxTime: 1000, 314 | maxSize: 10 315 | }) 316 | 317 | batcher.on("batch", (batch) => { 318 | const len: number = batch.length 319 | console.log("Number of elements:", len) 320 | }) 321 | 322 | batcher.on("error", (err: Bottleneck.BottleneckError) => { 323 | console.log(err.message) 324 | }) 325 | 326 | batcher.add("abc") 327 | batcher.add({ xyz: 5 }) 328 | .then(() => console.log("Flushed!")) 329 | 330 | const object = {} 331 | const emitter = new Bottleneck.Events(object) 332 | const listenerCount: number = emitter.listenerCount('info') 333 | emitter.trigger('info', 'hello', 'world', 123).then(function (result) { 334 | console.log(result) 335 | }) 336 | -------------------------------------------------------------------------------- /test/DLList.js: -------------------------------------------------------------------------------- 1 | var DLList = require('../lib/DLList') 2 | var assert = require('assert') 3 | var c = require('./context')({datastore: 'local'}) 4 | 5 | var fakeQueues = function () { 6 | this._length = 0 7 | this.incr = () => this._length++ 8 | this.decr = () => this._length-- 9 | this.fns = [this.incr, this.decr] 10 | } 11 | 12 | describe('DLList', function () { 13 | 14 | it('Should be created and be empty', function () { 15 | var queues = new fakeQueues() 16 | var list = new DLList() 17 | c.mustEqual(list.getArray().length, 0) 18 | }) 19 | 20 | it('Should be possible to append once', function () { 21 | var queues = new fakeQueues() 22 | var list = new DLList(...queues.fns) 23 | list.push(5) 24 | var arr = list.getArray() 25 | c.mustEqual(arr.length, 1) 26 | c.mustEqual(list.length, 1) 27 | c.mustEqual(queues._length, 1) 28 | c.mustEqual(arr[0], 5) 29 | }) 30 | 
31 | it('Should be possible to append multiple times', function () { 32 | var queues = new fakeQueues() 33 | var list = new DLList(...queues.fns) 34 | list.push(5) 35 | list.push(6) 36 | var arr = list.getArray() 37 | c.mustEqual(arr.length, 2) 38 | c.mustEqual(list.length, 2) 39 | c.mustEqual(queues._length, 2) 40 | c.mustEqual(arr[0], 5) 41 | c.mustEqual(arr[1], 6) 42 | 43 | list.push(10) 44 | 45 | arr = list.getArray() 46 | c.mustEqual(arr.length, 3) 47 | c.mustEqual(list.length, 3) 48 | c.mustEqual(arr[0], 5) 49 | c.mustEqual(arr[1], 6) 50 | c.mustEqual(arr[2], 10) 51 | }) 52 | 53 | it('Should be possible to shift an empty list', function () { 54 | var queues = new fakeQueues() 55 | var list = new DLList(...queues.fns) 56 | c.mustEqual(list.length, 0) 57 | assert(list.shift() === undefined) 58 | var arr = list.getArray() 59 | c.mustEqual(arr.length, 0) 60 | c.mustEqual(list.length, 0) 61 | assert(list.shift() === undefined) 62 | arr = list.getArray() 63 | c.mustEqual(arr.length, 0) 64 | c.mustEqual(list.length, 0) 65 | c.mustEqual(queues._length, 0) 66 | }) 67 | 68 | it('Should be possible to append then shift once', function () { 69 | var queues = new fakeQueues() 70 | var list = new DLList(...queues.fns) 71 | list.push(5) 72 | c.mustEqual(list.length, 1) 73 | c.mustEqual(list.shift(), 5) 74 | var arr = list.getArray() 75 | c.mustEqual(arr.length, 0) 76 | c.mustEqual(list.length, 0) 77 | c.mustEqual(queues._length, 0) 78 | }) 79 | 80 | it('Should be possible to append then shift multiple times', function () { 81 | var queues = new fakeQueues() 82 | var list = new DLList(...queues.fns) 83 | list.push(5) 84 | c.mustEqual(list.length, 1) 85 | c.mustEqual(list.shift(), 5) 86 | c.mustEqual(list.length, 0) 87 | 88 | list.push(6) 89 | c.mustEqual(list.length, 1) 90 | c.mustEqual(list.shift(), 6) 91 | c.mustEqual(list.length, 0) 92 | c.mustEqual(queues._length, 0) 93 | }) 94 | 95 | it('Should pass a full test', function () { 96 | var queues = new fakeQueues() 97 | var 
list = new DLList(...queues.fns) 98 | list.push(10) 99 | c.mustEqual(list.length, 1) 100 | list.push("11") 101 | c.mustEqual(list.length, 2) 102 | list.push(12) 103 | c.mustEqual(list.length, 3) 104 | c.mustEqual(queues._length, 3) 105 | 106 | c.mustEqual(list.shift(), 10) 107 | c.mustEqual(list.length, 2) 108 | c.mustEqual(list.shift(),"11") 109 | c.mustEqual(list.length, 1) 110 | 111 | list.push(true) 112 | c.mustEqual(list.length, 2) 113 | 114 | var arr = list.getArray() 115 | c.mustEqual(arr[0], 12) 116 | c.mustEqual(arr[1], true) 117 | c.mustEqual(arr.length, 2) 118 | c.mustEqual(queues._length, 2) 119 | }) 120 | 121 | it('Should return the first value without shifting', function () { 122 | var queues = new fakeQueues() 123 | var list = new DLList(...queues.fns) 124 | assert(list.first() === undefined) 125 | assert(list.first() === undefined) 126 | 127 | list.push(1) 128 | c.mustEqual(list.first(), 1) 129 | c.mustEqual(list.first(), 1) 130 | 131 | list.push(2) 132 | c.mustEqual(list.first(), 1) 133 | c.mustEqual(list.first(), 1) 134 | 135 | c.mustEqual(list.shift(), 1) 136 | c.mustEqual(list.first(), 2) 137 | c.mustEqual(list.first(), 2) 138 | 139 | c.mustEqual(list.shift(), 2) 140 | assert(list.first() === undefined) 141 | assert(list.first() === undefined) 142 | 143 | assert(list.first() === undefined) 144 | assert(list.shift() === undefined) 145 | assert(list.first() === undefined) 146 | }) 147 | 148 | }) 149 | -------------------------------------------------------------------------------- /test/batcher.js: -------------------------------------------------------------------------------- 1 | var makeTest = require('./context') 2 | var Bottleneck = require('./bottleneck') 3 | var assert = require('assert') 4 | 5 | describe('Batcher', function () { 6 | var c 7 | 8 | afterEach(function () { 9 | return c.limiter.disconnect(false) 10 | }) 11 | 12 | it('Should batch by time and size', function () { 13 | c = makeTest() 14 | var batcher = new Bottleneck.Batcher({ 
15 | maxTime: 50, 16 | maxSize: 3 17 | }) 18 | var t0 = Date.now() 19 | var batches = [] 20 | 21 | batcher.on('batch', function (batcher) { 22 | batches.push(batcher) 23 | }) 24 | 25 | return Promise.all([ 26 | batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 1)), 27 | batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2)), 28 | batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3)), 29 | batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 4)), 30 | batcher.add(5).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 5)) 31 | ]) 32 | .then(function (data) { 33 | c.mustEqual( 34 | data.map((([t, x]) => [Math.floor(t / 50), x])), 35 | [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]] 36 | ) 37 | 38 | return c.last() 39 | }) 40 | .then(function (results) { 41 | c.checkDuration(50, 20) 42 | c.mustEqual(batches, [[1, 2, 3], [4, 5]]) 43 | }) 44 | }) 45 | 46 | it('Should batch by time', function () { 47 | c = makeTest() 48 | var batcher = new Bottleneck.Batcher({ 49 | maxTime: 50 50 | }) 51 | var t0 = Date.now() 52 | var batches = [] 53 | 54 | batcher.on('batch', function (batcher) { 55 | batches.push(batcher) 56 | }) 57 | 58 | return Promise.all([ 59 | batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 1)), 60 | batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2)) 61 | ]) 62 | .then(function (data) { 63 | c.mustEqual( 64 | data.map((([t, x]) => [Math.floor(t / 50), x])), 65 | [[1, 1], [1, 2]] 66 | ) 67 | 68 | return Promise.all([ 69 | batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3)), 70 | batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 4)) 71 | ]) 72 | }) 73 | .then(function (data) { 74 | c.mustEqual( 75 | data.map((([t, x]) => [Math.floor(t / 50), x])), 76 | [[2, 3], [2, 4]] 77 | ) 78 | 79 | return c.last() 80 | }) 81 | 
.then(function (results) { 82 | c.checkDuration(100) 83 | c.mustEqual(batches, [[1, 2], [3, 4]]) 84 | }) 85 | }) 86 | 87 | it('Should batch by size', function () { 88 | c = makeTest() 89 | var batcher = new Bottleneck.Batcher({ 90 | maxSize: 2 91 | }) 92 | var batches = [] 93 | 94 | batcher.on('batch', function (batcher) { 95 | batches.push(batcher) 96 | }) 97 | 98 | return Promise.all([ 99 | batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, 1)), 100 | batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, 2)) 101 | ]) 102 | .then(function () { 103 | return Promise.all([ 104 | batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, 3)), 105 | batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, 4)) 106 | ]) 107 | }) 108 | .then(c.last) 109 | .then(function (results) { 110 | c.checkDuration(0) 111 | c.mustEqual(batches, [[1, 2], [3, 4]]) 112 | }) 113 | }) 114 | 115 | it('Should stagger flushes', function () { 116 | c = makeTest() 117 | var batcher = new Bottleneck.Batcher({ 118 | maxTime: 50, 119 | maxSize: 3 120 | }) 121 | var t0 = Date.now() 122 | var batches = [] 123 | 124 | batcher.on('batch', function (batcher) { 125 | batches.push(batcher) 126 | }) 127 | 128 | return Promise.all([ 129 | batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 1)), 130 | batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2)) 131 | ]) 132 | .then(function (data) { 133 | c.mustEqual( 134 | data.map((([t, x]) => [Math.floor(t / 50), x])), 135 | [[1, 1], [1, 2]] 136 | ) 137 | 138 | var promises = [] 139 | promises.push(batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3))) 140 | 141 | return c.wait(10) 142 | .then(function () { 143 | promises.push(batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 4))) 144 | 145 | return Promise.all(promises) 146 | }) 147 | }) 148 | .then(function (data) { 149 | c.mustEqual( 150 | data.map((([t, x]) => 
[Math.floor(t / 50), x])), 151 | [[2, 3], [2, 4]] 152 | ) 153 | 154 | return c.last() 155 | }) 156 | .then(function (results) { 157 | c.checkDuration(120, 20) 158 | c.mustEqual(batches, [[1, 2], [3, 4]]) 159 | }) 160 | }) 161 | 162 | it('Should force then stagger flushes', function () { 163 | c = makeTest() 164 | var batcher = new Bottleneck.Batcher({ 165 | maxTime: 50, 166 | maxSize: 3 167 | }) 168 | var t0 = Date.now() 169 | var batches = [] 170 | 171 | batcher.on('batch', function (batcher) { 172 | batches.push(batcher) 173 | }) 174 | 175 | var promises = [] 176 | promises.push(batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 1))) 177 | promises.push(batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2))) 178 | 179 | return c.wait(10) 180 | .then(function () { 181 | promises.push(batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3))) 182 | 183 | return Promise.all(promises) 184 | }) 185 | .then(function (data) { 186 | c.mustEqual( 187 | data.map((([t, x]) => [Math.floor(t / 50), x])), 188 | [[0, 1], [0, 2], [0, 3]] 189 | ) 190 | 191 | return Promise.all([ 192 | batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 4)), 193 | batcher.add(5).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 5)), 194 | ]) 195 | }) 196 | .then(function (data) { 197 | c.mustEqual( 198 | data.map((([t, x]) => [Math.floor(t / 50), x])), 199 | [[1, 4], [1, 5]] 200 | ) 201 | 202 | return c.last() 203 | }) 204 | .then(function (results) { 205 | c.checkDuration(85, 25) 206 | c.mustEqual(batches, [[1, 2, 3], [4, 5]]) 207 | }) 208 | }) 209 | }) 210 | -------------------------------------------------------------------------------- /test/bottleneck.js: -------------------------------------------------------------------------------- 1 | if (process.env.BUILD === 'es5') { 2 | module.exports = require('../es5.js') 3 | } else if (process.env.BUILD === 'light') { 4 | 
module.exports = require('../light.js') 5 | } else { 6 | module.exports = require('../lib/index.js') 7 | } 8 | -------------------------------------------------------------------------------- /test/context.js: -------------------------------------------------------------------------------- 1 | global.TEST = true 2 | var Bottleneck = require('./bottleneck') 3 | var assert = require('assert') 4 | 5 | module.exports = function (options={}) { 6 | var mustEqual = function (a, b) { 7 | var strA = JSON.stringify(a) 8 | var strB = JSON.stringify(b) 9 | if (strA !== strB) { 10 | console.log(strA + ' !== ' + strB, (new Error('').stack)) 11 | assert(strA === strB) 12 | } 13 | } 14 | 15 | var start 16 | var calls = [] 17 | 18 | // set options.datastore 19 | var setRedisClientOptions = function (options) { 20 | options.clearDatastore = true 21 | if (options.clientOptions == null) { 22 | options.clientOptions = { 23 | host: process.env.REDIS_HOST, 24 | port: process.env.REDIS_PORT, 25 | } 26 | } 27 | } 28 | 29 | if (options.datastore == null && process.env.DATASTORE === 'redis') { 30 | options.datastore = 'redis' 31 | setRedisClientOptions(options) 32 | } else if (options.datastore == null && process.env.DATASTORE === 'ioredis') { 33 | options.datastore = 'ioredis' 34 | setRedisClientOptions(options) 35 | } else { 36 | options.datastore = 'local' 37 | } 38 | 39 | var limiter = new Bottleneck(options) 40 | // limiter.on("debug", function (str, args) { console.log(`${Date.now()-start} ${str} ${JSON.stringify(args)}`) }) 41 | if (!options.errorEventsExpected) { 42 | limiter.on("error", function (err) { 43 | console.log('(CONTEXT) ERROR EVENT', err) 44 | }) 45 | } 46 | limiter.ready().then(function (client) { 47 | start = Date.now() 48 | }) 49 | var getResults = function () { 50 | return { 51 | elapsed: Date.now() - start, 52 | callsDuration: calls.length > 0 ? 
calls[calls.length - 1].time : null, 53 | calls: calls 54 | } 55 | } 56 | 57 | var context = { 58 | job: function (err, ...result) { 59 | var cb = result.pop() 60 | calls.push({err: err, result: result, time: Date.now()-start}) 61 | if (process.env.DEBUG) console.log(result, calls) 62 | cb.apply({}, [err].concat(result)) 63 | }, 64 | slowJob: function (duration, err, ...result) { 65 | setTimeout(function () { 66 | var cb = result.pop() 67 | calls.push({err: err, result: result, time: Date.now()-start}) 68 | if (process.env.DEBUG) console.log(result, calls) 69 | cb.apply({}, [err].concat(result)) 70 | }, duration) 71 | }, 72 | promise: function (err, ...result) { 73 | return new Promise(function (resolve, reject) { 74 | if (process.env.DEBUG) console.log('In c.promise. Result: ', result) 75 | calls.push({err: err, result: result, time: Date.now()-start}) 76 | if (process.env.DEBUG) console.log(result, calls) 77 | if (err === null) { 78 | return resolve(result) 79 | } else { 80 | return reject(err) 81 | } 82 | }) 83 | }, 84 | slowPromise: function (duration, err, ...result) { 85 | return new Promise(function (resolve, reject) { 86 | setTimeout(function () { 87 | if (process.env.DEBUG) console.log('In c.slowPromise. Result: ', result) 88 | calls.push({err: err, result: result, time: Date.now()-start}) 89 | if (process.env.DEBUG) console.log(result, calls) 90 | if (err === null) { 91 | return resolve(result) 92 | } else { 93 | return reject(err) 94 | } 95 | }, duration) 96 | }) 97 | }, 98 | pNoErrVal: function (promise, ...expected) { 99 | if (process.env.DEBUG) console.log('In c.pNoErrVal. Expected:', expected) 100 | return promise.then(function (actual) { 101 | mustEqual(actual, expected) 102 | }) 103 | }, 104 | noErrVal: function (...expected) { 105 | return function (err, ...actual) { 106 | mustEqual(err, null) 107 | mustEqual(actual, expected) 108 | } 109 | }, 110 | last: function (options) { 111 | var opt = options != null ? 
options : {} 112 | return limiter.schedule(opt, function () { return Promise.resolve(getResults()) }) 113 | .catch(function (err) { console.error("Error in context.last:", err)}) 114 | }, 115 | wait: function (wait) { 116 | return new Promise(function (resolve, reject) { 117 | setTimeout(resolve, wait) 118 | }) 119 | }, 120 | limiter: limiter, 121 | mustEqual: mustEqual, 122 | mustExist: function (a) { assert(a != null) }, 123 | results: getResults, 124 | checkResultsOrder: function (order) { 125 | mustEqual(order.length, calls.length) 126 | for (var i = 0; i < Math.max(calls.length, order.length); i++) { 127 | mustEqual(order[i], calls[i].result) 128 | } 129 | }, 130 | checkDuration: function (shouldBe, minBound = 10) { 131 | var results = getResults() 132 | var min = shouldBe - minBound 133 | var max = shouldBe + 50 134 | if (!(results.callsDuration > min && results.callsDuration < max)) { 135 | console.error('Duration not around ' + shouldBe + '. Was ' + results.callsDuration) 136 | } 137 | assert(results.callsDuration > min && results.callsDuration < max) 138 | } 139 | } 140 | 141 | return context 142 | } 143 | -------------------------------------------------------------------------------- /test/group.js: -------------------------------------------------------------------------------- 1 | var makeTest = require('./context') 2 | var Bottleneck = require('./bottleneck') 3 | var assert = require('assert') 4 | 5 | describe('Group', function () { 6 | var c 7 | 8 | afterEach(function () { 9 | return c.limiter.disconnect(false) 10 | }) 11 | 12 | it('Should create limiters', function (done) { 13 | c = makeTest() 14 | var group = new Bottleneck.Group({ 15 | maxConcurrent: 1, minTime: 100 16 | }) 17 | 18 | var results = [] 19 | 20 | var job = function (...result) { 21 | results.push(result) 22 | return new Promise(function (resolve, reject) { 23 | setTimeout(function () { 24 | return resolve() 25 | }, 50) 26 | }) 27 | } 28 | 29 | group.key('A').schedule(job, 1, 2) 30 | 
group.key('A').schedule(job, 3) 31 | group.key('A').schedule(job, 4) 32 | setTimeout(function () { 33 | group.key('B').schedule(job, 5) 34 | }, 20) 35 | setTimeout(function () { 36 | group.key('C').schedule(job, 6) 37 | group.key('C').schedule(job, 7) 38 | }, 40) 39 | 40 | group.key('A').submit(function (cb) { 41 | c.mustEqual(results, [[1,2], [5], [6], [3], [7], [4]]) 42 | cb() 43 | done() 44 | }, null) 45 | }) 46 | 47 | it('Should set up the limiter IDs (default)', function () { 48 | c = makeTest() 49 | var group = new Bottleneck.Group({ 50 | maxConcurrent: 1, minTime: 100 51 | }) 52 | 53 | c.mustEqual(group.key('A').id, 'group-key-A') 54 | c.mustEqual(group.key('B').id, 'group-key-B') 55 | c.mustEqual(group.key('XYZ').id, 'group-key-XYZ') 56 | 57 | var ids = group.keys().map(function (key) { 58 | var limiter = group.key(key) 59 | c.mustEqual(limiter._store.timeout, group.timeout) 60 | return limiter.id 61 | }) 62 | c.mustEqual(ids.sort(), ['group-key-A', 'group-key-B', 'group-key-XYZ']) 63 | }) 64 | 65 | it('Should set up the limiter IDs (custom)', function () { 66 | c = makeTest() 67 | var group = new Bottleneck.Group({ 68 | maxConcurrent: 1, minTime: 100, 69 | id: 'custom-id' 70 | }) 71 | 72 | c.mustEqual(group.key('A').id, 'custom-id-A') 73 | c.mustEqual(group.key('B').id, 'custom-id-B') 74 | c.mustEqual(group.key('XYZ').id, 'custom-id-XYZ') 75 | 76 | var ids = group.keys().map(function (key) { 77 | var limiter = group.key(key) 78 | c.mustEqual(limiter._store.timeout, group.timeout) 79 | return limiter.id 80 | }) 81 | c.mustEqual(ids.sort(), ['custom-id-A', 'custom-id-B', 'custom-id-XYZ']) 82 | }) 83 | 84 | it('Should pass new limiter to \'created\' event', function () { 85 | c = makeTest() 86 | var group = new Bottleneck.Group({ 87 | maxConcurrent: 1, minTime: 100 88 | }) 89 | 90 | var keys = [] 91 | var ids = [] 92 | var promises = [] 93 | group.on('created', function (created, key) { 94 | keys.push(key) 95 | promises.push( 96 | created.updateSettings({ id: 
key }) 97 | .then(function (limiter) { 98 | ids.push(limiter.id) 99 | }) 100 | ) 101 | }) 102 | 103 | group.key('A') 104 | group.key('B') 105 | group.key('A') 106 | group.key('B') 107 | group.key('B') 108 | group.key('BB') 109 | group.key('C') 110 | group.key('A') 111 | 112 | return Promise.all(promises) 113 | .then(function () { 114 | c.mustEqual(keys, ids) 115 | return c.limiter.ready() 116 | }) 117 | 118 | }) 119 | 120 | it('Should pass error on failure', function (done) { 121 | var failureMessage = 'SOMETHING BLEW UP!!' 122 | c = makeTest() 123 | var group = new Bottleneck.Group({ 124 | maxConcurrent: 1, minTime: 100 125 | }) 126 | c.mustEqual(Object.keys(group.limiters), []) 127 | 128 | var results = [] 129 | 130 | var job = function (...result) { 131 | results.push(result) 132 | return new Promise(function (resolve, reject) { 133 | setTimeout(function () { 134 | return resolve() 135 | }, 50) 136 | }) 137 | } 138 | 139 | group.key('A').schedule(job, 1, 2) 140 | group.key('A').schedule(job, 3) 141 | group.key('A').schedule(job, 4) 142 | group.key('B').schedule(() => Promise.reject(new Error(failureMessage))) 143 | .catch(function (err) { 144 | results.push(['CAUGHT', err.message]) 145 | }) 146 | setTimeout(function () { 147 | group.key('C').schedule(job, 6) 148 | group.key('C').schedule(job, 7) 149 | }, 40) 150 | 151 | 152 | group.key('A').submit(function (cb) { 153 | c.mustEqual(results, [[1,2], ['CAUGHT', failureMessage], [6], [3], [7], [4]]) 154 | cb() 155 | done() 156 | }, null) 157 | }) 158 | 159 | it('Should update its timeout', function () { 160 | c = makeTest() 161 | var group1 = new Bottleneck.Group({ 162 | maxConcurrent: 1, minTime: 100 163 | }) 164 | var group2 = new Bottleneck.Group({ 165 | maxConcurrent: 1, minTime: 100, timeout: 5000 166 | }) 167 | 168 | c.mustEqual(group1.timeout, 300000) 169 | c.mustEqual(group2.timeout, 5000) 170 | 171 | var p1 = group1.updateSettings({ timeout: 123 }) 172 | var p2 = group2.updateSettings({ timeout: 456 }) 173 
| return Promise.all([p1, p2]) 174 | .then(function () { 175 | c.mustEqual(group1.timeout, 123) 176 | c.mustEqual(group2.timeout, 456) 177 | }) 178 | }) 179 | 180 | it('Should update its limiter options', function () { 181 | c = makeTest() 182 | var group = new Bottleneck.Group({ 183 | maxConcurrent: 1, minTime: 100 184 | }) 185 | 186 | var limiter1 = group.key('AAA') 187 | c.mustEqual(limiter1._store.storeOptions.minTime, 100) 188 | 189 | group.updateSettings({ minTime: 200 }) 190 | c.mustEqual(limiter1._store.storeOptions.minTime, 100) 191 | 192 | var limiter2 = group.key('BBB') 193 | c.mustEqual(limiter2._store.storeOptions.minTime, 200) 194 | }) 195 | 196 | it('Should support keys(), limiters(), deleteKey()', function () { 197 | c = makeTest() 198 | var group1 = new Bottleneck.Group({ 199 | maxConcurrent: 1 200 | }) 201 | var KEY_A = "AAA" 202 | var KEY_B = "BBB" 203 | 204 | return Promise.all([ 205 | c.pNoErrVal(group1.key(KEY_A).schedule(c.promise, null, 1), 1), 206 | c.pNoErrVal(group1.key(KEY_B).schedule(c.promise, null, 2), 2) 207 | ]) 208 | .then(function () { 209 | var keys = group1.keys() 210 | var limiters = group1.limiters() 211 | c.mustEqual(keys, [KEY_A, KEY_B]) 212 | c.mustEqual(limiters.length, 2) 213 | 214 | limiters.forEach(function (limiter, i) { 215 | c.mustEqual(limiter.key, keys[i]) 216 | assert(limiter.limiter instanceof Bottleneck) 217 | }) 218 | 219 | return group1.deleteKey(KEY_A) 220 | }) 221 | .then(function (deleted) { 222 | c.mustEqual(deleted, true) 223 | c.mustEqual(group1.keys().length, 1) 224 | return group1.deleteKey(KEY_A) 225 | }) 226 | .then(function (deleted) { 227 | c.mustEqual(deleted, false) 228 | c.mustEqual(group1.keys().length, 1) 229 | }) 230 | }) 231 | 232 | it('Should call autocleanup', function () { 233 | var KEY = 'test-key' 234 | var group = new Bottleneck.Group({ 235 | maxConcurrent: 1 236 | }) 237 | group.updateSettings({ timeout: 50 }) 238 | c = makeTest({ id: 'something', timeout: group.timeout }) 239 | 240 | 
group.instances[KEY] = c.limiter 241 | return group.key(KEY).schedule(function () { 242 | return Promise.resolve() 243 | }) 244 | .then(function () { 245 | assert(group.instances[KEY] != null) 246 | return new Promise(function (resolve, reject) { 247 | setTimeout(resolve, 100) 248 | }) 249 | }) 250 | .then(function () { 251 | assert(group.instances[KEY] == null) 252 | }) 253 | }) 254 | 255 | }) 256 | -------------------------------------------------------------------------------- /test/ioredis.js: -------------------------------------------------------------------------------- 1 | var makeTest = require('./context') 2 | var Bottleneck = require('./bottleneck') 3 | var assert = require('assert') 4 | var Redis = require('ioredis') 5 | 6 | if (process.env.DATASTORE === 'ioredis') { 7 | describe('ioredis-only', function () { 8 | var c 9 | 10 | afterEach(function () { 11 | return c.limiter.disconnect(false) 12 | }) 13 | 14 | it('Should accept ioredis lib override', function () { 15 | c = makeTest({ 16 | maxConcurrent: 2, 17 | Redis, 18 | clientOptions: {}, 19 | clusterNodes: [{ 20 | host: process.env.REDIS_HOST, 21 | port: process.env.REDIS_PORT 22 | }] 23 | }) 24 | 25 | c.mustEqual(c.limiter.datastore, 'ioredis') 26 | }) 27 | 28 | it('Should connect in Redis Cluster mode', function () { 29 | c = makeTest({ 30 | maxConcurrent: 2, 31 | clientOptions: {}, 32 | clusterNodes: [{ 33 | host: process.env.REDIS_HOST, 34 | port: process.env.REDIS_PORT 35 | }] 36 | }) 37 | 38 | c.mustEqual(c.limiter.datastore, 'ioredis') 39 | assert(c.limiter._store.connection.client.nodes().length >= 0) 40 | }) 41 | 42 | it('Should connect in Redis Cluster mode with premade client', function () { 43 | var client = new Redis.Cluster('') 44 | var connection = new Bottleneck.IORedisConnection({ client }) 45 | c = makeTest({ 46 | maxConcurrent: 2, 47 | clientOptions: {}, 48 | clusterNodes: [{ 49 | host: process.env.REDIS_HOST, 50 | port: process.env.REDIS_PORT 51 | }] 52 | }) 53 | 54 | 
c.mustEqual(c.limiter.datastore, 'ioredis') 55 | assert(c.limiter._store.connection.client.nodes().length >= 0) 56 | connection.disconnect(false) 57 | }) 58 | 59 | it('Should accept existing connections', function () { 60 | var connection = new Bottleneck.IORedisConnection() 61 | connection.id = 'super-connection' 62 | c = makeTest({ 63 | minTime: 50, 64 | connection 65 | }) 66 | 67 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1) 68 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2) 69 | 70 | return c.last() 71 | .then(function (results) { 72 | c.checkResultsOrder([[1], [2]]) 73 | c.checkDuration(50) 74 | c.mustEqual(c.limiter.connection.id, 'super-connection') 75 | c.mustEqual(c.limiter.datastore, 'ioredis') 76 | 77 | return c.limiter.disconnect() 78 | }) 79 | .then(function () { 80 | // Shared connections should not be disconnected by the limiter 81 | c.mustEqual(c.limiter.clients().client.status, 'ready') 82 | return connection.disconnect() 83 | }) 84 | }) 85 | 86 | it('Should accept existing redis clients', function () { 87 | var client = new Redis() 88 | client.id = 'super-client' 89 | 90 | var connection = new Bottleneck.IORedisConnection({ client }) 91 | connection.id = 'super-connection' 92 | c = makeTest({ 93 | minTime: 50, 94 | connection 95 | }) 96 | 97 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1) 98 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2) 99 | 100 | return c.last() 101 | .then(function (results) { 102 | c.checkResultsOrder([[1], [2]]) 103 | c.checkDuration(50) 104 | c.mustEqual(c.limiter.clients().client.id, 'super-client') 105 | c.mustEqual(c.limiter.connection.id, 'super-connection') 106 | c.mustEqual(c.limiter.datastore, 'ioredis') 107 | 108 | return c.limiter.disconnect() 109 | }) 110 | .then(function () { 111 | // Shared connections should not be disconnected by the limiter 112 | c.mustEqual(c.limiter.clients().client.status, 'ready') 113 | return connection.disconnect() 114 | }) 115 | }) 116 | 117 | 
it('Should trigger error events on the shared connection', function (done) { 118 | var connection = new Bottleneck.IORedisConnection({ 119 | clientOptions: { 120 | port: 1 121 | } 122 | }) 123 | connection.on('error', function (err) { 124 | c.mustEqual(c.limiter.datastore, 'ioredis') 125 | connection.disconnect() 126 | done() 127 | }) 128 | 129 | c = makeTest({ connection }) 130 | c.limiter.on('error', function (err) { 131 | done(err) 132 | }) 133 | }) 134 | }) 135 | } 136 | -------------------------------------------------------------------------------- /test/node_redis.js: -------------------------------------------------------------------------------- 1 | var makeTest = require('./context') 2 | var Bottleneck = require('./bottleneck') 3 | var assert = require('assert') 4 | var Redis = require('redis') 5 | 6 | if (process.env.DATASTORE === 'redis') { 7 | describe('node_redis-only', function () { 8 | var c 9 | 10 | afterEach(function () { 11 | return c.limiter.disconnect(false) 12 | }) 13 | 14 | it('Should accept node_redis lib override', function () { 15 | c = makeTest({ 16 | maxConcurrent: 2, 17 | Redis, 18 | clientOptions: {} 19 | }) 20 | 21 | c.mustEqual(c.limiter.datastore, 'redis') 22 | }) 23 | 24 | it('Should accept existing connections', function () { 25 | var connection = new Bottleneck.RedisConnection() 26 | connection.id = 'super-connection' 27 | c = makeTest({ 28 | minTime: 50, 29 | connection 30 | }) 31 | 32 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1) 33 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2) 34 | 35 | return c.last() 36 | .then(function (results) { 37 | c.checkResultsOrder([[1], [2]]) 38 | c.checkDuration(50) 39 | c.mustEqual(c.limiter.connection.id, 'super-connection') 40 | c.mustEqual(c.limiter.datastore, 'redis') 41 | 42 | return c.limiter.disconnect() 43 | }) 44 | .then(function () { 45 | // Shared connections should not be disconnected by the limiter 46 | c.mustEqual(c.limiter.clients().client.ready, true) 47 | 
return connection.disconnect() 48 | }) 49 | }) 50 | 51 | it('Should accept existing redis clients', function () { 52 | var client = Redis.createClient() 53 | client.id = 'super-client' 54 | 55 | var connection = new Bottleneck.RedisConnection({ client }) 56 | connection.id = 'super-connection' 57 | c = makeTest({ 58 | minTime: 50, 59 | connection 60 | }) 61 | 62 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1) 63 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2) 64 | 65 | return c.last() 66 | .then(function (results) { 67 | c.checkResultsOrder([[1], [2]]) 68 | c.checkDuration(50) 69 | c.mustEqual(c.limiter.clients().client.id, 'super-client') 70 | c.mustEqual(c.limiter.connection.id, 'super-connection') 71 | c.mustEqual(c.limiter.datastore, 'redis') 72 | 73 | return c.limiter.disconnect() 74 | }) 75 | .then(function () { 76 | // Shared connections should not be disconnected by the limiter 77 | c.mustEqual(c.limiter.clients().client.ready, true) 78 | return connection.disconnect() 79 | }) 80 | }) 81 | 82 | it('Should trigger error events on the shared connection', function (done) { 83 | var connection = new Bottleneck.RedisConnection({ 84 | clientOptions: { 85 | port: 1 86 | } 87 | }) 88 | connection.on('error', function (err) { 89 | c.mustEqual(c.limiter.datastore, 'redis') 90 | connection.disconnect() 91 | done() 92 | }) 93 | 94 | c = makeTest({ connection }) 95 | c.limiter.on('error', function (err) { 96 | done(err) 97 | }) 98 | }) 99 | }) 100 | } 101 | -------------------------------------------------------------------------------- /test/priority.js: -------------------------------------------------------------------------------- 1 | var makeTest = require('./context') 2 | var Bottleneck = require('./bottleneck') 3 | var assert = require('assert') 4 | 5 | describe('Priority', function () { 6 | var c 7 | 8 | afterEach(function () { 9 | return c.limiter.disconnect(false) 10 | }) 11 | 12 | it('Should do basic ordering', function () { 13 | c = 
makeTest({maxConcurrent: 1, minTime: 100, rejectOnDrop: false}) 14 | 15 | return Promise.all([ 16 | c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 1), 1), 17 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2), 18 | c.pNoErrVal(c.limiter.schedule({priority: 1}, c.promise, null, 5, 6), 5, 6), 19 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 3), 3), 20 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 4), 4) 21 | ]) 22 | .then(function () { 23 | return c.last() 24 | }) 25 | .then(function (results) { 26 | c.checkResultsOrder([[1], [5,6], [2] ,[3], [4]]) 27 | c.checkDuration(400) 28 | }) 29 | }) 30 | 31 | it('Should support LEAK', function () { 32 | c = makeTest({ 33 | maxConcurrent: 1, 34 | minTime: 100, 35 | highWater: 3, 36 | strategy: Bottleneck.strategy.LEAK, 37 | rejectOnDrop: false 38 | }) 39 | 40 | var called = false 41 | c.limiter.on('dropped', function (dropped) { 42 | c.mustExist(dropped.task) 43 | c.mustExist(dropped.args) 44 | c.mustExist(dropped.promise) 45 | called = true 46 | }) 47 | 48 | c.limiter.submit(c.slowJob, 50, null, 1, c.noErrVal(1)) 49 | c.limiter.submit(c.job, null, 2, c.noErrVal(2)) 50 | c.limiter.submit(c.job, null, 3, c.noErrVal(3)) 51 | c.limiter.submit(c.job, null, 4, c.noErrVal(4)) 52 | c.limiter.submit({priority: 2}, c.job, null, 5, c.noErrVal(5)) 53 | c.limiter.submit({priority: 1}, c.job, null, 6, c.noErrVal(6)) 54 | c.limiter.submit({priority: 9}, c.job, null, 7, c.noErrVal(7)) 55 | 56 | return c.last({ weight: 0 }) 57 | .then(function (results) { 58 | c.checkDuration(200) 59 | c.checkResultsOrder([[1], [6], [5]]) 60 | c.mustEqual(called, true) 61 | }) 62 | }) 63 | 64 | it('Should support OVERFLOW', function () { 65 | c = makeTest({ 66 | maxConcurrent: 1, 67 | minTime: 100, 68 | highWater: 2, 69 | strategy: Bottleneck.strategy.OVERFLOW, 70 | rejectOnDrop: false 71 | }) 72 | var called = false 73 | c.limiter.on('dropped', function (dropped) { 74 | c.mustExist(dropped.task) 75 | c.mustExist(dropped.args) 76 | 
c.mustExist(dropped.promise) 77 | called = true 78 | }) 79 | 80 | c.limiter.submit(c.slowJob, 50, null, 1, c.noErrVal(1)) 81 | c.limiter.submit(c.job, null, 2, c.noErrVal(2)) 82 | c.limiter.submit(c.job, null, 3, c.noErrVal(3)) 83 | c.limiter.submit(c.job, null, 4, c.noErrVal(4)) 84 | c.limiter.submit({priority: 2}, c.job, null, 5, c.noErrVal(5)) 85 | c.limiter.submit({priority: 1}, c.job, null, 6, c.noErrVal(6)) 86 | 87 | return c.limiter.submit({priority: 9}, c.job, null, 7, c.noErrVal(7)) 88 | .then(function () { 89 | return c.limiter.updateSettings({ highWater: null }) 90 | }) 91 | .then(c.last) 92 | .then(function (results) { 93 | c.checkDuration(200) 94 | c.checkResultsOrder([[1], [2], [3]]) 95 | c.mustEqual(called, true) 96 | }) 97 | }) 98 | 99 | it('Should support OVERFLOW_PRIORITY', function () { 100 | c = makeTest({ 101 | maxConcurrent: 1, 102 | minTime: 100, 103 | highWater: 2, 104 | strategy: Bottleneck.strategy.OVERFLOW_PRIORITY, 105 | rejectOnDrop: false 106 | }) 107 | var called = false 108 | c.limiter.on('dropped', function (dropped) { 109 | c.mustExist(dropped.task) 110 | c.mustExist(dropped.args) 111 | c.mustExist(dropped.promise) 112 | called = true 113 | }) 114 | 115 | c.limiter.submit(c.slowJob, 50, null, 1, c.noErrVal(1)) 116 | c.limiter.submit(c.job, null, 2, c.noErrVal(2)) 117 | c.limiter.submit(c.job, null, 3, c.noErrVal(3)) 118 | c.limiter.submit(c.job, null, 4, c.noErrVal(4)) 119 | c.limiter.submit({priority: 2}, c.job, null, 5, c.noErrVal(5)) 120 | c.limiter.submit({priority: 2}, c.job, null, 6, c.noErrVal(6)) 121 | 122 | return c.limiter.submit({priority: 2}, c.job, null, 7, c.noErrVal(7)) 123 | .then(function () { 124 | return c.limiter.updateSettings({highWater: null}) 125 | }) 126 | .then(c.last) 127 | .then(function (results) { 128 | c.checkDuration(200) 129 | c.checkResultsOrder([[1], [5], [6]]) 130 | c.mustEqual(called, true) 131 | }) 132 | }) 133 | 134 | it('Should support BLOCK', function (done) { 135 | c = makeTest({ 136 | 
maxConcurrent: 1, 137 | minTime: 100, 138 | highWater: 2, 139 | trackDoneStatus: true, 140 | strategy: Bottleneck.strategy.BLOCK 141 | }) 142 | var called = 0 143 | 144 | c.limiter.on('dropped', function (dropped) { 145 | c.mustExist(dropped.task) 146 | c.mustExist(dropped.args) 147 | c.mustExist(dropped.promise) 148 | called++ 149 | if (called === 3) { 150 | c.limiter.updateSettings({ highWater: null }) 151 | .then(function () { 152 | return c.limiter.schedule(c.job, null, 8) 153 | }) 154 | .catch(function (err) { 155 | assert(err instanceof Bottleneck.BottleneckError) 156 | c.mustEqual(err.message, 'This job has been dropped by Bottleneck') 157 | c.limiter.removeAllListeners('error') 158 | done() 159 | }) 160 | } 161 | }) 162 | 163 | c.limiter.submit(c.slowJob, 20, null, 1, c.noErrVal(1)) 164 | c.limiter.submit(c.slowJob, 20, null, 2, (err) => c.mustExist(err)) 165 | c.limiter.submit(c.slowJob, 20, null, 3, (err) => c.mustExist(err)) 166 | c.limiter.submit(c.slowJob, 20, null, 4, (err) => c.mustExist(err)) 167 | }) 168 | 169 | it('Should have the right priority', function () { 170 | c = makeTest({maxConcurrent: 1, minTime: 100}) 171 | 172 | c.pNoErrVal(c.limiter.schedule({priority: 6}, c.slowPromise, 50, null, 1), 1) 173 | c.pNoErrVal(c.limiter.schedule({priority: 5}, c.promise, null, 2), 2) 174 | c.pNoErrVal(c.limiter.schedule({priority: 4}, c.promise, null, 3), 3) 175 | c.pNoErrVal(c.limiter.schedule({priority: 3}, c.promise, null, 4), 4) 176 | 177 | return c.last() 178 | .then(function (results) { 179 | c.checkDuration(300) 180 | c.checkResultsOrder([[1], [4], [3], [2]]) 181 | }) 182 | }) 183 | 184 | }) 185 | -------------------------------------------------------------------------------- /test/promises.js: -------------------------------------------------------------------------------- 1 | var makeTest = require('./context') 2 | var Bottleneck = require('./bottleneck') 3 | var assert = require('assert') 4 | 5 | describe('Promises', function () { 6 | var c 7 | 
8 | afterEach(function () { 9 | return c.limiter.disconnect(false) 10 | }) 11 | 12 | it('Should support promises', function () { 13 | c = makeTest({maxConcurrent: 1, minTime: 100}) 14 | 15 | c.limiter.submit(c.job, null, 1, 9, c.noErrVal(1, 9)) 16 | c.limiter.submit(c.job, null, 2, c.noErrVal(2)) 17 | c.limiter.submit(c.job, null, 3, c.noErrVal(3)) 18 | c.pNoErrVal(c.limiter.schedule(c.promise, null, 4, 5), 4, 5) 19 | 20 | return c.last() 21 | .then(function (results) { 22 | c.checkResultsOrder([[1,9], [2], [3], [4,5]]) 23 | c.checkDuration(300) 24 | }) 25 | }) 26 | 27 | it('Should pass error on failure', function () { 28 | var failureMessage = 'failed' 29 | c = makeTest({maxConcurrent: 1, minTime: 100}) 30 | 31 | return c.limiter.schedule(c.promise, new Error(failureMessage)) 32 | .catch(function (err) { 33 | c.mustEqual(err.message, failureMessage) 34 | }) 35 | }) 36 | 37 | it('Should allow non-Promise returns', function () { 38 | c = makeTest() 39 | var str = 'This is a string' 40 | 41 | return c.limiter.schedule(() => str) 42 | .then(function (x) { 43 | c.mustEqual(x, str) 44 | }) 45 | }) 46 | 47 | it('Should get rejected when rejectOnDrop is true', function () { 48 | c = makeTest({ 49 | maxConcurrent: 1, 50 | minTime: 0, 51 | highWater: 1, 52 | strategy: Bottleneck.strategy.OVERFLOW, 53 | rejectOnDrop: true 54 | }) 55 | var dropped = 0 56 | var caught = 0 57 | var p1 58 | var p2 59 | 60 | c.limiter.on('dropped', function () { 61 | dropped++ 62 | }) 63 | 64 | p1 = c.pNoErrVal(c.limiter.schedule({id: 1}, c.slowPromise, 50, null, 1), 1) 65 | p2 = c.pNoErrVal(c.limiter.schedule({id: 2}, c.slowPromise, 50, null, 2), 2) 66 | 67 | return c.limiter.schedule({id: 3}, c.slowPromise, 50, null, 3) 68 | .catch(function (err) { 69 | c.mustEqual(err.message, 'This job has been dropped by Bottleneck') 70 | assert(err instanceof Bottleneck.BottleneckError) 71 | caught++ 72 | return Promise.all([p1, p2]) 73 | }) 74 | .then(c.last) 75 | .then(function (results) { 76 | 
c.checkResultsOrder([[1], [2]]) 77 | c.checkDuration(100) 78 | c.mustEqual(dropped, 1) 79 | c.mustEqual(caught, 1) 80 | }) 81 | }) 82 | 83 | it('Should automatically wrap an exception in a rejected promise - schedule()', function () { 84 | c = makeTest({maxConcurrent: 1, minTime: 100}) 85 | 86 | return c.limiter.schedule(() => { 87 | throw new Error('I will reject') 88 | }) 89 | .then(() => assert(false)) 90 | .catch(err => { 91 | assert(err.message === 'I will reject'); 92 | }) 93 | }) 94 | 95 | describe('Wrap', function () { 96 | it('Should wrap', function () { 97 | c = makeTest({maxConcurrent: 1, minTime: 100}) 98 | 99 | c.limiter.submit(c.job, null, 1, c.noErrVal(1)) 100 | c.limiter.submit(c.job, null, 2, c.noErrVal(2)) 101 | c.limiter.submit(c.job, null, 3, c.noErrVal(3)) 102 | 103 | var wrapped = c.limiter.wrap(c.promise) 104 | c.pNoErrVal(wrapped(null, 4), 4) 105 | 106 | return c.last() 107 | .then(function (results) { 108 | c.checkResultsOrder([[1], [2], [3], [4]]) 109 | c.checkDuration(300) 110 | }) 111 | }) 112 | 113 | it('Should automatically wrap a returned value in a resolved promise', function () { 114 | c = makeTest({maxConcurrent: 1, minTime: 100}) 115 | 116 | fn = c.limiter.wrap(() => { return 7 }); 117 | 118 | return fn().then(result => { 119 | assert(result === 7); 120 | }) 121 | }) 122 | 123 | it('Should automatically wrap an exception in a rejected promise', function () { 124 | c = makeTest({maxConcurrent: 1, minTime: 100}) 125 | 126 | fn = c.limiter.wrap(() => { throw new Error('I will reject') }); 127 | 128 | return fn().then(() => assert(false)).catch(error => { 129 | assert(error.message === 'I will reject'); 130 | }) 131 | }) 132 | 133 | it('Should inherit the original target for wrapped methods', function () { 134 | c = makeTest({maxConcurrent: 1, minTime: 100}) 135 | 136 | var object = { 137 | fn: c.limiter.wrap(function () { return this }) 138 | } 139 | 140 | return object.fn().then(result => { 141 | assert(result === object) 142 | }) 
143 | }) 144 | 145 | it('Should inherit the original target on prototype methods', function () { 146 | c = makeTest({maxConcurrent: 1, minTime: 100}) 147 | 148 | class Animal { 149 | constructor(name) { this.name = name } 150 | getName() { return this.name } 151 | } 152 | 153 | Animal.prototype.getName = c.limiter.wrap(Animal.prototype.getName) 154 | let elephant = new Animal('Dumbo') 155 | 156 | return elephant.getName().then(result => { 157 | assert(result === 'Dumbo') 158 | }) 159 | }) 160 | 161 | it('Should pass errors back', function () { 162 | var failureMessage = 'BLEW UP!!!' 163 | c = makeTest({maxConcurrent: 1, minTime: 100}) 164 | 165 | var wrapped = c.limiter.wrap(c.promise) 166 | c.pNoErrVal(wrapped(null, 1), 1) 167 | c.pNoErrVal(wrapped(null, 2), 2) 168 | 169 | return wrapped(new Error(failureMessage), 3) 170 | .catch(function (err) { 171 | c.mustEqual(err.message, failureMessage) 172 | return c.last() 173 | }) 174 | .then(function (results) { 175 | c.checkResultsOrder([[1], [2], [3]]) 176 | c.checkDuration(200) 177 | }) 178 | }) 179 | 180 | it('Should allow passing options', function () { 181 | var failureMessage = 'BLEW UP!!!' 
182 | c = makeTest({maxConcurrent: 1, minTime: 50}) 183 | 184 | var wrapped = c.limiter.wrap(c.promise) 185 | c.pNoErrVal(wrapped(null, 1), 1) 186 | c.pNoErrVal(wrapped(null, 2), 2) 187 | c.pNoErrVal(wrapped(null, 3), 3) 188 | c.pNoErrVal(wrapped(null, 4), 4) 189 | c.pNoErrVal(wrapped.withOptions({ priority: 1 }, null, 5), 5) 190 | 191 | return wrapped.withOptions({ priority: 1 }, new Error(failureMessage), 6) 192 | .catch(function (err) { 193 | c.mustEqual(err.message, failureMessage) 194 | return c.last() 195 | }) 196 | .then(function (results) { 197 | c.checkResultsOrder([[1], [2], [5], [6], [3], [4]]) 198 | c.checkDuration(250) 199 | }) 200 | }) 201 | }) 202 | }) 203 | -------------------------------------------------------------------------------- /test/retries.js: -------------------------------------------------------------------------------- 1 | var makeTest = require('./context') 2 | var Bottleneck = require('./bottleneck') 3 | var assert = require('assert') 4 | var child_process = require('child_process') 5 | 6 | describe('Retries', function () { 7 | var c 8 | 9 | afterEach(function () { 10 | return c.limiter.disconnect(false) 11 | }) 12 | 13 | it('Should retry when requested by the user (sync)', async function () { 14 | c = makeTest({ trackDoneStatus: true }) 15 | var failedEvents = 0 16 | var retryEvents = 0 17 | 18 | c.limiter.on('failed', function (error, info) { 19 | c.mustEqual(c.limiter.counts().EXECUTING, 1) 20 | c.mustEqual(info.retryCount, failedEvents) 21 | failedEvents++ 22 | return 50 23 | }) 24 | 25 | c.limiter.on('retry', function (error, info) { 26 | c.mustEqual(c.limiter.counts().EXECUTING, 1) 27 | retryEvents++ 28 | }) 29 | 30 | var times = 0 31 | const job = function () { 32 | times++ 33 | if (times <= 2) { 34 | return Promise.reject(new Error('boom')) 35 | } 36 | return Promise.resolve('Success!') 37 | } 38 | 39 | c.mustEqual(await c.limiter.schedule(job), 'Success!') 40 | const results = await c.results() 41 | assert(results.elapsed > 
90 && results.elapsed < 130) 42 | c.mustEqual(failedEvents, 2) 43 | c.mustEqual(retryEvents, 2) 44 | c.mustEqual(c.limiter.counts().EXECUTING, 0) 45 | c.mustEqual(c.limiter.counts().DONE, 1) 46 | }) 47 | 48 | it('Should retry when requested by the user (async)', async function () { 49 | c = makeTest({ trackDoneStatus: true }) 50 | var failedEvents = 0 51 | var retryEvents = 0 52 | 53 | c.limiter.on('failed', function (error, info) { 54 | c.mustEqual(c.limiter.counts().EXECUTING, 1) 55 | c.mustEqual(info.retryCount, failedEvents) 56 | failedEvents++ 57 | return Promise.resolve(50) 58 | }) 59 | 60 | c.limiter.on('retry', function (error, info) { 61 | c.mustEqual(c.limiter.counts().EXECUTING, 1) 62 | retryEvents++ 63 | }) 64 | 65 | var times = 0 66 | const job = function () { 67 | times++ 68 | if (times <= 2) { 69 | return Promise.reject(new Error('boom')) 70 | } 71 | return Promise.resolve('Success!') 72 | } 73 | 74 | c.mustEqual(await c.limiter.schedule(job), 'Success!') 75 | const results = await c.results() 76 | assert(results.elapsed > 90 && results.elapsed < 130) 77 | c.mustEqual(failedEvents, 2) 78 | c.mustEqual(retryEvents, 2) 79 | c.mustEqual(c.limiter.counts().EXECUTING, 0) 80 | c.mustEqual(c.limiter.counts().DONE, 1) 81 | }) 82 | 83 | it('Should not retry when user returns an error (sync)', async function () { 84 | c = makeTest({ errorEventsExpected: true, trackDoneStatus: true }) 85 | var failedEvents = 0 86 | var retryEvents = 0 87 | var errorEvents = 0 88 | var caught = false 89 | 90 | c.limiter.on('failed', function (error, info) { 91 | c.mustEqual(c.limiter.counts().EXECUTING, 1) 92 | c.mustEqual(info.retryCount, failedEvents) 93 | failedEvents++ 94 | throw new Error('Nope') 95 | }) 96 | 97 | c.limiter.on('retry', function (error, info) { 98 | retryEvents++ 99 | }) 100 | 101 | c.limiter.on('error', function (error, info) { 102 | c.mustEqual(error.message, 'Nope') 103 | errorEvents++ 104 | }) 105 | 106 | const job = function () { 107 | return 
Promise.reject(new Error('boom')) 108 | } 109 | 110 | try { 111 | await c.limiter.schedule(job) 112 | throw new Error('Should not reach') 113 | } catch (error) { 114 | c.mustEqual(error.message, 'boom') 115 | caught = true 116 | } 117 | c.mustEqual(failedEvents, 1) 118 | c.mustEqual(retryEvents, 0) 119 | c.mustEqual(errorEvents, 1) 120 | c.mustEqual(caught, true) 121 | c.mustEqual(c.limiter.counts().EXECUTING, 0) 122 | c.mustEqual(c.limiter.counts().DONE, 1) 123 | }) 124 | 125 | it('Should not retry when user returns an error (async)', async function () { 126 | c = makeTest({ errorEventsExpected: true, trackDoneStatus: true }) 127 | var failedEvents = 0 128 | var retryEvents = 0 129 | var errorEvents = 0 130 | var caught = false 131 | 132 | c.limiter.on('failed', function (error, info) { 133 | c.mustEqual(c.limiter.counts().EXECUTING, 1) 134 | c.mustEqual(info.retryCount, failedEvents) 135 | failedEvents++ 136 | return Promise.reject(new Error('Nope')) 137 | }) 138 | 139 | c.limiter.on('retry', function (error, info) { 140 | retryEvents++ 141 | }) 142 | 143 | c.limiter.on('error', function (error, info) { 144 | c.mustEqual(error.message, 'Nope') 145 | errorEvents++ 146 | }) 147 | 148 | const job = function () { 149 | return Promise.reject(new Error('boom')) 150 | } 151 | 152 | try { 153 | await c.limiter.schedule(job) 154 | throw new Error('Should not reach') 155 | } catch (error) { 156 | c.mustEqual(error.message, 'boom') 157 | caught = true 158 | } 159 | c.mustEqual(failedEvents, 1) 160 | c.mustEqual(retryEvents, 0) 161 | c.mustEqual(errorEvents, 1) 162 | c.mustEqual(caught, true) 163 | c.mustEqual(c.limiter.counts().EXECUTING, 0) 164 | c.mustEqual(c.limiter.counts().DONE, 1) 165 | }) 166 | 167 | it('Should not retry when user returns null (sync)', async function () { 168 | c = makeTest({ trackDoneStatus: true }) 169 | var failedEvents = 0 170 | var retryEvents = 0 171 | var caught = false 172 | 173 | c.limiter.on('failed', function (error, info) { 174 | 
c.mustEqual(c.limiter.counts().EXECUTING, 1) 175 | c.mustEqual(info.retryCount, failedEvents) 176 | failedEvents++ 177 | return null 178 | }) 179 | 180 | c.limiter.on('retry', function (error, info) { 181 | retryEvents++ 182 | }) 183 | 184 | const job = function () { 185 | return Promise.reject(new Error('boom')) 186 | } 187 | 188 | try { 189 | await c.limiter.schedule(job) 190 | throw new Error('Should not reach') 191 | } catch (error) { 192 | c.mustEqual(error.message, 'boom') 193 | caught = true 194 | } 195 | c.mustEqual(failedEvents, 1) 196 | c.mustEqual(retryEvents, 0) 197 | c.mustEqual(caught, true) 198 | c.mustEqual(c.limiter.counts().EXECUTING, 0) 199 | c.mustEqual(c.limiter.counts().DONE, 1) 200 | }) 201 | 202 | it('Should not retry when user returns null (async)', async function () { 203 | c = makeTest({ trackDoneStatus: true }) 204 | var failedEvents = 0 205 | var retryEvents = 0 206 | var caught = false 207 | 208 | c.limiter.on('failed', function (error, info) { 209 | c.mustEqual(c.limiter.counts().EXECUTING, 1) 210 | c.mustEqual(info.retryCount, failedEvents) 211 | failedEvents++ 212 | return Promise.resolve(null) 213 | }) 214 | 215 | c.limiter.on('retry', function (error, info) { 216 | retryEvents++ 217 | }) 218 | 219 | const job = function () { 220 | return Promise.reject(new Error('boom')) 221 | } 222 | 223 | try { 224 | await c.limiter.schedule(job) 225 | throw new Error('Should not reach') 226 | } catch (error) { 227 | c.mustEqual(error.message, 'boom') 228 | caught = true 229 | } 230 | c.mustEqual(failedEvents, 1) 231 | c.mustEqual(retryEvents, 0) 232 | c.mustEqual(caught, true) 233 | c.mustEqual(c.limiter.counts().EXECUTING, 0) 234 | c.mustEqual(c.limiter.counts().DONE, 1) 235 | }) 236 | 237 | }) 238 | -------------------------------------------------------------------------------- /test/spawn/increaseKeepAlive.js: -------------------------------------------------------------------------------- 1 | var Bottleneck = require('../bottleneck.js') 2 
var now = Date.now()

// Spawned as a child process: checks that reservoirIncreaseInterval keeps
// the process alive long enough to run all four jobs. Writes "[tenths]"
// tokens to stdout for the parent test to compare against.
var limiter = new Bottleneck({
  reservoir: 2,
  reservoirIncreaseAmount: 2,
  reservoirIncreaseInterval: 200
})
var stamp = () => {
  var tenths = Math.floor((Date.now() - now) / 100)
  return Promise.resolve(`[${tenths}]`)
}

limiter.schedule(stamp).then((out) => process.stdout.write(out))
limiter.schedule(stamp).then((out) => process.stdout.write(out))
limiter.schedule(stamp).then((out) => process.stdout.write(out))
limiter.schedule(stamp).then((out) => process.stdout.write(out))

// -------------------- test/spawn/refreshKeepAlive.js --------------------

var Bottleneck = require('../bottleneck.js')
var now = Date.now()

// Same as increaseKeepAlive.js, but exercises reservoirRefreshInterval.
var limiter = new Bottleneck({
  reservoir: 2,
  reservoirRefreshAmount: 2,
  reservoirRefreshInterval: 200
})
var stamp = () => {
  var tenths = Math.floor((Date.now() - now) / 100)
  return Promise.resolve(`[${tenths}]`)
}

limiter.schedule(stamp).then((out) => process.stdout.write(out))
limiter.schedule(stamp).then((out) => process.stdout.write(out))
limiter.schedule(stamp).then((out) => process.stdout.write(out))
limiter.schedule(stamp).then((out) => process.stdout.write(out))

// -------------------- test/states.js --------------------

var States = require('../lib/States')
var assert = require('assert')
var c = require('./context')({datastore: 'local'})
var Bottleneck = require('./bottleneck')

// Unit tests for the States job-status tracker used internally by Bottleneck.
describe('States', function () {

  it('Should be created and be empty', function () {
    var tracker = new States(["A", "B", "C"])
    c.mustEqual(tracker.statusCounts(), { A: 0, B: 0, C: 0 })
  })

  it('Should start new series', function () {
    var tracker = new States(["A", "B", "C"])

    tracker.start('x')
    tracker.start('y')

    // New jobs begin in the first status.
    c.mustEqual(tracker.statusCounts(), { A: 2, B: 0, C: 0 })
  })

  it('Should increment', function () {
    var tracker = new States(["A", "B", "C"])

    tracker.start('x')
    tracker.start('y')
    tracker.next('x')
    tracker.next('y')
    tracker.next('x')
    c.mustEqual(tracker.statusCounts(), { A: 0, B: 1, C: 1 })

    // next() on an unknown id is a no-op.
    tracker.next('z')
    c.mustEqual(tracker.statusCounts(), { A: 0, B: 1, C: 1 })

    // Advancing past the last status removes the job.
    tracker.next('x')
    c.mustEqual(tracker.statusCounts(), { A: 0, B: 1, C: 0 })

    tracker.next('x')
    c.mustEqual(tracker.statusCounts(), { A: 0, B: 1, C: 0 })

    tracker.next('y')
    tracker.next('y')
    c.mustEqual(tracker.statusCounts(), { A: 0, B: 0, C: 0 })
  })

  it('Should remove', function () {
    var tracker = new States(["A", "B", "C"])

    tracker.start('x')
    tracker.start('y')
    tracker.next('x')
    tracker.next('y')
    tracker.next('x')
    c.mustEqual(tracker.statusCounts(), { A: 0, B: 1, C: 1 })

    tracker.remove('x')
    c.mustEqual(tracker.statusCounts(), { A: 0, B: 1, C: 0 })

    tracker.remove('y')
    c.mustEqual(tracker.statusCounts(), { A: 0, B: 0, C: 0 })
  })

  it('Should return current status', function () {
    var tracker = new States(["A", "B", "C"])

    tracker.start('x')
    tracker.start('y')
    tracker.next('x')
    tracker.next('y')
    tracker.next('x')
    c.mustEqual(tracker.statusCounts(), { A: 0, B: 1, C: 1 })

    c.mustEqual(tracker.jobStatus('x'), 'C')
    c.mustEqual(tracker.jobStatus('y'), 'B')
    // Unknown ids report null.
    c.mustEqual(tracker.jobStatus('z'), null)
  })

  it('Should return job ids for a status', function (done) {
    var tracker = new States(["A", "B", "C"])

    tracker.start('x')
    tracker.start('y')
    tracker.start('z')
    tracker.next('x')
    tracker.next('y')
    tracker.next('x')
    tracker.next('z')
    c.mustEqual(tracker.statusCounts(), { A: 0, B: 2, C: 1 })

    c.mustEqual(tracker.statusJobs().sort(), ['x', 'y', 'z'])
    c.mustEqual(tracker.statusJobs('A'), [])
    c.mustEqual(tracker.statusJobs('B').sort(), ['y', 'z'])
    c.mustEqual(tracker.statusJobs('C'), ['x'])
    // Asking for an unknown status must throw; the error class is only
    // asserted on the full build (es5/light builds wrap errors differently).
    try {
      tracker.statusJobs('Z')
    } catch (err) {
      if (process.env.BUILD !== 'es5' && process.env.BUILD !== 'light') {
        assert(err instanceof Bottleneck.BottleneckError)
      }
      done()
    }
  })
})

// -------------------- test/stop.js --------------------

var makeTest = require('./context')
var Bottleneck = require('./bottleneck')
var assert = require('assert')

// Tests for limiter.stop(): dropping vs. draining the queue, and the
// single-use contract (stop() may only be called once).
describe('Stop', function () {
  var c

  afterEach(function () {
    return c.limiter.disconnect(false)
  })

  it('Should stop and drop the queue', function (done) {
    c = makeTest({
      maxConcurrent: 2,
      minTime: 100,
      trackDoneStatus: true
    })
    var submitFailed = false
    var queuedDropped = false
    var scheduledDropped = false
    var dropped = 0

    c.limiter.on('dropped', function () {
      dropped++
    })

    c.pNoErrVal(c.limiter.schedule({id: '0'}, c.promise, null, 0), 0)

    c.pNoErrVal(c.limiter.schedule({id: '1'}, c.slowPromise, 100, null, 1), 1)

    c.limiter.schedule({id: '2'}, c.promise, null, 2)
    .catch(function (err) {
      c.mustEqual(err.message, 'Dropped!')
      scheduledDropped = true
    })

    c.limiter.schedule({id: '3'}, c.promise, null, 3)
    .catch(function (err) {
      c.mustEqual(err.message, 'Dropped!')
      queuedDropped = true
    })

    // At t=125ms: job 0 done, job 1 executing, jobs 2-3 waiting.
    setTimeout(function () {
      var counts = c.limiter.counts()
      c.mustEqual(counts.RECEIVED, 0)
      c.mustEqual(counts.QUEUED, 1)
      c.mustEqual(counts.RUNNING, 1)
      c.mustEqual(counts.EXECUTING, 1)
      c.mustEqual(counts.DONE, 1)

      c.limiter.stop({
        enqueueErrorMessage: 'Stopped!',
        dropErrorMessage: 'Dropped!'
      })
      .then(function () {
        counts = c.limiter.counts()
        c.mustEqual(submitFailed, true)
        c.mustEqual(scheduledDropped, true)
        c.mustEqual(queuedDropped, true)
        c.mustEqual(dropped, 2)
        c.mustEqual(counts.RECEIVED, 0)
        c.mustEqual(counts.QUEUED, 0)
        c.mustEqual(counts.RUNNING, 0)
        c.mustEqual(counts.EXECUTING, 0)
        c.mustEqual(counts.DONE, 2)

        c.checkResultsOrder([[0], [1]])
        done()
      })

      // Submitting after stop() must reject with the enqueue message.
      c.limiter.schedule(() => Promise.resolve(true))
      .catch(function (err) {
        c.mustEqual(err.message, 'Stopped!')
        submitFailed = true
      })

    }, 125)
  })

  it('Should stop and let the queue finish', function (done) {
    c = makeTest({
      maxConcurrent: 1,
      minTime: 100,
      trackDoneStatus: true
    })
    var submitFailed = false
    var dropped = 0

    c.limiter.on('dropped', function () {
      dropped++
    })

    c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1)
    c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2)
    c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3)

    setTimeout(function () {
      var counts = c.limiter.counts()
      c.mustEqual(counts.RECEIVED, 0)
      c.mustEqual(counts.QUEUED, 1)
      c.mustEqual(counts.RUNNING, 1)
      c.mustEqual(counts.EXECUTING, 0)
      c.mustEqual(counts.DONE, 1)

      // With dropWaitingJobs=false, pending jobs run to completion.
      c.limiter.stop({
        enqueueErrorMessage: 'Stopped!',
        dropWaitingJobs: false
      })
      .then(function () {
        counts = c.limiter.counts()
        c.mustEqual(submitFailed, true)
        c.mustEqual(dropped, 0)
        c.mustEqual(counts.RECEIVED, 0)
        c.mustEqual(counts.QUEUED, 0)
        c.mustEqual(counts.RUNNING, 0)
        c.mustEqual(counts.EXECUTING, 0)
        c.mustEqual(counts.DONE, 4)

        c.checkResultsOrder([[1], [2], [3]])
        done()
      })

      c.limiter.schedule(() => Promise.resolve(true))
      .catch(function (err) {
        c.mustEqual(err.message, 'Stopped!')
        submitFailed = true
      })

    }, 75)
  })

  it('Should still resolve when rejectOnDrop is false', function (done) {
    c = makeTest({
      maxConcurrent: 1,
      minTime: 100,
      rejectOnDrop: false
    })

    c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1)
    c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2)
    c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3)

    c.limiter.stop()
    .then(function () {
      return c.limiter.stop()
    })
    .then(function () {
      done(new Error("Should not be here"))
    })
    .catch(function (err) {
      c.mustEqual(err.message, "stop() has already been called")
      done()
    })
  })

  it('Should not allow calling stop() twice when dropWaitingJobs=true', function (done) {
    c = makeTest({
      maxConcurrent: 1,
      minTime: 100
    })
    var failed = 0
    var handler = function (err) {
      c.mustEqual(err.message, "This limiter has been stopped.")
      failed++
    }

    c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1).catch(handler)
    c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2).catch(handler)
    c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3).catch(handler)

    c.limiter.stop({ dropWaitingJobs: true })
    .then(function () {
      return c.limiter.stop({ dropWaitingJobs: true })
    })
    .then(function () {
      done(new Error("Should not be here"))
    })
    .catch(function (err) {
      c.mustEqual(err.message, "stop() has already been called")
      c.mustEqual(failed, 3)
      done()
    })
  })

  it('Should not allow calling stop() twice when dropWaitingJobs=false', function (done) {
    c = makeTest({
      maxConcurrent: 1,
      minTime: 100
    })

    c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1)
    c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2)
    c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3)

    c.limiter.stop({ dropWaitingJobs: false })
    .then(function () {
      return c.limiter.stop({ dropWaitingJobs: false })
    })
    .then(function () {
      done(new Error("Should not be here"))
    })
    .catch(function (err) {
      c.mustEqual(err.message, "stop() has already been called")
      done()
    })
  })

})