├── .editorconfig ├── .gitignore ├── .prettierrc.json ├── .vscode └── settings.json ├── LICENSE ├── README.md ├── deno.jsonc ├── examples ├── deno │ ├── bot.ts │ └── worker.ts └── node │ ├── bot.ts │ └── worker.ts ├── package.json ├── src ├── README.md ├── deps.deno.ts ├── deps.node.ts ├── distribute.ts ├── mod.ts ├── node-shim.ts ├── platform.deno.ts ├── platform.node.ts ├── queue.ts ├── runner.ts ├── sequentialize.ts ├── sink.ts ├── source.ts └── worker.ts ├── test ├── promise-test-helpers.ts ├── queue.test.ts └── sequentialize.test.ts └── tsconfig.json /.editorconfig: -------------------------------------------------------------------------------- 1 | [*] 2 | end_of_line = lf 3 | charset = utf-8 4 | indent_style = space 5 | indent_size = 2 6 | insert_final_newline = true 7 | trim_trailing_whitespace = true 8 | max_line_length = 80 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | 8 | # Runtime data 9 | pids 10 | *.pid 11 | *.seed 12 | *.pid.lock 13 | 14 | # Dependency directories 15 | node_modules/ 16 | 17 | # package-lock.json will not be published, so no need to store it 18 | package-lock.json 19 | 20 | # Output of 'npm pack' 21 | *.tgz 22 | 23 | # Build output 24 | .tsbuildinfo 25 | out/ 26 | 27 | # IDE project files 28 | /.idea/ 29 | -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "trailingComma": "es5", 4 | "tabWidth": 4, 5 | "semi": false, 6 | "arrowParens": "avoid" 7 | } 8 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.formatOnSave": true, 3 | "editor.codeActionsOnSave": { "source.fixAll": true }, 4 | "deno.enable": true, 5 | "deno.config": "./deno.jsonc" 6 | } 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021-2024 KnorpelSenf 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # grammY runner
2 | 3 | --- 4 | 5 | While the core of [grammY](https://github.com/grammyjs/grammY) is extremely efficient, the package does not ship with a built-in mechanism for long polling at scale. 6 | (It does scale well with webhooks, though.) 7 | 8 | The grammY runner solves this by providing you with a sophisticated mechanism that can pull updates concurrently from the Telegram servers, and in turn execute your bot's middleware stack concurrently, all while catching errors and timeouts, and giving you full control over how much load is applied to your server. 9 | 10 | ## Do I Need This? 11 | 12 | Use the grammY runner package if 13 | 14 | - your bot needs to process a lot of updates (more than 1K/hour), or 15 | - your bot performs long-running operations such as large file transfers. 16 | 17 | Do **not** use grammY runner if 18 | 19 | - you are just getting started with grammY, or 20 | - your bot is running on webhooks. 21 | 22 | ## Quickstart 23 | 24 | Here is a quickstart for you, but [the real documentation is here on the website](https://grammy.dev/plugins/runner.html). 25 | The runner package has many more features, and they are documented there. 26 | 27 | ```bash 28 | npm i @grammyjs/runner 29 | ``` 30 | 31 | Import `run` from `@grammyjs/runner`, and replace `bot.start()` with `run(bot)`. It is that simple. Done! 32 | 33 | --- 34 | 35 | Okay okay, here is some example code: 36 | 37 | ```ts 38 | import { Bot } from "grammy"; 39 | import { run } from "@grammyjs/runner"; 40 | 41 | // Create bot 42 | const bot = new Bot(""); 43 | 44 | // Add the usual middleware, yada yada 45 | bot.on("message", (ctx) => ctx.reply("Got your message.")); 46 | 47 | // Run it concurrently! 48 | run(bot); 49 | ``` 50 | 51 | ## Concurrency Is Hard 52 | 53 | grammY runner makes it trivial to have very high update throughput. 54 | However, concurrency is generally very hard to get right, so please read [this section in the docs](https://grammy.dev/advanced/scaling.html#concurrency-is-hard). 55 | 56 | ## Resources 57 | 58 | ### [grammY runner in the grammY documentation](https://grammy.dev/plugins/runner.html) 59 | 60 | —more verbose documentation about concurrency in grammY. 61 | 62 | ### [grammY runner API Reference](https://deno.land/x/grammy_runner/mod.ts) 63 | 64 | —documentation of everything that grammY runner exports. 65 | 66 | ### [grammY Example Bots](https://github.com/grammyjs/examples) 67 | 68 | —repository full of example bots, look out for those that demonstrate how to use grammY runner. 69 | -------------------------------------------------------------------------------- /deno.jsonc: -------------------------------------------------------------------------------- 1 | { 2 | "lock": false, 3 | "tasks": { 4 | "backport": "deno run --no-prompt --allow-read=. --allow-write=.
https://deno.land/x/deno2node@v1.7.1/src/cli.ts tsconfig.json", 5 | "check": "deno cache --check=all src/mod.ts", 6 | "dev": "deno fmt && deno lint && deno task test && deno task check", 7 | "test": "deno test ./test/" 8 | }, 9 | "fmt": { 10 | "indentWidth": 4, 11 | "proseWrap": "preserve", 12 | "exclude": ["./node_modules/", "./out/", "./package-lock.json"] 13 | }, 14 | "lint": { 15 | "exclude": ["./node_modules/", "./out/", "./package-lock.json"] 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /examples/deno/bot.ts: -------------------------------------------------------------------------------- 1 | import { Bot } from "https://deno.land/x/grammy@v1.13.1/mod.ts"; 2 | import { distribute, run } from "../../src/mod.ts"; 3 | 4 | // Create bot 5 | const bot = new Bot(""); 6 | 7 | // Add the usual middleware, yada yada 8 | bot.command("start", (ctx) => ctx.reply("Got your message.")); 9 | bot.use(distribute(new URL("./worker.ts", import.meta.url))); 10 | 11 | // Run it concurrently! 12 | run(bot); 13 | -------------------------------------------------------------------------------- /examples/deno/worker.ts: -------------------------------------------------------------------------------- 1 | import { BotWorker } from "../../src/mod.ts"; 2 | 3 | const bot = new BotWorker(""); 4 | 5 | bot.on("message", (ctx) => ctx.reply("yay!")); 6 | -------------------------------------------------------------------------------- /examples/node/bot.ts: -------------------------------------------------------------------------------- 1 | import { Bot } from "grammy"; 2 | import { distribute, run } from "../../out/mod"; 3 | 4 | // Create bot 5 | const bot = new Bot(""); 6 | 7 | // Add the usual middleware, yada yada 8 | bot.command("start", (ctx) => ctx.reply("Got your message.")); 9 | bot.use(distribute(__dirname + "/worker")); 10 | 11 | // Run it concurrently! 
12 | run(bot); 13 | -------------------------------------------------------------------------------- /examples/node/worker.ts: -------------------------------------------------------------------------------- 1 | import { BotWorker } from "../../out/mod"; 2 | 3 | const bot = new BotWorker(""); 4 | 5 | bot.on("message", (ctx) => ctx.reply("yay!")); 6 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@grammyjs/runner", 3 | "description": "Scale grammY bots that use long polling", 4 | "version": "2.0.3", 5 | "author": "KnorpelSenf", 6 | "license": "MIT", 7 | "engines": { 8 | "node": ">=12.20.0 || >=14.13.1" 9 | }, 10 | "homepage": "https://grammy.dev/plugins/runner.html", 11 | "repository": { 12 | "type": "git", 13 | "url": "https://github.com/grammyjs/runner" 14 | }, 15 | "bugs": { 16 | "url": "https://github.com/grammyjs/runner/issues" 17 | }, 18 | "scripts": { 19 | "prepare": "npm run build", 20 | "build": "deno2node tsconfig.json" 21 | }, 22 | "dependencies": { 23 | "abort-controller": "^3.0.0" 24 | }, 25 | "peerDependencies": { 26 | "grammy": "^1.13.1" 27 | }, 28 | "devDependencies": { 29 | "@types/node": "^20.16.9", 30 | "deno2node": "^1.7.1" 31 | }, 32 | "files": [ 33 | "out/" 34 | ], 35 | "main": "./out/mod.js", 36 | "type": "commonjs", 37 | "exports": { 38 | ".": "./out/mod.js" 39 | }, 40 | "keywords": [ 41 | "grammy", 42 | "telegram", 43 | "bot", 44 | "api", 45 | "long", 46 | "polling" 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /src/README.md: -------------------------------------------------------------------------------- 1 | # grammY runner 2 | 3 | Runs [grammY](https://grammy.dev/) bots concurrently and efficiently. 4 | 5 | Please visit the [main page of the repository](https://github.com/grammyjs/runner). 6 | 7 | Also, be sure to check out [the grammY runner page](https://grammy.dev/plugins/runner.html) in the docs. 
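
If you already have a bot, switching to the runner is a one-line change. A minimal sketch (assuming `bot` is your configured grammY `Bot` instance):

```ts
import { run } from "@grammyjs/runner";

run(bot); // processes updates concurrently, instead of bot.start()
```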
8 | -------------------------------------------------------------------------------- /src/deps.deno.ts: -------------------------------------------------------------------------------- 1 | export { 2 | type Api, 3 | Bot, 4 | type BotConfig, 5 | BotError, 6 | type Context, 7 | } from "https://lib.deno.dev/x/grammy@v1/mod.ts"; 8 | export { 9 | type Update, 10 | type UserFromGetMe, 11 | } from "https://lib.deno.dev/x/grammy@v1/types.ts"; 12 | -------------------------------------------------------------------------------- /src/deps.node.ts: -------------------------------------------------------------------------------- 1 | export { type Api, Bot, type BotConfig, BotError, type Context } from "grammy"; 2 | export { type Update, type UserFromGetMe } from "grammy/types"; 3 | -------------------------------------------------------------------------------- /src/distribute.ts: -------------------------------------------------------------------------------- 1 | import { type Update, type UserFromGetMe } from "./deps.deno.ts"; 2 | import { 3 | createThread, 4 | type ModuleSpecifier, 5 | type Thread, 6 | } from "./platform.deno.ts"; 7 | 8 | class ThreadPool { 9 | public readonly threads: Thread<Update, number>[] = []; 10 | public readonly tasks = new Map<number, () => void>(); 11 | 12 | constructor( 13 | specifier: ModuleSpecifier, 14 | me: UserFromGetMe, 15 | private readonly count = 4, 16 | ) { 17 | for (let i = 0; i < count; i++) { 18 | const thread = createThread<Update, number, UserFromGetMe>( 19 | specifier, 20 | me, 21 | ); 22 | thread.onMessage((update_id) => { 23 | const task = this.tasks.get(update_id); 24 | task?.(); 25 | this.tasks.delete(update_id); 26 | }); 27 | this.threads.push(thread); 28 | } 29 | } 30 | 31 | async process(update: { update_id: number }) { 32 | const i = update.update_id % this.count; 33 | this.threads[i].postMessage(update); 34 | await new Promise<void>((resolve) => { 35 | this.tasks.set(update.update_id, resolve); 36 | }); 37 | } 38 | } 39 | 40 | const workers = new Map<ModuleSpecifier, ThreadPool>(); 41 | function getWorker( 42 | specifier: ModuleSpecifier, 43 | me: UserFromGetMe, 44 | count?: number, 45 | ) { 46 | let worker = workers.get(specifier); 47 | if (worker === undefined) { 48 | worker = new ThreadPool(specifier, me, count); 49 | workers.set(specifier, worker); 50 | } 51 | return worker; 52 | } 53 | 54 | /** 55 | * Creates middleware that distributes updates across cores. 56 | * 57 | * This function should be used in combination with the `BotWorker` class. 58 | * Create an instance of `BotWorker` in a separate file. Let's assume that this 59 | * file is called `worker.ts`. This will define your actual bot logic. 60 | * 61 | * You can now do 62 | * 63 | * ```ts 64 | * const bot = new Bot(""); 65 | * 66 | * // Deno: 67 | * bot.use(distribute(new URL("./worker.ts", import.meta.url))); 68 | * // Node: 69 | * bot.use(distribute(__dirname + "/worker")); 70 | * ``` 71 | * 72 | * in a central place to use the bot worker in `worker.ts` and send updates to 73 | * it. 74 | * 75 | * Under the hood, `distribute` will create several web workers (Deno) or worker 76 | * threads (Node) using `worker.ts`. Updates are distributed among them in a 77 | * round-robin fashion. 78 | * 79 | * You can adjust the number of workers via `count` in an options object which 80 | * is passed as a second argument, i.e. `distribute(specifier, { count: 8 })`. 81 | * By default, 4 workers are created.
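 *
 * For completeness, the corresponding `worker.ts` can be as small as the
 * following sketch (mirroring the example from the `BotWorker` documentation):
 *
 * ```ts
 * // worker.ts
 * const bot = new BotWorker(""); // <-- pass your bot token here (again)
 * bot.on("message", (ctx) => ctx.reply("yay!"));
 * ```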
82 | * 83 | * @param specifier Module specifier to a file which creates a `BotWorker` 84 | * @param options Further options to control the number of workers 85 | */ 86 | export function distribute< 87 | C extends { update: { update_id: number }; me: UserFromGetMe }, 88 | >( 89 | specifier: ModuleSpecifier, 90 | options?: { 91 | /** Number of workers to create */ 92 | count?: number; 93 | }, 94 | ) { 95 | const count = options?.count; 96 | return (ctx: C) => getWorker(specifier, ctx.me, count).process(ctx.update); 97 | } 98 | -------------------------------------------------------------------------------- /src/mod.ts: -------------------------------------------------------------------------------- 1 | export * from "./distribute.ts"; 2 | export * from "./queue.ts"; 3 | export * from "./runner.ts"; 4 | export * from "./sequentialize.ts"; 5 | export * from "./sink.ts"; 6 | export * from "./source.ts"; 7 | export * from "./worker.ts"; 8 | -------------------------------------------------------------------------------- /src/node-shim.ts: -------------------------------------------------------------------------------- 1 | export * from "abort-controller"; 2 | -------------------------------------------------------------------------------- /src/platform.deno.ts: -------------------------------------------------------------------------------- 1 | /// <reference no-default-lib="true" /> 2 | /// <reference lib="deno.worker" /> 3 | 4 | export type ModuleSpecifier = string | URL; 5 | 6 | export interface Thread<I, O> { 7 | onMessage: (callback: (o: O) => void | Promise<void>) => void; 8 | postMessage: (i: I) => void | Promise<void>; 9 | } 10 | 11 | export interface Seed<S> { 12 | seed: Promise<S>; 13 | } 14 | 15 | export function createThread<I, O, S>( 16 | specifier: ModuleSpecifier, 17 | seed: S, 18 | ): Thread<I, O> { 19 | const url = new URL(specifier, import.meta.url); 20 | const worker = new Worker(url.href, { type: "module" }); 21 | worker.postMessage(seed); 22 | return { 23 | onMessage(callback) { 24 | worker.onmessage = ({ data: o }: MessageEvent<O>) => callback(o); 25 | }, 26 | postMessage(i) { 27 | worker.postMessage(i); 28 | }, 29 | }; 30 | } 31 | 32 | export function parentThread<I, O, S>(): Thread<O, I> & Seed<S> { 33 | let resolve: undefined | ((seed: S) => void) = undefined; 34 | return { 35 | seed: new Promise((r) => resolve = r), 36 | onMessage(callback) { 37 | self.onmessage = ({ data }: MessageEvent<S>) => { 38 | resolve?.(data); 39 | self.onmessage = ({ data: i }: MessageEvent<I>) => callback(i); 40 | }; 41 | }, 42 | postMessage(o) { 43 | self.postMessage(o); 44 | }, 45 | }; 46 | } 47 | -------------------------------------------------------------------------------- /src/platform.node.ts: -------------------------------------------------------------------------------- 1 | import { parentPort, Worker, workerData } from "worker_threads"; 2 | 3 | export type ModuleSpecifier = string; 4 | 5 | export interface Thread<I, O> { 6 | onMessage: (callback: (o: O) => void | Promise<void>) => void; 7 | postMessage: (i: I) => void | Promise<void>; 8 | } 9 | 10 | interface Seed<S> { 11 | seed: Promise<S>; 12 | } 13 | 14 | export function createThread<I, O, S>( 15 | specifier: ModuleSpecifier, 16 | seed: S, 17 | ): Thread<I, O> { 18 | const worker = new Worker(specifier, { workerData: seed }); 19 | return { 20 | onMessage(callback) { 21 | worker.on("message", callback); 22 | }, 23 | postMessage(i) { 24 | worker.postMessage(i); 25 | }, 26 | }; 27 | } 28 | 29 | export function parentThread<I, O, S>(): Thread<O, I> & Seed<S> { 30 | return { 31 | seed: Promise.resolve(workerData), 32 | onMessage(callback) { 33 | parentPort?.on("message", callback); 34 | }, 35 | postMessage(o) { 36 |
parentPort?.postMessage(o); 37 | }, 38 | }; 39 | } 40 | -------------------------------------------------------------------------------- /src/queue.ts: -------------------------------------------------------------------------------- 1 | // Maximal valid value that can be passed to `setTimeout` 2 | const MAX_TIMEOUT_VALUE = ~(1 << 31); // equals (2 ** 31 - 1) 3 | /** 4 | * A drift is an element in a doubly linked list, and it stores a task. A task 5 | * is represented as a Promise. Drifts remove themselves from the queue (they 6 | * _decay_) after their task completes or exceeds the timeout. The timeout is 7 | * the maximum time that drifts are allowed to be contained in the queue. 8 | * 9 | * Drifts are appended to the tail of the queue and _drift_ towards the head as 10 | * older elements are removed, hence the name. 11 | * 12 | * The task of every drift is created by a worker function that operates based 13 | * on a source element. A drift keeps a reference to that source element. In 14 | * case of a timeout, the timeout handlers will be supplied with the source 15 | * element because it is interesting to know for which source elements the 16 | * worker function produced a promise that timed out. 17 | * 18 | * A drift also stores the date at which it was added to the queue. 19 | * 20 | * In the context of `grammy`, each middleware invocation corresponds to a task 21 | * in a drift. Drifts are used to manage concurrent middleware execution. 22 | */ 23 | interface Drift<Y> { 24 | /** Previous drift in the queue. `null` iff this drift is the head element. */ 25 | prev: Drift<Y> | null; 26 | /** Next drift in the queue. `null` iff this drift is the tail element. */ 27 | next: Drift<Y> | null; 28 | 29 | /** 30 | * Task of the drift. Contains logic that removes this drift from the queue as 31 | * soon as the task completes by itself (either resolves or rejects). 32 | */ 33 | task: Promise<void>; 34 | 35 | /** 36 | * Timestamp (milliseconds since The Epoch) when this drift was added. This 37 | * may be inspected when starting a new timer that might purge this drift upon 38 | * timeout. 39 | * 40 | * The timestamp will be set to `-1` when the drift is removed from the queue, 41 | * in other words, checking `date > 0` serves as a containment test. 42 | */ 43 | date: number; 44 | /** Reference to the source element that was used to start the task */ 45 | elem: Y; 46 | } 47 | 48 | /** 49 | * A _decaying deque_ is a special kind of doubly linked list that serves as a 50 | * queue for a special kind of nodes, called _drifts_. 51 | * 52 | * A decaying deque has a worker function that spawns a task for each element 53 | * that is added to the queue. This task then gets wrapped into a drift. The 54 | * drifts are then the actual elements (aka. links) in the queue. 55 | * 56 | * In addition, the decaying deque runs a timer that purges old elements from 57 | * the queue. This period of time is determined by the `taskTimeout`. 58 | * 59 | * When a task completes or exceeds its timeout, the corresponding drift is 60 | * removed from the queue. As a result, only drifts with pending tasks are 61 | * contained in the queue at all times. 62 | * 63 | * When a task completes with failure (`reject`s or exceeds the timeout), the 64 | * respective handler (`catchError` or `catchTimeout`) is called.
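 *
 * As an illustration of the constructor parameters documented below, a minimal
 * sketch (the worker function and both handlers are hypothetical) could be:
 *
 * ```ts
 * const queue = new DecayingDeque(
 *     30_000, // taskTimeout: tasks may run for at most 30 seconds
 *     (elem: string) => Promise.resolve(console.log(elem)), // worker
 *     10, // concurrency: `add` resolves once fewer than 10 tasks are pending
 *     (err, elem) => console.error("task for", elem, "failed:", err),
 *     (elem) => console.warn("task for", elem, "timed out"),
 * );
 * ```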
65 | * 66 | * The decaying deque has its name from the observation that new elements are 67 | * appended to the tail, and the old elements are removed at arbitrary positions 68 | * in the queue whenever a task completes, hence, the queue seems to _decay_. 69 | */ 70 | export class DecayingDeque<Y, R = unknown> { 71 | /** 72 | * Number of drifts in the queue. Equivalent to the number of currently 73 | * pending tasks. 74 | */ 75 | private len = 0; 76 | /** Head element (oldest), `null` iff the queue is empty */ 77 | private head: Drift<Y> | null = null; 78 | /** Tail element (newest), `null` iff the queue is empty */ 79 | private tail: Drift<Y> | null = null; 80 | 81 | /** 82 | * Number of currently pending tasks that we strive for (`add` calls will 83 | * resolve only after the number of pending tasks falls below this value.) 84 | * 85 | * In the context of `grammy`, it is possible to `await` calls to `add` to 86 | * determine when to fetch more updates. 87 | */ 88 | public readonly concurrency: number; 89 | /** 90 | * Timer that waits for the head element to time out, will be rescheduled 91 | * whenever the head element changes. It is `undefined` iff the queue is 92 | * empty. 93 | */ 94 | private timer: ReturnType<typeof setTimeout> | undefined; 95 | /** 96 | * List of subscribers that wait for the queue to have capacity again. All 97 | * functions in this array will be called as soon as new capacity is 98 | * available, i.e. the number of pending tasks falls below `concurrency`. 99 | */ 100 | private subscribers: Array<(capacity: number) => void> = []; 101 | private emptySubscribers: Array<() => void> = []; 102 | 103 | /** 104 | * Creates a new decaying deque with the given parameters. 105 | * 106 | * @param taskTimeout Max period of time for a task 107 | * @param worker Task generator 108 | * @param concurrency `add` will return only after the number of pending tasks fell below `concurrency`. `false` means `1`, `true` means `Infinity`, numbers below `1` mean `1` 109 | * @param catchError Error handler, receives the error and the source element 110 | * @param catchTimeout Timeout handler, receives the source element and the promise of the task 111 | */ 112 | constructor( 113 | private readonly taskTimeout: number, 114 | private readonly worker: (t: Y) => Promise<void>, 115 | concurrency: boolean | number, 116 | private readonly catchError: (err: R, elem: Y) => void | Promise<void>, 117 | private readonly catchTimeout: (t: Y, task: Promise<void>) => void, 118 | ) { 119 | if (concurrency === false) this.concurrency = 1; 120 | else if (concurrency === true) this.concurrency = Infinity; 121 | else this.concurrency = concurrency < 1 ? 1 : concurrency; 122 | } 123 | 124 | /** 125 | * Adds the provided elements to the queue and starts tasks for all of them 126 | * immediately. Returns a `Promise` that resolves with `concurrency - length` 127 | * once this value becomes positive.
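 *
 * For example, a caller can use the resolved value to pace itself (a sketch;
 * `queue` and `batch` are placeholders):
 *
 * ```ts
 * const capacity = await queue.add(batch); // waits until there is capacity
 * const nextBatchSize = Math.min(capacity, 100); // e.g. size the next fetch
 * ```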
128 | * @param elems Elements to be added 129 | * @returns `this.capacity()` 130 | */ 131 | add(elems: Y[]): Promise<number> { 132 | const len = elems.length; 133 | this.len += len; 134 | 135 | if (len > 0) { 136 | let i = 0; 137 | const now = Date.now(); 138 | 139 | // emptiness check 140 | if (this.head === null) { 141 | this.head = this.tail = this.toDrift(elems[i++]!, now); 142 | // start timer because head element changed 143 | this.startTimer(); 144 | } 145 | 146 | let prev = this.tail!; 147 | while (i < len) { 148 | // create drift from source element 149 | const node = this.toDrift(elems[i++]!, now); 150 | // link it to previous element (append operation) 151 | prev.next = node; 152 | node.prev = prev; 153 | prev = node; 154 | } 155 | this.tail = prev; 156 | } 157 | 158 | return this.capacity(); 159 | } 160 | 161 | empty(): Promise<void> { 162 | return new Promise((resolve) => { 163 | if (this.len === 0) resolve(); 164 | else this.emptySubscribers.push(resolve); 165 | }); 166 | } 167 | 168 | /** 169 | * Returns a `Promise` that resolves with `concurrency - length` once this 170 | * value becomes positive. Use `await queue.capacity()` to wait until the 171 | * queue has free space again. 172 | * 173 | * @returns `concurrency - length` once positive 174 | */ 175 | capacity(): Promise<number> { 176 | return new Promise((resolve) => { 177 | const capacity = this.concurrency - this.len; 178 | if (capacity > 0) resolve(capacity); 179 | else this.subscribers.push(resolve); 180 | }); 181 | } 182 | 183 | /** 184 | * Called when a node completed its lifecycle and should be removed from the 185 | * queue. Effectively wraps the `remove` call and takes care of the timer. 186 | * 187 | * @param node Drift to decay 188 | */ 189 | private decay(node: Drift<Y>): void { 190 | // We only need to restart the timer if we decay the head element of the 191 | // queue, however, if the next element has the same date as `node`, we can 192 | // skip this step, too. 193 | if (this.head === node && node.date !== node.next?.date) { 194 | // Clear previous timeout 195 | if (this.timer !== undefined) clearTimeout(this.timer); 196 | // Emptiness check (do not start if queue is now empty) 197 | if (node.next === null) this.timer = undefined; 198 | // Reschedule timer for the next node's timeout 199 | else { 200 | this.startTimer(node.next.date + this.taskTimeout - Date.now()); 201 | } 202 | } 203 | this.remove(node); 204 | } 205 | 206 | /** 207 | * Removes an element from the queue. Calls subscribers if there is capacity 208 | * after performing this operation. 209 | * 210 | * @param node Drift to remove 211 | */ 212 | private remove(node: Drift<Y>): void { 213 | // Connecting the links of `prev` and `next` removes `node` 214 | if (this.head === node) this.head = node.next; 215 | else node.prev!.next = node.next; 216 | if (this.tail === node) this.tail = node.prev; 217 | else node.next!.prev = node.prev; 218 | 219 | // Mark this drift as no longer contained 220 | node.date = -1; 221 | 222 | // Notify subscribers if there is capacity by now 223 | const capacity = this.concurrency - --this.len; 224 | if (capacity > 0) { 225 | this.subscribers.forEach((resolve) => resolve(capacity)); 226 | this.subscribers = []; 227 | } 228 | // Notify subscribers if the queue is empty now 229 | if (this.len === 0) { 230 | this.emptySubscribers.forEach((resolve) => resolve()); 231 | this.emptySubscribers = []; 232 | } 233 | } 234 | 235 | /** 236 | * Takes a source element and starts the task for it by calling the worker 237 | * function.
Then wraps this task into a drift. Also makes sure that the drift 238 | * removes itself from the queue once it completes, and that the error handler 239 | * is invoked if it fails (rejects). 240 | * 241 | * @param elem Source element 242 | * @param date Date when this drift is created 243 | * @returns The created drift 244 | */ 245 | private toDrift(elem: Y, date: number): Drift<Y> { 246 | const node: Drift<Y> = { 247 | prev: null, 248 | task: this.worker(elem) 249 | .catch(async (err) => { 250 | // Rethrow iff the drift is no longer contained (timed out) 251 | if (node.date > 0) await this.catchError(err, elem); 252 | else throw err; 253 | }) 254 | .finally(() => { 255 | // Decay the node once the task completes (unless the drift was 256 | // removed due to a timeout before) 257 | if (node.date > 0) this.decay(node); 258 | }), 259 | next: null, 260 | date, 261 | elem, 262 | }; 263 | return node; 264 | } 265 | 266 | /** 267 | * Starts a timer that fires off a timeout after the given period of time. 268 | * 269 | * @param ms Number of milliseconds to wait before the timeout kicks in 270 | */ 271 | private startTimer(ms = this.taskTimeout): void { 272 | this.timer = ms > MAX_TIMEOUT_VALUE 273 | ? undefined 274 | : setTimeout(() => this.timeout(), ms); 275 | } 276 | 277 | /** 278 | * Performs a timeout event. This removes the head element as well as all 279 | * subsequent drifts with the same date (added in the same millisecond). 280 | * 281 | * The timeout handler is called in sequence for every removed drift. 282 | */ 283 | private timeout(): void { 284 | // Rare cases of the event ordering might fire a timeout even though the 285 | // head element has just decayed. 286 | if (this.head === null) return; 287 | while (this.head.date === this.head.next?.date) { 288 | this.catchTimeout(this.head.elem, this.head.task); 289 | // No need to restart timer here, we'll modify head again anyway 290 | this.remove(this.head); 291 | } 292 | this.catchTimeout(this.head.elem, this.head.task); 293 | this.decay(this.head); 294 | } 295 | 296 | /** 297 | * Number of pending tasks in the queue. Equivalent to 298 | * `this.pendingTasks().length` (but much more efficient). 299 | */ 300 | get length() { 301 | return this.len; 302 | } 303 | 304 | /** 305 | * Creates a snapshot of the queue by computing a list of those elements that 306 | * are currently being processed. 307 | */ 308 | pendingTasks(): Y[] { 309 | const len = this.len; 310 | const snapshot: Y[] = Array(len); 311 | let drift = this.head!; 312 | for (let i = 0; i < len; i++) { 313 | snapshot[i] = drift.elem; 314 | drift = drift.next!; 315 | } 316 | return snapshot; 317 | } 318 | } 319 | -------------------------------------------------------------------------------- /src/runner.ts: -------------------------------------------------------------------------------- 1 | import { type Update } from "./deps.deno.ts"; 2 | import { 3 | createConcurrentSink, 4 | type SinkOptions, 5 | type UpdateConsumer, 6 | type UpdateSink, 7 | } from "./sink.ts"; 8 | import { 9 | createSource, 10 | type SourceOptions, 11 | type UpdateSource, 12 | type UpdateSupplier, 13 | } from "./source.ts"; 14 | 15 | /** 16 | * Options to be passed to `run(bot, options)`. Collects the options for the 17 | * underlying update source, runner, and update sink. 18 | */ 19 | export interface RunOptions<Y extends Update = Update> { 20 | /** 21 | * Options that influence the behavior of the update source.
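 *
 * For example, a sketch that trades some speed for less network traffic:
 *
 * ```ts
 * run(bot, { source: { speedTrafficBalance: 0.5 } });
 * ```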
22 | */ 23 | source?: SourceOptions; 24 | /** 25 | * Options that influence the behavior of the runner which connects source and sink. 26 | */ 27 | runner?: RunnerOptions; 28 | /** 29 | * Options that influence the behavior of the sink that processes the updates. 30 | */ 31 | sink?: SinkOptions<Y>; 32 | } 33 | 34 | /** 35 | * Options to be passed to the runner created internally by `run(bot)`. 36 | */ 37 | export interface RunnerOptions { 38 | /** 39 | * Options that can be passed when fetching new updates. All options here are 40 | * simply forwarded to `getUpdates`. The runner itself does not do anything 41 | * with them. 42 | */ 43 | fetch?: FetchOptions; 44 | /** 45 | * When a call to `getUpdates` fails, this option specifies for how many 46 | * milliseconds the runner should keep on retrying the calls. 47 | */ 48 | maxRetryTime?: number; 49 | /** 50 | * Time to wait between retries of calls to `getUpdates`. Can be a number of 51 | * milliseconds to wait. Can be 'exponential' or 'quadratic' for increasing 52 | * backoff starting at 100 milliseconds. 53 | */ 54 | retryInterval?: "exponential" | "quadratic" | number; 55 | /** 56 | * The runner logs all errors from `getUpdates` calls via `console.error`. 57 | * Set this option to `false` to suppress output. 58 | */ 59 | silent?: boolean; 60 | } 61 | 62 | /** 63 | * Options that can be passed to the call to `getUpdates` when the runner 64 | * fetches a new batch of updates. 65 | * 66 | * Corresponds to the options mentioned in 67 | * https://core.telegram.org/bots/api#getupdates but without the parameters that 68 | * the runner controls. 69 | */ 70 | export interface FetchOptions { 71 | /** 72 | * Timeout in seconds for long polling. Defaults to 30. 73 | */ 74 | timeout?: number; 75 | /** 76 | * A list of the update types you want your bot to receive. For example, 77 | * specify `["message", "edited_channel_post", "callback_query"]` to only 78 | * receive updates of these types. See 79 | * [Update](https://core.telegram.org/bots/api#update) for a complete list 80 | * of available update types. Specify an empty list to receive all update 81 | * types except `chat_member` (default). If not specified, the previous 82 | * setting will be used. 83 | */ 84 | allowed_updates?: ReadonlyArray<Exclude<keyof Update, "update_id">>; 85 | } 86 | 87 | /** 88 | * This handle gives you control over a runner. It allows you to stop the bot, 89 | * start it again, and check whether it is running. 90 | */ 91 | export interface RunnerHandle { 92 | /** 93 | * Starts the bot. Note that calling `run` will automatically do this for 94 | * you, so you only have to call `start` if you create a runner yourself 95 | * with `createRunner`. 96 | */ 97 | start: () => void; 98 | /** 99 | * Stops the bot. The bot will no longer fetch updates from Telegram, and it 100 | * will interrupt the currently pending `getUpdates` call. 101 | * 102 | * This method returns a promise that will resolve as soon as all currently 103 | * running middleware is done executing. This means that you can `await 104 | * handle.stop()` to be sure that your bot really stopped completely. 105 | */ 106 | stop: () => Promise<void>; 107 | /** 108 | * Returns the size of the underlying update sink. This number is equal to 109 | * the number of updates that are currently being processed. The size does 110 | * not count updates that have completed, errored, or timed out. 111 | */ 112 | size: () => number; 113 | /** 114 | * Returns a promise that resolves as soon as the runner stops, either by 115 | * being stopped or by crashing.
If the bot crashes, it means that the error 116 | * handlers installed on the bot re-threw the error, in which case the bot 117 | * terminates. A runner handle does not give you access to errors thrown by 118 | * the bot. Returns `undefined` if and only if `isRunning` returns `false`. 119 | */ 120 | task: () => Promise<void> | undefined; 121 | /** 122 | * Determines whether the bot is currently running or not. Note that this 123 | * will return `false` as soon as you call `stop` on the handle, even though 124 | * the promise returned by `stop` may not have resolved yet. 125 | */ 126 | isRunning: () => boolean; 127 | } 128 | 129 | /** 130 | * Adapter interface that specifies a minimal structure a bot has to obey in 131 | * order for `run` to be able to run it. All grammY bots automatically conform 132 | * with this structure. 133 | */ 134 | interface BotAdapter<Y, R> { 135 | init?: () => Promise<void>; 136 | handleUpdate: (update: Y) => Promise<void>; 137 | errorHandler: (error: R) => unknown; 138 | api: { 139 | getUpdates: ( 140 | args: { offset: number; limit: number; timeout: number }, 141 | signal: AbortSignal, 142 | ) => Promise<Y[]>; 143 | }; 144 | } 145 | 146 | /** 147 | * Runs a grammY bot with long polling. Updates are processed concurrently with 148 | * a default maximum concurrency of 500 updates. Calls to `getUpdates` will be 149 | * slowed down and the `limit` parameter will be adjusted as soon as this load 150 | * limit is reached. 151 | * 152 | * You should use this method if your bot processes a lot of updates (several 153 | * thousand per hour), or if your bot has long-running operations such as large 154 | * file transfers. 155 | * 156 | * Confer the grammY [documentation](https://grammy.dev/plugins/runner.html) to 157 | * learn more about how to scale a bot with grammY. 158 | * 159 | * @param bot A grammY bot 160 | * @param options Further configuration options 161 | * @returns A handle to manage your running bot 162 | */ 163 | export function run<Y extends Update = Update, R = unknown>( 164 | bot: BotAdapter<Y, R>, 165 | options: RunOptions<Y> = {}, 166 | ): RunnerHandle { 167 | const { source: sourceOpts, runner: runnerOpts, sink: sinkOpts } = options; 168 | 169 | // create update fetch function 170 | const fetchUpdates = createUpdateFetcher(bot, runnerOpts); 171 | 172 | // create source 173 | const supplier: UpdateSupplier<Y> = { 174 | supply: async function (batchSize, signal) { 175 | if (bot.init !== undefined) await bot.init(); 176 | const updates = await fetchUpdates(batchSize, signal); 177 | supplier.supply = fetchUpdates; 178 | return updates; 179 | }, 180 | }; 181 | const source = createSource(supplier, sourceOpts); 182 | 183 | // create sink 184 | const consumer: UpdateConsumer<Y> = { 185 | consume: (update) => bot.handleUpdate(update), 186 | }; 187 | const sink = createConcurrentSink<Y, R>(consumer, async (error) => { 188 | try { 189 | await bot.errorHandler(error); 190 | } catch (error) { 191 | printError(error); 192 | } 193 | }, sinkOpts); 194 | 195 | // launch 196 | const runner = createRunner(source, sink); 197 | runner.start(); 198 | return runner; 199 | } 200 | 201 | /** 202 | * Takes a grammY bot and returns an update fetcher function for it. The 203 | * returned function has built-in retrying behavior that can be configured. 204 | * After every successful fetching operation, the `offset` parameter is 205 | * correctly incremented. As a result, you can simply invoke the created function 206 | * multiple times in a row, and you will obtain new updates every time.
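 *
 * For instance, a hand-rolled polling loop could look like this sketch (not
 * the runner's actual loop; `bot` is assumed to be a grammY bot):
 *
 * ```ts
 * const fetchUpdates = createUpdateFetcher(bot);
 * const controller = new AbortController();
 * const first = await fetchUpdates(100, controller.signal); // up to 100 updates
 * const next = await fetchUpdates(100, controller.signal); // continues after them
 * ```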
207 | * 208 | * The update fetcher function has a default long polling timeout of 30 seconds. 209 | * Specify the `fetch` option to configure what values to pass to `getUpdates` 210 | * calls. 211 | * 212 | * @param bot A grammY bot 213 | * @param options Further options on how to fetch updates 214 | * @returns A function that can fetch updates with automatic retry behavior 215 | */ 216 | export function createUpdateFetcher<Y extends Update, R>( 217 | bot: BotAdapter<Y, R>, 218 | options: RunnerOptions = {}, 219 | ) { 220 | const { 221 | fetch: fetchOpts, 222 | retryInterval = "exponential", 223 | maxRetryTime = 15 * 60 * 60 * 1000, // 15 hours in milliseconds 224 | silent, 225 | } = options; 226 | const backoff: (t: number) => number = retryInterval === "exponential" 227 | ? (t) => t + t 228 | : retryInterval === "quadratic" 229 | ? (t) => t + 100 230 | : (t) => t; 231 | const initialRetryIn = typeof retryInterval === "number" 232 | ? retryInterval 233 | : 100; 234 | 235 | let offset = 0; 236 | async function fetchUpdates(batchSize: number, signal: AbortSignal) { 237 | const args = { 238 | timeout: 30, 239 | ...fetchOpts, 240 | offset, 241 | limit: Math.max(1, Math.min(batchSize, 100)), // 1 <= batchSize <= 100 242 | }; 243 | 244 | const latestRetry = Date.now() + maxRetryTime; 245 | let retryIn = initialRetryIn; 246 | 247 | let updates: Y[] | undefined; 248 | do { 249 | try { 250 | updates = await bot.api.getUpdates(args, signal); 251 | } catch (error) { 252 | // do not retry if stopped 253 | if (signal.aborted) throw error; 254 | 255 | if (!silent) { 256 | console.error( 257 | "[grammY runner] Error while fetching updates:", 258 | ); 259 | console.error("[grammY runner]", error); 260 | } 261 | 262 | // preventing retries on unrecoverable errors 263 | await throwIfUnrecoverable(error); 264 | 265 | if (Date.now() + retryIn < latestRetry) { 266 | await new Promise((r) => setTimeout(r, retryIn)); 267 | retryIn = backoff(retryIn); 268 | } else { 269 | // do not retry for longer than `maxRetryTime` 270 | throw error; 271 | } 272 | } 273 | } while (updates === undefined); 274 | 275 | const lastId = updates[updates.length - 1]?.update_id; 276 | if (lastId !== undefined) offset = lastId + 1; 277 | return updates; 278 | } 279 | 280 | return fetchUpdates; 281 | } 282 | 283 | /** 284 | * Creates a runner that pulls in updates from the supplied source, and passes 285 | * them to the supplied sink. Returns a handle that lets you control the runner, 286 | * e.g. start it. 287 | * 288 | * @param source The source of updates 289 | * @param sink The sink for updates 290 | * @returns A handle to start and manage your bot 291 | */ 292 | export function createRunner<Y>( 293 | source: UpdateSource<Y>, 294 | sink: UpdateSink<Y>, 295 | ): RunnerHandle { 296 | let running = false; 297 | let task: Promise<void> | undefined; 298 | 299 | async function runner(): Promise<void> { 300 | if (!running) return; 301 | try { 302 | for await (const updates of source.generator()) { 303 | const capacity = await sink.handle(updates); 304 | if (!running) break; 305 | source.setGeneratorPace(capacity); 306 | } 307 | } catch (e) { 308 | // Error is thrown when `stop` is called, so we only rethrow the 309 | // error if the bot was not already stopped intentionally before.
310 | if (running) { 311 | running = false; 312 | task = undefined; 313 | throw e; 314 | } 315 | } 316 | running = false; 317 | task = undefined; 318 | } 319 | 320 | return { 321 | start: () => { 322 | running = true; 323 | task = runner(); 324 | }, 325 | size: () => sink.size(), 326 | stop: () => { 327 | const t = task!; 328 | running = false; 329 | task = undefined; 330 | source.close(); 331 | return t; 332 | }, 333 | task: () => task, 334 | isRunning: () => running && source.isActive(), 335 | }; 336 | } 337 | 338 | async function throwIfUnrecoverable(err: unknown) { 339 | if (typeof err !== "object" || err === null) return; 340 | const code = "error_code" in err ? err.error_code : undefined; 341 | if (code === 401 || code === 409) throw err; // unauthorized or conflict 342 | if (code === 429) { 343 | // too many requests, wait for the number of seconds the server asks for 344 | if ( 345 | "parameters" in err && 346 | typeof err.parameters === "object" && 347 | err.parameters !== null && 348 | "retry_after" in err.parameters && 349 | typeof err.parameters.retry_after === "number" 350 | ) { 351 | const delay = err.parameters.retry_after; 352 | await new Promise((r) => setTimeout(r, 1000 * delay)); 353 | } 354 | } 355 | } 356 | 357 | function printError(error: unknown) { 358 | console.error("::: ERROR ERROR ERROR :::"); 359 | console.error(); 360 | console.error("The error handling of your bot threw"); 361 | console.error("an error itself! Make sure to handle"); 362 | console.error("all errors! Time:", new Date().toISOString()); 363 | console.error(); 364 | console.error("The default error handler rethrows all"); 365 | console.error("errors. Did you maybe forget to set"); 366 | console.error("an error handler with `bot.catch`?"); 367 | console.error(); 368 | console.error("Here is your error object:"); 369 | console.error(error); 370 | } 371 | -------------------------------------------------------------------------------- /src/sequentialize.ts: -------------------------------------------------------------------------------- 1 | interface Clot { 2 | chain: Promise<void>; 3 | len: number; 4 | } 5 | 6 | /** 7 | * Using a runner for grammY allows your bot to run middleware concurrently. 8 | * This has the benefit that multiple messages can be processed concurrently, 9 | * hence making your bot drastically more scalable, but it comes at the cost 10 | * that race conditions may occur because some messages need to be processed in 11 | * order. 12 | * 13 | * The solution to this problem is to make sure that some updates wait for 14 | * others to be done processing before running their middleware. This can be 15 | * achieved with middleware. 16 | * 17 | * This function creates that middleware for you. You can pass in a constraint 18 | * function that determines what updates could clash, and you will be provided 19 | * with middleware that ensures that clashes do not occur. A constraint is 20 | * simply a string that is derived from an update.
21 | * 22 | * As an example, you can use this constraint function to make sure that 23 | * messages inside the same chat are never processed concurrently: 24 | * 25 | * ```ts 26 | * // Correctly order updates with the same chat identifier 27 | * const constraint = (ctx: Context) => String(ctx.chat.id) 28 | * 29 | * bot.use(sequentialize(constraint)) 30 | * ``` 31 | * 32 | * It is possible to return an array of strings if multiple constraints should 33 | * hold, such as "process things inside the same chat in sequence, but also from 34 | * the same user across chats": 35 | * ```ts 36 | * const constraints = (ctx: Context) => [String(ctx.chat.id), String(ctx.from.id)] 37 | * 38 | * bot.use(sequentialize(constraints)) 39 | * ``` 40 | * 41 | * Sequentializing updates is especially important when using session middleware 42 | * in order to prevent write-after-read hazards. In this case, you should 43 | * provide the same function to determine constraints as you use to resolve the 44 | * session key. 45 | * 46 | * @param constraint Function that determines the constraints of an update 47 | * @returns Sequentializing middleware to be installed on the bot 48 | */ 49 | export function sequentialize<C>( 50 | constraint: (ctx: C) => string | string[] | undefined, 51 | ) { 52 | const map = new Map<string, Clot>(); 53 | return async (ctx: C, next: () => void | Promise<void>) => { 54 | const con = constraint(ctx); 55 | const cs = (Array.isArray(con) ? con : [con]).filter( 56 | (cs): cs is string => !!cs, 57 | ); 58 | const clots = cs.map((c) => { 59 | let v = map.get(c); 60 | if (v === undefined) { 61 | v = { chain: Promise.resolve(), len: 0 }; 62 | map.set(c, v); 63 | } 64 | return v; 65 | }); 66 | const allClots = Promise.all(clots.map((p) => p.chain)); 67 | async function run() { 68 | try { 69 | await allClots; 70 | } catch { 71 | // One of the previous middleware rejected. It is also `await`ed 72 | // there, so we simply ignore the error here 73 | } 74 | try { 75 | await next(); 76 | } finally { 77 | cs.forEach((c) => { 78 | const cl = map.get(c); 79 | if (cl !== undefined && --cl.len === 0) map.delete(c); 80 | }); 81 | } 82 | } 83 | const task: Promise<void> = run(); 84 | clots.forEach((pr) => { 85 | pr.len++; 86 | pr.chain = task; 87 | }); 88 | await task; // rethrows error 89 | }; 90 | } 91 | -------------------------------------------------------------------------------- /src/sink.ts: -------------------------------------------------------------------------------- 1 | import { DecayingDeque } from "./queue.ts"; 2 | 3 | /** 4 | * Update consumers are objects that can process an update from the Telegram Bot 5 | * API. When you call `run(bot)`, such an object will be created automatically 6 | * for you. The passed bot will process the updates. 7 | * 8 | * If you want the updates to be consumed by a different sink, for 9 | * instance pushing them to a message queue, you can construct your own update 10 | * sink by passing a custom update consumer. 11 | */ 12 | export interface UpdateConsumer<Y> { 13 | /** 14 | * Consumes an update and processes it. The returned promise should resolve as 15 | * soon as the processing is complete. 16 | */ 17 | consume: (update: Y) => Promise<void>; 18 | } 19 | 20 | /** 21 | * An update sink is an object that acts as the sink for updates for a runner. 22 | * It features a handle function that takes in a batch of updates in the form of 23 | * an array. It returns a promise that should resolve with a positive integral 24 | * number as soon as the sink is ready to handle further updates.
The resolved 25 | * number indicates how many updates the sink is ready to handle next. 26 | * 27 | * Note that callers might not guarantee that this constraint is respected. While 28 | * update sinks can use this mechanism to signal backpressure to the caller, a 29 | * sink should be able to queue up updates internally if the underlying consumer cannot 30 | * handle updates as rapidly as they are being supplied. 31 | */ 32 | export interface UpdateSink<Y> { 33 | /** 34 | * Handles a batch of updates in the form of an array. Resolves with an 35 | * integral number of updates that the update sink can handle, as soon as this 36 | * value is positive. 37 | */ 38 | handle: (updates: Y[]) => Promise<number>; 39 | /** 40 | * Returns the size of the sink. The size is equal to the number of tasks 41 | * that are currently being processed. Calling `size()` is always equal to 42 | * `snapshot().length`. 43 | */ 44 | size: () => number; 45 | /** 46 | * Takes a snapshot of the sink. This synchronously returns all tasks that 47 | * are currently being processed, in the order they were added. 48 | * 49 | * In the context of grammY, this can be useful if the runner must be 50 | * terminated gracefully but shall not wait for the middleware to complete, 51 | * for instance because some middleware performs long-running operations. 52 | * You can then store the updates in order to process them again if desired, 53 | * without losing data. 54 | */ 55 | snapshot: () => Y[]; 56 | } 57 | 58 | /** 59 | * Options for creating an update sink. 60 | */ 61 | export interface SinkOptions<Y> { 62 | /** 63 | * Concurrency limit of the runner. Specifies how many updates should be 64 | * processed in parallel at maximum. 65 | * 66 | * Note that this is done using a best-effort approach. If Telegram ever 67 | * returns more updates than requested (which should not ever happen), this 68 | * limit can be exceeded. 69 | */ 70 | concurrency?: number; 71 | /** 72 | * Timeout options. Consist of a duration in milliseconds and a handler. 73 | */ 74 | timeout?: { 75 | /** 76 | * Maximal number of milliseconds that an update may take to be processed by 77 | * the underlying sink. 78 | */ 79 | milliseconds: number; 80 | /** 81 | * Handler function that will be called with updates whose processing takes 82 | * longer than allowed by `timeout`. The second argument to the handler function 83 | * will be the unresolved promise. Note however that the timeout handler 84 | * itself has to be synchronous. 85 | */ 86 | handler: (update: Y, task: Promise<void>) => void; 87 | }; 88 | } 89 | 90 | /** 91 | * Creates an update sink that handles updates sequentially, i.e. one after 92 | * another. No update will be processed before the previous update has either 93 | * been processed, or its processing has failed and the error has been 94 | * handled. 95 | * 96 | * In the context of grammY, this is also the default behavior of the built-in 97 | * `bot.start` method. Sequential sinks are very predictable and hence are well 98 | * suited for debugging your bot. They do not scale well and should hence not be 99 | * used in a larger bot, or one with long-running middleware.
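 *
 * A minimal sketch of creating such a sink manually (the consumer and error
 * handler shown here are placeholders):
 *
 * ```ts
 * const sink = createSequentialSink(
 *     { consume: (update) => bot.handleUpdate(update) },
 *     async (err) => console.error(err),
 * );
 * ```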
100 | * 101 | * @param handler Update consumer 102 | * @param errorHandler Error handler for when the update consumer rejects 103 | * @param options Further options for creating the sink 104 | * @returns An update sink that handles updates one by one 105 | */ 106 | export function createSequentialSink<Y, R = unknown>( 107 | handler: UpdateConsumer<Y>, 108 | errorHandler: (error: R) => Promise<void>, 109 | options: SinkOptions<Y> = {}, 110 | ): UpdateSink<Y> { 111 | const { 112 | milliseconds: timeout = Infinity, 113 | handler: timeoutHandler = () => {}, 114 | } = options.timeout ?? {}; 115 | const q = new DecayingDeque( 116 | timeout, 117 | handler.consume, 118 | false, 119 | errorHandler, 120 | timeoutHandler, 121 | ); 122 | return { 123 | handle: async (updates) => { 124 | const len = updates.length; 125 | for (let i = 0; i < len; i++) await q.add([updates[i]!]); 126 | return Infinity; 127 | }, 128 | size: () => q.length, 129 | snapshot: () => q.pendingTasks(), 130 | }; 131 | } 132 | 133 | /** 134 | * Creates an update sink that handles updates in batches. In other words, all 135 | * updates of one batch are processed concurrently, but one batch has to be done 136 | * processing before the next batch will be processed. 137 | * 138 | * In the context of grammY, creating a batch sink is rarely useful. If you want 139 | * to process updates concurrently, consider creating a concurrent sink. If you 140 | * want to process updates sequentially, consider using a sequential sink. 141 | * 142 | * This method was mainly added to provide compatibility with older frameworks 143 | * such as `telegraf`. If your bot specifically relies on this behavior, you may 144 | * want to create a batch sink for compatibility reasons. 145 | * 146 | * @param handler Update consumer 147 | * @param errorHandler Error handler for when the update consumer rejects 148 | * @param options Further options for creating the sink 149 | * @returns An update sink that handles updates batch by batch 150 | */ 151 | export function createBatchSink<Y, R = unknown>( 152 | handler: UpdateConsumer<Y>, 153 | errorHandler: (error: R) => Promise<void>, 154 | options: SinkOptions<Y> = {}, 155 | ): UpdateSink<Y> { 156 | const { 157 | milliseconds: timeout = Infinity, 158 | handler: timeoutHandler = () => {}, 159 | } = options.timeout ?? {}; 160 | const q = new DecayingDeque( 161 | timeout, 162 | handler.consume, 163 | false, 164 | errorHandler, 165 | timeoutHandler, 166 | ); 167 | const constInf = () => Infinity; 168 | return { 169 | handle: (updates) => q.add(updates).then(constInf), 170 | size: () => q.length, 171 | snapshot: () => q.pendingTasks(), 172 | }; 173 | } 174 | 175 | /** 176 | * Creates an update sink that handles updates concurrently. In other words, new 177 | * updates will be fetched—and their processing will be started—before the 178 | * processing of older updates completes. The maximal number of concurrently 179 | * handled updates can be limited (default: 500). 180 | * 181 | * In the context of grammY, this is the sink that is created by default when 182 | * calling `run(bot)`.
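 *
 * A sketch with explicit options (the consumer and handlers are placeholders):
 *
 * ```ts
 * const sink = createConcurrentSink(
 *     { consume: (update) => bot.handleUpdate(update) },
 *     async (err) => console.error(err),
 *     {
 *         concurrency: 1000,
 *         timeout: {
 *             milliseconds: 60_000,
 *             handler: (update) => console.warn("slow update", update),
 *         },
 *     },
 * );
 * ```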
183 | * 184 | * @param handler Update consumer 185 | * @param errorHandler Error handler for when the update consumer rejects 186 | * @param options Further options for creating the sink, such as the maximal 187 | * number of updates to process concurrently (`concurrency`) 188 | * @returns An update sink that handles updates concurrently 189 | */ 190 | export function createConcurrentSink<Y, R = unknown>( 191 | handler: UpdateConsumer<Y>, 192 | errorHandler: (error: R) => Promise<void>, 193 | options: SinkOptions<Y> = {}, 194 | ): UpdateSink<Y> { 195 | const { 196 | concurrency = 500, 197 | timeout: { 198 | milliseconds: timeout = Infinity, 199 | handler: timeoutHandler = () => {}, 200 | } = {}, 201 | } = options; 202 | const q = new DecayingDeque( 203 | timeout, 204 | handler.consume, 205 | concurrency, 206 | errorHandler, 207 | timeoutHandler, 208 | ); 209 | return { 210 | handle: (updates) => q.add(updates), 211 | size: () => q.length, 212 | snapshot: () => q.pendingTasks(), 213 | }; 214 | } 215 | -------------------------------------------------------------------------------- /src/source.ts: -------------------------------------------------------------------------------- 1 | const STAT_LEN = 16; 2 | 3 | /** 4 | * Update suppliers are objects that can fetch a number of new updates from the 5 | * Telegram Bot API. When you call `run(bot)`, such an object will be created 6 | * automatically for you. It uses the passed bot to fetch updates. 7 | * 8 | * If you want to poll updates from a different source, such as a message queue, 9 | * you can construct your own update source by passing a custom update supplier. 10 | */ 11 | export interface UpdateSupplier<Y> { 12 | /** 13 | * Requests the next batch of updates of the specified size and returns them 14 | * as an array. The request should respect the given `AbortSignal`. If the 15 | * signal is raised, the currently pending request must be cancelled. 16 | */ 17 | supply: (batchSize: number, signal: AbortSignal) => Promise<Y[]>; 18 | } 19 | 20 | /** 21 | * An update source is an object that acts as the source of updates for a 22 | * runner. It features an async generator of updates that produces batches of 23 | * updates in the form of arrays. 24 | * 25 | * The size of the batches can be adjusted on the fly by setting the generator 26 | * pace. This will prevent the generator from yielding more than the specified 27 | * number of updates. It may yield fewer updates. 28 | * 29 | * Update sources can be closed. If you are currently polling updates from the 30 | * async iterator, closing the update source will raise an abort signal. 31 | * 32 | * If you then want to start pulling updates from the source again, you can 33 | * simply begin iterating over the generator again. 34 | * 35 | * An active flag signals whether the update source is currently active (pulling 36 | * in updates) or whether it has been terminated. 37 | */ 38 | export interface UpdateSource<Y> { 39 | /** 40 | * Returns this source's async generator. 41 | */ 42 | generator(): AsyncGenerator<Y[]>; 43 | /** 44 | * Sets the maximal pace of the generator. This limits how many updates the 45 | * generator will yield. 46 | * 47 | * @param pace A positive integer that sets the maximal generator pace 48 | */ 49 | setGeneratorPace(pace: number): void; 50 | /** 51 | * Returns whether the source is currently active. 52 | */ 53 | isActive(): boolean; 54 | /** 55 | * Closes the source, i.e. interrupts the current request for more updates. 56 | * The source can be re-opened by simply beginning to iterate over the 57 | * generator again.
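 *
 * For example (a sketch):
 *
 * ```ts
 * source.close(); // aborts the pending request for more updates
 * // iterating again re-opens the source:
 * for await (const batch of source.generator()) console.log(batch.length);
 * ```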
58 | */ 59 | close(): void; 60 | } 61 | 62 | /** 63 | * Options controlling how the update source operates. 64 | */ 65 | export interface SourceOptions { 66 | /** 67 | * By default, the runner tries to pull in updates as fast as possible. This 68 | * means that the bot keeps the response times as short as possible. In 69 | * other words, the runner optimizes for high speed. 70 | * 71 | * However, a consequence of this is that the runner fetches many small 72 | * update batches from Telegram. This can increase the network traffic 73 | * substantially. 74 | * 75 | * You can use this option to decide on a scale from `0.0` to `1.0` (both 76 | * inclusive) if the runner should optimize for high speed or for low 77 | * network traffic. Specify `0.0` to fetch updates as fast as possible. 78 | * Specify `1.0` to fetch updates as efficiently as possible. 79 | * 80 | * Defaults to `0.0`. 81 | */ 82 | speedTrafficBalance?: number; 83 | /** 84 | * Defines a hard limit for the duration that the runner waits between calls 85 | * to `getUpdates`. 86 | * 87 | * Note that by default, the runner does not wait at all between these 88 | * calls. This is because if the speed traffic balance is set to `0.0` 89 | * (which is also the default), the next update is always fetched 90 | * immediately after the previous one is received. 91 | * 92 | * Defaults to 500 milliseconds. 93 | */ 94 | maxDelayMilliseconds?: number; 95 | } 96 | 97 | /** 98 | * Creates an update source based on the given update supplier. 99 | * 100 | * @param supplier An update supplier to use for requesting updates 101 | * @returns An update source 102 | */ 103 | export function createSource( 104 | supplier: UpdateSupplier, 105 | options: SourceOptions = {}, 106 | ): UpdateSource { 107 | const { speedTrafficBalance = 0.0, maxDelayMilliseconds = 500 } = options; 108 | 109 | let active = false; 110 | let endWait = () => {}; 111 | let waitHandle: ReturnType | undefined = undefined; 112 | let controller: AbortController; 113 | function deactivate() { 114 | active = false; 115 | clearTimeout(waitHandle); 116 | waitHandle = undefined; 117 | endWait(); 118 | } 119 | let updateGenerator = worker(); 120 | let pace = Infinity; 121 | 122 | const bounded = Math.max(0.0, Math.min(speedTrafficBalance, 1.0)); // [0;1] 123 | const balance = 100 * bounded / Math.max(1, maxDelayMilliseconds); // number of wanted updates per call 124 | // We take two cyclic buffers to store update counts and durations 125 | // for the last STAT_LEN update calls. 126 | const counts = Array(STAT_LEN).fill(100); 127 | const durations = Array(STAT_LEN).fill(1); 128 | // We also keep track of the sum of the values in each buffer 129 | let totalCounts = 100 * STAT_LEN; // sum of counts 130 | let totalDuration = 1 * STAT_LEN; // sum of durations 131 | // Write index for both buffers 132 | let index = 0; 133 | /** Records a pair ms/items and estimates the pause length */ 134 | const record = balance === 0 135 | ? 
135 |         ? () => 0 // do not perform any tracking if the balance is 0.0
136 |         : (newCount: number, newDuration: number) => {
137 |             // save old
138 |             const oldCount = counts[index];
139 |             const oldDuration = durations[index];
140 |             // write to buffer
141 |             counts[index] = newCount;
142 |             durations[index] = newDuration;
143 |             // update sums
144 |             totalCounts += newCount - oldCount;
145 |             totalDuration += newDuration - oldDuration;
146 |             // move index
147 |             index = (index + 1) % STAT_LEN;
148 |             // estimate time to wait, and cap it smoothly at maxDelay
149 |             const estimate = balance * totalDuration / (totalCounts || 1);
150 |             const capped = maxDelayMilliseconds * Math.tanh(estimate);
151 |             return capped;
152 |         };
153 | 
154 |     async function* worker() {
155 |         active = true;
156 |         let last = Date.now();
157 |         do {
158 |             controller = new AbortController();
159 |             controller.signal.addEventListener("abort", deactivate);
160 |             try {
161 |                 const items = await supplier.supply(pace, controller.signal);
162 |                 const now = Date.now();
163 |                 yield items;
164 |                 const wait = record(items.length, now - last);
165 |                 last = Date.now();
166 |                 if (wait > 0 && items.length < 100) {
167 |                     await new Promise<void>((r) => {
168 |                         endWait = r;
169 |                         waitHandle = setTimeout(r, wait);
170 |                     });
171 |                 }
172 |             } catch (e) {
173 |                 close();
174 |                 if (!controller.signal.aborted) throw e;
175 |                 break;
176 |             }
177 |         } while (active);
178 |     }
179 |     function close() {
180 |         deactivate();
181 |         controller.abort();
182 |         updateGenerator = worker();
183 |         pace = Infinity;
184 |     }
185 | 
186 |     return {
187 |         generator: () => updateGenerator,
188 |         setGeneratorPace: (newPace) => pace = newPace,
189 |         isActive: () => active,
190 |         close: () => close(),
191 |     };
192 | }
193 | 
--------------------------------------------------------------------------------
/src/worker.ts:
--------------------------------------------------------------------------------
1 | import {
2 |     type Api,
3 |     Bot,
4 |     type BotConfig,
5 |     BotError,
6 |     type Context,
7 |     type Update,
8 |     type UserFromGetMe,
9 | } from "./deps.deno.ts";
10 | import { parentThread } from "./platform.deno.ts";
11 | 
12 | /**
13 |  * A `BotWorker` instance is like a `Bot` instance in the sense that it can
14 |  * process updates. It is different from `Bot` because it cannot pull in these
15 |  * updates, so it cannot be started or stopped. Instead, it has to receive
16 |  * these updates from a central Bot instance that fetches updates.
17 |  *
18 |  * Create an instance of this class in a separate file.
19 |  *
20 |  * ```ts
21 |  * // worker.ts
22 |  * const bot = new BotWorker(""); // <-- pass your bot token here (again)
23 |  *
24 |  * bot.on("message", (ctx) => ctx.reply("yay!"));
25 |  * ```
26 |  *
27 |  * This is the place where you should define all your bot logic. Install
28 |  * plugins, add handlers, process messages and other updates. Basically, instead
29 |  * of creating a bot, you only create a bot worker.
30 |  *
31 |  * Next, you can define a very minimal central bot instance to pull in updates.
32 |  * You can use this central instance to sequentialize your updates. However, it
33 |  * generally makes sense to put as little logic as possible in it.
34 |  *
35 |  * Install the `distribute` middleware exported from grammY runner to send the
36 |  * updates to your bot workers, as shown in the sketch below.
37 |  *
38 |  * Note that any plugins you install in the central bot instance will not be
39 |  * available inside the bot worker. In fact, you can even use different context
40 |  * types in the central bot instance and in your bot workers.
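 *
 * A central instance might then look roughly like this (the import specifiers
 * and the worker file path are illustrative; compare the bundled examples):
 *
 * ```ts
 * // bot.ts
 * import { Bot } from "grammy";
 * import { distribute, run } from "@grammyjs/runner";
 *
 * const bot = new Bot(""); // <-- pass your bot token here
 * // send every update to one of the bot workers
 * bot.use(distribute(new URL("./worker.ts", import.meta.url)));
 * run(bot);
 * ```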
41 |  */
42 | export class BotWorker<
43 |     C extends Context = Context,
44 |     A extends Api = Api,
45 | > extends Bot<C, A> {
46 |     constructor(
47 |         public readonly token: string,
48 |         config?: BotConfig<C>,
49 |     ) {
50 |         super(token, config);
51 |         const p = parentThread();
52 |         p.seed.then((me: UserFromGetMe) => {
53 |             if (!this.isInited()) {
54 |                 this.botInfo = me;
55 |             }
56 |         });
57 |         p.onMessage(async (update: Update) => {
58 |             try {
59 |                 await this.handleUpdate(update);
60 |             } catch (err) {
61 |                 // should always be true
62 |                 if (err instanceof BotError) {
63 |                     await this.errorHandler(err);
64 |                 } else {
65 |                     console.error(
66 |                         "FATAL: grammY worker unable to handle:",
67 |                         err,
68 |                     );
69 |                     throw err;
70 |                 }
71 |             } finally {
72 |                 p.postMessage(update.update_id);
73 |             }
74 |         });
75 |         this.start = () => {
76 |             throw new Error("Cannot start a bot worker!");
77 |         };
78 |         this.stop = () => {
79 |             throw new Error("Cannot stop a bot worker!");
80 |         };
81 |     }
82 | }
83 | 
--------------------------------------------------------------------------------
/test/promise-test-helpers.ts:
--------------------------------------------------------------------------------
1 | import { deferred } from "https://deno.land/std@0.177.0/async/deferred.ts";
2 | import {
3 |     assert,
4 |     assertEquals,
5 |     assertThrows,
6 | } from "https://deno.land/std@0.177.0/testing/asserts.ts";
7 | 
8 | export interface T {
9 |     pass: () => void;
10 |     fail: () => void;
11 |     sleep: (ms: number) => Promise<void>;
12 |     assert: typeof assert;
13 |     assertThrows: typeof assertThrows;
14 |     assertEquals: typeof assertEquals;
15 | }
16 | 
17 | export function test(fn: (t: T) => void | Promise<void>): () => Promise<void> {
18 |     return async () => {
19 |         const def = deferred<void>();
20 |         const t: T = {
21 |             pass: () => def.resolve(),
22 |             fail: () => def.reject(),
23 |             sleep: (ms) => new Promise((r) => setTimeout(r, ms)),
24 |             assertThrows: assertThrows,
25 |             assert: assert,
26 |             assertEquals: assertEquals,
27 |         };
28 |         try {
29 |             await fn(t);
30 |         } catch (error) {
31 |             def.reject(error);
32 |         }
33 |         await def;
34 |     };
35 | }
36 | 
--------------------------------------------------------------------------------
/test/queue.test.ts:
--------------------------------------------------------------------------------
1 | // deno-lint-ignore-file no-explicit-any
2 | import { DecayingDeque } from "../src/queue.ts";
3 | import { T, test } from "./promise-test-helpers.ts";
4 | 
5 | Deno.test(
6 |     "should allow infinite timeouts",
7 |     test(async (t) => {
8 |         let res = "";
9 |         const q = new DecayingDeque<string, string>(
10 |             Infinity,
11 |             (v) => (res += v, Promise.resolve()),
12 |             false,
13 |             () => t.fail(),
14 |             () => t.fail(),
15 |         );
16 |         await q.add(["a"]);
17 |         t.assertEquals(res, "a");
18 |         t.pass();
19 |     }),
20 | );
21 | 
22 | Deno.test(
23 |     "should process a single update",
24 |     test(async (t) => {
25 |         let res = "";
26 |         const q = new DecayingDeque<string, string>(
27 |             1000,
28 |             (v) => (res += v, Promise.resolve()),
29 |             false,
30 |             () => t.fail(),
31 |             () => t.fail(),
32 |         );
33 |         await q.add(["a"]);
34 |         t.assertEquals(res, "a");
35 |         t.pass();
36 |     }),
37 | );
38 | 
39 | Deno.test(
40 |     "should process two updates in order",
41 |     test(async (t) => {
42 |         let res = "";
43 |         const q = new DecayingDeque<string, string>(
44 |             1000,
45 |             (v) => (res += v, Promise.resolve()),
46 |             false,
47 |             () => t.fail(),
48 |             () => t.fail(),
49 |         );
50 |         await q.add(["a", "b"]);
51 |         t.assertEquals(res, "ab");
52 |         t.pass();
53 |     }),
54 | );
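// A quick orientation for the DecayingDeque constructor as it is used in all
// of these tests: the arguments are, in order, a task timeout in milliseconds,
// a worker that processes one element and returns a promise, a concurrency
// limit (`false` meaning strictly sequential processing), an error handler,
// and a timeout handler. A hypothetical stand-alone sketch (`process` is
// illustrative, not part of queue.ts):
//
//     const q = new DecayingDeque<string, string>(
//         1000,                  // ms until a pending task counts as timed out
//         (v) => process(v),     // worker returning a Promise<void>
//         false,                 // no concurrency
//         (err, elem) => {},     // called when the worker rejects for `elem`
//         (elem) => {},          // called when the task for `elem` times out
//     );
//     await q.add(["a", "b"]);   // resolves once there is free capacity again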
55 | 
56 | Deno.test(
57 |     "should process updates from different calls",
58 |     test(async (t) => {
59 |         let res = "";
60 |         const q = new DecayingDeque<string, string>(
61 |             1000,
62 |             (v) => (res += v, Promise.resolve()),
63 |             false,
64 |             () => t.fail(),
65 |             () => t.fail(),
66 |         );
67 |         await q.add(["a"]);
68 |         await q.add(["b"]);
69 |         t.assertEquals(res, "ab");
70 |         t.pass();
71 |     }),
72 | );
73 | 
74 | Deno.test(
75 |     "should create snapshots",
76 |     test(async (t) => {
77 |         const values = [..."abc"];
78 |         let r: () => void;
79 |         const promise = new Promise<void>((resolve) => (r = resolve));
80 |         const q = new DecayingDeque<string, string>(
81 |             1000,
82 |             () => promise,
83 |             true,
84 |             () => t.fail(),
85 |             () => t.fail(),
86 |         );
87 |         t.assertEquals(q.pendingTasks(), []);
88 |         await q.add(values).then(() => r());
89 |         t.assertEquals(q.pendingTasks(), values);
90 |         await promise;
91 |         t.assertEquals(q.pendingTasks(), []);
92 |         t.pass();
93 |     }),
94 | );
95 | 
96 | Deno.test(
97 |     "should process delayed updates from different calls",
98 |     test(async (t) => {
99 |         let res = "";
100 |         const q = new DecayingDeque<string, string>(
101 |             1000,
102 |             (v) => (res += v, Promise.resolve()),
103 |             false,
104 |             () => t.fail(),
105 |             () => t.fail(),
106 |         );
107 |         await q.add(["a"]);
108 |         setTimeout(async () => {
109 |             await q.add(["b"]);
110 |             t.assertEquals(res, "ab");
111 |             t.pass();
112 |         }, 10);
113 |     }),
114 | );
115 | 
116 | Deno.test(
117 |     "should catch errors",
118 |     test((t) => {
119 |         const q = new DecayingDeque<string, string>(
120 |             1000,
121 |             (v) => Promise.reject(v),
122 |             false,
123 |             (err, elem) => {
124 |                 t.assertEquals(err, "a");
125 |                 t.assertEquals(elem, "a");
126 |                 t.pass();
127 |             },
128 |             () => t.fail(),
129 |         );
130 |         q.add(["a"]);
131 |     }),
132 | );
133 | 
134 | Deno.test(
135 |     "should catch multiple errors",
136 |     test((t) => {
137 |         let res = "";
138 |         const q = new DecayingDeque<string, string>(
139 |             1000,
140 |             (v) => Promise.reject(v),
141 |             false,
142 |             (err, elem) => {
143 |                 if (
144 |                     (err !== "a" && err !== "b") ||
145 |                     (elem !== "a" && elem !== "b")
146 |                 ) {
147 |                     t.fail();
148 |                 }
149 |                 res += err;
150 |                 if (res === "ab") t.pass();
151 |             },
152 |             () => t.fail(),
153 |         );
154 |         q.add(["a", "b"]);
155 |     }),
156 | );
157 | 
158 | Deno.test(
159 |     "should catch timeouts",
160 |     test((t) => {
161 |         const promise = new Promise<void>(() => {});
162 |         const q = new DecayingDeque<string, string>(
163 |             10,
164 |             () => promise,
165 |             false,
166 |             () => t.fail(),
167 |             (e) => {
168 |                 t.assertEquals(e, "a");
169 |                 t.pass();
170 |             },
171 |         );
172 |         q.add(["a"]);
173 |     }),
174 | );
175 | 
176 | Deno.test(
177 |     "should catch multiple timeouts",
178 |     test((t) => {
179 |         const promise = new Promise<void>(() => {});
180 |         let res = "";
181 |         const q = new DecayingDeque<string, string>(
182 |             10,
183 |             () => promise,
184 |             false,
185 |             () => t.fail(),
186 |             (e) => {
187 |                 if (e !== "a" && e !== "b") t.fail();
188 |                 res += e;
189 |                 if (res === "ab") t.pass();
190 |             },
191 |         );
192 |         q.add(["a", "b"]);
193 |     }),
194 | );
195 | 
196 | async function patternTest(t: T, pattern: string, expected = pattern) {
197 |     // `res` collects the results of promises that resolve, reject, or time out,
198 |     // and these events have to happen in the correct order,
199 |     // otherwise `res` will be built up the wrong way from the given update pattern
200 |     let res = "";
201 |     const q = new DecayingDeque<string, string>(
202 |         20,
203 |         (c) => {
204 |             if (c.match(/[a-z]/)) {
205 |                 // value
206 |                 return new Promise<void>((resolve) => {
207 |                     setTimeout(() => {
208 |                         res += c;
209 |                         resolve();
210 |                     });
211 |                 });
212 |             } else if (c.match(/[0-9]/)) {
213 |                 // error
214 |                 return new Promise<void>((_resolve, reject) => {
215 |                     setTimeout(() => {
216 |                         reject(c);
217 |                     });
218 |                 });
219 |             } else {
220 |                 // timeout
221 |                 return new Promise<void>(() => {});
222 |             }
223 |         },
224 |         false,
225 |         (v) => {
226 |             res += v;
227 |         },
228 |         (v) => (res += v),
229 |     );
230 |     await q.add([...pattern]);
231 |     t.assertEquals(res, expected);
232 |     t.pass();
233 | }
234 | 
235 | Deno.test(
236 |     "should handle simple update patterns",
237 |     test((t) => patternTest(t, "a")),
238 | );
239 | Deno.test(
240 |     "should handle long value update patterns",
241 |     test((t) => patternTest(t, "xxxxxxxxxx")),
242 | );
243 | Deno.test(
244 |     "should handle long error update patterns",
245 |     test((t) => patternTest(t, "9999999999")),
246 | );
247 | Deno.test(
248 |     "should handle long timeout update patterns",
249 |     test((t) => patternTest(t, "..........")),
250 | );
251 | Deno.test(
252 |     "should handle combined update patterns",
253 |     test((t) => patternTest(t, "x9.")),
254 | );
255 | Deno.test(
256 |     "should handle mixed update patterns",
257 |     test((t) => patternTest(t, "a9.b,", "a9b.,")),
258 | );
259 | Deno.test(
260 |     "should handle complex update patterns",
261 |     test((t) =>
262 |         patternTest(
263 |             t,
264 |             "jadf.)(r45%4hj2h()$..x)=1kj5kfgg}]3567",
265 |             "jadfr454hj2hx1kj5kfgg3567.)(%()$..)=}]",
266 |         )
267 |     ),
268 | );
269 | 
270 | Deno.test(
271 |     "should return the correct capacity value for a single element",
272 |     test(async (t) => {
273 |         const q = new DecayingDeque<string, string>(
274 |             1000,
275 |             () => Promise.resolve(),
276 |             12,
277 |             () => t.fail(),
278 |             () => t.fail(),
279 |         );
280 |         t.assertEquals(await q.add(["a"]), 11);
281 |         t.pass();
282 |     }),
283 | );
284 | 
285 | Deno.test(
286 |     "should return the correct capacity value for multiple elements",
287 |     test(async (t) => {
288 |         const q = new DecayingDeque<string, string>(
289 |             1000,
290 |             () => Promise.resolve(),
291 |             12,
292 |             () => t.fail(),
293 |             () => t.fail(),
294 |         );
295 |         t.assertEquals(await q.add([..."abcd"]), 8);
296 |         t.pass();
297 |     }),
298 | );
299 | 
300 | Deno.test(
301 |     "should complete the add call as soon as there is capacity again",
302 |     test(async (t) => {
303 |         const q = new DecayingDeque<string, string>(
304 |             1000,
305 |             () => Promise.resolve(),
306 |             3,
307 |             () => t.fail(),
308 |             () => t.fail(),
309 |         );
310 |         t.assertEquals(await q.add([..."abcdef"]), 1);
311 |         t.pass();
312 |     }),
313 | );
314 | 
315 | Deno.test(
316 |     "should decelerate add calls",
317 |     test(async (t) => {
318 |         const updates = new Array(1000).fill("x");
319 |         const q = new DecayingDeque<string, string>(
320 |             20,
321 |             () => new Promise<void>((resolve) => setTimeout(() => resolve())),
322 |             1000,
323 |             () => t.fail(),
324 |             () => t.fail(),
325 |         );
326 |         await updates.reduce(
327 |             (p, v) =>
328 |                 p.then(() =>
329 |                     q.add([v]).then((c) => {
330 |                         // we add a new element as soon as the previous `add` call resolves, and
331 |                         // we expect that this only happens as soon as there is capacity,
332 |                         // so we check that the capacity never falls below 1
333 |                         if (c < 1) t.fail();
334 |                     })
335 |                 ),
336 |             Promise.resolve(),
337 |         );
338 |         t.pass();
339 |     }),
340 | );
341 | 
342 | Deno.test(
343 |     "should resolve tasks after timing out",
344 |     test((t) => {
345 |         let r: any;
346 |         const q = new DecayingDeque<string, string>(
347 |             10,
348 |             () => new Promise<any>((resolve) => (r = resolve)),
349 |             false,
350 |             () => t.fail(),
351 |             (i, p) => {
352 |                 p.then((o: any) => {
353 |                     t.assertEquals(i, o);
354 |                     t.pass();
355 |                 });
356 |                 r(i);
357 |             },
358 |         );
359 |         q.add(["a"]);
360 |     }),
361 | );
362 | 
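// The two timeout tests around this point depend on the timeout handler
// receiving both the timed-out element and its still-pending task promise,
// so results that arrive after the timeout can still be observed. A
// hypothetical sketch of that contract (`slowWork` and the handler bodies
// are illustrative, not part of queue.ts):
//
//     const q = new DecayingDeque<string, string>(
//         10,                    // tasks time out after 10 ms
//         (elem) => slowWork(elem),
//         false,
//         (err, elem) => {},     // rejection that happened before the timeout
//         (elem, task) => {
//             // `task` still settles even though `elem` already timed out
//             task.then(console.log, console.error);
//         },
//     );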
363 | Deno.test(
364 |     "should rethrow errors for tasks that already timed out",
365 |     test((t) => {
366 |         let r: any;
367 |         const q = new DecayingDeque<string, string>(
368 |             10,
369 |             () => new Promise<any>((_resolve, reject) => (r = reject)),
370 |             false,
371 |             () => t.fail(),
372 |             (i, p) => {
373 |                 p.catch((o) => {
374 |                     t.assertEquals(i, o);
375 |                     t.pass();
376 |                 });
377 |                 r(i);
378 |             },
379 |         );
380 |         q.add(["a"]);
381 |     }),
382 | );
383 | 
384 | Deno.test(
385 |     "should handle concurrent add calls",
386 |     test((t) => {
387 |         const r: Array<(value: void | PromiseLike<void>) => void> = [];
388 |         const q = new DecayingDeque<string, string>(
389 |             1000,
390 |             () => new Promise<void>((resolve) => r.push(resolve)),
391 |             3,
392 |             () => t.fail(),
393 |             () => t.fail(),
394 |         );
395 |         let count = 0;
396 |         q.add([..."aaaaa"]).then(() => ++count);
397 |         q.add([..."bbbbb"]).then(() => ++count);
398 |         q.add([..."ccccc"]).then(() => ++count);
399 |         q.add([..."ddddd"]).then(() => ++count);
400 |         q.add([..."eeeee"]).then(() => {
401 |             t.assertEquals(++count, 5);
402 |             t.pass();
403 |         });
404 |         r.forEach((f) => f());
405 |     }),
406 | );
407 | 
408 | Deno.test(
409 |     "should purge many nodes after the same timeout",
410 |     test((t) => {
411 |         let count = 0;
412 |         const updates = "0123456789".repeat(10);
413 |         const q = new DecayingDeque<string, string>(
414 |             5,
415 |             () => new Promise<void>(() => {}),
416 |             false,
417 |             () => t.fail(),
418 |             () => count++,
419 |         );
420 |         q.add([...updates]);
421 |         setTimeout(() => {
422 |             t.assertEquals(count, updates.length);
423 |             t.assertEquals(q.length, 0);
424 |             t.pass();
425 |         }, 20);
426 |     }),
427 | );
428 | 
--------------------------------------------------------------------------------
/test/sequentialize.test.ts:
--------------------------------------------------------------------------------
1 | import { sequentialize } from "../src/sequentialize.ts";
2 | import { T, test } from "./promise-test-helpers.ts";
3 | 
4 | const seq = () => sequentialize((s) => s);
5 | 
6 | Deno.test(
7 |     "should call the middleware",
8 |     test(async (t) => {
9 |         const s = seq();
10 |         await s(["p"], () => t.pass());
11 |     }),
12 | );
13 | 
14 | Deno.test(
15 |     "should handle failing middleware",
16 |     test(async (t) => {
17 |         const s = seq();
18 |         await s(["p"], () => {
19 |             throw new Error("0");
20 |         }).catch((err) => {
21 |             t.assertEquals(err.message, "0");
22 |             t.pass();
23 |         });
24 |     }),
25 | );
26 | 
27 | Deno.test(
28 |     "should handle failing chained middleware",
29 |     test(async (t: T) => {
30 |         const s = seq();
31 |         let qx = false;
32 |         let err: Error | undefined;
33 |         const p = s(["p"], async () => {
34 |             await t.sleep(10);
35 |             throw (err = new Error("0"));
36 |         });
37 |         const q = s(["p"], async () => {
38 |             await t.sleep(10);
39 |             qx = true;
40 |         });
41 |         try {
42 |             await p;
43 |             t.fail();
44 |         } catch (e) {
45 |             t.assertEquals(err, e);
46 |         }
47 |         await q;
48 |         t.assert(qx);
49 |         t.pass();
50 |     }),
51 | );
52 | 
53 | Deno.test(
54 |     "should handle multiple errors",
55 |     test(async (t: T) => {
56 |         const s = seq();
57 | 
58 |         const pxs: boolean[] = [];
59 |         function makeResolve(...cs: string[]) {
60 |             const index = pxs.length;
61 |             pxs.push(false);
62 |             return s(cs, async () => {
63 |                 await t.sleep(10);
64 |                 pxs[index] = true;
65 |             });
66 |         }
67 | 
68 |         const errs: Error[] = [];
69 |         function makeReject(...cs: string[]) {
70 |             return s(cs, async () => {
71 |                 await t.sleep(10);
72 |                 const err = new Error(cs.join(" & "));
73 |                 errs.push(err);
74 |                 throw err;
75 |             });
76 |         }
77 | 
78 |         const ps: Promise<void>[] = []; // resolving promises
79 |         const pes: Promise<void>[] =
[]; // rejecting promises 80 | 81 | ps.push(makeResolve("p", "x")); 82 | ps.push(makeResolve("p", "y")); 83 | pes.push(makeReject("p", "x")); 84 | pes.push(makeReject("p", "y")); 85 | ps.push(makeResolve("p", "x")); 86 | ps.push(makeResolve("p", "y")); 87 | pes.push(makeReject("p", "x")); 88 | pes.push(makeReject("p", "y")); 89 | ps.push(makeResolve("p", "x")); 90 | ps.push(makeResolve("p", "y")); 91 | pes.push(makeReject("p", "x")); 92 | pes.push(makeReject("p", "y")); 93 | 94 | let errCount = 0; 95 | const caughtPes = pes.map((p) => 96 | p.catch((err) => (errCount++, t.assert(errs.includes(err)))) 97 | ); 98 | await Promise.all(ps.concat(caughtPes)); 99 | t.assert(pxs.every(Boolean)); 100 | t.assertEquals(errCount, 6); 101 | t.pass(); 102 | }), 103 | ); 104 | 105 | Deno.test({ 106 | name: "should allow unrelated updates to be executed concurrently", 107 | fn: test(async (t: T) => { 108 | const s = seq(); 109 | let px = false; 110 | let qx = false; 111 | const p = s(["p"], async () => { 112 | await t.sleep(50); 113 | px = true; 114 | }); 115 | const q = s(["q"], async () => { 116 | await t.sleep(50); 117 | qx = true; 118 | }); 119 | await t.sleep(75); 120 | t.assert(px); 121 | t.assert(qx); 122 | await Promise.all([p, q]); 123 | t.pass(); 124 | }), 125 | }); 126 | 127 | Deno.test( 128 | "should slow down updates", 129 | test(async (t: T) => { 130 | const s = seq(); 131 | let px = false; 132 | let qx = false; 133 | const p = s(["p"], async () => { 134 | await t.sleep(50); 135 | px = true; 136 | }); 137 | const q = s(["p"], async () => { 138 | await t.sleep(50); 139 | qx = true; 140 | }); 141 | await t.sleep(75); 142 | t.assert(px); 143 | t.assert(!qx); 144 | await Promise.all([p, q]); 145 | t.pass(); 146 | }), 147 | ); 148 | 149 | Deno.test( 150 | "should work with several constraints", 151 | test(async (t: T) => { 152 | const s = seq(); 153 | let px = false; 154 | let qx = false; 155 | const p = s(["a", "b", "c", "d"], async () => { 156 | await t.sleep(50); 157 | px = true; 158 | }); 159 | const q = s(["a", "b", "c", "d"], async () => { 160 | await t.sleep(50); 161 | qx = true; 162 | }); 163 | await t.sleep(75); 164 | t.assert(px); 165 | t.assert(!qx); 166 | await Promise.all([p, q]); 167 | t.pass(); 168 | }), 169 | ); 170 | 171 | Deno.test( 172 | "should work with several partially overlapping constraints", 173 | test(async (t: T) => { 174 | const s = seq(); 175 | let px = false; 176 | let qx = false; 177 | const p = s(["a", "b", "c", "d"], async () => { 178 | await t.sleep(50); 179 | px = true; 180 | }); 181 | const q = s(["c", "e"], async () => { 182 | await t.sleep(50); 183 | qx = true; 184 | }); 185 | await t.sleep(75); 186 | t.assert(px); 187 | t.assert(!qx); 188 | await Promise.all([p, q]); 189 | t.pass(); 190 | }), 191 | ); 192 | 193 | Deno.test( 194 | "should respect old values", 195 | test(async (t: T) => { 196 | const s = seq(); 197 | let px = false; 198 | let qx = false; 199 | let rx = false; 200 | const p = s(["p"], async () => { 201 | await t.sleep(50); 202 | px = true; 203 | }); 204 | const q = s(["q"], async () => { 205 | await t.sleep(50); 206 | qx = true; 207 | }); 208 | await t.sleep(10); 209 | const r = s(["p"], async () => { 210 | await t.sleep(50); 211 | rx = true; 212 | }); 213 | await t.sleep(75); 214 | t.assert(px); 215 | t.assert(qx); 216 | t.assert(!rx); 217 | await Promise.all([p, q, r]); 218 | t.pass(); 219 | }), 220 | ); 221 | 222 | Deno.test( 223 | "should work with different previous dependencies", 224 | test(async (t: T) => { 225 | const s = seq(); 226 | 
let px = false; 227 | let qx = false; 228 | let rx = false; 229 | const p = s(["p"], async () => { 230 | await t.sleep(50); 231 | px = true; 232 | }); 233 | const q = s(["p", "q"], async () => { 234 | await t.sleep(50); 235 | qx = true; 236 | }); 237 | const r = s(["p", "q", "r"], async () => { 238 | await t.sleep(50); 239 | rx = true; 240 | }); 241 | await t.sleep(75); 242 | t.assert(px); 243 | t.assert(!qx); 244 | t.assert(!rx); 245 | await t.sleep(50); 246 | t.assert(px); 247 | t.assert(qx); 248 | t.assert(!rx); 249 | await t.sleep(50); 250 | t.assert(px); 251 | t.assert(qx); 252 | t.assert(rx); 253 | await Promise.all([p, q, r]); 254 | t.pass(); 255 | }), 256 | ); 257 | 258 | Deno.test( 259 | "should pass the waterfall test", 260 | test(async (t: T) => { 261 | const s = seq(); 262 | let ax = false; 263 | let bx = false; 264 | let cx = false; 265 | let dx = false; 266 | let ex = false; 267 | let fx = false; 268 | const a = s(["a"], async () => { 269 | await t.sleep(50); 270 | ax = true; 271 | }); 272 | const b = s(["a", "b"], async () => { 273 | await t.sleep(50); 274 | bx = true; 275 | }); 276 | const c = s(["b", "c"], async () => { 277 | await t.sleep(50); 278 | cx = true; 279 | }); 280 | const d = s(["c", "d", "a", "b"], async () => { 281 | await t.sleep(50); 282 | dx = true; 283 | }); 284 | const e = s(["d", "e"], async () => { 285 | await t.sleep(50); 286 | ex = true; 287 | }); 288 | const f = s(["e", "f", "c"], async () => { 289 | await t.sleep(50); 290 | fx = true; 291 | }); 292 | await t.sleep(75); 293 | t.assert(ax); 294 | t.assert(!bx); 295 | t.assert(!cx); 296 | t.assert(!dx); 297 | t.assert(!ex); 298 | t.assert(!fx); 299 | await t.sleep(50); 300 | t.assert(ax); 301 | t.assert(bx); 302 | t.assert(!cx); 303 | t.assert(!dx); 304 | t.assert(!ex); 305 | t.assert(!fx); 306 | await t.sleep(50); 307 | t.assert(ax); 308 | t.assert(bx); 309 | t.assert(cx); 310 | t.assert(!dx); 311 | t.assert(!ex); 312 | t.assert(!fx); 313 | await t.sleep(50); 314 | t.assert(ax); 315 | t.assert(bx); 316 | t.assert(cx); 317 | t.assert(dx); 318 | t.assert(!ex); 319 | t.assert(!fx); 320 | await t.sleep(50); 321 | t.assert(ax); 322 | t.assert(bx); 323 | t.assert(cx); 324 | t.assert(dx); 325 | t.assert(ex); 326 | t.assert(!fx); 327 | await t.sleep(50); 328 | t.assert(ax); 329 | t.assert(bx); 330 | t.assert(cx); 331 | t.assert(dx); 332 | t.assert(ex); 333 | t.assert(fx); 334 | await Promise.all([a, b, c, d, e, f]); 335 | t.pass(); 336 | }), 337 | ); 338 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "forceConsistentCasingInFileNames": true, 4 | "newLine": "lf", 5 | "noFallthroughCasesInSwitch": true, 6 | "noImplicitReturns": true, 7 | "noUnusedParameters": true, 8 | "rootDir": "./src/", 9 | "strict": true, 10 | "declaration": true, 11 | "moduleResolution": "node", 12 | "module": "commonjs", 13 | "outDir": "./out/", 14 | "skipLibCheck": true, 15 | "target": "es2019" 16 | }, 17 | "include": ["./src/"], 18 | "deno2node": { 19 | "shim": "./src/node-shim.ts" 20 | } 21 | } 22 | --------------------------------------------------------------------------------