├── .editorconfig
├── .gitattributes
├── .gitignore
├── LICENSE
├── README.md
├── package.json
├── src
│   ├── index.ts
│   └── query.ts
└── tsconfig.json

/.editorconfig:
--------------------------------------------------------------------------------
root = true

[*]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true

[*.{mts,ts,js,mjs}]
indent_style = space
indent_size = 2

[*.css]
indent_style = space
indent_size = 2

[*.{json,yml,yaml}]
indent_style = space
indent_size = 2

[*.md]
indent_style = space
indent_size = 2
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
* text=auto

*.sh text eol=lf
*.js text eol=lf
*.mjs text eol=lf
*.ts text eol=lf
*.mts text eol=lf
*.css text eol=lf
*.json text eol=lf
*.html text eol=lf
*.md text eol=lf
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
dist
node_modules
coverage

*.tsbuildinfo
npm-debug.log

.DS_Store
.idea
.vscode

pnpm-lock.yaml
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
The MIT License (MIT)

Copyright (c) 2016-2023 Boris Kaul.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# [ndx](https://github.com/ndx-search/ndx) · [![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/ndx-search/ndx/blob/master/LICENSE)

Lightweight Full-Text Indexing and Searching Library.

This library was designed for a specific use case: all documents are stored
on disk (IndexedDB) and can be dynamically added to or removed from an index.

The query function supports only disjunction operators. A query like `one two`
works as `"one" or "two"`.
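For example, with the index built in the Example section below:

```js
index.search("lorem dolor");
// matches documents that contain "lorem" OR "dolor"
```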
The inverted index doesn't store term locations, so the query function won't
be able to search for phrases like `"Super Mario"`.

There are many
[alternative solutions](https://github.com/leeoniya/uFuzzy#benchmark) with
different tradeoffs that may better suit your particular use case. For a
simple document search over a static dataset, I would recommend something
like [fst](https://github.com/BurntSushi/fst) deployed as an edge function
(wasm).

## Features

- Full-text indexing and searching over multiple fields.
- Per-field score boosting.
- [BM25](https://en.wikipedia.org/wiki/Okapi_BM25) ranking function to rank
  matching documents.
- [Trie](https://en.wikipedia.org/wiki/Trie) based dynamic
  [Inverted Index](https://en.wikipedia.org/wiki/Inverted_index).
- Configurable tokenizer and term filter.
- Free text queries with query expansion.

## Example

```js
import { createIndex, indexAdd, indexRemove, indexVacuum } from "ndx";
import { indexQuery } from "ndx/query";

const termFilter = (term) => term.toLowerCase();

function createDocumentIndex(fields) {
  // `createIndex()` creates an index data structure.
  // The first argument specifies how many different fields we want to index.
  const index = createIndex(
    fields.length,
    // Tokenizer is a function that breaks text into words, phrases, symbols,
    // or other meaningful elements called tokens.
    (s) => s.split(" "),
    // Filter is a function that processes tokens and returns terms. Terms
    // are used in the inverted index to index documents.
    termFilter,
  );
  // `fieldGetters` is an array of functions that will be used to retrieve
  // data from different fields.
  const fieldGetters = fields.map((f) => (doc) => doc[f.name]);
  // `fieldBoostFactors` is an array of boost factors for each field. In this
  // example all fields have identical weight.
  const fieldBoostFactors = fields.map(() => 1);

  return {
    index,
    // `add()` adds documents to the index.
    add(doc) {
      indexAdd(
        index,
        fieldGetters,
        // Document key. It can be a unique document id or a reference to a
        // document if you want to store all documents in memory.
        doc.id,
        // Document.
        doc,
      );
    },
    // `remove()` removes documents from the index.
    remove(id) {
      // When a document is removed, it is only marked as removed. The index
      // data structure still contains references to the removed document.
      indexRemove(index, id);
      if (index.removed > 10) {
        // `indexVacuum()` removes all references to removed documents from
        // the index.
        indexVacuum(index);
      }
    },

    // `search()` performs search queries.
    search(q) {
      return indexQuery(
        index,
        fieldBoostFactors,
        // BM25 ranking function constants:
        // BM25 k1 constant, controls non-linear term frequency normalization
        // (saturation).
        1.2,
        // BM25 b constant, controls to what degree document length
        // normalizes tf values.
        0.75,
        q,
      );
    }
  };
}

// Create a document index that will index the `content` field.
const index = createDocumentIndex([{ name: "content" }]);

const docs = [
  {
    "id": "1",
    "content": "Lorem ipsum dolor",
  },
  {
    "id": "2",
    "content": "Lorem ipsum",
  }
];

// Add documents to the index.
docs.forEach((d) => { index.add(d); });

// Perform a search query.
index.search("Lorem");
// => [{ key: "2", score: ... }, { key: "1", score: ... }]
//
// The document with id `"2"` is ranked higher because its `"content"` field
// contains fewer terms than the `"content"` field of document `"1"`.

index.search("dolor");
// => [{ key: "1", score: ... }]
```
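Removing documents is performed through the `remove()` method defined in the
factory above. A short continuation of the example (the `> 10` vacuum
threshold is an arbitrary choice for illustration):

```js
// Mark document "1" as removed.
index.remove("1");

// Removed documents are only marked as removed; once more than 10 documents
// have been marked, `remove()` calls `indexVacuum()` to prune them from the
// inverted index.
index.search("dolor");
// => []
```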
### Tokenizers and Filters

The `ndx` library doesn't provide any tokenizers or filters. There are other
libraries that implement tokenizers; for example,
[Natural](https://github.com/NaturalNode/natural/) has a good collection of
tokenizers and stemmers.
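As a minimal sketch of plugging Natural into `createIndex()` — this assumes
Natural's `WordTokenizer` and `PorterStemmer` APIs and is illustrative rather
than a tested integration:

```js
import natural from "natural";
import { createIndex } from "ndx";

const wordTokenizer = new natural.WordTokenizer();

const index = createIndex(
  1,
  // Break text into word tokens.
  (s) => wordTokenizer.tokenize(s),
  // Lowercase and stem each token to produce index terms.
  (term) => natural.PorterStemmer.stem(term.toLowerCase()),
);
```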
## License

[MIT](http://opensource.org/licenses/MIT)
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "ndx",
  "version": "2.0.1",
  "type": "module",
  "sideEffects": false,
  "exports": {
    ".": "./dist/index.js",
    "./query": "./dist/query.js"
  },
  "description": "Lightweight Full-Text Indexing and Searching Library.",
  "scripts": {
    "prepublishOnly": "npm run clean && npm run dist",
    "dist": "tsc -b --pretty",
    "clean": "tsc -b --clean --pretty",
    "ts:watch": "tsc -b -w --pretty",
    "ts:force": "tsc -b --force --pretty",
    "test": "node --enable-source-maps --test",
    "test:only": "node --enable-source-maps --test-only --test"
  },
  "files": [
    "dist",
    "src",
    "!dist/**/*.tsbuildinfo",
    "!src/**/__tests__",
    "README.md",
    "LICENSE"
  ],
  "devDependencies": {
    "typescript": "^4.9.5"
  },
  "license": "MIT",
  "keywords": [
    "text",
    "full-text",
    "index",
    "inverted-index",
    "search",
    "relevance"
  ],
  "author": "Boris Kaul (https://github.com/localvoid)",
  "homepage": "https://github.com/localvoid/ndx",
  "bugs": "https://github.com/localvoid/ndx/issues",
  "repository": "github:localvoid/ndx"
}
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
/**
 * Index data structure.
 *
 * @typeparam T Document key.
 */
export interface DocumentIndex<T> {
  /** Additional information about documents. */
  readonly docs: Map<T, DocumentDetails<T>>;
  /** Inverted index root node. */
  readonly root: InvertedIndexNode<T>;
  /** Sum of field lengths in all documents. */
  readonly fSum: Float64Array;
  /** Average of field lengths in all documents. */
  readonly fAvg: Float64Array;
  /** Number of removed documents. */
  removed: number;
  /**
   * Tokenizer is a function that breaks text into words, phrases, symbols, or
   * other meaningful elements called tokens.
   */
  tokenizer(s: string): string[];
  /**
   * Filter is a function that processes tokens and returns terms, terms are
   * used in Inverted Index to index documents.
   */
  filter(s: string): string;
}

/**
 * Inverted Index Node.
 *
 * Inverted index is implemented with a
 * [trie](https://en.wikipedia.org/wiki/Trie) data structure.
 *
 * @typeparam T Document key.
 */
export interface InvertedIndexNode<T> {
  /** Char code key. */
  k: number;
  /** Children nodes. */
  c: InvertedIndexNode<T>[] | null;
  /** Documents associated with this node. */
  d: DocumentPointer<T>[] | null;
}

/**
 * Document Details object stores additional information about documents.
 *
 * @typeparam T Document key.
 */
export interface DocumentDetails<T> {
  /**
   * Document key. It can be a simple unique ID or a direct reference to an
   * original document.
   */
  readonly key: T;
  /**
   * Field count is an array that contains the number of terms in each indexed
   * text field.
   */
  readonly fCount: Int32Array;
  /**
   * Removed flag.
   */
  removed: boolean;
}

/**
 * Document pointer contains information about term frequency for a document.
 *
 * @typeparam T Document key.
 */
export interface DocumentPointer<T> {
  /**
   * Reference to a {@link DocumentDetails} object that is used for this
   * document.
   */
  readonly details: DocumentDetails<T>;
  /**
   * Term frequency in each field.
   */
  readonly tf: Int32Array;
}

const _Int32Array = Int32Array;
const _Float64Array = Float64Array;
const _Map = Map;

const SEARCH_CONTEXT = Object.seal({
  found: false,
  i: 0,
});

const findByCharCode = <T>(
  array: InvertedIndexNode<T>[],
  charCode: number,
) => {
  const ctx = SEARCH_CONTEXT;
  let low = 0;
  let high = array.length;

  while (low < high) {
    const mid = (low + high) >> 1;
    const c = array[mid].k - charCode;
    if (c < 0) {
      low = mid + 1;
    } else if (c > 0) {
      high = mid;
    } else {
      ctx.found = true;
      ctx.i = mid;
      return;
    }
  }
  ctx.found = false;
  ctx.i = low;
};

/**
 * Creates an Index.
 *
 * @typeparam T Document key.
 * @param fieldsNum Number of fields.
 * @param tokenizer Tokenizer.
 * @param filter Term filter.
 * @returns {@link DocumentIndex}
 */
export const createIndex = <T>(
  fieldsNum: number,
  tokenizer: (s: string) => string[],
  filter: (s: string) => string,
): DocumentIndex<T> => ({
  docs: new _Map(),
  root: createInvertedIndexNode<T>(0),
  fSum: new _Float64Array(fieldsNum),
  fAvg: new _Float64Array(fieldsNum),
  removed: 0,
  tokenizer,
  filter,
});


/**
 * Creates an inverted index node.
 *
 * @typeparam T Document key.
 * @param k Char code.
 * @returns {@link InvertedIndexNode} instance.
 */
const createInvertedIndexNode = <T>(k: number): InvertedIndexNode<T> => ({
  k,
  c: null,
  d: null,
});

/**
 * Finds the inverted index node that matches the `term`.
 *
 * @typeparam T Document key.
 * @param node Root node.
 * @param term Term.
 * @returns Inverted index node that contains `term` or an `undefined` value.
 */
export const findInvertedIndexNode = <T>(
  node: InvertedIndexNode<T> | undefined,
  term: string,
): InvertedIndexNode<T> | undefined => {
  const ctx = SEARCH_CONTEXT;
  let i = 0;
  while (node !== void 0 && i < term.length) {
    const c = node.c;
    if (c === null) {
      return void 0;
    }
    findByCharCode(c, term.charCodeAt(i++));
    if (ctx.found === false) {
      return void 0;
    }
    node = c[ctx.i];
  }
  return node;
};
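// Illustrative sketch (not part of the original source): after indexing
// documents that contain the terms "lore" and "lorem", the trie contains the
// path l → o → r → e → m, where the "e" and "m" nodes carry non-null `d`
// arrays:
//
//   findInvertedIndexNode(index.root, "lore");  // => node "e" (has documents)
//   findInvertedIndexNode(index.root, "lor");   // => node "r" (`d` is null)
//   findInvertedIndexNode(index.root, "xyz");   // => undefined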
/**
 * Adds a document to the index.
 *
 * @typeparam T Document key.
 * @typeparam D Document type.
 * @param index {@link DocumentIndex}.
 * @param fieldGetters Field getters.
 * @param key Document key.
 * @param doc Document.
 */
export const indexAdd = <T, D>(
  index: DocumentIndex<T>,
  fieldGetters: Array<(doc: D) => string>,
  key: T,
  doc: D,
): void => {
  const { root, fSum, fAvg, docs, tokenizer, filter } = index;
  const termCounts = new _Map<string, Int32Array>();
  const fCount = new _Int32Array(fieldGetters.length);

  for (let i = 0; i < fieldGetters.length; i++) {
    const field = fieldGetters[i](doc);
    if (field !== void 0) {
      // Tokenize text.
      const terms = tokenizer(field);

      // Filter and count terms, ignore empty strings.
      let filteredTermsCount = 0;
      for (let j = 0; j < terms.length; j++) {
        const term = filter(terms[j]);
        if (term !== "") {
          filteredTermsCount++;
          let fieldTermCounts = termCounts.get(term);
          if (fieldTermCounts === void 0) {
            fieldTermCounts = new _Int32Array(fSum.length);
            termCounts.set(term, fieldTermCounts);
          }
          fieldTermCounts[i] += 1;
        }
      }
      fSum[i] += filteredTermsCount;
      fAvg[i] = fSum[i] / (docs.size + 1);
      fCount[i] = filteredTermsCount;
    }
  }

  const details = { key, fCount, removed: false } satisfies DocumentDetails<T>;
  docs.set(key, details);
  termCounts.forEach((termFrequency, term) => {
    const ctx = SEARCH_CONTEXT;
    let node = root;

    for (let i = 0; i < term.length; i++) {
      const charCode = term.charCodeAt(i);
      let newNode;
      if (node.c === null) {
        newNode = createInvertedIndexNode<T>(charCode);
        node.c = [newNode];
        node = newNode;
        continue;
      }
      findByCharCode(node.c, charCode);
      if (ctx.found === false) {
        newNode = createInvertedIndexNode<T>(charCode);
        node.c.splice(ctx.i, 0, newNode);
        node = newNode;
      } else {
        node = node.c[ctx.i];
      }
    }

    const doc = { details, tf: termFrequency } satisfies DocumentPointer<T>;
    if (node.d === null) {
      node.d = [doc];
    } else {
      node.d.push(doc);
    }
  });
};
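// Illustrative sketch (not part of the original source): indexing the
// document { id: "1", content: "lorem ipsum lorem" } over a single `content`
// field stores one DocumentPointer at the trie node for "lorem" with
// tf = [2] (term frequency) and details.fCount = [3] (terms in the field).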
/**
 * Removes a document from the index.
 *
 * @typeparam T Document key.
 * @param index {@link DocumentIndex}.
 * @param key Document key.
 */
export const indexRemove = <T>(
  index: DocumentIndex<T>,
  key: T,
): void => {
  const { docs, fSum, fAvg } = index;
  const docDetails = docs.get(key);

  if (docDetails !== void 0) {
    index.removed++;
    docDetails.removed = true;
    docs.delete(key);
    for (let i = 0; i < fSum.length; i++) {
      const fieldLength = docDetails.fCount[i];
      if (fieldLength > 0) {
        fSum[i] -= fieldLength;
        fAvg[i] = fSum[i] / docs.size;
      }
    }
  }
};

/**
 * Recursively cleans up removed documents from the index.
 *
 * @typeparam T Document key.
 * @param node {@link InvertedIndexNode}
 * @returns `1` when the subtree contains any document.
 */
function _vacuumIndex<T>(node: InvertedIndexNode<T>): number {
  let i = 0;
  let ret = 0;
  const d = node.d;
  const c = node.c;

  if (d !== null) {
    while (i < d.length) {
      const doc = d[i];
      if (doc.details.removed === true) {
        if (d.length > 1) {
          d[i] = d[d.length - 1];
        }
        d.pop();
        continue;
      }
      i++;
    }
    if (d.length > 0) {
      ret = 1;
    }
  }

  if (c !== null) {
    i = 0;
    while (i < c.length) {
      const r = _vacuumIndex(c[i]);
      ret |= r;
      if (r === 0) {
        c.splice(i, 1);
      } else {
        i++;
      }
    }
  }

  return ret;
}

/**
 * Cleans up removed documents from the {@link DocumentIndex}.
 *
 * @typeparam T Document key.
 * @param index {@link DocumentIndex}.
 */
export function indexVacuum<T>(index: DocumentIndex<T>): void {
  _vacuumIndex(index.root);
  index.removed = 0;
}
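// Illustrative removal lifecycle (not part of the original source):
//
//   indexRemove(index, "1");   // cheap bookkeeping: marks the document removed
//   if (index.removed > 10) {  // threshold is application-defined
//     indexVacuum(index);      // prunes dead pointers and empty trie nodes
//   }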
--------------------------------------------------------------------------------
/src/query.ts:
--------------------------------------------------------------------------------
import type { DocumentIndex, InvertedIndexNode } from "./index.js";
import { findInvertedIndexNode } from "./index.js";

/**
 * Query Result.
 *
 * @typeparam T Document key.
 */
export interface QueryResult<T> {
  /** Document key. */
  readonly key: T;
  /** Result score. */
  readonly score: number;
}

const sortByScore = <T>(
  a: QueryResult<T>,
  b: QueryResult<T>,
) => b.score - a.score;

const max = Math.max;
const log = Math.log;
const fromCharCode = String.fromCharCode;

/**
 * Performs a search with a simple free text query.
 *
 * All token separators work as a disjunction operator.
 *
 * @typeparam T Document key.
 * @param index {@link DocumentIndex}.
 * @param fieldBoost Field boost factors.
 * @param bm25k1 BM25 ranking function constant `k1`, controls non-linear term
 *  frequency normalization (saturation).
 * @param bm25b BM25 ranking function constant `b`, controls to what degree
 *  document length normalizes tf values.
 * @param s Query string.
 * @returns Array of {@link QueryResult} objects.
 */
export const indexQuery = <T>(
  index: DocumentIndex<T>,
  fieldBoost: number[],
  bm25k1: number,
  bm25b: number,
  s: string,
): QueryResult<T>[] => {
  const { docs, root, fAvg, tokenizer, filter } = index;
  const terms = tokenizer(s);
  const scores = new Map<T, number>();

  for (let i = 0; i < terms.length; i++) {
    const term = filter(terms[i]);
    if (term !== "") {
      const expandedTerms = expandTerm(index, term);
      const visitedDocuments = new Set<T>();
      for (let j = 0; j < expandedTerms.length; j++) {
        const eTerm = expandedTerms[j];
        const expansionBoost = eTerm === term
          ? 1
          : log(1 + (1 / (1 + eTerm.length - term.length)));
        const termNode = findInvertedIndexNode(root, eTerm);
        let d;

        if (termNode !== void 0 && (d = termNode.d) !== null) {
          let documentFrequency = 0;
          for (let k = 0; k < d.length; k++) {
            const pointer = d[k];
            if (pointer.details.removed === false) {
              documentFrequency++;
            }
          }
          if (documentFrequency > 0) {
            // Calculate BM25 idf.
            const idf = log(1 + (docs.size - documentFrequency + 0.5) / (documentFrequency + 0.5));

            for (let k = 0; k < d.length; k++) {
              const pointer = d[k];
              if (pointer.details.removed === false) {
                let score = 0;
                for (let x = 0; x < pointer.details.fCount.length; x++) {
                  let tf = pointer.tf[x];
                  if (tf > 0) {
                    // Calculate BM25 tf.
                    const fieldLength = pointer.details.fCount[x];
                    const avgFieldLength = fAvg[x];
                    tf = ((bm25k1 + 1) * tf) / (bm25k1 * ((1 - bm25b) + bm25b * (fieldLength / avgFieldLength)) + tf);
                    score += tf * idf * fieldBoost[x] * expansionBoost;
                  }
                }
                if (score > 0) {
                  const key = pointer.details.key;
                  const prevScore = scores.get(key);
                  scores.set(
                    key,
                    prevScore !== void 0 && visitedDocuments.has(key)
                      ? max(prevScore, score)
                      : prevScore === void 0
                        ? score
                        : prevScore + score
                  );
                  visitedDocuments.add(key);
                }
              }
            }
          }
        }
      }
    }
  }
  const result = [] as QueryResult<T>[];
  scores.forEach((score, key) => {
    result.push({ key, score });
  });
  result.sort(sortByScore);

  return result;
};
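// Worked example (illustrative, not part of the original source): with
// bm25k1 = 1.2, bm25b = 0.75, two indexed documents (docs.size = 2), and a
// term that occurs in one of them (documentFrequency = 1):
//
//   idf = log(1 + (2 - 1 + 0.5) / (1 + 0.5)) = log(2) ≈ 0.693
//
// For a match with raw tf = 2 in a field of length 3 (average length 2.5):
//
//   tf' = (2.2 * 2) / (1.2 * (0.25 + 0.75 * (3 / 2.5)) + 2) ≈ 1.30
//   score ≈ 1.30 * 0.693 ≈ 0.90   (fieldBoost = 1, expansionBoost = 1)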
/**
 * Recursively goes through inverted index nodes and expands the term with
 * all possible combinations.
 *
 * @typeparam T Document key.
 * @param node {@link InvertedIndexNode}.
 * @param results Results.
 * @param term Term.
 */
const _expandTerm = <T>(
  node: InvertedIndexNode<T>,
  results: string[],
  term: string,
): void => {
  if (node.d !== null) {
    results.push(term);
  }
  const children = node.c;
  if (children !== null) {
    for (let i = 0; i < children.length; i++) {
      const c = children[i];
      _expandTerm(c, results, term + fromCharCode(c.k));
    }
  }
};

/**
 * Expands a term with all possible combinations.
 *
 * @typeparam T Document key.
 * @param index {@link DocumentIndex}.
 * @param term Term.
 * @returns All terms that start with the `term` string.
 */
export const expandTerm = <T>(
  index: DocumentIndex<T>,
  term: string,
): string[] => {
  const node = findInvertedIndexNode(index.root, term);
  const results = [] as string[];
  if (node !== void 0) {
    _expandTerm(node, results, term);
  }

  return results;
};
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
{
  "compilerOptions": {
    "rootDir": "src",
    "outDir": "dist",
    "declarationDir": "dist",
    "target": "es2022",
    "module": "es2022",
    "moduleResolution": "node16",
    "declaration": true,
    "declarationMap": true,
    "sourceMap": true,
    "importHelpers": true,
    "noEmitHelpers": true,
    "removeComments": false,
    "resolveJsonModule": true,

    "forceConsistentCasingInFileNames": true,
    "strict": true,
    "noImplicitAny": true,
    "noImplicitReturns": true,
    "noImplicitOverride": true,
    "noUnusedLocals": true,
    "exactOptionalPropertyTypes": true,
    "useUnknownInCatchVariables": false,
    "skipLibCheck": true
  },
  "include": [
    "src/**/*.ts"
  ]
}
--------------------------------------------------------------------------------