├── .editorconfig ├── .env.example ├── .gitattributes ├── .gitignore ├── LICENSE ├── README.md ├── data ├── hotpotqa │ └── hotpot_dev_v1_simplified.json └── strategyqa │ └── strategyqa_dev.json ├── package.json ├── pnpm-lock.yaml ├── src ├── cli.ts ├── config │ └── prompts │ │ ├── autocot.ts │ │ ├── cot-hotpotqa.ts │ │ ├── cot-strategyqa.ts │ │ ├── hotpotqa.ts │ │ ├── react.ts │ │ ├── score.ts │ │ └── strategyqa.ts ├── types │ └── score.ts └── utils │ ├── ai.ts │ ├── evals │ ├── autocot-hotpotqa.ts │ ├── autocot-strategyqa.ts │ ├── autocot.ts │ ├── cot-hotpotqa.ts │ ├── cot-strategyqa.ts │ ├── hotpotqa.ts │ ├── score.ts │ └── strategyqa.ts │ └── shuffle.ts └── tsconfig.json /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | indent_style = space 5 | end_of_line = lf 6 | charset = utf-8 7 | trim_trailing_whitespace = true 8 | insert_final_newline = true 9 | 10 | [*.yml] 11 | indent_style = space 12 | indent_size = 2 13 | 14 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY=YOUR_KEY_HERE 2 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | /dist 3 | .env 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AutoReason: Automatic Few-Shot Reasoning Decomposition 2 | 3 | ## Paper 4 | 5 | You can read the paper on [arXiv](https://arxiv.org/abs/2412.06975) 6 | 7 | ## Abstract 8 | 9 | Chain of Thought (CoT) was introduced in recent research as a method for improving step-by-step reasoning in Large Language Models. 
However, CoT has notable limitations, such as its reliance on hand-crafted few-shot exemplar prompts and its inability to adapt to different queries. 10 | 11 | In this work, we propose a system to automatically generate rationales using CoT. Our method improves multi-step implicit reasoning capabilities by decomposing the implicit query into several explicit questions. This provides interpretability for the model, improving reasoning in weaker LLMs. We test our approach with two Q&A datasets: StrategyQA and HotpotQA. We show an increase in accuracy with both, especially on StrategyQA. 12 | 13 | ## Usage 14 | 15 | 1. Copy `.env.example` to `.env` and add your OpenAI API key. 16 | 2. Compile the TypeScript to JavaScript with `pnpm build` 17 | 3. Run the CLI with `node ./dist/src/cli.js` 18 | 19 | ## File-Folder Conventions 20 | 21 | - `data`: Datasets used in the evals. 22 | - `src/config/prompts`: AutoReason prompts, CoT prompts and base prompts for each of the datasets. These are used in the evals. 23 | - `src/utils/evals`: Evaluation/Testing methods for each dataset and method (a minimal sketch of how these utilities compose is appended at the end of this listing). 24 | 25 | ## Citation 26 | 27 | Please cite our paper if you use it in your research: 28 | 29 | ```tex 30 | @misc{sevinc2024autoreasonautomaticfewshotreasoning, 31 | title={AutoReason: Automatic Few-Shot Reasoning Decomposition}, 32 | author={Arda Sevinc and Abdurrahman Gumus}, 33 | year={2024}, 34 | eprint={2412.06975}, 35 | archivePrefix={arXiv}, 36 | primaryClass={cs.CL}, 37 | url={https://arxiv.org/abs/2412.06975}, 38 | } 39 | ``` 40 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "autoreason", 3 | "version": "1.0.0", 4 | "description": "", 5 | "exports": "./src/cli.ts", 6 | "bin": "./dist/cli.js", 7 | "type": "module", 8 | "engines": { 9 | "node": ">=20", 10 | "pnpm": ">=9" 11 | }, 12 | "scripts": { 13 | "build": "tsc", 14 | "dev": "tsc --watch", 15 | "test": "prettier --check . 
&& xo && ava", 16 | "typecheck": "tsc --noEmit" 17 | }, 18 | "keywords": [ 19 | "ai" 20 | ], 21 | "files": [ 22 | "dist" 23 | ], 24 | "author": "Arda Sevinc ", 25 | "license": "Apache-2.0", 26 | "packageManager": "pnpm@9.14.2+sha512.6e2baf77d06b9362294152c851c4f278ede37ab1eba3a55fda317a4a17b209f4dbb973fb250a77abc463a341fcb1f17f17cfa24091c4eb319cda0d9b84278387", 27 | "prettier": "@ardasevinc/prettier-config", 28 | "devDependencies": { 29 | "@ardasevinc/prettier-config": "^0.1.4", 30 | "@sindresorhus/tsconfig": "^7.0.0", 31 | "ava": "^6.2.0", 32 | "chalk": "^5.3.0", 33 | "dotenv": "^16.4.7", 34 | "ts-node": "^10.9.2", 35 | "typescript": "^5.7.2", 36 | "xo": "^0.60.0" 37 | }, 38 | "dependencies": { 39 | "@inquirer/prompts": "^7.2.0", 40 | "meow": "^13.2.0", 41 | "openai": "^4.76.2" 42 | }, 43 | "ava": { 44 | "extensions": { 45 | "ts": "module" 46 | }, 47 | "nodeArguments": [ 48 | "--loader=ts-node/esm" 49 | ] 50 | }, 51 | "xo": { 52 | "prettier": true, 53 | "rules": { 54 | "n/prefer-global/process": 0 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/cli.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | import 'dotenv/config.js'; // eslint-disable-line import/no-unassigned-import 3 | import { select } from '@inquirer/prompts'; 4 | import { evalHotpotQa } from './utils/evals/hotpotqa.js'; 5 | import { evalStrategyQa } from './utils/evals/strategyqa.js'; 6 | import { evalCotStrategyQa } from './utils/evals/cot-strategyqa.js'; 7 | import { evalCotHotpotQa } from './utils/evals/cot-hotpotqa.js'; 8 | import { evalAutoCotHotpotQa } from './utils/evals/autocot-hotpotqa.js'; 9 | import { evalAutoCotStrategyQa } from './utils/evals/autocot-strategyqa.js'; 10 | 11 | const runMode = await select({ 12 | message: 'Select run mode', 13 | choices: [ 14 | { 15 | name: 'Testing', 16 | value: 'test', 17 | }, 18 | { 19 | name: 'Evaluation', 20 | value: 'eval', 21 | }, 22 | { 23 | name: 'Exit', 24 | value: 'exit', 25 | }, 26 | ], 27 | }); 28 | 29 | switch (runMode) { 30 | case 'test': { 31 | break; 32 | } 33 | 34 | case 'eval': { 35 | const evalType = await select({ 36 | message: 'Select Evaluation Dataset', 37 | choices: [ 38 | { 39 | name: 'HotpotQA', 40 | value: 'hotpotqa', 41 | }, 42 | { 43 | name: 'CoT HotpotQA', 44 | value: 'cothotpotqa', 45 | }, 46 | { name: 'Auto CoT HotpotQA', value: 'autocothotpotqa' }, 47 | { 48 | name: 'StrategyQA', 49 | value: 'strategyqa', 50 | }, 51 | { 52 | name: 'CoT StrategyQA', 53 | value: 'cotstrategyqa', 54 | }, 55 | { name: 'Auto CoT StrategyQA', value: 'autocotstrategyqa' }, 56 | { 57 | name: 'Exit', 58 | value: 'exit', 59 | }, 60 | ], 61 | }); 62 | 63 | switch (evalType) { 64 | case 'hotpotqa': { 65 | await evalHotpotQa(); 66 | break; 67 | } 68 | 69 | case 'cothotpotqa': { 70 | await evalCotHotpotQa(); 71 | break; 72 | } 73 | 74 | case 'autocothotpotqa': { 75 | await evalAutoCotHotpotQa(); 76 | break; 77 | } 78 | 79 | case 'strategyqa': { 80 | await evalStrategyQa(); 81 | break; 82 | } 83 | 84 | case 'cotstrategyqa': { 85 | await evalCotStrategyQa(); 86 | break; 87 | } 88 | 89 | case 'autocotstrategyqa': { 90 | await evalAutoCotStrategyQa(); 91 | break; 92 | } 93 | 94 | case 'exit': { 95 | process.exit(0); 96 | } 97 | 98 | // eslint-disable-next-line no-fallthrough 99 | default: { 100 | console.log('default'); 101 | process.exit(1); 102 | } 103 | } 104 | 105 | break; 106 | } 107 | 108 | case 'exit': { 109 | process.exit(0); 110 | } 111 | 112 | // 
eslint-disable-next-line no-fallthrough 113 | default: { 114 | console.log('default'); 115 | process.exit(1); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/config/prompts/autocot.ts: -------------------------------------------------------------------------------- 1 | export const autoCotPrompt = ({ question }: { question: string }) => { 2 | return `You will formulate Chain of Thought (CoT) reasoning traces. 3 | CoT is a prompting technique that helps you to think about a problem in a structured way. It breaks down a problem into a series of logical reasoning traces. 4 | 5 | You will be given a question and using this question you will decompose the question into a series of logical reasoning traces. Only write the reasoning traces and do not answer the question yourself. 6 | 7 | Here are some examples of CoT reasoning traces: 8 | 9 | Question: Did Brazilian jiu-jitsu Gracie founders have at least a baker's dozen of kids between them? 10 | 11 | Reasoning traces: 12 | - Who were the founders of Brazilian jiu-jitsu? 13 | - What is the number represented by the baker's dozen? 14 | - How many children do Gracie founders have altogether 15 | - Is this number bigger than baker's dozen? 16 | 17 | Question: Is cow methane safer for environment than cars 18 | 19 | Reasoning traces: 20 | - How much methane is produced by cars annually? 21 | - How much methane is produced by cows annually? 22 | - Is methane produced by cows less than methane produced by cars? 23 | 24 | Question: ${question} 25 | 26 | Reasoning traces: 27 | `; 28 | }; 29 | -------------------------------------------------------------------------------- /src/config/prompts/cot-hotpotqa.ts: -------------------------------------------------------------------------------- 1 | export const cotHotpotQaPrompt = ({ question }: { question: string }) => { 2 | return `Your job is to answer some questions. Here are some examples of how you should answer: 3 | 4 | Q: Do hamsters provide food for any animals? 5 | Hamsters are prey animals. Prey are food for predators. Thus, hamsters provide food for some animals. 6 | Answer: yes 7 | 8 | Q: Could Brooke Shields succeed at University of Pennsylvania? 9 | Brooke Shields went to Princeton University. Princeton University is about as academically rigorous as the University of Pennsylvania. Thus, Brooke Shields could also succeed at the University of Pennsylvania. 10 | Answer: yes 11 | 12 | Q: Yes or no: Hydrogen's atomic number squared exceeds number of Spice Girls? 13 | "Hydrogen has an atomic number of 1. 1 squared is 1. There are 5 Spice Girls. Thus, Hydrogen's atomic number squared is less than 5. 14 | Answer: no 15 | 16 | Q: ${question} 17 | `; 18 | }; 19 | -------------------------------------------------------------------------------- /src/config/prompts/cot-strategyqa.ts: -------------------------------------------------------------------------------- 1 | export const cotStrategyQaPrompt = ({ question }: { question: string }) => { 2 | return `Your job is to answer some questions. Here are some examples of how you should answer: 3 | 4 | Q: Do hamsters provide food for any animals? 5 | Hamsters are prey animals. Prey are food for predators. Thus, hamsters provide food for some animals. 6 | Answer: yes 7 | 8 | Q: Could Brooke Shields succeed at University of Pennsylvania? 9 | Brooke Shields went to Princeton University. Princeton University is about as academically rigorous as the University of Pennsylvania. 
Thus, Brooke Shields could also succeed at the University of Pennsylvania. 10 | Answer: yes 11 | 12 | Q: Yes or no: Hydrogen's atomic number squared exceeds number of Spice Girls? 13 | "Hydrogen has an atomic number of 1. 1 squared is 1. There are 5 Spice Girls. Thus, Hydrogen's atomic number squared is less than 5. 14 | Answer: no 15 | 16 | Q: ${question} 17 | `; 18 | }; 19 | -------------------------------------------------------------------------------- /src/config/prompts/hotpotqa.ts: -------------------------------------------------------------------------------- 1 | export const baseHotpotqaPrompt = `You're an agent. Your job is to answer some questions. Here are the rules: 2 | 1. You will be given a question 3 | 2. You will answer the question with a short answer, it might yes/no or a short phrase 4 | 3. When you know the answer, write it in this format only: ""`; 5 | -------------------------------------------------------------------------------- /src/config/prompts/react.ts: -------------------------------------------------------------------------------- 1 | export const reactAgentPrompt = ``; 2 | -------------------------------------------------------------------------------- /src/config/prompts/score.ts: -------------------------------------------------------------------------------- 1 | import { type ScorePromptParameters } from '../../types/score.js'; 2 | 3 | export const scorePrompt = ({ 4 | question, 5 | answer, 6 | correctAnswer, 7 | }: ScorePromptParameters) => { 8 | return `Your job is to score an answer's correctness from 0 to 10. You will be given the question, the correct answer, and the answer you need to score. 9 | 0 means the answer is completely wrong, 10 means the answer is completely correct. Explain your reasoning first shortly, and then write the score as a literal number (0 to 10). 10 | 11 | Question: ${question} 12 | Answer: ${answer} 13 | Correct Answer: ${correctAnswer} 14 | Score: `; 15 | }; 16 | -------------------------------------------------------------------------------- /src/config/prompts/strategyqa.ts: -------------------------------------------------------------------------------- 1 | export const baseStrategyQaPrompt = `You're an agent. Your job is to answer some questions. Here are the rules: 2 | 1. You will be given a question 3 | 2. You will answer the question with true or false 4 | 3. 
When you know the answer, write it in this format only: "answer"`; 5 | -------------------------------------------------------------------------------- /src/types/score.ts: -------------------------------------------------------------------------------- 1 | export type ScorePromptParameters = { 2 | question: string; 3 | correctAnswer: string; 4 | answer: string; 5 | }; 6 | -------------------------------------------------------------------------------- /src/utils/ai.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | 3 | type ApiKeys = { 4 | openaiKey: string; 5 | }; 6 | 7 | class AiProvider { 8 | public static getInstance({ openaiKey }: ApiKeys): AiProvider { 9 | if (!AiProvider.instance) { 10 | AiProvider.instance = new AiProvider(openaiKey); 11 | } 12 | 13 | return AiProvider.instance; 14 | } 15 | 16 | private static instance: AiProvider; 17 | private readonly openAi: OpenAI; 18 | private readonly scorer: OpenAI; 19 | private readonly autocot: OpenAI; 20 | 21 | private constructor(private readonly apiKey: string) { 22 | this.apiKey = apiKey; 23 | this.openAi = new OpenAI({ 24 | apiKey: this.apiKey, 25 | maxRetries: 10, 26 | timeout: 60_000, 27 | }); 28 | this.scorer = new OpenAI({ 29 | apiKey: this.apiKey, 30 | maxRetries: 10, 31 | timeout: 60_000, 32 | }); 33 | this.autocot = new OpenAI({ 34 | apiKey: this.apiKey, 35 | maxRetries: 10, 36 | timeout: 60_000, 37 | }); 38 | } 39 | 40 | public getOpenAi(): OpenAI { 41 | return this.openAi; 42 | } 43 | 44 | public getScorer(): OpenAI { 45 | return this.scorer; 46 | } 47 | 48 | public getAutoCot(): OpenAI { 49 | return this.autocot; 50 | } 51 | } 52 | 53 | export { AiProvider }; 54 | -------------------------------------------------------------------------------- /src/utils/evals/autocot-hotpotqa.ts: -------------------------------------------------------------------------------- 1 | import { resolve } from 'node:path'; 2 | import { writeFile } from 'node:fs/promises'; 3 | import { type ChatCompletionMessageParam } from 'openai/resources/index.mjs'; 4 | import HotpotQa from '../../../data/hotpotqa/hotpot_dev_v1_simplified.json' assert { type: 'json' }; 5 | import { AiProvider } from '../ai.js'; 6 | import { shuffleArray } from '../shuffle.js'; 7 | import { score } from './score.js'; 8 | import { autoCotGenerator } from './autocot.js'; 9 | 10 | const saveEvalResults = async (results: any[]) => { 11 | const date = new Date(); 12 | const path = resolve( 13 | 'data/hotpotqa/runs/autocot', 14 | `GPT-4-${date.getFullYear()}${ 15 | date.getMonth() + 1 16 | }${date.getDay()}-${date.getHours()}:${date.getMinutes()}.json`, 17 | ); 18 | 19 | const correctAnswers = results.filter( 20 | (result: any) => result.verdict.toLowerCase() === 'correct'.toLowerCase(), 21 | ).length; 22 | 23 | const totalAnswers = results.length; 24 | 25 | const percentScore = (correctAnswers / totalAnswers) * 100; 26 | 27 | const summary = { 28 | correctAnswers, 29 | totalAnswers, 30 | percentScore, 31 | }; 32 | 33 | const data = { 34 | summary, 35 | results, 36 | }; 37 | 38 | await writeFile(path, JSON.stringify(data, null, 2)); 39 | return path; 40 | }; 41 | 42 | const evalAutoCotHotpotQa = async () => { 43 | shuffleArray(HotpotQa); 44 | const hotpotQa = HotpotQa.slice(0, 20); 45 | 46 | const ai = AiProvider.getInstance({ 47 | openaiKey: process.env?.['OPENAI_API_KEY'] ?? 
'', 48 | }); 49 | const openai = ai.getOpenAi(); 50 | 51 | const resultsPromises = hotpotQa.map(async ({ question, answer }) => { 52 | const prompt = await autoCotGenerator({ question }); 53 | 54 | const messages: ChatCompletionMessageParam[] = [ 55 | { role: 'system', content: prompt }, 56 | { role: 'user', content: question }, 57 | ]; 58 | const result = await openai.chat.completions.create({ 59 | messages, 60 | model: 'gpt-4-1106-preview', 61 | max_tokens: 4000, 62 | stream: false, 63 | temperature: 0.5, 64 | }); 65 | 66 | if (!result.choices[0]?.message?.content) { 67 | throw new Error('No response in hotpotqa eval'); 68 | } 69 | 70 | const answerScore = await score({ 71 | question, 72 | correctAnswer: answer, 73 | answer: result.choices[0].message.content, 74 | }); 75 | 76 | const verdict = answerScore > 6 ? 'correct' : 'incorrect'; 77 | 78 | return { 79 | question, 80 | answer, 81 | response: result.choices[0].message.content, 82 | answerScore, 83 | verdict, 84 | }; 85 | }); 86 | const results = await Promise.all(resultsPromises); 87 | 88 | const savedPath = await saveEvalResults(results); 89 | console.log(`saved results to ${savedPath}`); 90 | }; 91 | 92 | export { evalAutoCotHotpotQa }; 93 | -------------------------------------------------------------------------------- /src/utils/evals/autocot-strategyqa.ts: -------------------------------------------------------------------------------- 1 | import { resolve } from 'node:path'; 2 | import { writeFile } from 'node:fs/promises'; 3 | import { type ChatCompletionMessageParam } from 'openai/resources/index.mjs'; 4 | import StrategyQa from '../../../data/strategyqa/strategyqa_dev.json' assert { type: 'json' }; 5 | import { AiProvider } from '../ai.js'; 6 | import { shuffleArray } from '../shuffle.js'; 7 | import { score } from './score.js'; 8 | import { autoCotGenerator } from './autocot.js'; 9 | 10 | const saveEvalResults = async (results: any[]) => { 11 | const date = new Date(); 12 | const path = resolve( 13 | 'data/strategyqa/runs/autocot', 14 | `GPT-4-${date.getFullYear()}${ 15 | date.getMonth() + 1 16 | }${date.getDay()}-${date.getHours()}:${date.getMinutes()}.json`, 17 | ); 18 | 19 | const correctAnswers = results.filter( 20 | (result: any) => result.verdict.toLowerCase() === 'correct'.toLowerCase(), 21 | ).length; 22 | 23 | const totalAnswers = results.length; 24 | 25 | const percentScore = (correctAnswers / totalAnswers) * 100; 26 | 27 | const summary = { 28 | correctAnswers, 29 | totalAnswers, 30 | percentScore, 31 | }; 32 | 33 | const data = { 34 | summary, 35 | results, 36 | }; 37 | 38 | await writeFile(path, JSON.stringify(data, null, 2)); 39 | return path; 40 | }; 41 | 42 | const evalAutoCotStrategyQa = async () => { 43 | shuffleArray(StrategyQa); 44 | const strategyQa = StrategyQa.slice(0, 20); 45 | 46 | const ai = AiProvider.getInstance({ 47 | openaiKey: process.env?.['OPENAI_API_KEY'] ?? 
'', 48 | }); 49 | const openai = ai.getOpenAi(); 50 | 51 | const resultsPromises = strategyQa.map(async ({ question, answer }) => { 52 | const prompt = await autoCotGenerator({ question }); 53 | 54 | const messages: ChatCompletionMessageParam[] = [ 55 | { role: 'system', content: prompt }, 56 | { role: 'user', content: question }, 57 | ]; 58 | const result = await openai.chat.completions.create({ 59 | messages, 60 | model: 'gpt-4-1106-preview', 61 | max_tokens: 4000, 62 | stream: false, 63 | temperature: 0.5, 64 | }); 65 | 66 | if (!result.choices[0]?.message?.content) { 67 | throw new Error('No response in autocot strategyqa eval'); 68 | } 69 | 70 | const answerScore = await score({ 71 | question, 72 | correctAnswer: answer.toString(), 73 | answer: result.choices[0].message.content, 74 | }); 75 | 76 | const verdict = answerScore > 6 ? 'correct' : 'incorrect'; 77 | 78 | return { 79 | question, 80 | answer, 81 | response: result.choices[0].message.content, 82 | answerScore, 83 | verdict, 84 | }; 85 | }); 86 | const results = await Promise.all(resultsPromises); 87 | const savedPath = await saveEvalResults(results); 88 | console.log(`saved results to ${savedPath}`); 89 | }; 90 | 91 | export { evalAutoCotStrategyQa }; 92 | -------------------------------------------------------------------------------- /src/utils/evals/autocot.ts: -------------------------------------------------------------------------------- 1 | import { type ChatCompletionMessageParam } from 'openai/resources/index.mjs'; 2 | import { autoCotPrompt } from '../../config/prompts/autocot.js'; 3 | import { AiProvider } from '../ai.js'; 4 | 5 | export const autoCotGenerator = async ({ question }: { question: string }) => { 6 | const messages: ChatCompletionMessageParam[] = [ 7 | { role: 'system', content: autoCotPrompt({ question }) }, 8 | ]; 9 | 10 | const aiProvider = AiProvider.getInstance({ 11 | openaiKey: process.env?.['OPENAI_API_KEY'] ?? 
'', 12 | }); 13 | const autocot = aiProvider.getAutoCot(); 14 | 15 | try { 16 | const response = await autocot.chat.completions.create({ 17 | messages, 18 | model: 'gpt-4-1106-preview', 19 | max_tokens: 4000, 20 | stream: false, 21 | temperature: 0.7, 22 | }); 23 | 24 | if (!response.choices[0]?.message?.content) { 25 | throw new Error('No response in autocot generator'); 26 | } 27 | 28 | const { content } = response.choices[0].message; 29 | 30 | const formattedPrompt = formatAutoCot(content); 31 | 32 | return formattedPrompt; 33 | } catch (error) { 34 | throw new Error(`error in autocot generator: ${error as string}`); 35 | } 36 | }; 37 | 38 | const formatAutoCot = (reasoningTraces: string) => { 39 | return `Use these reasoning traces below to answer the question: 40 | 41 | ${reasoningTraces} 42 | Indicate your FINAL answer clearly below: 43 | `; 44 | }; 45 | -------------------------------------------------------------------------------- /src/utils/evals/cot-hotpotqa.ts: -------------------------------------------------------------------------------- 1 | import { resolve } from 'node:path'; 2 | import { writeFile } from 'node:fs/promises'; 3 | import { type ChatCompletionMessageParam } from 'openai/resources/index.mjs'; 4 | import HotpotQa from '../../../data/hotpotqa/hotpot_dev_v1_simplified.json' assert { type: 'json' }; 5 | import { cotHotpotQaPrompt } from '../../config/prompts/cot-hotpotqa.js'; 6 | import { AiProvider } from '../ai.js'; 7 | import { shuffleArray } from '../shuffle.js'; 8 | import { score } from './score.js'; 9 | 10 | const saveEvalResults = async (results: any[]) => { 11 | const date = new Date(); 12 | const path = resolve( 13 | 'data/hotpotqa/runs/cot', 14 | `${date.getFullYear()}${ 15 | date.getMonth() + 1 16 | }${date.getDay()}-${date.getHours()}:${date.getMinutes()}.json`, 17 | ); 18 | 19 | const correctAnswers = results.filter( 20 | (result: any) => result.verdict.toLowerCase() === 'correct'.toLowerCase(), 21 | ).length; 22 | 23 | const totalAnswers = results.length; 24 | 25 | const percentScore = (correctAnswers / totalAnswers) * 100; 26 | 27 | const summary = { 28 | correctAnswers, 29 | totalAnswers, 30 | percentScore, 31 | }; 32 | 33 | const data = { 34 | summary, 35 | results, 36 | }; 37 | 38 | await writeFile(path, JSON.stringify(data, null, 2)); 39 | return path; 40 | }; 41 | 42 | const evalCotHotpotQa = async () => { 43 | shuffleArray(HotpotQa); 44 | const hotpotQa = HotpotQa.slice(0, 20); 45 | 46 | const ai = AiProvider.getInstance({ 47 | openaiKey: process.env?.['OPENAI_API_KEY'] ?? '', 48 | }); 49 | const openai = ai.getOpenAi(); 50 | 51 | const resultsPromises = hotpotQa.map(async ({ question, answer }) => { 52 | const messages: ChatCompletionMessageParam[] = [ 53 | { role: 'system', content: cotHotpotQaPrompt({ question }) }, 54 | { role: 'user', content: question }, 55 | ]; 56 | const result = await openai.chat.completions.create({ 57 | messages, 58 | model: 'gpt-3.5-turbo-1106', 59 | max_tokens: 4000, 60 | stream: false, 61 | temperature: 0.3, 62 | }); 63 | 64 | if (!result.choices[0]?.message?.content) { 65 | throw new Error('No response in hotpotqa eval'); 66 | } 67 | 68 | const answerScore = await score({ 69 | question, 70 | correctAnswer: answer, 71 | answer: result.choices[0].message.content, 72 | }); 73 | 74 | const verdict = answerScore > 6 ? 
'correct' : 'incorrect'; 75 | 76 | return { 77 | question, 78 | answer, 79 | response: result.choices[0].message.content, 80 | answerScore, 81 | verdict, 82 | }; 83 | }); 84 | const results = await Promise.all(resultsPromises); 85 | 86 | const savedPath = await saveEvalResults(results); 87 | console.log(`saved results to ${savedPath}`); 88 | }; 89 | 90 | export { evalCotHotpotQa }; 91 | -------------------------------------------------------------------------------- /src/utils/evals/cot-strategyqa.ts: -------------------------------------------------------------------------------- 1 | import { resolve } from 'node:path'; 2 | import { writeFile } from 'node:fs/promises'; 3 | import { type ChatCompletionMessageParam } from 'openai/resources/index.mjs'; 4 | import StrategyQa from '../../../data/strategyqa/strategyqa_dev.json' assert { type: 'json' }; 5 | import { cotStrategyQaPrompt } from '../../config/prompts/cot-strategyqa.js'; 6 | import { AiProvider } from '../ai.js'; 7 | import { shuffleArray } from '../shuffle.js'; 8 | import { score } from './score.js'; 9 | 10 | const saveEvalResults = async (results: any[]) => { 11 | const date = new Date(); 12 | const path = resolve( 13 | 'data/strategyqa/runs/cot', 14 | `GPT4-${date.getFullYear()}${ 15 | date.getMonth() + 1 16 | }${date.getDay()}-${date.getHours()}:${date.getMinutes()}.json`, 17 | ); 18 | 19 | const correctAnswers = results.filter( 20 | (result: any) => result.verdict.toLowerCase() === 'correct'.toLowerCase(), 21 | ).length; 22 | 23 | const totalAnswers = results.length; 24 | 25 | const percentScore = (correctAnswers / totalAnswers) * 100; 26 | 27 | const summary = { 28 | correctAnswers, 29 | totalAnswers, 30 | percentScore, 31 | }; 32 | 33 | const data = { 34 | summary, 35 | results, 36 | }; 37 | 38 | await writeFile(path, JSON.stringify(data, null, 2)); 39 | return path; 40 | }; 41 | 42 | const evalCotStrategyQa = async () => { 43 | shuffleArray(StrategyQa); 44 | const strategyQa = StrategyQa.slice(0, 20); 45 | 46 | const ai = AiProvider.getInstance({ 47 | openaiKey: process.env?.['OPENAI_API_KEY'] ?? '', 48 | }); 49 | const openai = ai.getOpenAi(); 50 | 51 | const resultsPromises = strategyQa.map(async ({ question, answer }) => { 52 | const messages: ChatCompletionMessageParam[] = [ 53 | { role: 'system', content: cotStrategyQaPrompt({ question }) }, 54 | { role: 'user', content: question }, 55 | ]; 56 | const result = await openai.chat.completions.create({ 57 | messages, 58 | model: 'gpt-4-1106-preview', 59 | max_tokens: 4000, 60 | stream: false, 61 | temperature: 0.4, 62 | }); 63 | 64 | if (!result.choices[0]?.message?.content) { 65 | throw new Error('No response in strategyqa eval'); 66 | } 67 | 68 | const answerScore = await score({ 69 | question, 70 | correctAnswer: answer.toString(), 71 | answer: result.choices[0].message.content, 72 | }); 73 | 74 | return { 75 | question, 76 | answer, 77 | response: result.choices[0].message.content, 78 | answerScore, 79 | verdict: answerScore > 6 ? 
'correct' : 'incorrect', 80 | }; 81 | }); 82 | const results = await Promise.all(resultsPromises); 83 | const path = await saveEvalResults(results); 84 | console.log(`Saved eval results to ${path}`); 85 | }; 86 | 87 | export { evalCotStrategyQa }; 88 | -------------------------------------------------------------------------------- /src/utils/evals/hotpotqa.ts: -------------------------------------------------------------------------------- 1 | import { resolve } from 'node:path'; 2 | import { writeFile } from 'node:fs/promises'; 3 | import { type ChatCompletionMessageParam } from 'openai/resources/index.mjs'; 4 | import HotpotQa from '../../../data/hotpotqa/hotpot_dev_v1_simplified.json' assert { type: 'json' }; 5 | import { baseHotpotqaPrompt } from '../../config/prompts/hotpotqa.js'; 6 | import { AiProvider } from '../ai.js'; 7 | import { shuffleArray } from '../shuffle.js'; 8 | import { score } from './score.js'; 9 | 10 | const saveEvalResults = async (results: any[]) => { 11 | const date = new Date(); 12 | const path = resolve( 13 | 'data/hotpotqa/runs', 14 | `GPT4-${date.getFullYear()}${ 15 | date.getMonth() + 1 16 | }${date.getDay()}-${date.getHours()}:${date.getMinutes()}.json`, 17 | ); 18 | 19 | const correctAnswers = results.filter( 20 | (result: any) => result.verdict.toLowerCase() === 'correct'.toLowerCase(), 21 | ).length; 22 | 23 | const totalAnswers = results.length; 24 | 25 | const percentScore = (correctAnswers / totalAnswers) * 100; 26 | 27 | const summary = { 28 | correctAnswers, 29 | totalAnswers, 30 | percentScore, 31 | }; 32 | 33 | const data = { 34 | summary, 35 | results, 36 | }; 37 | 38 | await writeFile(path, JSON.stringify(data, null, 2)); 39 | return path; 40 | }; 41 | 42 | const evalHotpotQa = async () => { 43 | shuffleArray(HotpotQa); 44 | const hotpotQa = HotpotQa.slice(0, 20); 45 | 46 | const ai = AiProvider.getInstance({ 47 | openaiKey: process.env?.['OPENAI_API_KEY'] ?? '', 48 | }); 49 | const openai = ai.getOpenAi(); 50 | 51 | const resultsPromises = hotpotQa.map(async ({ question, answer }) => { 52 | const messages: ChatCompletionMessageParam[] = [ 53 | { role: 'system', content: baseHotpotqaPrompt }, 54 | { role: 'user', content: question }, 55 | ]; 56 | const result = await openai.chat.completions.create({ 57 | messages, 58 | model: 'gpt-4-1106-preview', 59 | max_tokens: 4000, 60 | stream: false, 61 | temperature: 0.4, 62 | }); 63 | 64 | if (!result.choices[0]?.message?.content) { 65 | throw new Error('No response in hotpotqa eval'); 66 | } 67 | 68 | const answerScore = await score({ 69 | question, 70 | correctAnswer: answer, 71 | answer: result.choices[0].message.content, 72 | }); 73 | 74 | const verdict = answerScore > 6 ? 
'correct' : 'incorrect'; 75 | 76 | return { 77 | question, 78 | answer, 79 | response: result.choices[0].message.content, 80 | answerScore, 81 | verdict, 82 | }; 83 | }); 84 | const results = await Promise.all(resultsPromises); 85 | 86 | const savedPath = await saveEvalResults(results); 87 | console.log(`saved results to ${savedPath}`); 88 | }; 89 | 90 | export { evalHotpotQa }; 91 | -------------------------------------------------------------------------------- /src/utils/evals/score.ts: -------------------------------------------------------------------------------- 1 | import { type ChatCompletionMessageParam } from 'openai/resources/index.mjs'; 2 | import { AiProvider } from '../ai.js'; 3 | import { scorePrompt } from '../../config/prompts/score.js'; 4 | import { type ScorePromptParameters } from '../../types/score.js'; 5 | 6 | const score = async ({ 7 | question, 8 | answer, 9 | correctAnswer, 10 | }: ScorePromptParameters) => { 11 | const ai = AiProvider.getInstance({ 12 | openaiKey: process.env?.['OPENAI_API_KEY'] ?? '', 13 | }); 14 | const openai = ai.getScorer(); 15 | 16 | const messages = [ 17 | { 18 | role: 'system', 19 | content: scorePrompt({ question, answer, correctAnswer }), 20 | }, 21 | ] as ChatCompletionMessageParam[]; 22 | 23 | try { 24 | const aiResponse = await openai.chat.completions.create({ 25 | temperature: 0.3, 26 | max_tokens: 4000, 27 | messages, 28 | model: 'gpt-4-1106-preview', 29 | stream: false, 30 | }); 31 | 32 | if (!aiResponse.choices[0]?.message?.content) { 33 | throw new Error('No response in score eval'); 34 | } 35 | 36 | const { content } = aiResponse.choices[0].message; 37 | 38 | const scoreNumber = Number.parseInt(content, 10); 39 | 40 | if (Number.isNaN(scoreNumber)) { 41 | // console.log(`Score is NaN: ${aiResponse.choices[0].message.content}`); 42 | // console.log('attempting to extract the score from the response'); 43 | 44 | const pattern = /\d+/; 45 | const match = pattern.exec(content); 46 | const number = match ? 
Number.parseInt(match[0], 10) : 0; 47 | 48 | return number; 49 | } 50 | 51 | return scoreNumber; 52 | } catch (error) { 53 | throw new Error(error as string); 54 | } 55 | }; 56 | 57 | export { score }; 58 | -------------------------------------------------------------------------------- /src/utils/evals/strategyqa.ts: -------------------------------------------------------------------------------- 1 | import { resolve } from 'node:path'; 2 | import { writeFile } from 'node:fs/promises'; 3 | import { type ChatCompletionMessageParam } from 'openai/resources/index.mjs'; 4 | import StrategyQa from '../../../data/strategyqa/strategyqa_dev.json' assert { type: 'json' }; 5 | import { baseStrategyQaPrompt } from '../../config/prompts/strategyqa.js'; 6 | import { AiProvider } from '../ai.js'; 7 | import { shuffleArray } from '../shuffle.js'; 8 | import { score } from './score.js'; 9 | 10 | const saveEvalResults = async (results: any[]) => { 11 | const date = new Date(); 12 | const path = resolve( 13 | 'data/strategyqa/runs', 14 | `GPT4-${date.getFullYear()}${ 15 | date.getMonth() + 1 16 | }${date.getDay()}-${date.getHours()}:${date.getMinutes()}.json`, 17 | ); 18 | 19 | const correctAnswers = results.filter( 20 | (result: any) => result.verdict.toLowerCase() === 'correct'.toLowerCase(), 21 | ).length; 22 | 23 | const totalAnswers = results.length; 24 | 25 | const percentScore = (correctAnswers / totalAnswers) * 100; 26 | 27 | const summary = { 28 | correctAnswers, 29 | totalAnswers, 30 | percentScore, 31 | }; 32 | 33 | const data = { 34 | summary, 35 | results, 36 | }; 37 | 38 | await writeFile(path, JSON.stringify(data, null, 2)); 39 | return path; 40 | }; 41 | 42 | const evalStrategyQa = async () => { 43 | shuffleArray(StrategyQa); 44 | const strategyQa = StrategyQa.slice(0, 20); 45 | 46 | const ai = AiProvider.getInstance({ 47 | openaiKey: process.env?.['OPENAI_API_KEY'] ?? '', 48 | }); 49 | const openai = ai.getOpenAi(); 50 | 51 | const resultsPromises = strategyQa.map(async ({ question, answer }) => { 52 | const messages: ChatCompletionMessageParam[] = [ 53 | { role: 'system', content: baseStrategyQaPrompt }, 54 | { role: 'user', content: question }, 55 | ]; 56 | const result = await openai.chat.completions.create({ 57 | messages, 58 | model: 'gpt-4-1106-preview', 59 | max_tokens: 4000, 60 | stream: false, 61 | temperature: 0.4, 62 | }); 63 | 64 | if (!result.choices[0]?.message?.content) { 65 | throw new Error('No response in strategyqa eval'); 66 | } 67 | 68 | const answerScore = await score({ 69 | question, 70 | correctAnswer: answer.toString(), 71 | answer: result.choices[0].message.content, 72 | }); 73 | 74 | const verdict = answerScore > 6 ? 
'correct' : 'incorrect'; 75 | 76 | return { 77 | question, 78 | answer, 79 | response: result.choices[0].message.content, 80 | answerScore, 81 | verdict, 82 | }; 83 | }); 84 | const results = await Promise.all(resultsPromises); 85 | const path = await saveEvalResults(results); 86 | console.log(`Saved eval results to ${path}`); 87 | }; 88 | 89 | export { evalStrategyQa }; 90 | -------------------------------------------------------------------------------- /src/utils/shuffle.ts: -------------------------------------------------------------------------------- 1 | function shuffleArray(array: any[]) { 2 | for (let i = array.length - 1; i > 0; i--) { 3 | const j = Math.floor(Math.random() * (i + 1)); 4 | [array[i], array[j]] = [array[j], array[i]]; 5 | } 6 | } 7 | 8 | export { shuffleArray }; 9 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "@sindresorhus/tsconfig", 3 | "compilerOptions": { 4 | "outDir": "dist", 5 | "noEmitOnError": false, 6 | "resolveJsonModule": true, 7 | "module": "NodeNext", 8 | "moduleResolution": "NodeNext" 9 | }, 10 | "include": ["src"] 11 | } 12 | --------------------------------------------------------------------------------
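For reference, the utilities in this repository compose into a single AutoReason pass: `autoCotGenerator` decomposes a question into explicit reasoning traces, the main model answers the question with those traces as its system prompt, and `score` grades the answer against the reference answer. The sketch below is illustrative only: it reuses the repository's own exports (`AiProvider`, `autoCotGenerator`, `score`) and the model name used in the evals, assumes `OPENAI_API_KEY` is set in the environment, and hard-codes a placeholder question and reference answer that are not part of the published evaluation.

```ts
// Minimal end-to-end sketch (illustrative): one AutoReason pass over a single question.
// Assumes OPENAI_API_KEY is set in .env; the question and reference answer are placeholders.
// Import paths assume this file lives in src/, next to cli.ts.
import 'dotenv/config.js';
import { type ChatCompletionMessageParam } from 'openai/resources/index.mjs';
import { AiProvider } from './utils/ai.js';
import { autoCotGenerator } from './utils/evals/autocot.js';
import { score } from './utils/evals/score.js';

const question = 'Is cow methane safer for the environment than cars?'; // placeholder
const correctAnswer = 'no'; // placeholder reference answer

const ai = AiProvider.getInstance({
  openaiKey: process.env?.['OPENAI_API_KEY'] ?? '',
});
const openai = ai.getOpenAi();

// 1. Decompose the implicit query into explicit reasoning traces.
const prompt = await autoCotGenerator({ question });

// 2. Answer the question with the generated traces as the system prompt.
const messages: ChatCompletionMessageParam[] = [
  { role: 'system', content: prompt },
  { role: 'user', content: question },
];
const result = await openai.chat.completions.create({
  messages,
  model: 'gpt-4-1106-preview',
  stream: false,
  temperature: 0.5,
});
const answer = result.choices[0]?.message?.content ?? '';

// 3. Score the answer from 0 to 10; the evals count a score above 6 as correct.
const answerScore = await score({ question, answer, correctAnswer });
console.log({ answer, answerScore, verdict: answerScore > 6 ? 'correct' : 'incorrect' });
```

The AutoReason evals (`autocot-hotpotqa.ts`, `autocot-strategyqa.ts`) run this same loop over a shuffled 20-question sample of each dataset and write the scored results under `data/<dataset>/runs/`.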