├── .github └── workflows │ ├── dispatch.yaml │ ├── pull_request.yaml │ ├── push_main.yaml │ ├── release.yaml │ └── run_tests.yaml ├── .gitignore ├── LICENSE ├── README.md ├── package-lock.json ├── package.json ├── scripts └── install-binary.js ├── src └── gptscript.ts ├── tests ├── fixtures │ ├── acorn-labs-context.gpt │ ├── chat.gpt │ ├── credential-override-windows.gpt │ ├── credential-override.gpt │ ├── empty.gpt │ ├── global-tools.gpt │ ├── parse-with-metadata.gpt │ ├── test-with-context.gpt │ └── test.gpt └── gptscript.test.ts └── tsconfig.json /.github/workflows/dispatch.yaml: -------------------------------------------------------------------------------- 1 | name: Update GPTScript Version 2 | on: 3 | repository_dispatch: 4 | types: release 5 | 6 | jobs: 7 | update-gptscript-dep: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | with: 12 | token: ${{ secrets.BOT_GH_TOKEN }} 13 | - name: Install jq 14 | uses: dcarbone/install-jq-action@v2.1.0 15 | - name: Update GPTScript Version 16 | run: | 17 | jq '.version = "${{ github.event.client_payload.tag }}"' package.json > temp.json && mv temp.json package.json 18 | sed -i 's/version: "v.*"/version: "${{ github.event.client_payload.tag }}"/' scripts/install-binary.js 19 | - uses: actions/setup-node@v4 20 | with: 21 | node-version: 21 22 | - name: Install 23 | run: npm i 24 | - uses: stefanzweifel/git-auto-commit-action@v5 25 | with: 26 | commit_message: Automated GPTScript Version Update 27 | file_pattern: 'package*.json scripts/install-binary.js' 28 | tagging_message: ${{ github.event.client_payload.tag }} 29 | - name: Create a GitHub release 30 | uses: ncipollo/release-action@v1 31 | with: 32 | tag: ${{ github.event.client_payload.tag }} 33 | name: Release ${{ github.event.client_payload.tag }} 34 | generateReleaseNotes: true 35 | prerelease: ${{ contains(github.event.client_payload.tag, '-rc') }} 36 | -------------------------------------------------------------------------------- 
/.github/workflows/pull_request.yaml: -------------------------------------------------------------------------------- 1 | name: Pull Request 2 | on: 3 | pull_request_target: 4 | types: 5 | - opened 6 | - synchronize 7 | - reopened 8 | - labeled 9 | branches: 10 | - main 11 | paths-ignore: 12 | - README.md 13 | 14 | jobs: 15 | check-perms: 16 | if: ${{ !github.event.pull_request.merged }} 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Get User Permission 20 | id: checkAccess 21 | uses: actions-cool/check-user-permission@v2 22 | with: 23 | require: write 24 | username: ${{ github.triggering_actor }} 25 | env: 26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 27 | - name: Check User Permission 28 | if: steps.checkAccess.outputs.require-result == 'false' 29 | run: | 30 | echo "${{ github.triggering_actor }} does not have permissions on this repo." 31 | echo "Current permission level is ${{ steps.checkAccess.outputs.user-permission }}" 32 | echo "Job originally triggered by ${{ github.actor }}" 33 | exit 1 34 | run-tests: 35 | uses: gptscript-ai/node-gptscript/.github/workflows/run_tests.yaml@main 36 | needs: check-perms 37 | with: 38 | git_ref: ${{ github.event.pull_request.head.sha }} 39 | secrets: 40 | OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} 41 | ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} 42 | -------------------------------------------------------------------------------- /.github/workflows/push_main.yaml: -------------------------------------------------------------------------------- 1 | name: Push Main 2 | on: 3 | push: 4 | branches: 5 | - main 6 | paths-ignore: 7 | - README.md 8 | 9 | jobs: 10 | run-tests: 11 | uses: gptscript-ai/node-gptscript/.github/workflows/run_tests.yaml@main 12 | with: 13 | git_ref: '' 14 | secrets: 15 | OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} 16 | ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} 17 | -------------------------------------------------------------------------------- 
/.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Publish to NPM 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" 7 | 8 | jobs: 9 | release: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | with: 14 | fetch-depth: 1 15 | - uses: actions/setup-node@v4 16 | with: 17 | node-version: 21 18 | registry-url: 'https://registry.npmjs.org' 19 | - name: Install dependencies 20 | run: npm install -g typescript && npm install 21 | - name: Build 22 | run: npm run build 23 | - name: Publish release 24 | run: npm publish --access public 25 | env: 26 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/run_tests.yaml: -------------------------------------------------------------------------------- 1 | name: Run Tests 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | git_ref: 7 | required: true 8 | type: string 9 | secrets: 10 | OPENAI_API_KEY: 11 | required: true 12 | ANTHROPIC_API_KEY: 13 | required: true 14 | 15 | jobs: 16 | test-linux: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | with: 21 | fetch-depth: 1 22 | ref: ${{ github.event.pull_request.head.sha }} 23 | - uses: actions/setup-node@v4 24 | with: 25 | node-version: 21 26 | - name: Install gptscript 27 | run: | 28 | curl https://get.gptscript.ai/releases/default_linux_amd64_v1/gptscript -o ./gptscriptexe 29 | chmod +x ./gptscriptexe 30 | - name: Install dependencies 31 | run: npm install 32 | - name: Run Tests 33 | env: 34 | GPTSCRIPT_BIN: ./gptscriptexe 35 | OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} 36 | ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} 37 | NODE_GPTSCRIPT_SKIP_INSTALL_BINARY: true 38 | run: npm test 39 | 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 
.DS_Store 2 | Thumbs.db 3 | .cache 4 | .project 5 | .settings 6 | .tmproj 7 | *.esproj 8 | nbproject 9 | *.sublime-project 10 | *.sublime-workspace 11 | *.sublime-* 12 | .idea 13 | 14 | # common node files and folders 15 | # https://github.com/github/gitignore/blob/master/Node.gitignore 16 | lib-cov 17 | *.seed 18 | *.log 19 | *.csv 20 | *.dat 21 | *.out 22 | *.pid 23 | *.gz 24 | 25 | pids 26 | logs 27 | results 28 | 29 | node_modules 30 | npm-debug.log 31 | 32 | build/Release # Compiled binary addons (http://nodejs.org/api/addons.html) 33 | 34 | # always-ignore extensions 35 | *.diff 36 | *.err 37 | *.orig 38 | *.rej 39 | *.swo 40 | *.swp 41 | *.vi 42 | *~ 43 | 44 | # Coverage directory used by tools like nyc 45 | .nyc_output 46 | coverage 47 | 48 | # nodemon 49 | .monitor 50 | 51 | bin/ 52 | dist/ 53 | lib/ 54 | workspace/ 55 | 56 | .npmrc 57 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 
23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. 
You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 
176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # node-gptscript 2 | 3 | This module provides a set of functions to interact with gptscripts. It allows for executing scripts, listing available 4 | tools and models, and more. The functions are designed to be used in a Node.js environment. 5 | 6 | ## Installation 7 | 8 | To use this module, you need to have Node.js installed on your system. Then, you can install the module via npm: 9 | 10 | ```bash 11 | npm install @gptscript-ai/gptscript 12 | ``` 13 | 14 | This will install the gptscript binary in the `node_modules/@gptscript-ai/gptscript/bin` directory. 15 | 16 | You can opt out of this behavior by setting the `NODE_GPTSCRIPT_SKIP_INSTALL_BINARY=true` environment variable before 17 | running `npm install`. 18 | 19 | ## Usage 20 | 21 | To use the module and run gptscripts, you need to first set the `OPENAI_API_KEY` environment variable to your OpenAI API 22 | key. You can also set the `GPTSCRIPT_BIN` environment variable to change the execution of the gptscripts. 23 | 24 | To ensure it is working properly, you can run the following command: 25 | 26 | ```bash 27 | npm exec -c "gptscript https://get.gptscript.ai/echo.gpt --input 'Hello, World!'" 28 | ``` 29 | 30 | You will see "Hello, World!" in the output of the command. 31 | 32 | ## GPTScript 33 | 34 | The GPTScript instance allows the caller to run gptscript files, tools, and other operations (see below). Note that the 35 | intention is that a single instance is all you need for the life of your application, you should call `close()` on the 36 | instance when you are done. 37 | 38 | ## Global Options 39 | 40 | When creating a `GPTScript` instance, you can pass the following global options. These options are also available as 41 | run `Options`. 
Except `Env`, anything specified as a run option will take precedence over the global 42 | option. Any `env` provided in the run options is appended. 43 | 44 | - `APIKey`: Specify an OpenAI API key for authenticating requests 45 | - `BaseURL`: A base URL for an OpenAI compatible API (the default is `https://api.openai.com/v1`) 46 | - `DefaultModel`: The default model to use for chat completion requests 47 | - `DefaultModelProvider`: The default model provider to use for chat completion requests 48 | - `Env`: Replace the system's environment variables with these in the form `KEY=VAL` 49 | 50 | ## Run Options 51 | 52 | These are optional options that can be passed to the various `exec` functions. 53 | None of the options is required, and the defaults will reduce the number of calls made to the Model API. 54 | As noted above, the Global Options are also available to specify here. These options would take precedence. 55 | 56 | - `cache`: Enable or disable caching. Default (true). 57 | - `cacheDir`: Specify the cache directory. 58 | - `quiet`: No output logging 59 | - `subTool`: Use tool of this name, not the first tool 60 | - `input`: Input arguments for the tool run 61 | - `workspace`: Directory to use for the workspace, if specified it will not be deleted on exit 62 | - `chatState`: The chat state to continue, or null to start a new chat and return the state 63 | - `confirm`: Prompt before running potentially dangerous commands 64 | - `prompt`: Allow scripts to prompt the user for input 65 | - `env`: Extra environment variables to pass to the script in the form `KEY=VAL` 66 | 67 | ## Functions 68 | 69 | ### listModels 70 | 71 | Lists all the available models, returns a list. 
72 | 73 | **Usage:** 74 | 75 | ```javascript 76 | const gptscript = require('@gptscript-ai/gptscript'); 77 | 78 | async function listModels() { 79 | let models = []; 80 | const g = new gptscript.GPTScript(); 81 | try { 82 | models = await g.listModels(); 83 | } catch (error) { 84 | console.error(error); 85 | } 86 | g.close(); 87 | } 88 | ``` 89 | 90 | ### version 91 | 92 | Get the version of the current `gptscript` binary being used for the calls. 93 | 94 | **Usage:** 95 | 96 | ```javascript 97 | const gptscript = require('@gptscript-ai/gptscript'); 98 | 99 | async function version() { 100 | const g = new gptscript.GPTScript(); 101 | try { 102 | console.log(await g.version()); 103 | } catch (error) { 104 | console.error(error); 105 | } 106 | g.close(); 107 | } 108 | ``` 109 | 110 | ### evaluate 111 | 112 | Executes a prompt with optional arguments. The first argument can be a `ToolDef`, an array of `ToolDef`s, or a `string` 113 | representing the contents of a gptscript file. 114 | 115 | ```javascript 116 | const gptscript = require('@gptscript-ai/gptscript'); 117 | 118 | const t = { 119 | instructions: "Who was the president of the united states in 1928?" 120 | }; 121 | 122 | const g = new gptscript.GPTScript(); 123 | try { 124 | const run = await g.evaluate(t); 125 | console.log(await run.text()); 126 | } catch (error) { 127 | console.error(error); 128 | } 129 | g.close(); 130 | ``` 131 | 132 | ### run 133 | 134 | Executes a GPT script file with optional input and arguments. The script is relative to the caller's source directory. 
135 | 136 | ```javascript 137 | const gptscript = require('@gptscript-ai/gptscript'); 138 | 139 | const opts = { 140 | disableCache: true, 141 | input: "--input World" 142 | }; 143 | 144 | async function execFile() { 145 | const g = new gptscript.GPTScript(); 146 | try { 147 | const run = await g.run('./hello.gpt', opts); 148 | console.log(await run.text()); 149 | } catch (e) { 150 | console.error(e); 151 | } 152 | g.close(); 153 | } 154 | ``` 155 | 156 | ### Getting events during runs 157 | 158 | The `Run` object exposes event handlers so callers can access the progress events as the script is running. 159 | 160 | The `Run` object exposes these events with their corresponding event type: 161 | 162 | Subscribing to `RunEventType.Event` gets you all events. 163 | 164 | ```javascript 165 | const gptscript = require('@gptscript-ai/gptscript'); 166 | 167 | const opts = { 168 | disableCache: true, 169 | input: "--testin how high is that there mouse?" 170 | }; 171 | 172 | async function streamExecFileWithEvents() { 173 | const g = new gptscript.GPTScript(); 174 | try { 175 | const run = await g.run('./test.gpt', opts); 176 | 177 | run.on(gptscript.RunEventType.Event, data => { 178 | console.log(`event: ${JSON.stringify(data)}`); 179 | }); 180 | 181 | await run.text(); 182 | } catch (e) { 183 | console.error(e); 184 | } 185 | g.close(); 186 | } 187 | ``` 188 | 189 | ### Confirm 190 | 191 | If a gptscript can run commands, you may want to inspect and confirm/deny the command before they are run. This can be 192 | done with the `confirm` method. A user should listen for the `RunEventType.CallConfirm` event. 
193 | 194 | ```javascript 195 | const gptscript = require('@gptscript-ai/gptscript'); 196 | 197 | const opts = { 198 | disableCache: true, 199 | input: "--testin how high is that there mouse?", 200 | confirm: true 201 | }; 202 | 203 | async function streamExecFileWithEvents() { 204 | const g = new gptscript.GPTScript(); 205 | try { 206 | const run = await g.run('./test.gpt', opts); 207 | 208 | run.on(gptscript.RunEventType.CallConfirm, async (data: gptscript.CallFrame) => { 209 | // data.Tool has the information for the command being run. 210 | // data.Input has the input for this command 211 | 212 | await g.confirm({ 213 | id: data.id, 214 | accept: true, // false if the command should not be run 215 | message: "", // Explain the denial (ignored if accept is true) 216 | }) 217 | }); 218 | 219 | await run.text(); 220 | } catch (e) { 221 | console.error(e); 222 | } 223 | g.close(); 224 | } 225 | ``` 226 | 227 | ### Prompt 228 | 229 | A gptscript may need to prompt the user for information like credentials. A user should listen for 230 | the `RunEventType.Prompt`. Note that if `prompt: true` is not set in the options, then an error will occur if a 231 | gptscript attempts to prompt the user. 232 | 233 | ```javascript 234 | const gptscript = require('@gptscript-ai/gptscript'); 235 | 236 | const opts = { 237 | disableCache: true, 238 | input: "--testin how high is that there mouse?", 239 | prompt: true 240 | }; 241 | 242 | async function streamExecFileWithEvents() { 243 | const g = new gptscript.GPTScript(); 244 | try { 245 | const run = await g.run('./test.gpt', opts); 246 | 247 | run.on(gptscript.RunEventType.Prompt, async (data: gptscript.PromptFrame) => { 248 | // data will have the information for what the gptscript is prompting. 
249 | 250 | await g.promptResponse({ 251 | id: data.id, 252 | // response is a map of fields to values 253 | responses: {[data.fields[0]]: "Some Value"} 254 | }) 255 | }); 256 | 257 | await run.text(); 258 | } catch (e) { 259 | console.error(e); 260 | } 261 | g.close(); 262 | } 263 | ``` 264 | 265 | ### Chat support 266 | 267 | For tools that support chat, you can use the `nextChat` method on the run object to continue the chat. This method takes 268 | a string representing the next chat message from the user. 269 | 270 | If the chat can/should continue, then the `Run`'s state will be `RunState.Continue`. Note that calling `nextChat` on 271 | the same `Run` object more than once is an error. Each call to `nextChat` will return a new `Run` instance, so, the caller can keep track of the 272 | chat `Run`s, if desired. 273 | 274 | Here is an example flow for chat. 275 | 276 | ```javascript 277 | const gptscript = require('@gptscript-ai/gptscript'); 278 | 279 | const opts = { 280 | disableCache: true 281 | }; 282 | 283 | const t = { 284 | chat: true, 285 | tools: ["sys.chat.finish"], 286 | instructions: "You are a chat bot. Don't finish the conversation until I say 'bye'." 287 | }; 288 | 289 | async function streamExecFileWithEvents() { 290 | const g = new gptscript.GPTScript(); 291 | let run = await g.evaluate(t, opts); 292 | try { 293 | // Wait for the initial run to complete. 294 | await run.text(); 295 | 296 | while (run.state === gptscript.RunState.Continue) { 297 | // ...Get the next input from the user somehow... 298 | 299 | run = run.nextChat(inputFromUser) 300 | 301 | // Get the output from gptscript 302 | const output = await run.text() 303 | 304 | // Display the output to the user... 305 | } 306 | } catch (e) { 307 | console.error(e); 308 | } 309 | 310 | g.close(); 311 | 312 | // The state here should either be RunState.Finished (on success) or RunState.Error (on error). 
313 | console.log(run.state) 314 | } 315 | ``` 316 | 317 | ## Types 318 | 319 | ### Tool Parameters 320 | 321 | | Argument | Type | Default | Description | 322 | |----------------|------------------|-------------|----------------------------------------------------------------------------------------------------------------------------| 323 | | name | string | `""` | The name of the tool. Optional only on the first tool if there are multiple tools defined. | 324 | | description | string | `""` | A brief description of what the tool does, this is important for explaining to the LLM when it should be used. | 325 | | tools | array | `[]` | An array of tools that the current tool might depend on or use. | 326 | | maxTokens | number/undefined | `undefined` | The maximum number of tokens to be used. Prefer `undefined` for uninitialized or optional values. | 327 | | modelName | string | `""` | The model that the tool uses, if applicable. | 328 | | cache | boolean | `true` | Whether caching is enabled for the tool. | 329 | | temperature | number/undefined | `undefined` | The temperature setting for the model, affecting randomness. `undefined` for default behavior. | 330 | | args | object | `{}` | Additional arguments specific to the tool, described by OpenAPIv3 spec. | 331 | | internalPrompt | boolean | `false` | An internal prompt used by the tool, if any. | 332 | | instructions | string | `""` | Instructions on how to use the tool. | 333 | | jsonResponse | boolean | `false` | Whether the tool returns a JSON response instead of plain text. You must include the word 'json' in the body of the prompt | 334 | | export | string[] | [] | A list of tools exported by this tool | 335 | 336 | ## License 337 | 338 | Copyright (c) 2024, [Acorn Labs, Inc.](https://www.acorn.io) 339 | 340 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the 341 | License. 
You may obtain a copy of the License at 342 | 343 | 344 | 345 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an " 346 | AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific 347 | language governing permissions and limitations under the License. 348 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@gptscript-ai/gptscript", 3 | "version": "v0.9.5", 4 | "description": "Run gptscript in node.js", 5 | "source": "src/gptscript.ts", 6 | "main": "dist/gptscript.js", 7 | "types": "dist/gptscript.d.ts", 8 | "type": "module", 9 | "repository": { 10 | "type": "git", 11 | "url": "git+https://github.com/gptscript-ai/node-gptscript.git" 12 | }, 13 | "bin": { 14 | "gptscript": "bin/gptscript" 15 | }, 16 | "scripts": { 17 | "pretest": "npm run install-binary", 18 | "install-binary": "node scripts/install-binary.js", 19 | "test": "node --experimental-vm-modules node_modules/jest/bin/jest.js", 20 | "postinstall": "node scripts/install-binary.js", 21 | "clean": "rm -rf dist", 22 | "prepare": "npm run build", 23 | "build": "tsc" 24 | }, 25 | "keywords": [ 26 | "gptscript", 27 | "gpt", 28 | "AI" 29 | ], 30 | "author": "Bill Maxwell ", 31 | "license": "Apache-2.0", 32 | "dependencies": { 33 | "@types/sync-fetch": "^0.4.3", 34 | "adm-zip": "^0.5.10", 35 | "node-downloader-helper": "^2.1.9", 36 | "tar": "^6.2.0" 37 | }, 38 | "devDependencies": { 39 | "@babel/core": "^7.24.5", 40 | "@babel/preset-env": "^7.24.5", 41 | "@babel/preset-typescript": "^7.24.1", 42 | "@swc/cli": "^0.3.9", 43 | "@swc/core": "^1.4.2", 44 | "@types/jest": "^29.5.12", 45 | "@types/node": "^20.12.8", 46 | "babel-loader": "^9.1.3", 47 | "babel-plugin-transform-import-meta": "^2.2.1", 48 | "copyfiles": "^2.4.1", 49 | "jest": "^29.7.0", 50 
/**
 * Downloads an archive from `url` into `saveDirectory` and extracts it there,
 * deleting the archive afterwards.
 *
 * Bug fixed: the promise previously resolved immediately after the download
 * ended, before the asynchronous `tar.x` extraction completed, so callers
 * (and the process `exit`) could race ahead of a half-extracted binary.
 * `resolve()` is now only called once extraction has actually finished.
 *
 * @param {string} url - Archive URL; must end in `.zip` or `.tar.gz`.
 * @param {string} saveDirectory - Directory to download into and extract to.
 * @returns {Promise<void>} Resolves when extraction and cleanup are done.
 */
async function downloadAndExtract(url, saveDirectory) {
    const dlh = new DownloaderHelper(url, saveDirectory);

    return new Promise((resolve, reject) => {
        dlh.on('end', () => {
            const downloadedFilePath = path.join(dlh.getDownloadPath());
            if (url.endsWith('.zip')) {
                try {
                    // AdmZip extraction is synchronous, so we can clean up
                    // and resolve right away.
                    const zip = new AdmZip(downloadedFilePath);
                    zip.extractAllTo(saveDirectory, true);
                    fs.unlinkSync(downloadedFilePath);
                    resolve();
                } catch (error) {
                    reject(error);
                }
            } else if (url.endsWith('.tar.gz')) {
                tar.x({
                    file: downloadedFilePath,
                    cwd: saveDirectory,
                }).then(() => {
                    fs.unlinkSync(downloadedFilePath); // Delete the tar.gz file after extraction
                    resolve(); // Only resolve once extraction has completed.
                }).catch((error) => reject(error));
            } else {
                // Previously an unknown suffix silently resolved with the
                // archive left unextracted; fail loudly instead.
                reject(new Error(`unsupported archive type: ${url}`));
            }
        });
        dlh.on('error', (error) => reject(error));
        dlh.on('progress.throttled', (downloadEvents) => {
            const percentageComplete =
                downloadEvents.progress < 100
                    ? downloadEvents.progress.toFixed(2)
                    : 100;
            console.info(`downloaded: ${percentageComplete}%`);
        });

        dlh.start();
    });
}
date...exiting') 114 | process.exit(0); 115 | } 116 | } 117 | } 118 | 119 | (async () => { 120 | await needToInstall(); 121 | if (process.env.NODE_GPTSCRIPT_SKIP_INSTALL_BINARY === 'true') { 122 | console.info('Skipping binary download'); 123 | process.exit(0); 124 | } 125 | 126 | console.log(`Downloading and extracting gptscript binary from ${url}...`); 127 | try { 128 | await downloadAndExtract(url, outputDir); 129 | } catch (error) { 130 | console.error('Error downloading and extracting:', error); 131 | } 132 | })(); 133 | -------------------------------------------------------------------------------- /src/gptscript.ts: -------------------------------------------------------------------------------- 1 | import http from "http" 2 | import path from "path" 3 | import child_process from "child_process" 4 | import {fileURLToPath} from "url" 5 | import {gunzipSync} from "zlib" 6 | import https from "https" 7 | 8 | export interface GlobalOpts { 9 | URL?: string 10 | Token?: string 11 | CacheDir?: string 12 | APIKey?: string 13 | BaseURL?: string 14 | DefaultModel?: string 15 | DefaultModelProvider?: string 16 | DatasetTool?: string 17 | WorkspaceTool?: string 18 | Env?: string[] 19 | } 20 | 21 | function globalOptsToEnv(env: NodeJS.ProcessEnv, opts?: GlobalOpts) { 22 | if (!opts) { 23 | return 24 | } 25 | 26 | if (opts.APIKey) { 27 | env["OPENAI_API_KEY"] = opts.APIKey 28 | } 29 | if (opts.BaseURL) { 30 | env["OPENAI_BASE_URL"] = opts.BaseURL 31 | } 32 | if (opts.DefaultModel) { 33 | env["GPTSCRIPT_SDKSERVER_DEFAULT_MODEL"] = opts.DefaultModel 34 | } 35 | if (opts.DefaultModelProvider) { 36 | env["GPTSCRIPT_SDKSERVER_DEFAULT_MODEL_PROVIDER"] = opts.DefaultModelProvider 37 | } 38 | } 39 | 40 | export interface RunOpts { 41 | input?: string 42 | disableCache?: boolean 43 | quiet?: boolean 44 | chdir?: string 45 | subTool?: string 46 | workspace?: string 47 | chatState?: string 48 | confirm?: boolean 49 | prompt?: boolean 50 | credentialOverrides?: string[] 51 | 
    /**
     * Creates a GPTScript client. The first instance created without an
     * explicit URL (and without GPTSCRIPT_URL in the environment) spawns a
     * shared `gptscript sys.sdkserver` child process; subsequent instances
     * reuse it. Server process and URL are class-static, shared by all
     * instances.
     */
    constructor(opts?: GlobalOpts) {
        this.opts = opts || {}
        // Tracked so close() can shut the shared server down when the last
        // instance is closed.
        GPTScript.instanceCount++

        // Only start the embedded SDK server when no server is already
        // running and no URL was supplied by the caller.
        let startSDK = !GPTScript.serverProcess && !GPTScript.serverURL && !this.opts.URL

        if (!GPTScript.serverURL) {
            GPTScript.serverURL = process.env.GPTSCRIPT_URL ?? ""
            // An environment-provided URL also suppresses the embedded server.
            startSDK = startSDK && !GPTScript.serverURL
        }

        if (!this.opts.Token) {
            this.opts.Token = process.env.GPTSCRIPT_TOKEN
        }

        if (startSDK) {
            let env = process.env
            if (this.opts.Env) {
                // When an explicit Env is given, start from a minimal
                // environment (NODE_ENV only) instead of inheriting everything.
                env = {
                    "NODE_ENV": process.env.NODE_ENV
                }
                for (const v of this.opts.Env) {
                    // Entries are "KEY=VALUE"; a bare "KEY" maps to "".
                    const equalIndex = v.indexOf("=")
                    if (equalIndex === -1) {
                        env[v] = ""
                    } else {
                        env[v.substring(0, equalIndex)] = v.substring(equalIndex + 1)
                    }
                }
            }

            globalOptsToEnv(env, this.opts)
            // Best-effort cleanup of the child server on process exit.
            // NOTE(review): `kill(code)` passes the exit code where a signal
            // is expected — presumably intentional shorthand; verify.
            process.on("exit", (code) => {
                if (GPTScript.serverProcess) {
                    GPTScript.serverProcess.stdin?.end()
                    GPTScript.serverProcess.kill(code)
                }
            })

            // Port 0 lets the OS pick a free port; the server reports the
            // actual address on stderr.
            GPTScript.serverProcess = child_process.spawn(getCmdPath(), ["sys.sdkserver", "--listen-address", "127.0.0.1:0"], {
                env: env,
                stdio: ["pipe", "ignore", "pipe"]
            })

            // The first stderr line carries the listen address (possibly as
            // "key=addr"); capture it once, then stop listening.
            GPTScript.serverProcess.stderr?.on("data", (data) => {
                let url = data.toString().trim()
                if (url.includes("=")) {
                    url = url.substring(url.indexOf("=") + 1)
                }

                GPTScript.serverURL = `http://${url}`

                GPTScript.serverProcess.stderr?.removeAllListeners()
            })
        } else {
            // Reusing an existing server: normalize the URL and make sure the
            // environment passed to tools can reach it.
            if (!this.opts.URL) {
                this.opts.URL = GPTScript.serverURL
            }
            if (this.opts.URL !== "" && !this.opts.URL.startsWith("http://") && !this.opts.URL.startsWith("https://")) {
                this.opts.URL = "http://" + this.opts.URL
            }

            if (!this.opts.Env) {
                this.opts.Env = Object.entries(process.env).map(([k, v]) => `${k}=${v}`)
            }
            if (this.opts.URL) {
                this.opts.Env.push(`GPTSCRIPT_URL=${this.opts.URL}`)
            }

            if (this.opts.Token) {
                this.opts.Env.push(`GPTSCRIPT_TOKEN=${this.opts.Token}`)
            }
        }
    }
GPTScript.serverURL = process.env.GPTSCRIPT_URL ?? "" 166 | GPTScript.serverProcess.kill("SIGTERM") 167 | GPTScript.serverProcess.stdin?.end() 168 | } 169 | } 170 | 171 | async listModels(providers?: string[], credentialOverrides?: string[]): Promise> { 172 | if (this.opts.DefaultModelProvider) { 173 | if (!providers) { 174 | providers = [] 175 | } 176 | providers.push(this.opts.DefaultModelProvider) 177 | } 178 | const result = await this.runBasicCommand("list-models", { 179 | "providers": providers, 180 | "env": this.opts.Env, 181 | "credentialOverrides": credentialOverrides 182 | }) 183 | return await JSON.parse(result) as Array 184 | } 185 | 186 | version(): Promise { 187 | return this.runBasicCommand("version") 188 | } 189 | 190 | async runBasicCommand(cmd: string, body?: any): Promise { 191 | if (!this.opts.URL) { 192 | await this.testGPTScriptURL(20) 193 | } 194 | const r = new RunSubcommand(cmd, "", {URL: this.opts.URL, Token: this.opts.Token}) 195 | r.requestNoStream(body) 196 | return r.text() 197 | } 198 | 199 | /** 200 | * Runs a tool with the specified name and options. 201 | * 202 | * @param {string} toolName - The name of the tool to run. Can be a file path, URL, or GitHub URL. 203 | * @param {RunOpts} [opts={}] - The options for running the tool. 204 | * @return {Run} The Run object representing the running tool. 205 | */ 206 | async run(toolName: string, opts: RunOpts = {}): Promise { 207 | if (!this.opts.URL) { 208 | await this.testGPTScriptURL(20) 209 | } 210 | if (this.opts.Env) { 211 | opts.env = this.opts.Env.concat(opts.env || []) 212 | } 213 | 214 | return (new Run("run", toolName, {...this.opts, ...opts})).nextChat(opts.input) 215 | } 216 | 217 | /** 218 | * Evaluates the given tool and returns a Run object. 219 | * 220 | * @param {ToolDef | ToolDef[]} tool - The tool to be evaluated. Can be a single ToolDef object or an array of ToolDef objects. 221 | * @param {RunOpts} [opts={}] - Optional options for the evaluation. 
    /**
     * Parses a gptscript file on disk into its blocks (tools and text).
     *
     * @param fileName - Path of the file to parse; sent to the server, which
     *   reads it itself.
     * @param disableCache - Whether to disable the parse cache.
     * @returns The parsed blocks. NOTE(review): the declared return type
     *   appears truncated in this copy of the source — presumably
     *   Promise<Block[]>; confirm against the repository.
     */
    async parse(fileName: string, disableCache?: boolean): Promise {
        // Lazily discover the server URL on first use.
        if (!this.opts.URL) {
            await this.testGPTScriptURL(20)
        }
        const r: Run = new RunSubcommand("parse", fileName, {
            disableCache: disableCache,
            URL: this.opts.URL,
            Token: this.opts.Token
        })
        r.request({file: fileName})
        // The server responds with a node list; convert it to Block objects.
        return parseBlocksFromNodes((await r.json()).nodes)
    }
+ (block.format || "text") + "\n" + block.content 267 | } 268 | }) 269 | } else { 270 | nodes.push({ 271 | toolNode: { 272 | tool: block 273 | } 274 | }) 275 | } 276 | } 277 | 278 | const r: Run = new RunSubcommand("fmt", "", {URL: this.opts.URL, Token: this.opts.Token}) 279 | r.request({nodes: nodes}) 280 | return r.text() 281 | } 282 | 283 | async confirm(response: AuthResponse): Promise { 284 | if (!this.opts.URL) { 285 | await this.testGPTScriptURL(20) 286 | } 287 | const resp = await fetch(`${this.opts.URL}/confirm/${response.id}`, { 288 | method: "POST", 289 | body: JSON.stringify(response) 290 | }) 291 | 292 | if (resp.status < 200 || resp.status >= 400) { 293 | throw new Error(`Failed to confirm ${response.id}: ${await resp.text()}`) 294 | } 295 | } 296 | 297 | async promptResponse(response: PromptResponse): Promise { 298 | if (!this.opts.URL) { 299 | await this.testGPTScriptURL(20) 300 | } 301 | const resp = await fetch(`${this.opts.URL}/prompt-response/${response.id}`, { 302 | method: "POST", 303 | body: JSON.stringify(response.responses) 304 | }) 305 | 306 | if (resp.status < 200 || resp.status >= 400) { 307 | throw new Error(`Failed to respond to prompt ${response.id}: ${await resp.text()}`) 308 | } 309 | } 310 | 311 | /** 312 | * Loads a file into a Program. 313 | * 314 | * @param {string} fileName - The name of the file to load. 315 | * @param {boolean} [disableCache] - Whether to disable the cache. 316 | * @param {string} [subTool] - The sub-tool to use. 317 | * @return {Promise} The loaded program. 318 | */ 319 | async load( 320 | fileName: string, 321 | disableCache?: boolean, 322 | subTool?: string 323 | ): Promise { 324 | return this._load({file: fileName, disableCache, subTool}) 325 | } 326 | 327 | /** 328 | * Loads content into a Program. 329 | * 330 | * @param {string} content - The content to load. 331 | * @param {boolean} [disableCache] - Whether to disable the cache. 332 | * @param {string} [subTool] - The sub-tool to use. 
    /**
     * Lists stored credentials, optionally restricted to the given contexts.
     *
     * @param context - Credential contexts to search; presumably an array of
     *   context names (the generic type is truncated in this copy — confirm).
     * @param allContexts - When true, list credentials from every context.
     * @returns The matching credentials, parsed from the server's JSON.
     */
    async listCredentials(context: Array, allContexts: boolean): Promise> {
        // Lazily discover the server URL on first use.
        if (!this.opts.URL) {
            await this.testGPTScriptURL(20)
        }

        const r: Run = new RunSubcommand("credentials", "", {URL: this.opts.URL, Token: this.opts.Token})
        r.request({context, allContexts})
        const out = await r.json()
        // Re-serialize each entry so jsonToCredential can apply its own parsing.
        return out.map((c: any) => jsonToCredential(JSON.stringify(c)))
    }
    /**
     * Adds elements to a dataset, creating the dataset when no datasetID is
     * supplied.
     *
     * @param elements - Elements to add; presumably DatasetElement records
     *   with name/description/contents/binaryContents (generic type truncated
     *   in this copy — confirm against the repository).
     * @param opts - Dataset name/description (for creation) and/or the
     *   target datasetID.
     * @returns The server's raw response text.
     */
    async addDatasetElements(elements: Array, opts: {
        name?: string,
        description?: string,
        datasetID?: string
    }): Promise {
        // binaryContents must cross the wire as base64; absent contents are
        // sent as an empty buffer.
        const serializableElements = elements.map(e => {
            return {
                name: e.name,
                description: e.description,
                contents: e.contents,
                binaryContents: Buffer.from(e.binaryContents ?? Buffer.from("")).toString("base64")
            }
        })

        return await this.runBasicCommand("datasets/add-elements", {
            input: JSON.stringify({
                name: opts.name ?? "",
                description: opts.description ?? "",
                datasetID: opts.datasetID ?? "",
                elements: serializableElements
            }),
            datasetTool: this.opts.DatasetTool ?? "",
            env: this.opts.Env
        })
    }
"", "base64") 449 | } 450 | } 451 | 452 | async createWorkspace(providerType: string, ...fromWorkspaces: string[]): Promise { 453 | const out = await this.runBasicCommand("workspaces/create", { 454 | providerType: providerType, 455 | fromWorkspaceIDs: fromWorkspaces, 456 | workspaceTool: this.opts.WorkspaceTool, 457 | env: this.opts.Env, 458 | }) 459 | return out.trim() 460 | } 461 | 462 | async deleteWorkspace(workspaceID: string): Promise { 463 | if (!workspaceID) { 464 | return Promise.reject("workspace ID cannot be empty") 465 | } 466 | 467 | await this.runBasicCommand("workspaces/delete", { 468 | id: workspaceID, 469 | workspaceTool: this.opts.WorkspaceTool, 470 | env: this.opts.Env, 471 | }) 472 | } 473 | 474 | async listFilesInWorkspace(prefix?: string, workspaceID?: string): Promise> { 475 | if (!workspaceID) { 476 | workspaceID = process.env.GPTSCRIPT_WORKSPACE_ID ?? "" 477 | } 478 | const out = await this.runBasicCommand("workspaces/list", { 479 | id: workspaceID, 480 | prefix: prefix, 481 | workspaceTool: this.opts.WorkspaceTool, 482 | env: this.opts.Env, 483 | }) 484 | return JSON.parse(out) 485 | } 486 | 487 | async removeAll(withPrefix?: string, workspaceID?: string): Promise { 488 | if (!workspaceID) { 489 | workspaceID = process.env.GPTSCRIPT_WORKSPACE_ID ?? "" 490 | } 491 | await this.runBasicCommand("workspaces/remove-all-with-prefix", { 492 | id: workspaceID, 493 | prefix: withPrefix, 494 | workspaceTool: this.opts.WorkspaceTool, 495 | env: this.opts.Env, 496 | }) 497 | } 498 | 499 | async writeFileInWorkspace(filePath: string, content: ArrayBuffer, workspaceID?: string): Promise { 500 | if (!workspaceID) { 501 | workspaceID = process.env.GPTSCRIPT_WORKSPACE_ID ?? 
"" 502 | } 503 | await this.runBasicCommand("workspaces/write-file", { 504 | id: workspaceID, 505 | filePath: filePath, 506 | contents: Buffer.from(content).toString("base64"), 507 | workspaceTool: this.opts.WorkspaceTool, 508 | env: this.opts.Env, 509 | }) 510 | } 511 | 512 | async deleteFileInWorkspace(filePath: string, workspaceID?: string): Promise { 513 | if (!workspaceID) { 514 | workspaceID = process.env.GPTSCRIPT_WORKSPACE_ID ?? "" 515 | } 516 | await this.runBasicCommand("workspaces/delete-file", { 517 | id: workspaceID, 518 | filePath: filePath, 519 | workspaceTool: this.opts.WorkspaceTool, 520 | env: this.opts.Env, 521 | }) 522 | } 523 | 524 | async readFileInWorkspace(filePath: string, workspaceID?: string): Promise { 525 | if (!workspaceID) { 526 | workspaceID = process.env.GPTSCRIPT_WORKSPACE_ID ?? "" 527 | } 528 | const out = await this.runBasicCommand("workspaces/read-file", { 529 | id: workspaceID, 530 | filePath: filePath, 531 | workspaceTool: this.opts.WorkspaceTool, 532 | env: this.opts.Env, 533 | }) 534 | return Buffer.from(out.trim(), "base64") 535 | } 536 | 537 | async statFileInWorkspace(filePath: string, workspaceID?: string): Promise { 538 | if (!workspaceID) { 539 | workspaceID = process.env.GPTSCRIPT_WORKSPACE_ID ?? "" 540 | } 541 | const out = await this.runBasicCommand("workspaces/stat-file", { 542 | id: workspaceID, 543 | filePath: filePath, 544 | workspaceTool: this.opts.WorkspaceTool, 545 | env: this.opts.Env, 546 | }) 547 | 548 | return JSON.parse(out) 549 | } 550 | 551 | /** 552 | * Helper method to handle the common logic for loading. 553 | * 554 | * @param {any} payload - The payload to send in the request. 555 | * @return {Promise} The loaded program. 
556 | */ 557 | private async _load(payload: any): Promise { 558 | if (!this.opts.URL) { 559 | await this.testGPTScriptURL(20) 560 | } 561 | const r: Run = new RunSubcommand("load", payload.toolDefs || [], {URL: this.opts.URL, Token: this.opts.Token}) 562 | 563 | r.request(payload) 564 | return (await r.json()) as LoadResponse 565 | } 566 | 567 | private async testGPTScriptURL(count: number): Promise { 568 | while (count > 0) { 569 | try { 570 | await fetch(`${GPTScript.serverURL}/healthz`) 571 | this.opts.URL = GPTScript.serverURL 572 | if (!this.opts.Env) { 573 | this.opts.Env = [] 574 | } 575 | this.opts.Env.push(`GPTSCRIPT_URL=${this.opts.URL}`) 576 | if (this.opts.Token) { 577 | this.opts.Env.push(`GPTSCRIPT_TOKEN=${this.opts.Token}`) 578 | } 579 | 580 | return 581 | } catch { 582 | if (count === 0) { 583 | } 584 | await new Promise(r => setTimeout(r, 500)) 585 | count-- 586 | } 587 | } 588 | 589 | throw new Error("Failed to wait for gptscript to be ready") 590 | } 591 | } 592 | 593 | export interface FileInfo { 594 | workspaceID: string 595 | name: string 596 | size: number 597 | modTime: string 598 | } 599 | 600 | export class Run { 601 | public readonly id: string 602 | public readonly opts: RunOpts 603 | public readonly tools?: ToolDef | ToolDef[] | string 604 | public state: RunState = RunState.Creating 605 | public calls: Record = {} 606 | public err: string = "" 607 | 608 | protected stdout?: string 609 | 610 | private readonly requestPath: string = "" 611 | private promise?: Promise 612 | private req?: http.ClientRequest 613 | private stderr?: string 614 | private callbacks: Record void)[]> = {} 615 | private chatState?: string 616 | private parentCallId: string = "" 617 | private prg?: Program 618 | private respondingToolId?: string 619 | 620 | constructor(subCommand: string, tools: ToolDef | ToolDef[] | string, opts: RunOpts) { 621 | this.id = randomId("run-") 622 | this.requestPath = subCommand 623 | this.opts = opts 624 | this.tools = tools 625 | } 
626 | 627 | nextChat(input: string = ""): Run { 628 | if (this.state !== RunState.Continue && this.state !== RunState.Creating && this.state !== RunState.Error) { 629 | throw (new Error(`Run must in creating, continue or error state, not ${this.state}`)) 630 | } 631 | 632 | let run = this 633 | if (run.state !== RunState.Creating) { 634 | run = new (this.constructor as any)(this.requestPath, this.tools, this.opts) 635 | } 636 | 637 | if (this.chatState && this.state === RunState.Continue) { 638 | // Only update the chat state if the previous run didn't error. 639 | // The chat state on opts will be the chat state for the last successful run. 640 | this.opts.chatState = this.chatState 641 | } 642 | run.opts.input = input 643 | if (Array.isArray(this.tools)) { 644 | run.request({toolDefs: this.tools, ...this.opts}) 645 | } else if (typeof this.tools === "string") { 646 | run.request({file: this.tools, ...this.opts}) 647 | } else { 648 | // In this last case, this.tools is a single ToolDef. 
649 | run.request({toolDefs: [this.tools], ...this.opts}) 650 | } 651 | 652 | return run 653 | } 654 | 655 | processStdout(data: string | object): string { 656 | if (typeof data === "string") { 657 | if (data.trim() === "") { 658 | return "" 659 | } 660 | 661 | try { 662 | data = JSON.parse(data) 663 | } catch (e) { 664 | return data as string 665 | } 666 | } 667 | 668 | const out = data as ChatState 669 | if (out.done === undefined || !out.done) { 670 | this.chatState = JSON.stringify(out.state) 671 | this.state = RunState.Continue 672 | this.respondingToolId = out.toolID 673 | } else { 674 | this.state = RunState.Finished 675 | this.chatState = undefined 676 | } 677 | 678 | return "" 679 | } 680 | 681 | request(tool: any) { 682 | if (!this.opts.URL) { 683 | throw new Error("request() requires URL to be set") 684 | } 685 | const options = this.requestOptions(this.opts.URL, this.opts.Token || "", this.requestPath, tool) 686 | options.headers = {"Transfer-Encoding": "chunked", ...options.headers} as any 687 | 688 | this.promise = new Promise(async (resolve, reject) => { 689 | let frag = "" 690 | this.req = http.request(options, (res: http.IncomingMessage) => { 691 | this.state = RunState.Running 692 | res.on("data", (chunk: any) => { 693 | for (let line of (frag + chunk.toString()).split("\n")) { 694 | const c = line.replace(/^(data: )/, "").trim() 695 | if (!c) { 696 | continue 697 | } 698 | 699 | if (c === "[DONE]") { 700 | return 701 | } 702 | 703 | let e: any 704 | try { 705 | e = JSON.parse(c) 706 | } catch { 707 | frag = c 708 | return 709 | } 710 | 711 | if (e.stderr) { 712 | this.stderr = (this.stderr || "") + (typeof e.stderr === "string" ? 
e.stderr : JSON.stringify(e.stderr)) 713 | frag = "" 714 | } else if (e.stdout) { 715 | frag = this.processStdout(e.stdout) 716 | } else { 717 | frag = this.emitEvent(c) 718 | } 719 | } 720 | }) 721 | 722 | res.on("end", () => { 723 | if (this.state === RunState.Running || this.state === RunState.Finished || this.state === RunState.Continue) { 724 | if (this.stdout || !this.stderr) { 725 | if (this.state !== RunState.Continue) { 726 | this.state = RunState.Finished 727 | } 728 | resolve(this.stdout || "") 729 | } else { 730 | this.state = RunState.Error 731 | reject(new Error(this.stderr)) 732 | } 733 | } else if (this.state === RunState.Error) { 734 | reject(new Error(this.err)) 735 | } 736 | }) 737 | 738 | res.on("aborted", () => { 739 | if (this.state !== RunState.Finished && this.state !== RunState.Error) { 740 | this.state = RunState.Error 741 | this.err = "Run has been aborted" 742 | reject(new Error(this.err)) 743 | } 744 | }) 745 | 746 | res.on("error", (error: Error) => { 747 | if (this.state !== RunState.Error) { 748 | this.state = RunState.Error 749 | this.err = error.message || "" 750 | } 751 | reject(new Error(this.err)) 752 | }) 753 | }) 754 | 755 | this.req.on("error", (error: Error) => { 756 | if (this.state !== RunState.Error) { 757 | this.state = RunState.Error 758 | this.err = error.message || "" 759 | } 760 | reject(new Error(this.err)) 761 | }) 762 | 763 | this.req.write(JSON.stringify({...tool, ...this.opts})) 764 | this.req.end() 765 | }) 766 | } 767 | 768 | requestNoStream(tool: any) { 769 | if (!this.opts.URL) { 770 | throw new Error("request() requires gptscriptURL to be set") 771 | } 772 | 773 | const options = this.requestOptions(this.opts.URL, this.opts.Token || "", this.requestPath, tool) as any 774 | if (tool) { 775 | options.body = JSON.stringify({...tool, ...this.opts}) 776 | } 777 | const req = new Request(this.opts.URL + "/" + this.requestPath, options) 778 | 779 | this.promise = new Promise(async (resolve, reject) => { 780 | 
fetch(req).then(resp => { 781 | return resp.json() 782 | }).then(res => { 783 | if (typeof res.stdout === "string") { 784 | resolve(res.stdout) 785 | } 786 | resolve(JSON.stringify(res.stdout)) 787 | }).catch(e => { 788 | reject(new Error(e)) 789 | }) 790 | }) 791 | } 792 | 793 | requestOptions(gptscriptURL: string, token: string, path: string, tool: any) { 794 | let method = "GET" 795 | if (tool) { 796 | method = "POST" 797 | } 798 | 799 | const url = new URL(gptscriptURL) 800 | 801 | const headers = { 802 | "Content-Type": "application/json" 803 | } as any 804 | if (token) { 805 | headers["Authorization"] = `Bearer ${token}` 806 | } 807 | 808 | return { 809 | hostname: url.hostname, 810 | port: url.port || 80, 811 | protocol: url.protocol || "http:", 812 | path: "/" + path, 813 | method: method, 814 | headers: headers 815 | } 816 | } 817 | 818 | public on(event: RunEventType.RunStart | RunEventType.RunFinish, listener: (data: RunFrame) => void): this; 819 | public on(event: RunEventType.CallStart | RunEventType.CallProgress | RunEventType.CallContinue | RunEventType.CallChat | RunEventType.CallConfirm | RunEventType.CallFinish, listener: (data: CallFrame) => void): this; 820 | public on(event: RunEventType.Prompt, listener: (data: PromptFrame) => void): this; 821 | public on(event: RunEventType.Event, listener: (data: Frame) => void): this; 822 | public on(event: RunEventType, listener: (data: any) => void): this { 823 | if (!this.callbacks[event]) { 824 | this.callbacks[event] = [] 825 | } 826 | 827 | this.callbacks[event].push(listener) 828 | 829 | return this 830 | } 831 | 832 | public text(): Promise { 833 | if (this.err) { 834 | throw new Error(this.err) 835 | } 836 | 837 | if (!this.promise) { 838 | throw new Error("Run not started") 839 | } 840 | 841 | return this.promise 842 | } 843 | 844 | public async json(): Promise { 845 | return JSON.parse(await this.text()) 846 | } 847 | 848 | public currentChatState(): string | undefined { 849 | return 
this.chatState 850 | } 851 | 852 | public parentCallFrame(): CallFrame | undefined { 853 | if (this.parentCallId) { 854 | return this.calls[this.parentCallId] 855 | } 856 | 857 | return undefined 858 | } 859 | 860 | public program(): Program | undefined { 861 | return this.prg 862 | } 863 | 864 | public respondingTool(): Tool | undefined { 865 | return this.respondingToolId ? this.prg?.toolSet[this.respondingToolId] : undefined 866 | } 867 | 868 | public close(): void { 869 | if (this.req) { 870 | this.req.destroy() 871 | return 872 | } 873 | throw new Error("Run not started") 874 | } 875 | 876 | private emitEvent(data: string): string { 877 | for (let event of data.split("\n")) { 878 | event = event.trim() 879 | 880 | if (!event) { 881 | continue 882 | } 883 | let f: Frame 884 | try { 885 | const obj = JSON.parse(event) 886 | if (obj.run) { 887 | f = obj.run as Frame 888 | } else if (obj.call) { 889 | f = obj.call as Frame 890 | } else if (obj.prompt) { 891 | f = obj.prompt as Frame 892 | } else { 893 | return event 894 | } 895 | } catch (error) { 896 | return event 897 | } 898 | 899 | if (!this.state) { 900 | this.state = RunState.Creating 901 | } 902 | 903 | if (f.type === RunEventType.Prompt && !this.opts.prompt) { 904 | this.state = RunState.Error 905 | this.err = `prompt occurred when prompt was not allowed: Message: ${f.message}\nFields: ${f.fields}\nSensitive: ${f.sensitive}` 906 | this.close() 907 | return "" 908 | } 909 | 910 | if (f.type === RunEventType.RunStart) { 911 | this.state = RunState.Running 912 | this.prg = f.program 913 | } else if (f.type === RunEventType.RunFinish) { 914 | if (f.error) { 915 | this.state = RunState.Error 916 | this.err = f.error || "" 917 | } else { 918 | this.state = RunState.Finished 919 | this.stdout = f.output || "" 920 | } 921 | } else if ((f.type as string).startsWith("call")) { 922 | f = f as CallFrame 923 | if (!f.parentID && this.parentCallId === "" && (f.toolCategory || ToolCategory.NoCategory) === 
ToolCategory.NoCategory) { 924 | this.parentCallId = f.id 925 | } 926 | this.calls[f.id] = f 927 | } 928 | 929 | this.emit(RunEventType.Event, f) 930 | this.emit(f.type, f) 931 | } 932 | 933 | return "" 934 | } 935 | 936 | private emit(event: RunEventType, data: any) { 937 | for (const cb of this.callbacks[event] || []) { 938 | cb(data) 939 | } 940 | } 941 | } 942 | 943 | class RunSubcommand extends Run { 944 | constructor(subCommand: string, tool: ToolDef | ToolDef[] | string, opts: RunOpts) { 945 | super(subCommand, tool, opts) 946 | } 947 | 948 | processStdout(data: string | object): string { 949 | if (typeof data === "string") { 950 | this.stdout = (this.stdout || "") + data 951 | } else { 952 | this.stdout = JSON.stringify(data) 953 | } 954 | 955 | return "" 956 | } 957 | } 958 | 959 | interface ChatState { 960 | state: string 961 | done: boolean 962 | content: string 963 | toolID: string 964 | } 965 | 966 | export type Arguments = string | Record 967 | 968 | export const ArgumentSchemaType = "object" as const 969 | 970 | export interface ArgumentSchema { 971 | type: typeof ArgumentSchemaType 972 | properties?: Record 973 | required?: string[] 974 | } 975 | 976 | export interface Program { 977 | name: string 978 | entryToolId: string 979 | toolSet: Record 980 | openAPICache: Record 981 | } 982 | 983 | export const PropertyType = "string" as const 984 | 985 | export interface Property { 986 | type: typeof PropertyType 987 | description: string 988 | default?: string 989 | } 990 | 991 | export interface Repo { 992 | VCS: string 993 | Root: string 994 | Path: string 995 | Name: string 996 | Revision: string 997 | } 998 | 999 | export type ToolType = "tool" | "context" | "credential" | "input" | "output" | "agent" | "assistant" | "provider" | "" 1000 | 1001 | export interface ToolDef { 1002 | name?: string 1003 | description?: string 1004 | maxTokens?: number 1005 | modelName?: string 1006 | modelProvider?: boolean 1007 | jsonResponse?: boolean 1008 | 
temperature?: number 1009 | cache?: boolean 1010 | chat?: boolean 1011 | internalPrompt?: boolean 1012 | arguments?: ArgumentSchema 1013 | tools?: string[] 1014 | globalTools?: string[] 1015 | globalModelName?: string 1016 | context?: string[] 1017 | exportContext?: string[] 1018 | export?: string[] 1019 | agents?: string[] 1020 | credentials?: string[] 1021 | exportCredentials?: string[] 1022 | inputFilters?: string[] 1023 | exportInputFilters?: string[] 1024 | outputFilters?: string[] 1025 | exportOutputFilters?: string[] 1026 | instructions?: string 1027 | type?: ToolType 1028 | metaData?: Record 1029 | } 1030 | 1031 | export interface ToolReference { 1032 | named: string 1033 | reference: string 1034 | arg: string 1035 | toolID: string 1036 | } 1037 | 1038 | 1039 | export interface Tool extends ToolDef { 1040 | id: string 1041 | toolMapping?: Record 1042 | localTools?: Record 1043 | source?: SourceRef 1044 | workingDir?: string 1045 | } 1046 | 1047 | export interface SourceRef { 1048 | location: string 1049 | lineNo: number 1050 | repo?: Repo 1051 | } 1052 | 1053 | export const TextType = "text" as const 1054 | 1055 | export interface Text { 1056 | id: string 1057 | type: typeof TextType 1058 | format: string 1059 | content: string 1060 | } 1061 | 1062 | export type Block = Tool | Text 1063 | 1064 | export enum RunState { 1065 | Creating = "creating", 1066 | Running = "running", 1067 | Continue = "continue", 1068 | Finished = "finished", 1069 | Error = "error" 1070 | } 1071 | 1072 | export enum ToolCategory { 1073 | ProviderToolCategory = "provider", 1074 | CredentialToolCategory = "credential", 1075 | ContextToolCategory = "context", 1076 | InputToolCategory = "input", 1077 | OutputToolCategory = "output", 1078 | NoCategory = "" 1079 | } 1080 | 1081 | export interface RunFrame { 1082 | id: string 1083 | type: RunEventType.RunStart | RunEventType.RunFinish 1084 | program: Program 1085 | input: string 1086 | output: string 1087 | error: string 1088 | start: 
string 1089 | end: string 1090 | state: RunState 1091 | chatState: any 1092 | } 1093 | 1094 | export interface Call { 1095 | toolID: string 1096 | input?: string 1097 | } 1098 | 1099 | export interface Output { 1100 | content?: string 1101 | subCalls: Record 1102 | } 1103 | 1104 | export interface InputContext { 1105 | toolID: string 1106 | content: string 1107 | } 1108 | 1109 | export interface Usage { 1110 | promptTokens: number 1111 | completionTokens: number 1112 | totalTokens: number 1113 | } 1114 | 1115 | export interface CallFrame { 1116 | id: string 1117 | tool?: Tool 1118 | agentGroup?: ToolReference[] 1119 | currentAgent?: ToolReference 1120 | displayText?: string 1121 | inputContext: InputContext[] 1122 | toolCategory?: ToolCategory 1123 | toolName: string 1124 | parentID?: string 1125 | type: RunEventType.CallStart | RunEventType.CallChat | RunEventType.CallConfirm | RunEventType.CallContinue | RunEventType.CallSubCalls | RunEventType.CallProgress | RunEventType.CallFinish 1126 | start: string 1127 | end: string 1128 | input: Arguments 1129 | output: Output[] 1130 | error?: string 1131 | usage: Usage 1132 | chatResponseCached: boolean 1133 | toolResults: number 1134 | llmRequest?: any 1135 | llmResponse?: any 1136 | } 1137 | 1138 | export interface PromptFrame { 1139 | id: string 1140 | type: RunEventType.Prompt 1141 | time: string 1142 | message: string 1143 | fields: Field[] 1144 | sensitive: boolean 1145 | metadata: Record 1146 | } 1147 | 1148 | export interface Field { 1149 | name: string 1150 | description?: string 1151 | sensitive?: boolean 1152 | } 1153 | 1154 | export type Frame = RunFrame | CallFrame | PromptFrame 1155 | 1156 | export interface AuthResponse { 1157 | id: string 1158 | accept: boolean 1159 | message?: string 1160 | } 1161 | 1162 | export interface PromptResponse { 1163 | id: string 1164 | responses: Record 1165 | } 1166 | 1167 | export interface LoadResponse { 1168 | program: Program; 1169 | } 1170 | 1171 | export function 
getEnv(key: string, def: string = ""): string { 1172 | let v = process.env[key] || "" 1173 | if (v == "") { 1174 | return def 1175 | } 1176 | 1177 | if (v.startsWith("{\"_gz\":\"") && v.endsWith("\"}")) { 1178 | try { 1179 | return gunzipSync(Buffer.from(v.slice(8, -2), "base64")).toString("utf8") 1180 | } catch (e) { 1181 | } 1182 | } 1183 | 1184 | return v 1185 | } 1186 | 1187 | function getCmdPath(): string { 1188 | if (process.env.GPTSCRIPT_BIN) { 1189 | return process.env.GPTSCRIPT_BIN 1190 | } 1191 | 1192 | return path.join(path.dirname(fileURLToPath(import.meta.url)), "..", "bin", "gptscript" + (process.platform === "win32" ? ".exe" : "")) 1193 | } 1194 | 1195 | function parseBlocksFromNodes(nodes: any[]): Block[] { 1196 | const blocks: Block[] = [] 1197 | if (!nodes) { 1198 | return blocks 1199 | } 1200 | 1201 | for (const node of nodes) { 1202 | if (node.toolNode) { 1203 | if (!node.toolNode.tool.id) { 1204 | node.toolNode.tool.id = randomId("tool-") 1205 | } 1206 | blocks.push({ 1207 | type: node.toolNode.tool.type || "tool", 1208 | ...node.toolNode.tool, 1209 | } as Tool) 1210 | } 1211 | if (node.textNode) { 1212 | const format = node.textNode.text.substring(1, node.textNode.text.indexOf("\n")).trim() || "text" 1213 | blocks.push({ 1214 | id: randomId("text-"), 1215 | type: "text", 1216 | format: format, 1217 | content: node.textNode.text.substring(node.textNode.text.indexOf("\n") + 1).trim(), 1218 | } as Text) 1219 | } 1220 | } 1221 | return blocks 1222 | } 1223 | 1224 | function randomId(prefix: string): string { 1225 | return prefix + Math.random().toString(36).substring(2, 12) 1226 | } 1227 | 1228 | export enum CredentialType { 1229 | Tool = "tool", 1230 | ModelProvider = "modelProvider", 1231 | } 1232 | 1233 | export type Credential = { 1234 | context: string 1235 | name: string 1236 | type: CredentialType 1237 | env: Record 1238 | ephemeral: boolean 1239 | expiresAt?: Date | undefined 1240 | refreshToken?: string | undefined 1241 | checkParam?: 
string | undefined 1242 | } 1243 | 1244 | // Types for OpenAI API-compatible models 1245 | 1246 | export type Permission = { 1247 | created: number, 1248 | id: string, 1249 | object: string, 1250 | allow_create_engine: boolean, 1251 | allow_sampling: boolean, 1252 | allow_logprobs: boolean, 1253 | allow_search_indices: boolean, 1254 | allow_view: boolean, 1255 | allow_fine_tuning: boolean, 1256 | organization: string, 1257 | group: any, 1258 | is_blocking: boolean, 1259 | } 1260 | 1261 | export type Model = { 1262 | created: number, 1263 | id: string, 1264 | object: string, 1265 | owned_by: string, 1266 | permission: Array, 1267 | root: string, 1268 | parent: string, 1269 | metadata: Record, 1270 | } 1271 | 1272 | // for internal use only 1273 | type cred = { 1274 | context: string 1275 | toolName: string 1276 | type: string 1277 | env: Record 1278 | ephemeral: boolean 1279 | expiresAt: string | undefined 1280 | refreshToken: string | undefined 1281 | checkParam: string | undefined 1282 | } 1283 | 1284 | export function credentialToJSON(c: Credential): string { 1285 | const expiresAt = c.expiresAt ? c.expiresAt.toISOString() : undefined 1286 | const type = c.type === CredentialType.Tool ? "tool" : "modelProvider" 1287 | return JSON.stringify({ 1288 | context: c.context, 1289 | toolName: c.name, 1290 | type: type, 1291 | env: c.env, 1292 | ephemeral: c.ephemeral, 1293 | expiresAt: expiresAt, 1294 | refreshToken: c.refreshToken, 1295 | checkParam: c.checkParam 1296 | } as cred) 1297 | } 1298 | 1299 | function jsonToCredential(cred: string): Credential { 1300 | const c = JSON.parse(cred) as cred 1301 | return { 1302 | context: c.context, 1303 | name: c.toolName, 1304 | type: c.type === "tool" ? CredentialType.Tool : CredentialType.ModelProvider, 1305 | env: c.env, 1306 | ephemeral: c.ephemeral, 1307 | expiresAt: c.expiresAt ? 
new Date(c.expiresAt) : undefined, 1308 | refreshToken: c.refreshToken, 1309 | checkParam: c.checkParam 1310 | } 1311 | } 1312 | 1313 | export interface DatasetMeta { 1314 | id: string 1315 | name: string 1316 | description: string 1317 | } 1318 | 1319 | export interface DatasetElementMeta { 1320 | name: string 1321 | description: string 1322 | } 1323 | 1324 | export interface DatasetElement { 1325 | name: string 1326 | description: string 1327 | contents?: string 1328 | binaryContents?: ArrayBuffer 1329 | } 1330 | 1331 | // Functions for use in daemon tools: 1332 | 1333 | export function createServer(listener: http.RequestListener): https.Server { 1334 | const certB64 = process.env.CERT 1335 | const privateKeyB64 = process.env.PRIVATE_KEY 1336 | const gptscriptCertB64 = process.env.GPTSCRIPT_CERT 1337 | 1338 | if (!certB64) { 1339 | console.log("Missing CERT env var") 1340 | process.exit(1) 1341 | } else if (!privateKeyB64) { 1342 | console.log("Missing PRIVATE_KEY env var") 1343 | process.exit(1) 1344 | } else if (!gptscriptCertB64) { 1345 | console.log("Missing GPTSCRIPT_CERT env var") 1346 | process.exit(1) 1347 | } 1348 | 1349 | const cert = Buffer.from(certB64, "base64").toString("utf-8") 1350 | const privateKey = Buffer.from(privateKeyB64, "base64").toString("utf-8") 1351 | const gptscriptCert = Buffer.from(gptscriptCertB64, "base64").toString("utf-8") 1352 | 1353 | const options = { 1354 | key: privateKey, 1355 | cert: cert, 1356 | ca: gptscriptCert, 1357 | requestCert: true, 1358 | rejectUnauthorized: true, 1359 | } 1360 | 1361 | return https.createServer(options, listener) 1362 | } 1363 | 1364 | export function startServer(server: https.Server) { 1365 | const port = process.env.PORT 1366 | if (!port) { 1367 | console.log("Missing PORT env var") 1368 | process.exit(1) 1369 | } 1370 | 1371 | server.listen(parseInt(port, 10), "127.0.0.1", () => { 1372 | console.log(`Server listening on port ${port}`) 1373 | }) 1374 | } 1375 | 
-------------------------------------------------------------------------------- /tests/fixtures/acorn-labs-context.gpt: -------------------------------------------------------------------------------- 1 | Type: context 2 | 3 | #!sys.echo 4 | "Always respond with 'Acorn Labs' and nothing else" -------------------------------------------------------------------------------- /tests/fixtures/chat.gpt: -------------------------------------------------------------------------------- 1 | chat: true 2 | tools: sys.chat.finish 3 | 4 | You are a chat bot. Don't finish the conversation until I say 'bye'. -------------------------------------------------------------------------------- /tests/fixtures/credential-override-windows.gpt: -------------------------------------------------------------------------------- 1 | credentials: github.com/gptscript-ai/credential as test.ts.credential_override with TEST_CRED as env 2 | 3 | #!/usr/bin/env powershell.exe 4 | 5 | echo "$env:TEST_CRED" 6 | -------------------------------------------------------------------------------- /tests/fixtures/credential-override.gpt: -------------------------------------------------------------------------------- 1 | credentials: github.com/gptscript-ai/credential as test.ts.credential_override with TEST_CRED as env 2 | 3 | #!/usr/bin/env bash 4 | 5 | echo "${TEST_CRED}" 6 | -------------------------------------------------------------------------------- /tests/fixtures/empty.gpt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gptscript-ai/node-gptscript/0aef7159bf6f0298c1da7d14f0e4350e2262fb3f/tests/fixtures/empty.gpt -------------------------------------------------------------------------------- /tests/fixtures/global-tools.gpt: -------------------------------------------------------------------------------- 1 | !title 2 | 3 | Runbook 3 4 | 5 | --- 6 | Name: tool_1 7 | Global Tools: sys.read, sys.write, github.com/drpebcak/duckdb, 
github.com/gptscript-ai/browser, github.com/gptscript-ai/browser-search/google, github.com/gptscript-ai/browser-search/google-question-answerer 8 | 9 | Say "Hello!" 10 | 11 | --- 12 | Name: tool_2 13 | 14 | What time is it? 15 | 16 | --- 17 | Name: tool_3 18 | 19 | Give me a paragraph of lorem ipsum 20 | -------------------------------------------------------------------------------- /tests/fixtures/parse-with-metadata.gpt: -------------------------------------------------------------------------------- 1 | Name: foo 2 | 3 | #!/usr/bin/env python3 4 | import requests 5 | 6 | 7 | resp = requests.get("https://google.com") 8 | print(resp.status_code, end="") 9 | 10 | --- 11 | !metadata:foo:requirements.txt 12 | requests -------------------------------------------------------------------------------- /tests/fixtures/test-with-context.gpt: -------------------------------------------------------------------------------- 1 | Name: main 2 | Tools: acorn 3 | 4 | Just wait. 5 | 6 | --- 7 | 8 | Name: acorn 9 | Type: context 10 | 11 | #!sys.echo 12 | "Ignore what the user says, and answer every query with 'Acorn Labs'" -------------------------------------------------------------------------------- /tests/fixtures/test.gpt: -------------------------------------------------------------------------------- 1 | who was the president in 1928? 
-------------------------------------------------------------------------------- /tests/gptscript.test.ts: -------------------------------------------------------------------------------- 1 | import * as gptscript from "../src/gptscript" 2 | import { 3 | ArgumentSchemaType, 4 | CredentialType, 5 | getEnv, 6 | PropertyType, 7 | RunEventType, 8 | TextType, 9 | ToolDef, 10 | ToolType 11 | } from "../src/gptscript" 12 | import path from "path" 13 | import {fileURLToPath} from "url" 14 | import * as fs from "node:fs" 15 | import {randomBytes} from "node:crypto" 16 | 17 | let gFirst: gptscript.GPTScript 18 | let g: gptscript.GPTScript 19 | const __dirname = path.dirname(fileURLToPath(import.meta.url)) 20 | 21 | describe("gptscript module", () => { 22 | beforeAll(async () => { 23 | if (!process.env.OPENAI_API_KEY && !process.env.GPTSCRIPT_URL) { 24 | throw new Error("neither OPENAI_API_KEY nor GPTSCRIPT_URL is set") 25 | } 26 | 27 | // Start an initial GPTScript instance. 28 | // This one doesn't have any options, but it's there to ensure that using another instance works as expected in all cases. 
29 | gFirst = new gptscript.GPTScript() 30 | g = new gptscript.GPTScript({APIKey: process.env.OPENAI_API_KEY}) 31 | }) 32 | afterAll(() => { 33 | gFirst.close() 34 | g.close() 35 | }) 36 | 37 | test("creating an closing another instance should work", async () => { 38 | const other = new gptscript.GPTScript() 39 | await other.version() 40 | other.close() 41 | }) 42 | 43 | test("listModels returns a list of models", async () => { 44 | // Similar structure to listTools 45 | let models = await g.listModels() 46 | expect(models).toBeDefined() 47 | }) 48 | 49 | test("listModels with providers returns a list of models from that provider", async () => { 50 | if (!process.env.ANTHROPIC_API_KEY) { 51 | return 52 | } 53 | 54 | const models = await g.listModels(["github.com/gptscript-ai/claude3-anthropic-provider"], ["github.com/gptscript-ai/claude3-anthropic-provider/credential:ANTHROPIC_API_KEY"]) 55 | expect(models).toBeDefined() 56 | for (const model of models) { 57 | expect(model).toBeDefined() 58 | expect(model.id.startsWith("claude-3-")).toBe(true) 59 | expect(model.id.endsWith("from github.com/gptscript-ai/claude3-anthropic-provider")).toBe(true) 60 | } 61 | }, 60000) 62 | 63 | test("listModels with default provider returns a list of models from that provider", async () => { 64 | if (!process.env.ANTHROPIC_API_KEY) { 65 | return 66 | } 67 | 68 | const newg = new gptscript.GPTScript({DefaultModelProvider: "github.com/gptscript-ai/claude3-anthropic-provider"}) 69 | try { 70 | const models = await newg.listModels(undefined, ["github.com/gptscript-ai/claude3-anthropic-provider/credential:ANTHROPIC_API_KEY"]) 71 | expect(models).toBeDefined() 72 | for (const model of models) { 73 | expect(model).toBeDefined() 74 | expect(model.id.startsWith("claude-3-")).toBe(true) 75 | expect(model.id.endsWith("from github.com/gptscript-ai/claude3-anthropic-provider")).toBe(true) 76 | } 77 | } finally { 78 | newg.close() 79 | } 80 | }, 15000) 81 | 82 | test("version returns a gptscript 
version", async () => { 83 | // Similar structure to listTools 84 | let version = await g.version() 85 | expect(version).toContain("gptscript version") 86 | }) 87 | 88 | test("evaluate executes a prompt correctly", async () => { 89 | const t = { 90 | instructions: "who was the president of the united states in 1928?" 91 | } 92 | 93 | const run = await g.evaluate(t) 94 | expect(run).toBeDefined() 95 | expect(await run.text()).toContain("Calvin Coolidge") 96 | }) 97 | 98 | test("evaluate executes subtool with empty instructions", async () => { 99 | const tools = [ 100 | { 101 | type: "tool", 102 | tools: ["new-tool-1"], 103 | instructions: "Ask the user for their 'first name'. Then reply hello to the user.", 104 | } as ToolDef, 105 | { 106 | type: "tool", 107 | name: "new-tool-1", 108 | } as ToolDef, 109 | ] 110 | const run = await g.evaluate(tools, { 111 | input: "{}", 112 | disableCache: true, 113 | workspace: "", 114 | subTool: "new-tool-1", 115 | }) 116 | 117 | expect(run).toBeDefined() 118 | expect(await run.text()).toContain("Understood.") 119 | }, 10000) 120 | 121 | test("evaluate executes and streams a prompt correctly", async () => { 122 | let out = "" 123 | let err = undefined 124 | const t = { 125 | instructions: "who was the president of the united states in 1928?" 
126 | } 127 | const opts = { 128 | disableCache: true, 129 | } 130 | 131 | const run = await g.evaluate(t, opts) 132 | run.on(gptscript.RunEventType.CallFinish, data => { 133 | for (let output of data.output) out += `system: ${output.content}` 134 | }) 135 | 136 | let callFinished = false 137 | run.on(gptscript.RunEventType.CallFinish, (data: gptscript.CallFrame) => { 138 | if (data.type == RunEventType.CallFinish) { 139 | expect(callFinished).toBe(false) 140 | callFinished = true 141 | } 142 | }) 143 | 144 | await run.text() 145 | err = run.err 146 | 147 | expect(out).toContain("Calvin Coolidge") 148 | expect(err).toEqual("") 149 | expect(run.parentCallFrame()).toBeTruthy() 150 | }) 151 | 152 | test("evaluate executes a prompt correctly with context", async () => { 153 | let out = "" 154 | let err = undefined 155 | const t = { 156 | type: "tool" as ToolType, 157 | instructions: "who was the president of the united states in 1928?", 158 | tools: [path.join(__dirname, "fixtures", "acorn-labs-context.gpt")] 159 | } 160 | 161 | const run = await g.evaluate(t, {disableCache: true}) 162 | out = await run.text() 163 | err = run.err 164 | 165 | expect(out).toContain("Acorn Labs") 166 | expect(err).toEqual("") 167 | }) 168 | 169 | test("should execute test.gpt correctly", async () => { 170 | const testGptPath = path.join(__dirname, "fixtures", "test.gpt") 171 | 172 | const result = await (await g.run(testGptPath)).text() 173 | expect(result).toBeDefined() 174 | expect(result).toContain("Calvin Coolidge") 175 | 176 | // Run it a second time and expect a cached result 177 | const run = await g.run(testGptPath) 178 | const secondResult = await run.text() 179 | expect(result).toBeDefined() 180 | expect(secondResult).toStrictEqual(result) 181 | 182 | // There should be one call frame, and it should be cached 183 | for (let c in run.calls) { 184 | expect(run.calls[c].chatResponseCached).toBeTruthy() 185 | } 186 | }) 187 | 188 | test("should override credentials correctly", async 
() => { 189 | let testGptPath = path.join(__dirname, "fixtures", "credential-override.gpt") 190 | if (process.platform === "win32") { 191 | testGptPath = path.join(__dirname, "fixtures", "credential-override-windows.gpt") 192 | } 193 | 194 | const result = await (await g.run(testGptPath, { 195 | disableCache: true, 196 | credentialOverrides: ["test.ts.credential_override:TEST_CRED=foo"], 197 | })).text() 198 | 199 | expect(result).toBeDefined() 200 | expect(result).toContain("foo") 201 | }) 202 | 203 | test("run executes and stream a file correctly", async () => { 204 | let out = "" 205 | let err = undefined 206 | let [promptTokens, completionTokens, totalTokens] = [0, 0, 0] 207 | const testGptPath = path.join(__dirname, "fixtures", "test.gpt") 208 | const opts = { 209 | disableCache: true, 210 | } 211 | 212 | const run = await g.run(testGptPath, opts) 213 | run.on(gptscript.RunEventType.CallFinish, data => { 214 | for (let output of data.output) out += `system: ${output.content}` 215 | }) 216 | 217 | expect(await run.text()).toContain("Calvin Coolidge") 218 | err = run.err 219 | 220 | for (let c in run.calls) { 221 | promptTokens += run.calls[c].usage.promptTokens || 0 222 | completionTokens += run.calls[c].usage.completionTokens || 0 223 | totalTokens += run.calls[c].usage.totalTokens || 0 224 | } 225 | 226 | expect(out).toContain("Calvin Coolidge") 227 | expect(err).toEqual("") 228 | expect(promptTokens).toBeGreaterThan(0) 229 | expect(completionTokens).toBeGreaterThan(0) 230 | expect(totalTokens).toBeGreaterThan(0) 231 | }) 232 | 233 | test("run executes and streams a file with global tools correctly", async () => { 234 | let out = "" 235 | const testGptPath = path.join(__dirname, "fixtures", "global-tools.gpt") 236 | const opts = { 237 | disableCache: true, 238 | credentialOverrides: ["github.com/gptscript-ai/gateway:OPENAI_API_KEY"] 239 | } 240 | 241 | const run = await g.run(testGptPath, opts) 242 | run.on(gptscript.RunEventType.CallFinish, data => { 243 | 
for (let output of data.output) out += `system: ${output.content}` 244 | }) 245 | 246 | expect(await run.text()).toContain("Hello!") 247 | expect(run.err).toEqual("") 248 | expect(out).toContain("Hello!") 249 | }, 60000) 250 | 251 | test("aborting a run is reported correctly", async () => { 252 | let errMessage = "" 253 | let err = undefined 254 | const testGptPath = path.join(__dirname, "fixtures", "test.gpt") 255 | const opts = { 256 | disableCache: true, 257 | } 258 | 259 | try { 260 | const run = await g.run(testGptPath, opts) 261 | run.on(gptscript.RunEventType.CallProgress, data => { 262 | run.close() 263 | }) 264 | await run.text() 265 | err = run.err 266 | } catch (error: any) { 267 | errMessage = error.toString() 268 | } 269 | 270 | expect(errMessage).toContain("aborted") 271 | expect(err).toBeUndefined() 272 | }) 273 | 274 | 275 | describe("evaluate with multiple tools", () => { 276 | test("multiple tools", async () => { 277 | const t0 = { 278 | tools: ["ask"], 279 | instructions: "Only use the ask tool to ask who was the president of the united states in 1928?" 
280 | } 281 | const t1 = { 282 | name: "ask", 283 | description: "This tool is used to ask a question", 284 | arguments: { 285 | type: ArgumentSchemaType, 286 | properties: { 287 | question: { 288 | type: PropertyType, 289 | description: "The question to ask", 290 | } 291 | } 292 | }, 293 | instructions: "${question}" 294 | } 295 | 296 | const run = await g.evaluate([t0, t1]) 297 | const response = await run.text() 298 | expect(response).toBeDefined() 299 | expect(response).toContain("Calvin Coolidge") 300 | 301 | // In this case, we expect the total number of tool results to be 1 302 | let toolResults = 0 303 | for (let c in run.calls) { 304 | toolResults += run.calls[c].toolResults 305 | } 306 | expect(toolResults).toStrictEqual(1) 307 | }, 30000) 308 | 309 | test("with sub tool", async () => { 310 | const t0 = { 311 | tools: ["ask"], 312 | instructions: "Only use the ask tool to ask who was the president of the united states in 1928?" 313 | } 314 | const t1 = { 315 | name: "other", 316 | instructions: "Who was the president of the united states in 1986?" 
317 | } 318 | const t2 = { 319 | name: "ask", 320 | description: "This tool is used to ask a question", 321 | arguments: { 322 | type: "object", 323 | question: "The question to ask" 324 | }, 325 | instructions: "${question}" 326 | } 327 | 328 | const response = await (await g.evaluate([t0, t1, t2], {subTool: "other"})).text() 329 | expect(response).toBeDefined() 330 | expect(response).toContain("Ronald Reagan") 331 | }, 30000) 332 | }) 333 | 334 | test("parse file", async () => { 335 | const response = await g.parse(path.join(__dirname, "fixtures", "test.gpt")) 336 | expect(response).toBeDefined() 337 | expect(response).toHaveLength(1) 338 | expect((response[0] as gptscript.Tool).instructions).toEqual("who was the president in 1928?") 339 | }, 30000) 340 | 341 | test("parse empty file", async () => { 342 | const response = await g.parse(path.join(__dirname, "fixtures", "empty.gpt")) 343 | expect(response).toBeDefined() 344 | expect(response).toHaveLength(0) 345 | }, 30000) 346 | 347 | test("parse non-existent file", async () => { 348 | try { 349 | await g.parse(path.join(__dirname, "fixtures", "non-existent.gpt")) 350 | } catch (e) { 351 | expect(e).toBeDefined() 352 | expect(typeof e !== "string").toBeTruthy() 353 | return 354 | } 355 | expect(false).toBeTruthy() 356 | }, 30000) 357 | 358 | test("parse non-existent url", async () => { 359 | try { 360 | await g.parse("github.com/thedadams/dne") 361 | } catch (e) { 362 | expect(e).toBeDefined() 363 | expect(typeof e !== "string").toBeTruthy() 364 | return 365 | } 366 | expect(false).toBeTruthy() 367 | }, 30000) 368 | 369 | test("parse file with context", async () => { 370 | const response = await g.parse(path.join(__dirname, "fixtures", "test-with-context.gpt")) 371 | expect(response).toBeDefined() 372 | expect(response).toHaveLength(2) 373 | expect((response[0] as gptscript.Tool).instructions).toEqual("Just wait.") 374 | expect((response[0] as gptscript.Tool).type).toEqual("tool") 375 | expect((response[1] as 
gptscript.Tool).type).toEqual("context") 376 | }, 30000) 377 | 378 | test("parse file with metadata", async () => { 379 | const response = await g.parse(path.join(__dirname, "fixtures", "parse-with-metadata.gpt")) 380 | expect(response).toBeDefined() 381 | expect(response).toHaveLength(2) 382 | expect((response[0] as gptscript.Tool).instructions).toContain("requests.get") 383 | expect((response[0] as gptscript.Tool).metaData).toEqual({"requirements.txt": "requests"}) 384 | expect((response[1] as gptscript.Text).format).toEqual("metadata:foo:requirements.txt") 385 | }, 30000) 386 | 387 | test("parse string tool", async () => { 388 | const tool = "How much wood would a woodchuck chuck if a woodchuck could chuck wood?" 389 | const response = await g.parseContent(tool) 390 | expect(response).toBeDefined() 391 | expect(response).toHaveLength(1) 392 | expect((response[0] as gptscript.Tool).instructions).toEqual(tool) 393 | }, 30000) 394 | 395 | test("parse empty string tool", async () => { 396 | const response = await g.parseContent("") 397 | expect(response).toBeDefined() 398 | expect(response).toHaveLength(0) 399 | }, 30000) 400 | 401 | test("parse string tool with text node", async () => { 402 | const tool = "How much wood would a woodchuck chuck if a woodchuck could chuck wood?\n---\n!markdown\nThis is a text node" 403 | const response = await g.parseContent(tool) 404 | expect(response).toBeDefined() 405 | expect(response).toHaveLength(2) 406 | expect((response[0] as gptscript.Tool).instructions).toEqual("How much wood would a woodchuck chuck if a woodchuck could chuck wood?") 407 | expect((response[1] as gptscript.Text).content).toEqual("This is a text node") 408 | }, 30000) 409 | 410 | test("parse string tool global tools", async () => { 411 | const tool = "Global Tools: acorn, do-work\nHow much wood would a woodchuck chuck if a woodchuck could chuck wood?" 
412 | const response = await g.parseContent(tool) 413 | expect(response).toBeDefined() 414 | expect(response).toHaveLength(1) 415 | expect((response[0] as gptscript.Tool).instructions).toEqual("How much wood would a woodchuck chuck if a woodchuck could chuck wood?") 416 | expect((response[0] as gptscript.Tool).globalTools).toEqual(["acorn", "do-work"]) 417 | }, 30000) 418 | 419 | test("parse string tool first line shebang", async () => { 420 | const tool = "\n#!/usr/bin/env python\nHow much wood would a woodchuck chuck if a woodchuck could chuck wood?" 421 | const response = await g.parseContent(tool) 422 | expect(response).toBeDefined() 423 | expect(response).toHaveLength(1) 424 | expect((response[0] as gptscript.Tool).instructions).toEqual("#!/usr/bin/env python\nHow much wood would a woodchuck chuck if a woodchuck could chuck wood?") 425 | }, 30000) 426 | 427 | test("format tool", async () => { 428 | const tool = { 429 | id: "my-tool", 430 | type: "tool" as ToolType, 431 | tools: ["sys.write", "sys.read"], 432 | instructions: "This is a test", 433 | arguments: { 434 | type: ArgumentSchemaType, 435 | properties: { 436 | text: { 437 | type: PropertyType, 438 | description: "The text to write" 439 | } 440 | } 441 | } 442 | } 443 | 444 | const response = await g.stringify([tool]) 445 | expect(response).toBeDefined() 446 | expect(response).toContain("Tools: sys.write, sys.read") 447 | expect(response).toContain("This is a test") 448 | expect(response).toContain("Parameter: text: The text to write") 449 | }) 450 | 451 | test("format context tool", async () => { 452 | const tool = { 453 | id: "my-tool", 454 | type: "context" as ToolType, 455 | tools: ["sys.write", "sys.read"], 456 | instructions: "This is a test", 457 | arguments: { 458 | type: ArgumentSchemaType, 459 | properties: { 460 | text: { 461 | type: PropertyType, 462 | description: "The text to write" 463 | } 464 | } 465 | } 466 | } 467 | 468 | const response = await g.stringify([tool]) 469 | 
expect(response).toBeDefined() 470 | expect(response).toContain("Tools: sys.write, sys.read") 471 | expect(response).toContain("This is a test") 472 | expect(response).toContain("Parameter: text: The text to write") 473 | expect(response).toContain("Type: Context") 474 | }) 475 | 476 | test("load simple file", async () => { 477 | const response = await g.load(path.join(__dirname, "fixtures", "test.gpt")) 478 | expect(response.program).toBeDefined() 479 | expect(response.program.name).toBeTruthy() 480 | expect(response.program.entryToolId).toBeTruthy() 481 | expect(response.program.toolSet).toBeDefined() 482 | }, 30000) 483 | 484 | test("load remote tool", async () => { 485 | const response = await g.load("github.com/gptscript-ai/context/workspace") 486 | expect(response.program).toBeDefined() 487 | expect(response.program.name).toBeTruthy() 488 | expect(response.program.entryToolId).toBeTruthy() 489 | expect(response.program.toolSet).toBeDefined() 490 | }, 30000) 491 | 492 | test("load content", async () => { 493 | const content = fs.readFileSync(path.join(__dirname, "fixtures", "test.gpt"), {encoding: "utf8"}) 494 | const response = await g.loadContent(content) 495 | expect(response.program).toBeDefined() 496 | // Name will not be defined in this case. 497 | expect(response.program.name).toBeFalsy() 498 | expect(response.program.entryToolId).toBeTruthy() 499 | expect(response.program.toolSet).toBeDefined() 500 | }, 30000) 501 | 502 | test("load tools", async () => { 503 | const tools = [{ 504 | tools: ["ask"], 505 | instructions: "Only use the ask tool to ask who was the president of the united states in 1928?" 506 | }, 507 | { 508 | name: "other", 509 | instructions: "Who was the president of the united states in 1986?" 
510 | }, 511 | { 512 | name: "ask", 513 | description: "This tool is used to ask a question", 514 | arguments: { 515 | type: "object", 516 | question: "The question to ask" 517 | }, 518 | instructions: "${question}" 519 | }, 520 | ] as gptscript.ToolDef[] 521 | const response = await g.loadTools(tools) 522 | expect(response.program).toBeDefined() 523 | // Name will not be defined in this case. 524 | expect(response.program.name).toBeFalsy() 525 | expect(response.program.entryToolId).toBeTruthy() 526 | expect(response.program.toolSet).toBeDefined() 527 | }, 30000) 528 | 529 | test("exec tool with chat", async () => { 530 | let err = undefined 531 | const t = { 532 | chat: true, 533 | instructions: "You are a chat bot. Don't finish the conversation until I say 'bye'.", 534 | tools: ["sys.chat.finish"] 535 | } 536 | const opts = { 537 | disableCache: true, 538 | } 539 | let run = await g.evaluate(t, opts) 540 | 541 | const inputs = [ 542 | "List the three largest states in the United States by area.", 543 | "What is the capital of the third one?", 544 | "What timezone is the first one in?" 
545 | ] 546 | 547 | const expectedOutputs = [ 548 | "California", 549 | "Sacramento", 550 | "Alaska Time Zone" 551 | ] 552 | 553 | await run.text() 554 | for (let i: number = 0; i < inputs.length; i++) { 555 | run = run.nextChat(inputs[i]) 556 | err = run.err 557 | 558 | if (err) { 559 | break 560 | } 561 | 562 | expect(await run.text()).toContain(expectedOutputs[i]) 563 | expect(run.state).toEqual(gptscript.RunState.Continue) 564 | } 565 | 566 | run = run.nextChat("bye") 567 | await run.text() 568 | 569 | expect(run.state).toEqual(gptscript.RunState.Finished) 570 | expect(err).toEqual("") 571 | }, 60000) 572 | 573 | test("exec file with chat", async () => { 574 | let err = undefined 575 | const opts = { 576 | disableCache: true 577 | } 578 | let run = await g.run(path.join(__dirname, "fixtures", "chat.gpt"), opts) 579 | 580 | const inputs = [ 581 | "List the 3 largest of the Great Lakes by volume.", 582 | "What is the volume of the second in the list in cubic miles?", 583 | "What is the total area of the third in the list in square miles?" 
584 | ] 585 | 586 | const expectedOutputs = [ 587 | "Lake Superior", 588 | "Lake Michigan", 589 | "Lake Huron" 590 | ] 591 | 592 | await run.text() 593 | for (let i: number = 0; i < inputs.length; i++) { 594 | run = run.nextChat(inputs[i]) 595 | err = run.err 596 | 597 | if (err) { 598 | break 599 | } 600 | 601 | expect(await run.text()).toContain(expectedOutputs[i]) 602 | expect(run.state).toEqual(gptscript.RunState.Continue) 603 | } 604 | 605 | run = run.nextChat("bye") 606 | await run.text() 607 | 608 | expect(run.state).toEqual(gptscript.RunState.Finished) 609 | expect(err).toEqual("") 610 | }, 60000) 611 | 612 | test("nextChat on file providing chat state", async () => { 613 | let run = await g.run(path.join(__dirname, "fixtures", "chat.gpt"), {disableCache: true}) 614 | 615 | run = run.nextChat("List the 3 largest of the Great Lakes by volume.") 616 | expect(await run.text()).toContain("Lake Superior") 617 | expect(run.err).toEqual("") 618 | expect(run.state).toEqual(gptscript.RunState.Continue) 619 | 620 | run = await g.run(path.join(__dirname, "fixtures", "chat.gpt"), { 621 | disableCache: true, 622 | input: "What is the total area of the third one in square miles?", 623 | chatState: run.currentChatState() 624 | }) 625 | 626 | expect(await run.text()).toContain("Lake Huron") 627 | expect(run.err).toEqual("") 628 | expect(run.state).toEqual(gptscript.RunState.Continue) 629 | }, 15000) 630 | 631 | test("nextChat on tool providing chat state", async () => { 632 | const t = { 633 | chat: true, 634 | instructions: "You are a chat bot. 
Don't finish the conversation until I say 'bye'.", 635 | tools: ["sys.chat.finish"] 636 | } 637 | let run = await g.evaluate(t, {disableCache: true}) 638 | 639 | run = run.nextChat("List the three largest states in the United States by area.") 640 | expect(await run.text()).toContain("California") 641 | expect(run.err).toEqual("") 642 | expect(run.state).toEqual(gptscript.RunState.Continue) 643 | 644 | run = await g.evaluate(t, { 645 | disableCache: true, 646 | input: "What is the capital of the second one?", 647 | chatState: run.currentChatState() 648 | }) 649 | 650 | expect(await run.text()).toContain("Austin") 651 | expect(run.err).toEqual("") 652 | expect(run.state).toEqual(gptscript.RunState.Continue) 653 | }, 15000) 654 | 655 | test("confirm", async () => { 656 | const t = { 657 | instructions: "List the files in the current working directory.", 658 | tools: ["sys.exec"] 659 | } 660 | 661 | const commands = [`ls`, `dir`] 662 | let confirmCallCount = 0 663 | const run = await g.evaluate(t, {confirm: true}) 664 | run.on(gptscript.RunEventType.CallConfirm, async (data: gptscript.CallFrame) => { 665 | // On Windows, ls is not always a command. The LLM will try to run dir in this case. Allow both. 666 | expect(data.input).toContain(commands[confirmCallCount]) 667 | confirmCallCount++ 668 | await g.confirm({id: data.id, accept: true}) 669 | }) 670 | 671 | expect(await run.text()).toContain("README.md") 672 | expect(run.err).toEqual("") 673 | expect(confirmCallCount > 0).toBeTruthy() 674 | }) 675 | 676 | test("do not confirm", async () => { 677 | let confirmFound = false 678 | const t = { 679 | instructions: "List the files in the current directory as '.'. 
If that doesn't work print the word FAIL.", 680 | tools: ["sys.exec"] 681 | } 682 | const run = await g.evaluate(t, {confirm: true}) 683 | run.on(gptscript.RunEventType.CallConfirm, async (data: gptscript.CallFrame) => { 684 | expect(data.input).toContain(`ls`) 685 | confirmFound = true 686 | await g.confirm({id: data.id, accept: false, message: "I will not allow it!"}) 687 | }) 688 | 689 | expect(await run.text()).toContain("FAIL") 690 | expect(run.err).toEqual("") 691 | expect(confirmFound).toBeTruthy() 692 | }) 693 | 694 | test("prompt", async () => { 695 | let promptFound = false 696 | const t = { 697 | instructions: "Use the sys.prompt user to ask the user for 'first name' which is not sensitive. After you get their first name, say hello.", 698 | tools: ["sys.prompt"] 699 | } 700 | const run = await g.evaluate(t, {prompt: true}) 701 | run.on(gptscript.RunEventType.Prompt, async (data: gptscript.PromptFrame) => { 702 | expect(data.message).toContain("first name") 703 | expect(data.fields.length).toEqual(1) 704 | expect(data.fields[0].name).toEqual("first name") 705 | expect(data.sensitive).toBeFalsy() 706 | 707 | promptFound = true 708 | await g.promptResponse({id: data.id, responses: {[data.fields[0].name]: "Clicky"}}) 709 | }) 710 | 711 | expect(await run.text()).toContain("Clicky") 712 | expect(run.err).toEqual("") 713 | expect(promptFound).toBeTruthy() 714 | }) 715 | 716 | test("prompt with metadata", async () => { 717 | let promptFound = false 718 | const run = await g.run("sys.prompt", { 719 | prompt: true, 720 | input: "{\"fields\":\"first name\",\"metadata\":{\"key\":\"value\"}}" 721 | }) 722 | run.on(gptscript.RunEventType.Prompt, async (data: gptscript.PromptFrame) => { 723 | expect(data.fields.length).toEqual(1) 724 | expect(data.fields[0].name).toEqual("first name") 725 | expect(data.metadata).toEqual({key: "value"}) 726 | expect(data.sensitive).toBeFalsy() 727 | 728 | promptFound = true 729 | await g.promptResponse({id: data.id, responses: 
{[data.fields[0].name]: "Clicky"}}) 730 | }) 731 | 732 | expect(await run.text()).toContain("Clicky") 733 | expect(run.err).toEqual("") 734 | expect(promptFound).toBeTruthy() 735 | }) 736 | 737 | test("prompt without prompt allowed should fail", async () => { 738 | let promptFound = false 739 | const t = { 740 | instructions: "Use the sys.prompt user to ask the user for 'first name' which is not sensitive. After you get their first name, say hello.", 741 | tools: ["sys.prompt"] 742 | } 743 | const run = await g.evaluate(t) 744 | run.on(gptscript.RunEventType.Prompt, async (data: gptscript.PromptFrame) => { 745 | promptFound = true 746 | }) 747 | 748 | try { 749 | await run.text() 750 | } catch (e: any) { 751 | expect(e.toString()).toContain("prompt occurred") 752 | } 753 | expect(run.err).toContain("prompt occurred") 754 | expect(promptFound).toBeFalsy() 755 | }) 756 | 757 | test("retry failed run", async () => { 758 | let shebang = `#!/bin/bash\nexit \${EXIT_CODE}` 759 | if (process.platform == "win32") { 760 | shebang = "#!/usr/bin/env powershell.exe\n$e = $env:EXIT_CODE;\nif ($e) { Exit 1; }" 761 | } 762 | const t = { 763 | instructions: "say hello", 764 | tools: ["my-context"] 765 | } as gptscript.ToolDef 766 | const contextTool = { 767 | name: "my-context", 768 | type: "context", 769 | instructions: `${shebang}\nexit \${EXIT_CODE}` 770 | } as gptscript.ToolDef 771 | 772 | let run = await g.evaluate([t, contextTool], {disableCache: true, env: ["EXIT_CODE=1"]}) 773 | try { 774 | await run.text() 775 | } catch { 776 | } 777 | 778 | expect(run.err).not.toEqual("") 779 | 780 | run.opts.env = [] 781 | run = run.nextChat() 782 | 783 | await run.text() 784 | 785 | expect(run.err).toEqual("") 786 | }) 787 | 788 | test("test get_env default", async () => { 789 | const env = getEnv("TEST_ENV_MISSING", "foo") 790 | expect(env).toEqual("foo") 791 | }) 792 | 793 | test("test get_env", async () => { 794 | process.env.TEST_ENV = 
"{\"_gz\":\"H4sIAEosrGYC/ytJLS5RKEvMKU0FACtB3ewKAAAA\"}" 795 | const env = getEnv("TEST_ENV", "missing") 796 | expect(env).toEqual("test value") 797 | }) 798 | 799 | test("run file with metadata", async () => { 800 | let err = undefined 801 | let out = "" 802 | let run = await g.run(path.join(__dirname, "fixtures", "parse-with-metadata.gpt")) 803 | 804 | try { 805 | out = await run.text() 806 | } catch (e) { 807 | err = e 808 | } 809 | expect(err).toEqual(undefined) 810 | expect(out).toEqual("200") 811 | }, 20000) 812 | 813 | test("run parsed tool with metadata", async () => { 814 | let err = undefined 815 | let out = "" 816 | const tools = await g.parse(path.join(__dirname, "fixtures", "parse-with-metadata.gpt")) 817 | 818 | for (const t of tools) { 819 | if (t.type && t.type !== TextType) { 820 | const run = await g.evaluate(t) 821 | try { 822 | out = await run.text() 823 | } catch (e) { 824 | err = e 825 | } 826 | } 827 | } 828 | 829 | expect(err).toEqual(undefined) 830 | expect(out).toEqual("200") 831 | }, 20000) 832 | 833 | test("credential operations", async () => { 834 | const name = "test-" + randomBytes(10).toString("hex") 835 | const value = randomBytes(10).toString("hex") 836 | 837 | // Create 838 | try { 839 | await g.createCredential({ 840 | name: name, 841 | context: "default", 842 | env: {"TEST": value}, 843 | ephemeral: false, 844 | expiresAt: new Date(Date.now() + 5000), // 5 seconds from now 845 | type: CredentialType.Tool, 846 | checkParam: "my-check-param", 847 | }) 848 | } catch (e) { 849 | throw new Error("failed to create credential: " + e) 850 | } 851 | 852 | // Wait 5 seconds 853 | await new Promise(resolve => setTimeout(resolve, 5000)) 854 | 855 | // Reveal 856 | try { 857 | const result = await g.revealCredential(["default"], name) 858 | expect(result.env["TEST"]).toEqual(value) 859 | expect(result.expiresAt!.valueOf()).toBeLessThan(new Date().valueOf()) 860 | expect(result.type).toEqual(CredentialType.Tool) 861 | 
expect(result.checkParam).toEqual("my-check-param") 862 | } catch (e) { 863 | throw new Error("failed to reveal credential: " + e) 864 | } 865 | 866 | // List 867 | try { 868 | const result = await g.listCredentials(["default"], false) 869 | expect(result.length).toBeGreaterThan(0) 870 | expect(result.map(c => c.name)).toContain(name) 871 | } catch (e) { 872 | throw new Error("failed to list credentials: " + e) 873 | } 874 | 875 | // Delete 876 | try { 877 | await g.deleteCredential("default", name) 878 | } catch (e) { 879 | throw new Error("failed to delete credential: " + e) 880 | } 881 | 882 | // Verify deletion 883 | try { 884 | const result = await g.listCredentials(["default"], false) 885 | expect(result.map(c => c.name)).not.toContain(name) 886 | } catch (e) { 887 | throw new Error("failed to verify deletion: " + e) 888 | } 889 | }, 20000) 890 | 891 | test("dataset operations", async () => { 892 | process.env.GPTSCRIPT_WORKSPACE_ID = await g.createWorkspace("directory") 893 | 894 | const client = new gptscript.GPTScript({ 895 | APIKey: process.env.OPENAI_API_KEY, 896 | Env: Object.entries(process.env).map(([k, v]) => `${k}=${v}`) 897 | }) 898 | 899 | let datasetID: string 900 | 901 | // Create and add two elements 902 | try { 903 | datasetID = await client.addDatasetElements([ 904 | { 905 | name: "element1", 906 | description: "", 907 | contents: "this is element 1 contents" 908 | }, 909 | { 910 | name: "element2", 911 | description: "a description", 912 | binaryContents: Buffer.from("this is element 2 contents") 913 | } 914 | ], {name: "test-dataset", description: "a test dataset"}) 915 | } catch (e) { 916 | throw new Error("failed to create dataset: " + e) 917 | } 918 | 919 | // Add another element 920 | try { 921 | await client.addDatasetElements([ 922 | { 923 | name: "element3", 924 | description: "a description", 925 | contents: "this is element 3 contents" 926 | } 927 | ], {datasetID: datasetID}) 928 | } catch (e) { 929 | throw new Error("failed to add 
elements: " + e) 930 | } 931 | 932 | // Get elements 933 | try { 934 | const e1 = await client.getDatasetElement(datasetID, "element1") 935 | expect(e1.name).toEqual("element1") 936 | expect(e1.description).toBeUndefined() 937 | expect(e1.contents).toEqual("this is element 1 contents") 938 | 939 | const e2 = await client.getDatasetElement(datasetID, "element2") 940 | expect(e2.name).toEqual("element2") 941 | expect(e2.description).toEqual("a description") 942 | expect(e2.binaryContents).toEqual(Buffer.from("this is element 2 contents")) 943 | 944 | const e3 = await client.getDatasetElement(datasetID, "element3") 945 | expect(e3.name).toEqual("element3") 946 | expect(e3.description).toEqual("a description") 947 | expect(e3.contents).toEqual("this is element 3 contents") 948 | } catch (e) { 949 | throw new Error("failed to get elements: " + e) 950 | } 951 | 952 | // List the elements in the dataset 953 | try { 954 | const elements = await client.listDatasetElements(datasetID) 955 | expect(elements.length).toEqual(3) 956 | expect(elements.map(e => e.name)).toContain("element1") 957 | expect(elements.map(e => e.name)).toContain("element2") 958 | expect(elements.map(e => e.name)).toContain("element3") 959 | } catch (e) { 960 | throw new Error("failed to list elements: " + e) 961 | } 962 | 963 | // List datasets 964 | try { 965 | const datasets = await client.listDatasets() 966 | expect(datasets.length).toBeGreaterThan(0) 967 | expect(datasets[0].id).toEqual(datasetID) 968 | expect(datasets[0].name).toEqual("test-dataset") 969 | expect(datasets[0].description).toEqual("a test dataset") 970 | } catch (e) { 971 | throw new Error("failed to list datasets: " + e) 972 | } 973 | 974 | client.close() 975 | }, 60000) 976 | 977 | test("create and delete workspace", async () => { 978 | const workspaceID = await g.createWorkspace("directory") 979 | expect(workspaceID).toBeDefined() 980 | await g.deleteWorkspace(workspaceID) 981 | }, 60000) 982 | 983 | test("write, read, and delete 
file", async () => { 984 | const workspaceID = await g.createWorkspace("directory") 985 | expect(workspaceID).toBeDefined() 986 | 987 | await g.writeFileInWorkspace("test.txt", Buffer.from("test"), workspaceID) 988 | const content = await g.readFileInWorkspace("test.txt", workspaceID) 989 | expect(content.toString()).toEqual("test") 990 | 991 | const fileInfo = await g.statFileInWorkspace("test.txt", workspaceID) 992 | expect(fileInfo.size).toEqual(4) 993 | expect(fileInfo.name).toEqual("test.txt") 994 | expect(fileInfo.workspaceID).toEqual(workspaceID) 995 | expect(fileInfo.modTime).toBeDefined() 996 | 997 | await g.deleteFileInWorkspace("test.txt", workspaceID) 998 | await g.deleteWorkspace(workspaceID) 999 | }, 60000) 1000 | 1001 | test("test complex ls", async () => { 1002 | const workspaceID = await g.createWorkspace("directory") 1003 | 1004 | // Write files in the workspace 1005 | await g.writeFileInWorkspace("test/test1.txt", Buffer.from("hello1"), workspaceID) 1006 | await g.writeFileInWorkspace("test1/test2.txt", Buffer.from("hello2"), workspaceID) 1007 | await g.writeFileInWorkspace("test1/test3.txt", Buffer.from("hello3"), workspaceID) 1008 | await g.writeFileInWorkspace(".hidden.txt", Buffer.from("hidden"), workspaceID) 1009 | 1010 | let content = await g.listFilesInWorkspace(undefined, workspaceID) 1011 | expect(content.length).toEqual(4) 1012 | expect(content).toContain("test1/test2.txt") 1013 | expect(content).toContain("test1/test3.txt") 1014 | expect(content).toContain("test/test1.txt") 1015 | expect(content).toContain(".hidden.txt") 1016 | 1017 | content = await g.listFilesInWorkspace("test1", workspaceID) 1018 | expect(content.length).toEqual(2) 1019 | expect(content).toContain("test1/test2.txt") 1020 | expect(content).toContain("test1/test3.txt") 1021 | 1022 | await g.removeAll("test1", workspaceID) 1023 | 1024 | content = await g.listFilesInWorkspace("", workspaceID) 1025 | expect(content.length).toEqual(2) 1026 | 
expect(content).toContain("test/test1.txt") 1027 | expect(content).toContain(".hidden.txt") 1028 | 1029 | await g.deleteWorkspace(workspaceID) 1030 | }, 60000) 1031 | 1032 | test("create and delete workspace in s3", async () => { 1033 | if (!process.env.AWS_ACCESS_KEY_ID || !process.env.AWS_SECRET_ACCESS_KEY) { 1034 | console.log("AWS credentials not set, skipping test") 1035 | return 1036 | } 1037 | 1038 | const workspaceID = await g.createWorkspace("s3") 1039 | expect(workspaceID).toBeDefined() 1040 | await g.deleteWorkspace(workspaceID) 1041 | }, 60000) 1042 | 1043 | test("write, read, and delete file in s3", async () => { 1044 | if (!process.env.AWS_ACCESS_KEY_ID || !process.env.AWS_SECRET_ACCESS_KEY) { 1045 | console.log("AWS credentials not set, skipping test") 1046 | return 1047 | } 1048 | 1049 | const workspaceID = await g.createWorkspace("s3") 1050 | expect(workspaceID).toBeDefined() 1051 | 1052 | await g.writeFileInWorkspace("test.txt", Buffer.from("test"), workspaceID) 1053 | const content = await g.readFileInWorkspace("test.txt", workspaceID) 1054 | expect(content.toString()).toEqual("test") 1055 | 1056 | const fileInfo = await g.statFileInWorkspace("test.txt", workspaceID) 1057 | expect(fileInfo.size).toEqual(4) 1058 | expect(fileInfo.name).toEqual("test.txt") 1059 | expect(fileInfo.workspaceID).toEqual(workspaceID) 1060 | expect(fileInfo.modTime).toBeDefined() 1061 | 1062 | await g.deleteFileInWorkspace("test.txt", workspaceID) 1063 | await g.deleteWorkspace(workspaceID) 1064 | }, 60000) 1065 | 1066 | test("test complex ls in s3", async () => { 1067 | if (!process.env.AWS_ACCESS_KEY_ID || !process.env.AWS_SECRET_ACCESS_KEY) { 1068 | console.log("AWS credentials not set, skipping test") 1069 | return 1070 | } 1071 | 1072 | const workspaceID = await g.createWorkspace("s3") 1073 | 1074 | // Write files in the workspace 1075 | await g.writeFileInWorkspace("test/test1.txt", Buffer.from("hello1"), workspaceID) 1076 | await 
g.writeFileInWorkspace("test1/test2.txt", Buffer.from("hello2"), workspaceID) 1077 | await g.writeFileInWorkspace("test1/test3.txt", Buffer.from("hello3"), workspaceID) 1078 | await g.writeFileInWorkspace(".hidden.txt", Buffer.from("hidden"), workspaceID) 1079 | 1080 | let content = await g.listFilesInWorkspace(undefined, workspaceID) 1081 | expect(content.length).toEqual(4) 1082 | expect(content).toContain("test1/test2.txt") 1083 | expect(content).toContain("test1/test3.txt") 1084 | expect(content).toContain("test/test1.txt") 1085 | expect(content).toContain(".hidden.txt") 1086 | 1087 | content = await g.listFilesInWorkspace("test1", workspaceID) 1088 | expect(content.length).toEqual(2) 1089 | expect(content).toContain("test1/test2.txt") 1090 | expect(content).toContain("test1/test3.txt") 1091 | 1092 | await g.removeAll("test1", workspaceID) 1093 | 1094 | content = await g.listFilesInWorkspace("", workspaceID) 1095 | expect(content.length).toEqual(2) 1096 | expect(content).toContain("test/test1.txt") 1097 | expect(content).toContain(".hidden.txt") 1098 | 1099 | await g.deleteWorkspace(workspaceID) 1100 | }, 60000) 1101 | }) 1102 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "esnext", 4 | "module": "esnext", 5 | "moduleResolution": "node", 6 | "rootDir": "./src", 7 | "declaration": true, 8 | "outDir": "./dist", 9 | "esModuleInterop": true, 10 | "forceConsistentCasingInFileNames": true, 11 | "strict": true, 12 | "skipLibCheck": true, 13 | "sourceMap": true, 14 | "emitDecoratorMetadata": true, 15 | "experimentalDecorators": true, 16 | "typeRoots": [ 17 | "./node_modules/@types", 18 | "./src" 19 | ], 20 | "types": [ 21 | "node", 22 | "jest" 23 | ] 24 | }, 25 | "exclude": [ 26 | "node_modules", 27 | "dist", 28 | "tests", 29 | "exec", 30 | "examples" 31 | ] 32 | } 33 | 
--------------------------------------------------------------------------------