├── .changeset ├── README.md ├── config.json └── cyan-roses-smoke.md ├── .editorconfig ├── .eslintrc.js ├── .gitignore ├── LICENSE ├── NOTES.md ├── README.md ├── apps ├── docs │ ├── .gitignore │ ├── README.md │ ├── babel.config.js │ ├── docs │ │ ├── coming-soon.md │ │ ├── contributing.md │ │ ├── introduction.md │ │ ├── modules │ │ │ ├── chains.md │ │ │ ├── embeddings.md │ │ │ ├── model-providers.md │ │ │ ├── prompts.md │ │ │ ├── tracing.md │ │ │ └── utilities │ │ │ │ ├── loaders.md │ │ │ │ └── text-splitters.md │ │ └── quickstart.md │ ├── docusaurus.config.js │ ├── docusaurus.preferredTheme.js │ ├── global.d.ts │ ├── min-light-with-diff.json │ ├── package.json │ ├── sidebars.js │ ├── src │ │ └── pages │ │ │ └── index.tsx │ ├── static │ │ ├── favicon.png │ │ └── img │ │ │ ├── bg-lines.png │ │ │ ├── bg.png │ │ │ ├── favicon.ico │ │ │ ├── globe-bottom.svg │ │ │ ├── globe.png │ │ │ ├── globe.svg │ │ │ ├── promptable-icon.png │ │ │ ├── tracing-expanded.png │ │ │ └── tracing.png │ ├── tailwind.config.js │ └── tsconfig.json ├── gpt-prisma-seed │ ├── .env.example │ ├── .eslintrc.json │ ├── .gitignore │ ├── README.md │ ├── package-lock.json │ ├── package.json │ ├── prettier.config.cjs │ ├── prisma │ │ ├── schema.prisma │ │ └── seed.ts │ └── tsconfig.json ├── hub │ ├── .eslintrc.json │ ├── .gitignore │ ├── components │ │ └── ExampleCard.tsx │ ├── next.config.js │ ├── package.json │ ├── public │ │ ├── favicon.ico │ │ ├── google_sheets.png │ │ ├── next.svg │ │ ├── notion.png │ │ ├── slack.png │ │ ├── thirteen.svg │ │ └── vercel.svg │ ├── src │ │ ├── pages │ │ │ ├── _app.tsx │ │ │ ├── _document.tsx │ │ │ └── hub │ │ │ │ ├── [example].tsx │ │ │ │ └── index.tsx │ │ └── styles │ │ │ ├── Home.module.css │ │ │ └── globals.css │ └── tsconfig.json ├── nextjs-promptable │ ├── .env.example │ ├── .eslintrc.json │ ├── .gitignore │ ├── README.md │ ├── next.config.mjs │ ├── package.json │ ├── postcss.config.cjs │ ├── prettier.config.cjs │ ├── public │ │ ├── favicon.ico │ │ ├── img │ │ │ ├── bg-lines.png │ │ │ ├── bg.png │ │ │ ├── favicon.ico │ │ │ ├── globe-bottom.svg │ │ │ ├── globe.png │ │ │ ├── globe.svg │ │ │ ├── promptable-icon.png │ │ │ ├── tracing-expanded.png │ │ │ └── tracing.png │ │ ├── next.svg │ │ └── vercel.svg │ ├── src │ │ ├── components │ │ │ └── Chat.tsx │ │ ├── env.mjs │ │ ├── pages │ │ │ ├── _app.tsx │ │ │ ├── api │ │ │ │ ├── chat.ts │ │ │ │ ├── completion.ts │ │ │ │ ├── resume-completions.ts │ │ │ │ └── stream.ts │ │ │ ├── chat.tsx │ │ │ └── index.tsx │ │ └── styles │ │ │ └── globals.css │ ├── tailwind.config.cjs │ └── tsconfig.json └── web │ ├── .env.example │ ├── .eslintrc.json │ ├── .gitignore │ ├── README.md │ ├── next-env.d.ts │ ├── next.config.mjs │ ├── package.json │ ├── postcss.config.cjs │ ├── prettier.config.cjs │ ├── prisma │ ├── schema.prisma │ └── seed.ts │ ├── public │ └── favicon.ico │ ├── src │ ├── components │ │ ├── Content.tsx │ │ └── Tabs.tsx │ ├── env │ │ ├── client.mjs │ │ ├── schema.mjs │ │ └── server.mjs │ ├── pages │ │ ├── _app.tsx │ │ ├── api │ │ │ ├── completions.ts │ │ │ ├── traces.ts │ │ │ └── trpc │ │ │ │ └── [trpc].ts │ │ └── index.tsx │ ├── server │ │ ├── api │ │ │ ├── root.ts │ │ │ ├── routers │ │ │ │ └── traces.ts │ │ │ └── trpc.ts │ │ └── db.ts │ ├── styles │ │ └── globals.css │ └── utils │ │ └── api.ts │ ├── tailwind.config.cjs │ └── tsconfig.json ├── examples ├── .env.example ├── .gitignore ├── TODO.md ├── data │ ├── bens-bites-email.txt │ ├── beyond-smart.txt │ └── startup-mistakes.txt ├── package.json ├── src │ ├── [example-template].ts │ ├── chain-memory.ts 
│ ├── chain-simple.ts │ ├── chunk-sentences.ts │ ├── count-tokens.ts │ ├── embeddings-create.ts │ ├── embeddings-qa.ts │ ├── embeddings-search.ts │ ├── embeddings.ts │ ├── index.ts │ ├── model-providers.ts │ ├── parse-csv.ts │ ├── parse-json.ts │ ├── prompt-parallel.ts │ ├── prompt-sequential.ts │ ├── prompt-simple.ts │ ├── qa-chunks.ts │ ├── qa-extract.ts │ ├── qa-from-notes.ts │ ├── qa-simple.ts │ ├── split-newlines.ts │ ├── split-paragraphs.ts │ ├── split-sentences.ts │ ├── split-tokens.ts │ ├── split-words.ts │ ├── stream-completions-fetch.ts │ ├── stream-completions.ts │ ├── summarize-chunks.ts │ ├── summarize-recursive.ts │ ├── summarize.ts │ ├── tracing-web-prompt.ts │ ├── tracing-web.ts │ └── tracing.ts └── tsconfig.json ├── package.json ├── packages ├── eslint-config-custom │ ├── index.js │ └── package.json ├── promptable-query │ ├── README.md │ ├── babel.config.js │ ├── jest.config.js │ ├── package.json │ ├── src │ │ └── index.ts │ └── tsconfig.json ├── promptable │ ├── README.md │ ├── babel.config.js │ ├── jest.config.js │ ├── package.json │ ├── src │ │ ├── chains │ │ │ ├── LLMChain.ts │ │ │ ├── MemoryLLMChain.ts │ │ │ └── index.ts │ │ ├── embeddings │ │ │ └── index.ts │ │ ├── index.test.ts │ │ ├── index.ts │ │ ├── internal │ │ │ └── Logger.ts │ │ ├── loaders │ │ │ └── index.ts │ │ ├── memories │ │ │ ├── BufferedChatMemory.ts │ │ │ └── index.ts │ │ ├── prompts │ │ │ ├── Parser.ts │ │ │ ├── Prompt.ts │ │ │ └── prompts.ts │ │ ├── providers │ │ │ ├── ModelProvider.ts │ │ │ └── OpenAI.ts │ │ ├── tracing.ts │ │ └── utils │ │ │ ├── TextSplitter.ts │ │ │ ├── extract-variable-names.ts │ │ │ ├── inject-variables.ts │ │ │ ├── parse-json-sse.ts │ │ │ └── unescape-stop-tokens.ts │ └── tsconfig.json ├── tsconfig │ ├── README.md │ ├── base.json │ ├── nextjs.json │ ├── package.json │ └── react-library.json └── ui │ ├── Button.tsx │ ├── index.tsx │ ├── package.json │ └── tsconfig.json ├── pnpm-lock.yaml ├── pnpm-workspace.yaml └── turbo.json /.changeset/README.md: -------------------------------------------------------------------------------- 1 | # Changesets 2 | 3 | Hello and welcome! This folder has been automatically generated by `@changesets/cli`, a build tool that works 4 | with multi-package repos, or single-package repos to help you version and publish your code. 
You can 5 | find the full documentation for it [in our repository](https://github.com/changesets/changesets) 6 | 7 | We have a quick list of common questions to get you started engaging with this project in 8 | [our documentation](https://github.com/changesets/changesets/blob/main/docs/common-questions.md) 9 | -------------------------------------------------------------------------------- /.changeset/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://unpkg.com/@changesets/config@2.3.0/schema.json", 3 | "changelog": "@changesets/cli/changelog", 4 | "commit": false, 5 | "fixed": [], 6 | "linked": [], 7 | "access": "public", 8 | "baseBranch": "main", 9 | "updateInternalDependencies": "patch", 10 | "ignore": [ 11 | "ui", 12 | "eslint-config-custom", 13 | "tsconfig", 14 | "web", 15 | "docs" 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /.changeset/cyan-roses-smoke.md: -------------------------------------------------------------------------------- 1 | --- 2 | "promptable": patch 3 | --- 4 | 5 | Adds Memory modules, MemoryChains, and utility improvements 6 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | root = true 3 | 4 | [*] 5 | end_of_line = lf 6 | charset = utf-8 7 | trim_trailing_whitespace = true 8 | insert_final_newline = true 9 | 10 | [*.{ts,tsx}] 11 | indent_style = space 12 | indent_size = 2 13 | 14 | [*.{json,json.txt}] 15 | indent_style = space 16 | indent_size = 2 17 | 18 | [*.{yml,yml.txt}] 19 | indent_style = space 20 | indent_size = 2 21 | -------------------------------------------------------------------------------- /.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | root: true, 3 | // This tells ESLint to load the config from the package `eslint-config-custom` 4 | extends: ["custom"], 5 | settings: { 6 | next: { 7 | rootDir: ["apps/*/"], 8 | }, 9 | }, 10 | }; 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 | 3 | # dependencies 4 | node_modules 5 | .pnp 6 | .pnp.js 7 | 8 | # testing 9 | coverage 10 | 11 | # next.js 12 | .next/ 13 | out/ 14 | build 15 | 16 | # misc 17 | .DS_Store 18 | *.pem 19 | 20 | # debug 21 | npm-debug.log* 22 | yarn-debug.log* 23 | yarn-error.log* 24 | .pnpm-debug.log* 25 | 26 | # env files 27 | .env.local 28 | .env.development.local 29 | .env.test.local 30 | .env.production.local 31 | **/.env 32 | 33 | # turbo 34 | .turbo 35 | 36 | # output 37 | **dist/ 38 | out.csv 39 | 40 | # vscode 41 | .vscode 42 | 43 | 44 | data/cache -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) Colin Fortuner 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /NOTES.md: -------------------------------------------------------------------------------- 1 | ## Feedback 2 | 3 | - Highlight the Completion Streaming 4 | - Implementing the different Model Provider APIs is hugely valuable. 5 | 6 | ## Issues 7 | 8 | Tracing: 9 | for future reference: 10 | 1. Tracing rebinds `this`, so you have to pass an anonymous fn; you can't pass `this.prompt.format` directly (see the sketch at the end of this file). 11 | 2. The inputs always show up as `[Array]` in the trace. 12 | 13 | ## Features to add 14 | 15 | - Validation of Generated Outputs. 16 | - Retry logic 17 | - Caching / Fuzzy input matching 18 | - Transformation of inputs / outputs 19 | 20 | ## UI Tools 21 | 22 | - Compare prompt / chain across different providers?
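Sketch of the workaround for the `this` issue (hypothetical usage; `trace` and `Prompt` are the promptable exports described in the docs):

```ts
import { trace, Prompt } from "promptable";

const prompt = new Prompt("Write a poem about {{topic}}:", ["topic"]);

// Wrapping the call in an arrow fn keeps `this` bound correctly inside format.
// Passing prompt.format as a bare method reference would lose its `this`.
const tracedFormat = trace("prompt.format", (vars: { topic: string }) =>
  prompt.format(vars)
);

tracedFormat({ topic: "the moon" });
```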
23 | -------------------------------------------------------------------------------- /apps/docs/.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | /node_modules 3 | 4 | package-lock.json 5 | pnpm-lock.yaml 6 | 7 | # Production 8 | /build 9 | 10 | # Generated files 11 | .docusaurus 12 | .cache-loader 13 | 14 | # Misc 15 | .DS_Store 16 | .env.local 17 | .env.development.local 18 | .env.test.local 19 | .env.production.local 20 | 21 | npm-debug.log* 22 | yarn-debug.log* 23 | yarn-error.log* 24 | 25 | .vercel 26 | -------------------------------------------------------------------------------- /apps/docs/README.md: -------------------------------------------------------------------------------- 1 | # Docs 2 | 3 | Promptable Docs 4 | -------------------------------------------------------------------------------- /apps/docs/babel.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | presets: [require.resolve('@docusaurus/core/lib/babel/preset')], 3 | }; 4 | -------------------------------------------------------------------------------- /apps/docs/docs/coming-soon.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: 'Coming Soon' 3 | --- 4 | 5 | # Coming Soon 6 | -------------------------------------------------------------------------------- /apps/docs/docs/contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | We're very early and most of this is subject to change, but we'd love your help! 4 | 5 | If you want to develop on the repo, follow these instructions! 6 | 7 | ## What's inside? 8 | 9 | This is a Turborepo monorepo of tooling for Typescript developers building LLM apps. 10 | 11 | It uses [pnpm](https://pnpm.io) as a package manager and includes the following packages/apps: 12 | 13 | - `packages/promptable`: The Promptable Library for building LLM apps in Typescript / Javascript! 14 | - `examples`: Examples using the Promptable.js library! 15 | - `apps/docs`: The Promptable.js Docs 16 | - `apps/web`: A nextjs app for visualizing Promptable.js steps. 17 | 18 | ## Install and Run 19 | 20 | To install dependencies, run (at the root): 21 | 22 | ``` 23 | pnpm i 24 | 25 | ``` 26 | 27 | To install a package in a single workspace: 28 | 29 | ``` 30 | 31 | pnpm i --filter <workspace> 32 | 33 | ``` 34 | 35 | First copy the `apps/web/.env.example` file to `apps/web/.env`. 36 | 37 | Then, to develop all apps and packages, run the following command: 38 | 39 | ``` 40 | 41 | cd my-turborepo 42 | pnpm run dev 43 | 44 | ``` 45 | 46 | This will start watching the files for changes. 47 | 48 | ### UI 49 | 50 | In `apps/web` you'll find the nextjs app for visualizing Promptable.js Traces. 51 | 52 | TODO: more info 53 | -------------------------------------------------------------------------------- /apps/docs/docs/introduction.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: "Introduction" 3 | --- 4 | 5 | # Introduction 6 | 7 | Promptable is a Typescript library for building fullstack AI applications. 8 | 9 | Promptable consists of a collection of utilities and interfaces that help you build applications with LLMs. It is designed to be flexible and extensible so that you can use it with any LLM or Embeddings provider.
10 | 11 | The goal of this library is to provide a set of general tools for Typescript and Javascript developers to help them build fullstack AI-first applications quickly and easily. 12 | 13 | > Right now the library is in early development and is very much experimental. Don't use this in production yet! The API is subject to change as we get feedback. 14 | > 💻 15 | 16 | [Github Repo](https://github.com/cfortuner/promptable). 17 | [Discord](https://discord.gg/SYmACWTf6V). 18 | [Twitter](https://twitter.com/promptableai). 19 | 20 | ## Use Cases 21 | 22 | - 💬 Chatbots & Conversational AI 23 | - ❓ Question Answering Bots 24 | - ✍️ Writing Apps 25 | - 🧑‍✈️ Copilot apps built with Chrome Extensions, VSCode Extensions, and more! 26 | - 🔍 AI Semantic Search apps 27 | - 🛠️ AI-first Automations, Workflows and Tools 28 | - 🤖 Autonomous Agents & Personal Assistants 29 | 30 | ## Features 31 | 32 | - [Prompts](./modules/prompts.md) for templating and formatting 33 | - [Model Providers](./modules/model-providers.md) for Text Generation and Embedding Generation 34 | - [Embeddings](./modules/embeddings.md) for creating Embeddings, Indexing and Search 35 | - [Chains](./modules/chains.md) for composing LLMs and Embeddings with data and other tools. 36 | - [Tracing](./modules/tracing.md) for debugging your applications! 37 | - Utilities for working with text and data. 38 | - Web UI 39 | 40 | To assist in debugging, we also provide a Tracing UI that helps you visualize the steps taken by the LLM to generate the output. 41 | 42 | 43 | 44 | ## Motivation 45 | 46 | Large Language Models are emerging as a powerful tool for a variety of tasks. With OpenAI models like GPT-3 only an API call away, it's become possible to build applications that use AI as a core software component for business logic, data processing, content generation, and more. Traditionally, AI tooling has only been built in Python to power backend systems, but with the success of ChatGPT, we have learned that the UI/UX of an app is just as important as the backend. 47 | 48 | This project aims to provide a set of general tools for Typescript and Javascript developers to help them build fullstack AI-first applications. 49 | 50 | ## Community 51 | 52 | If you have any questions about anything related to Promptable or if you want to discuss with us and the community, you are welcome to join our **[discord](https://discord.gg/SYmACWTf6V)**. 53 | -------------------------------------------------------------------------------- /apps/docs/docs/modules/chains.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: "Chains" 3 | --- 4 | 5 | # Chains 6 | 7 | Chains are pre-built workflows for executing specific tasks. They combine a prompt and a model provider. 8 | 9 | ## LLMChain 10 | 11 | The simplest chain is the LLMChain, which takes a prompt and a completions model provider. 12 | 13 | ```typescript 14 | import { LLMChain, Prompt } from "promptable"; 15 | 16 | const writePoemPrompt = new Prompt("Write a poem about {{topic}}:", ["topic"]); 17 | const llmChain = new LLMChain(writePoemPrompt, completionsModelProvider); 18 | 19 | const poem = await llmChain.run({ topic: "the moon" }); 20 | ``` 21 | 22 | The `run` method executes the chain and returns the parsed result. 23 | 24 | ## MemoryLLMChain 25 | 26 | The MemoryLLMChain combines a prompt, a model provider, and memory. Memory is a way to store and retrieve data between chain runs.
This chain is useful for building custom chatbots and other conversational AI applications. 27 | 28 | The following example uses MemoryLLMChain to create a simple chatbot based on a prompt. BufferedChatMemory is a memory which stores the user and bot messages in a buffer, up to a maximum number of interactions (defaulting to Infinity). MemoryLLMChain will automatically extract the memory from the BufferedChatMemory and pass it to the prompt. 29 | 30 | ```typescript 31 | const memory = new BufferedChatMemory(); 32 | const memoryChain = new MemoryLLMChain(prompts.chatbot(), openai, memory); 33 | while (true) { 34 | const { userInput } = await query({ 35 | type: "input", 36 | name: "userInput", 37 | message: "User: ", 38 | }); 39 | if (userInput) { 40 | if (userInput === "exit") break; 41 | memory.addUserMessage(userInput); 42 | const botOutput = await memoryChain.run({ userInput }); 43 | memory.addBotMessage(botOutput); 44 | console.log(chalk.yellow("Assistant:", botOutput)); 45 | } 46 | } 47 | ``` 48 | 49 | This example uses a pre-built prompt for chatbots. You can also use your own custom prompts. See the [prompts](./prompts.md) docs for more info. 50 | 51 | ## Tracing Chains 52 | 53 | Chains often have many steps, and tracing can help you understand what is happening in your chain. You can enable tracing by using the `setTraceConfig` function. 54 | 55 | ```typescript 56 | import { setTraceConfig } from "promptable"; 57 | 58 | setTraceConfig({ 59 | send: (trace) => { 60 | console.log("Received Trace", trace); 61 | }, 62 | }); 63 | ``` 64 | 65 | You can also visualize the trace graphically with `graphTraces`. 66 | 67 | ```typescript 68 | import { graphTraces } from "promptable"; 69 | 70 | graphTraces(traces); 71 | ``` 72 | 73 | ## More Chains 74 | 75 | More chains are coming soon! Keep an eye on this library for updates. Here are some you can expect to see very shortly: 76 | 77 | - `QaLLMChain` - A chain that combines an `LLMChain` with embeddings. This enables QA applications over documents. 78 | - `SummaryChain` - A chain that combines an `LLMChain` with a summary prompt for summarization purposes. 79 | 80 | Send us a message if you have a chain you would like to see added to this library. 81 | -------------------------------------------------------------------------------- /apps/docs/docs/modules/embeddings.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: "Embeddings" 3 | --- 4 | 5 | # Embeddings 6 | 7 | Embeddings is an interface to help you create embeddings (dense vectors of numbers) for words or phrases using embeddings model providers. It allows you to index and query documents, so you can find similarities between them based on the embeddings of the words they contain. You can use Embeddings to build applications that work with text, such as search engines, chatbots, and recommendation systems. 8 | 9 | ## Features 10 | 11 | The main features of Embeddings are: 12 | 13 | - Creating embeddings for words or phrases using embeddings model providers. 14 | - Indexing and querying documents to find similarities between them based on their embeddings (see the similarity sketch below). 15 | - Caching the index to improve performance and reduce the time needed to create embeddings. 16 | - Clearing the cache and re-indexing documents if necessary. 17 | - Saving the embeddings to a file for future use. 18 | - Providing an interface for extracting answers from embeddings using a prompt.
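Conceptually, querying is just ranking stored vectors by their similarity to the query vector. Below is a minimal sketch of that idea in plain TypeScript; it is illustrative only, not the library's internal implementation, and the vectors are made-up stand-ins for provider-generated embeddings.

```ts
// Cosine similarity between two embedding vectors.
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

// Rank documents by similarity to the query and keep the top k.
const queryVector = [0.1, 0.8, 0.2];
const documentVectors = [
  [0.2, 0.7, 0.1],
  [0.9, 0.1, 0.4],
];
const topK = documentVectors
  .map((vec, i) => ({ index: i, score: cosineSimilarity(queryVector, vec) }))
  .sort((a, b) => b.score - a.score)
  .slice(0, 1);
```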
19 | 20 | ## Usage 21 | 22 | ```ts 23 | import { 24 | Embeddings, 25 | OpenAI, 26 | } from "promptable"; 27 | 28 | const embeddings = new Embeddings( 29 | "my-embeddings", 30 | new OpenAI(), 31 | [document1, document2, ...], 32 | { cacheDir: "path/to/cache/directory" } 33 | ); 34 | ``` 35 | 36 | After creating an instance of Embeddings, you can index your documents by calling the index() method: 37 | 38 | ```ts 39 | await embeddings.index(); 40 | ``` 41 | 42 | Once your documents are indexed, you can query them by calling the query() method: 43 | 44 | ```ts 45 | const results = await embeddings.query("query", 10); 46 | ``` 47 | 48 | You can also clear the cache and re-index your documents by calling the clearCache() method and then the index() method again: 49 | 50 | ```ts 51 | embeddings.clearCache(); 52 | await embeddings.index(); 53 | ``` 54 | 55 | To save the embeddings to a file, you can call the save() method: 56 | 57 | ```ts 58 | embeddings.save(); 59 | ``` 60 | -------------------------------------------------------------------------------- /apps/docs/docs/modules/model-providers.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: "Model Providers" 3 | --- 4 | 5 | # Model Providers 6 | 7 | Model Providers are interfaces for interacting with different model providers. They allow you to easily switch between providers without having to change your code. 8 | 9 | You can use Model Providers in the same way you would use the OpenAI API. 10 | 11 | ## Creating a Model Provider 12 | 13 | ```ts title="examples/model-providers.ts" 14 | import { OpenAI } from "promptable"; 15 | const openai = new OpenAI(apiKey); 16 | // or 17 | const openai = new OpenAI(apiKey, { 18 | // Provide OpenAI Configuration here 19 | }); 20 | ``` 21 | 22 | ## Using a Model Provider 23 | 24 | ### Text Generation 25 | 26 | You can use the model provider to generate text completions. 27 | 28 | ```ts title="examples/model-providers.ts" 29 | import { OpenAI } from "promptable"; 30 | 31 | const text = "This is a test"; 32 | const tokensUsed = openai.countTokens(text); 33 | const response = await openai.generate(text); 34 | 35 | console.log("Tokens: ", tokensUsed); 36 | console.log(response); 37 | ``` 38 | 39 | ### Streaming 40 | 41 | With OpenAI, you can also stream completions like this: 42 | 43 | ```ts 44 | import { OpenAI } from "promptable"; 45 | 46 | const text = "This is a test"; 47 | const tokensUsed = openai.countTokens(text); 48 | await openai.stream( 49 | text, 50 | (chunk: string) => { 51 | console.log(chunk); 52 | }, 53 | () => { 54 | console.log("Done"); 55 | } 56 | ); 57 | ``` 58 | -------------------------------------------------------------------------------- /apps/docs/docs/modules/prompts.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: "Prompts" 3 | --- 4 | 5 | # Prompts 6 | 7 | Prompts are utility classes for formatting text for Text Generation. They allow you to create templates for your inputs, and they allow you to parse the outputs.
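Under the hood, `format` is plain string templating: each `{{variable}}` placeholder in the template is replaced with the value you pass in. Below is a minimal sketch of the idea; it is illustrative only, not the library's exact implementation (which lives in `utils/inject-variables.ts`).

```ts
// Replace each {{name}} placeholder with the matching variable value,
// leaving unknown placeholders untouched.
function injectVariables(
  template: string,
  variables: Record<string, string>
): string {
  return template.replace(
    /{{\s*(\w+)\s*}}/g,
    (match, name: string) => variables[name] ?? match
  );
}

injectVariables("Write a poem about {{topic}}:", { topic: "the moon" });
// => "Write a poem about the moon:"
```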
8 | 9 | ## Creating a Prompt 10 | 11 | ```ts title="examples/simple-prompt.ts" 12 | const openai = new OpenAI(apiKey); 13 | 14 | const writePoemPrompt = new Prompt( 15 | // your instructions go here 16 | "Write a poem about {{topic}}:".trim(), 17 | [ 18 | // variable names go here 19 | "topic", 20 | ] 21 | ); 22 | ``` 23 | 24 | ## Generating Text using a Prompt 25 | 26 | ```ts 27 | // format the prompt with your variables 28 | const promptText = writePoemPrompt.format({ 29 | topic: "hi", 30 | }); 31 | 32 | const poem = await openai.generate(promptText); 33 | ``` 34 | 35 | ## Parsing Outputs 36 | 37 | You can use a prompt's parser to turn generated text back into structured data. In the example below, the prompt asks the model for JSON matching a type, and `prompt.parse` parses the generated JSON string. 38 | 39 | ```ts title="examples/parsing.ts" 40 | const promptText = prompt.format({ 41 | data, 42 | type: `{ 43 | meeting_type: string, 44 | Date: Date, 45 | Location: string, 46 | invitee_name: string, 47 | invitee_email: string, 48 | }`, 49 | }); 50 | 51 | const json = await openai.generate(promptText); 52 | 53 | const output = prompt.parse(json); 54 | ``` 55 | 56 | ## Prebuilt Prompts 57 | 58 | Promptable comes with a few prebuilt prompts that you can use. 59 | 60 | ```ts 61 | import { prompts } from "promptable"; 62 | 63 | const qaPrompt = prompts.QA(); 64 | const extractTextPrompt = prompts.extractText(); 65 | const extractJSONPrompt = prompts.extractJSON(); 66 | const summarizePrompt = prompts.summarize(); 67 | ``` 68 | -------------------------------------------------------------------------------- /apps/docs/docs/modules/tracing.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: "Tracing" 3 | --- 4 | 5 | # Tracing 6 | 7 | Tracing is a utility provided by this library to help diagnose and debug applications. Tracing allows you to wrap any function call with tracing information, including the inputs and outputs of the function, and any errors that occurred. This library provides the trace function to facilitate tracing in your application. 8 | 9 | 10 | 11 | ## Usage 12 | 13 | To use Tracing, you can import the trace function from the tracing module: 14 | 15 | ```ts 16 | import { trace, setTraceConfig, graphTraces, sendTraceToServer } from "promptable"; 17 | ``` 18 | 19 | To trace a function, simply wrap it with the trace function: 20 | 21 | ```ts 22 | const tracedFunction = trace("functionName", originalFunction); 23 | ``` 24 | 25 | Now, when you call tracedFunction, it will log the inputs, outputs, and any errors that occurred. 26 | 27 | You can also set the tracing configuration using the setTraceConfig function. By default, tracing information is sent to the console. To send tracing information to a server, set the serverUrl and send properties: 28 | 29 | ```ts 30 | setTraceConfig({ 31 | serverUrl: "https://example.com/traces", 32 | send: sendTraceToServer, 33 | }); 34 | ``` 35 | 36 | The send function is called with each trace, and should send the trace to a server for storage and analysis. 37 | 38 | Finally, you can use the graphTraces function to display a visual representation of your traces. This can be useful for understanding the flow of your application and identifying performance bottlenecks: 39 | 40 | ```ts 41 | const trace = tracedFunction(args); 42 | graphTraces([trace]); 43 | ``` 44 | 45 | This will output a tree of the traces, starting with the root trace. 46 | 47 | ## Sending Traces to Web UI 48 | 49 | Promptable also provides a web ui for viewing traces.
To use it, configure tracing to send traces to the web app, as shown below. 50 | 51 | The code for the ui is located in the `apps/web` directory. To run it, you can run the following command: 52 | 53 | ```sh 54 | pnpm run dev --filter web 55 | ``` 56 | 57 | To configure a different trace server, you can set the serverUrl property of the tracing configuration: 58 | 59 | ```ts 60 | setTraceConfig({ 61 | serverUrl: "https://localhost:3000/api/traces", 62 | send: sendTraceToServer, 63 | }); 64 | ``` 65 | 66 | ## Tracing UI 67 | 68 | 69 | -------------------------------------------------------------------------------- /apps/docs/docs/modules/utilities/loaders.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: "Loaders" 3 | --- 4 | 5 | # Loaders 6 | 7 | Loaders are classes that implement the load() method to retrieve documents from various sources, such as files or databases. 8 | 9 | ## Documents 10 | 11 | ### Document 12 | The Document interface represents a single document and includes a content property containing the text of the document and a meta property containing any additional metadata. 13 | 14 | ```ts 15 | export interface Document { 16 | content: string; 17 | meta?: Record<string, any>; 18 | } 19 | ``` 20 | 21 | ## Usage 22 | 23 | To use a loader, simply instantiate the loader class and call the load() method. The load() method returns a promise that resolves to an array of documents. 24 | 25 | Example usage: 26 | 27 | ```ts 28 | const loader = new MyLoader(); 29 | const documents = await loader.load(); 30 | ``` 31 | 32 | ## Loader Types 33 | 34 | ### Loader 35 | 36 | The Loader interface defines a single method, load(), which returns a promise that resolves to an array of documents. 37 | 38 | ```ts 39 | export interface Loader { 40 | load(): Promise<Document[]>; 41 | } 42 | ``` 43 | 44 | ### FileLoader 45 | 46 | The FileLoader class is a loader that reads documents from a file on the file system. It takes a path to a file and an optional metadata object as arguments. 47 | 48 | Example usage: 49 | 50 | ```ts 51 | const loader = new FileLoader("/path/to/file.txt", { author: "John Doe" }); 52 | const documents = await loader.load(); 53 | ``` 54 | 55 | The FileLoader class provides the following options: 56 | 57 | - `path`: a string indicating the path to the file to load. 58 | - `meta`: an optional object containing additional metadata to add to each document. 59 | 60 | The load() method reads the contents of the file and returns a promise that resolves to an array containing a single document object. The content property of the document contains the contents of the file, and the meta property includes the path to the file and any additional metadata provided during instantiation. 61 | -------------------------------------------------------------------------------- /apps/docs/docs/modules/utilities/text-splitters.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: "Text Splitter" 3 | --- 4 | 5 | # Text Splitter 6 | 7 | TextSplitter is an abstract class that provides a base implementation for splitting text into smaller chunks, such as paragraphs or sentences. It also includes methods for merging texts and documents. 8 | 9 | ## Usage 10 | 11 | TextSplitter is an abstract class and cannot be instantiated directly. Instead, it should be extended to implement the splitText method. 12 | 13 | You can configure the TextSplitter by passing in an options object when instantiating it.
The following options are available: 14 | 15 | - `chunk`: a boolean indicating whether to split text into chunks. Default is false. 16 | - `chunkSize`: a number indicating the maximum size of each chunk. Default is 1000. 17 | - `overlap`: a number indicating the amount of overlap between chunks. Default is 200. 18 | - `lengthFn`: a function that returns the length of a text chunk. Default is based on the tokenizer used. 19 | 20 | ## Splitter Types 21 | 22 | The following subclasses are provided: 23 | 24 | ### CharacterTextSplitter 25 | 26 | CharacterTextSplitter extends TextSplitter and splits text into chunks based on a character, such as a newline character. It provides the following options: 27 | 28 | - `character`: a string indicating the character to split on. Default is "\\n\\n", which splits on double newlines. 29 | 30 | Example usage: 31 | 32 | ```ts 33 | const splitter = new CharacterTextSplitter("\\n", { chunkSize: 500 }); 34 | const chunks = splitter.splitText( 35 | "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec faucibus mauris ut dui bibendum, a convallis nisl laoreet. Sed sed enim ante. Suspendisse semper faucibus elit ac gravida.", 36 | { chunkSize: 200 } 37 | ); 38 | ``` 39 | 40 | ### SentenceTextSplitter 41 | 42 | SentenceTextSplitter extends TextSplitter and splits text into chunks based on sentence boundaries. It uses the `sbd` package to identify sentence boundaries. 43 | 44 | Example usage: 45 | 46 | ```ts 47 | const splitter = new SentenceTextSplitter(); 48 | const chunks = splitter.splitText( 49 | "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec faucibus mauris ut dui bibendum, a convallis nisl laoreet. Sed sed enim ante. Suspendisse semper faucibus elit ac gravida." 50 | ); 51 | ``` 52 | 53 | ### TokenSplitter 54 | 55 | TokenSplitter extends TextSplitter and splits text into chunks based on token boundaries. It uses the OpenAI GPT-3 tokenizer to identify tokens. 56 | 57 | Example usage: 58 | 59 | ```ts 60 | const splitter = new TokenSplitter({ chunkSize: 500 }); 61 | const chunks = splitter.splitText( 62 | "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec faucibus mauris ut dui bibendum, a convallis nisl laoreet. Sed sed enim ante. Suspendisse semper faucibus elit ac gravida.", 63 | { overlap: 100 } 64 | ); 65 | ``` 66 | -------------------------------------------------------------------------------- /apps/docs/docs/quickstart.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: "Quickstart" 3 | --- 4 | 5 | # Quickstart 6 | 7 | ## Installation 8 | 9 | ```bash title='terminal' 10 | npm i promptable 11 | ``` 12 | 13 | ## Provider Setup 14 | 15 | If you want to use a Model Provider like OpenAI, you must first create an API key. 16 | 17 | Create your API key: 18 | 19 | [Create an OpenAI API Key](https://platform.openai.com/account/api-keys) 20 | 21 | Once you have it, you can configure Promptable by creating a `.env` file in the root of your project and adding your provider's API key to it.
22 | 23 | ```bash title='.env' 24 | OPENAI_API_KEY= 25 | ``` 26 | 27 | ## Basics 28 | 29 | Importing the library: 30 | 31 | ```ts 32 | import * as p from "promptable"; 33 | ``` 34 | 35 | Create a model provider: 36 | 37 | ```ts 38 | const provider = new p.OpenAI(apiKey); 39 | ``` 40 | 41 | Create a prompt: 42 | 43 | ```ts 44 | const writePoemPrompt = new p.Prompt( 45 | // your instructions go here 46 | "Write a poem about {{topic}}:".trim(), 47 | [ 48 | // variable names go here 49 | "topic", 50 | ] 51 | ); 52 | ``` 53 | 54 | Format the prompt with your variables: 55 | 56 | ```ts 57 | const text = writePoemPrompt.format({ 58 | topic: "hi", 59 | }); 60 | ``` 61 | 62 | Count tokens in text: 63 | 64 | ```ts 65 | const tokensUsed = provider.countTokens(text); 66 | ``` 67 | 68 | Generate text completions! 69 | 70 | ```ts 71 | const response = await provider.generate(text); 72 | 73 | // Or stream the response! 74 | await provider.stream(text, (chunk: string) => { 75 | console.log(chunk); 76 | }); 77 | ``` 78 | 79 | ## Using Embeddings 80 | 81 | ```ts 82 | import * as p from "promptable"; 83 | 84 | // Create a model provider! 85 | const provider = new p.OpenAI(apiKey); 86 | 87 | // Load documents 88 | const filepath = "./data/startup-mistakes.txt"; 89 | const loader = new p.FileLoader(filepath); 90 | let docs = await loader.load(); 91 | 92 | // Split documents into chunks 93 | const splitter = new p.CharacterTextSplitter("\n"); 94 | docs = splitter.splitDocuments(docs, { 95 | chunk: true, 96 | chunkSize: 1000, // tokens :) 97 | }); 98 | 99 | // Create embeddings 100 | const embeddings = new p.Embeddings(provider, docs); 101 | await embeddings.index(); 102 | 103 | // Query embeddings 104 | embeddings.query("startup mistakes"); 105 | ``` 106 | 107 | ## Contributing 108 | 109 | See the [contributing guide](./contributing.md) to learn how to contribute to the repository and the development workflow. 110 | -------------------------------------------------------------------------------- /apps/docs/docusaurus.preferredTheme.js: -------------------------------------------------------------------------------- 1 | import ExecutionEnvironment from '@docusaurus/ExecutionEnvironment'; 2 | 3 | const darkTheme = 'dark'; 4 | const lightTheme = 'light'; 5 | 6 | // if (!window.location.pathname.includes('docs')) { 7 | // console.log("yes") 8 | // document.querySelector('.navbar').classList.add('navbarE'); 9 | // // document.navbar?.style.setProperty('position', 'sticky') 10 | // } 11 | 12 | // if (window.location.pathname.includes('docs')) { 13 | // document.querySelector('.header').style.backgroundColor = '#F7F7F7'; 14 | // } 15 | 16 | if (ExecutionEnvironment.canUseDOM) { 17 | const mediaMatch = window.matchMedia('(prefers-color-scheme: dark)'); 18 | const htmlElement = document.querySelector('html'); 19 | 20 | const setInitialTheme = () => { 21 | const newTheme = mediaMatch.matches ? darkTheme : lightTheme; 22 | htmlElement?.setAttribute('data-theme', newTheme); 23 | }; 24 | setInitialTheme(); 25 | 26 | const colorSchemeChangeListener = (e) => { 27 | const newTheme = e.matches ?
darkTheme : lightTheme; 28 | htmlElement?.setAttribute('data-theme', newTheme); 29 | }; 30 | mediaMatch.addEventListener('change', colorSchemeChangeListener); 31 | 32 | 33 | } 34 | -------------------------------------------------------------------------------- /apps/docs/global.d.ts: -------------------------------------------------------------------------------- 1 | declare module '@docusaurus/plugin-content-docs/client'; 2 | -------------------------------------------------------------------------------- /apps/docs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "docs", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "docusaurus": "docusaurus", 7 | "dev": "docusaurus start --port 3001", 8 | "start": "docusaurus start", 9 | "build": "docusaurus build", 10 | "swizzle": "docusaurus swizzle", 11 | "deploy": "docusaurus deploy", 12 | "clear": "docusaurus clear", 13 | "serve": "docusaurus serve", 14 | "write-translations": "docusaurus write-translations", 15 | "write-heading-ids": "docusaurus write-heading-ids", 16 | "typecheck": "tsc" 17 | }, 18 | "directories": { 19 | "sdk": "sdk" 20 | }, 21 | "dependencies": { 22 | "@algolia/client-search": "^4.14.3", 23 | "@docusaurus/core": "2.2.0", 24 | "@docusaurus/plugin-google-analytics": "^2.2.0", 25 | "@docusaurus/preset-classic": "2.2.0", 26 | "@heroicons/react": "^2.0.13", 27 | "@mdx-js/react": "^1.6.22", 28 | "@tailwindcss/line-clamp": "^0.4.2", 29 | "@vercel/analytics": "^0.1.8", 30 | "autoprefixer": "^10.4.13", 31 | "axios": "^1.3.2", 32 | "bridge": "^2.0.42", 33 | "bridge-react-query": "^1.0.8", 34 | "clsx": "^1.2.1", 35 | "docusaurus-preset-shiki-twoslash": "^1.1.38", 36 | "form-data": "^4.0.0", 37 | "formidable": "^2.1.1", 38 | "mixpanel-browser": "^2.45.0", 39 | "postcss": "^8.4.20", 40 | "prism-react-renderer": "^1.3.5", 41 | "react": "^17.0.2", 42 | "react-dom": "^17.0.2", 43 | "react-syntax-highlighter": "^15.5.0", 44 | "superstruct": "^1.0.3", 45 | "tailwindcss": "^3.2.4", 46 | "yup": "^0.32.11", 47 | "zod": "^3.20.2" 48 | }, 49 | "devDependencies": { 50 | "@docusaurus/module-type-aliases": "2.2.0", 51 | "@tsconfig/docusaurus": "^1.0.5", 52 | "@types/formidable": "^2.0.5", 53 | "@types/yup": "^0.32.0", 54 | "typescript": "^4.7.4" 55 | }, 56 | "browserslist": { 57 | "production": [ 58 | ">0.5%", 59 | "not dead", 60 | "not op_mini all" 61 | ], 62 | "development": [ 63 | "last 1 chrome version", 64 | "last 1 firefox version", 65 | "last 1 safari version" 66 | ] 67 | }, 68 | "engines": { 69 | "node": ">=16.14" 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /apps/docs/sidebars.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Creating a sidebar enables you to: 3 | - create an ordered group of docs 4 | - render a sidebar for each doc of that group 5 | - provide next/previous navigation 6 | 7 | The sidebars can be generated from the filesystem, or explicitly defined here. 8 | 9 | Create as many sidebars as you want. 
10 | */ 11 | 12 | // @ts-nocheck 13 | 14 | /** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ 15 | const sidebars = { 16 | // By default, Docusaurus generates a sidebar from the docs folder structure 17 | // tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], 18 | 19 | sidebar: [ 20 | "introduction", 21 | "quickstart", 22 | { 23 | type: "category", 24 | label: "Modules", 25 | items: [ 26 | "modules/model-providers", 27 | "modules/prompts", 28 | "modules/embeddings", 29 | "modules/chains", 30 | "modules/tracing", 31 | { 32 | type: "category", 33 | label: "Utilities", 34 | items: [ 35 | "modules/utilities/text-splitters", 36 | "modules/utilities/loaders", 37 | ], 38 | }, 39 | ], 40 | }, 41 | "contributing", 42 | ], 43 | }; 44 | 45 | module.exports = sidebars; 46 | -------------------------------------------------------------------------------- /apps/docs/src/pages/index.tsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import { Redirect } from "@docusaurus/router"; 3 | 4 | export default function Home(): JSX.Element { 5 | return <Redirect to="/docs/introduction" />; // redirect target assumed 6 | } 7 | -------------------------------------------------------------------------------- /apps/docs/static/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/docs/static/favicon.png -------------------------------------------------------------------------------- /apps/docs/static/img/bg-lines.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/docs/static/img/bg-lines.png -------------------------------------------------------------------------------- /apps/docs/static/img/bg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/docs/static/img/bg.png -------------------------------------------------------------------------------- /apps/docs/static/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/docs/static/img/favicon.ico -------------------------------------------------------------------------------- /apps/docs/static/img/globe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/docs/static/img/globe.png -------------------------------------------------------------------------------- /apps/docs/static/img/globe.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /apps/docs/static/img/promptable-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/docs/static/img/promptable-icon.png -------------------------------------------------------------------------------- /apps/docs/static/img/tracing-expanded.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/docs/static/img/tracing-expanded.png -------------------------------------------------------------------------------- /apps/docs/static/img/tracing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/docs/static/img/tracing.png -------------------------------------------------------------------------------- /apps/docs/tailwind.config.js: -------------------------------------------------------------------------------- 1 | // tailwind.config.js 2 | /** @type {import('tailwindcss').Config} */ 3 | module.exports = { 4 | content: [ 5 | // 6 | "./src/**/*.{js,jsx,ts,tsx,md,mdx}", 7 | "./docs/**/*.{js,jsx,ts,tsx,md,mdx}", 8 | ], 9 | theme: {}, 10 | darkMode: ["class", '[data-theme="dark"]'], 11 | plugins: [require("@tailwindcss/line-clamp")], 12 | }; 13 | -------------------------------------------------------------------------------- /apps/docs/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | // This file is not used in compilation. It is here just for a nice editor experience. 3 | "extends": "@tsconfig/docusaurus/tsconfig.json", 4 | "compilerOptions": { 5 | "baseUrl": "." 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /apps/gpt-prisma-seed/.env.example: -------------------------------------------------------------------------------- 1 | # Since the ".env" file is gitignored, you can use the ".env.example" file to 2 | # build a new ".env" file when you clone the repo. Keep this file up-to-date 3 | # when you add new variables to `.env`. 4 | 5 | # This file will be committed to version control, so make sure not to have any 6 | # secrets in it. If you are cloning this repo, create a copy of this file named 7 | # ".env" and populate it with your secrets. 8 | 9 | # When adding additional environment variables, the schema in "/env/schema.mjs" 10 | # should be updated accordingly. 11 | 12 | # Prisma 13 | # https://www.prisma.io/docs/reference/database-reference/connection-urls#env 14 | DATABASE_URL="file:./db.sqlite" 15 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /apps/gpt-prisma-seed/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "overrides": [ 3 | { 4 | "extends": [ 5 | "plugin:@typescript-eslint/recommended-requiring-type-checking" 6 | ], 7 | "files": ["*.ts", "*.tsx"], 8 | "parserOptions": { 9 | "project": "tsconfig.json" 10 | } 11 | } 12 | ], 13 | "parser": "@typescript-eslint/parser", 14 | "parserOptions": { 15 | "project": "./tsconfig.json" 16 | }, 17 | "plugins": ["@typescript-eslint"], 18 | "extends": ["next/core-web-vitals", "plugin:@typescript-eslint/recommended"], 19 | "rules": { 20 | "@typescript-eslint/consistent-type-imports": "warn" 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /apps/gpt-prisma-seed/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # database 12 | /prisma/db.sqlite 13 | /prisma/db.sqlite-journal 14 | 15 | # next.js 16 | /.next/ 17 | /out/ 18 | next-env.d.ts 19 | 20 | # production 21 | /build 22 | 23 | # misc 24 | .DS_Store 25 | *.pem 26 | 27 | # debug 28 | npm-debug.log* 29 | yarn-debug.log* 30 | yarn-error.log* 31 | .pnpm-debug.log* 32 | 33 | # local env files 34 | # do not commit any .env files to git, except for the .env.example file. https://create.t3.gg/en/usage/env-variables#using-environment-variables 35 | .env 36 | .env*.local 37 | 38 | # vercel 39 | .vercel 40 | 41 | # typescript 42 | *.tsbuildinfo 43 | -------------------------------------------------------------------------------- /apps/gpt-prisma-seed/README.md: -------------------------------------------------------------------------------- 1 | # GPT Prisma Seed 2 | 3 | ## How to run 4 | 5 | Copy `.env.example` to `.env` and add your OpenAI key. 6 | 7 | The seeding script is in `prisma/seed.ts`. 8 | 9 | Run the following commands: 10 | 11 | - `pnpm i` 12 | - `npx prisma db push` 13 | - `npx prisma db seed` 14 | - `npx prisma studio` 15 | -------------------------------------------------------------------------------- /apps/gpt-prisma-seed/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gpt-prisma-seed", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "studio": "prisma studio" 7 | }, 8 | "dependencies": { 9 | "@mrleebo/prisma-ast": "^0.5.1", 10 | "@prisma/client": "^4.9.0", 11 | "dotenv": "^16.0.3", 12 | "promptable": "workspace:*", 13 | "ts-node": "^10.9.1", 14 | "zod": "^3.20.2" 15 | }, 16 | "devDependencies": { 17 | "@types/node": "^18.11.18", 18 | "@types/prettier": "^2.7.2", 19 | "@typescript-eslint/eslint-plugin": "^5.47.1", 20 | "@typescript-eslint/parser": "^5.47.1", 21 | "eslint": "^8.30.0", 22 | "eslint-config-next": "13.1.6", 23 | "prettier": "^2.8.1", 24 | "prisma": "^4.9.0", 25 | "typescript": "^4.9.4" 26 | }, 27 | "ct3aMetadata": { 28 | "initVersion": "7.4.0" 29 | }, 30 | "prisma": { 31 | "seed": "ts-node prisma/seed.ts" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /apps/gpt-prisma-seed/prettier.config.cjs: -------------------------------------------------------------------------------- 1 | /** @type {import("prettier").Config} */ 2 | module.exports = { 3 | plugins: [require.resolve("prettier-plugin-tailwindcss")], 4 | }; 5 | -------------------------------------------------------------------------------- /apps/gpt-prisma-seed/prisma/schema.prisma: -------------------------------------------------------------------------------- 1 | // This is your Prisma schema file, 2 | // learn more about it in the docs: https://pris.ly/d/prisma-schema 3 | 4 | generator client { 5 | provider = "prisma-client-js" 6 | } 7 | 8 | datasource db { 9 | provider = "sqlite" 10 | url = env("DATABASE_URL") 11 | } 12 | 13 | model User { 14 | id String @id @default(cuid()) 15 | name String 16 | birthday DateTime 17 | address String 18 | favoriteFood String 19 | } -------------------------------------------------------------------------------- /apps/gpt-prisma-seed/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2017", 4 | "lib": ["dom", "dom.iterable", "esnext"], 5 | "allowJs": true, 6 | "skipLibCheck": true, 7 | "strict": true, 8 | "forceConsistentCasingInFileNames": true, 9
| "noEmit": true, 10 | "esModuleInterop": true, 11 | "module": "esnext", 12 | "moduleResolution": "node", 13 | "resolveJsonModule": true, 14 | "isolatedModules": true, 15 | "jsx": "preserve", 16 | "incremental": true, 17 | "noUncheckedIndexedAccess": true 18 | }, 19 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", "**/*.cjs", "**/*.mjs"], 20 | "exclude": ["node_modules"], 21 | "ts-node": { 22 | // these options are overrides used only by ts-node 23 | // same as the --compilerOptions flag and the TS_NODE_COMPILER_OPTIONS environment variable 24 | "compilerOptions": { 25 | "module": "commonjs" 26 | } 27 | }, 28 | } 29 | -------------------------------------------------------------------------------- /apps/hub/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "next/core-web-vitals" 3 | } 4 | -------------------------------------------------------------------------------- /apps/hub/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # next.js 12 | /.next/ 13 | /out/ 14 | 15 | # production 16 | /build 17 | 18 | # misc 19 | .DS_Store 20 | *.pem 21 | 22 | # debug 23 | npm-debug.log* 24 | yarn-debug.log* 25 | yarn-error.log* 26 | .pnpm-debug.log* 27 | 28 | # local env files 29 | .env*.local 30 | 31 | # vercel 32 | .vercel 33 | 34 | # typescript 35 | *.tsbuildinfo 36 | next-env.d.ts 37 | -------------------------------------------------------------------------------- /apps/hub/components/ExampleCard.tsx: -------------------------------------------------------------------------------- 1 | import { 2 | Badge, 3 | Box, 4 | Card, 5 | CardBody, 6 | Flex, 7 | Heading, 8 | Image, 9 | Text, 10 | WrapItem, 11 | } from "@chakra-ui/react"; 12 | import { useRouter } from "next/router"; 13 | import React from "react"; 14 | 15 | export default function ExampleCard({ 16 | title, 17 | img, 18 | enabled, 19 | description = "", 20 | }: { 21 | title: string; 22 | img: string; 23 | enabled: boolean; 24 | description: string; 25 | }) { 26 | const router = useRouter(); 27 | 28 | function handleExampleRedirect(exampleName: string) { 29 | if (!enabled) { 30 | return; 31 | } 32 | 33 | router.push(`/hub/${exampleName}`); 34 | } 35 | 36 | return ( 37 | 38 | handleExampleRedirect(title)} 44 | background={!enabled ? "#F8F8F8" : undefined} 45 | _hover={{ bg: !enabled ? undefined : "#F8F8F8" }} 46 | > 47 | {!enabled && ( 48 | 56 | Coming Soon! 
57 | 58 | )} 59 | 60 | 61 | 62 | {title} 63 | {description} 64 | 65 | 66 | 67 | 68 | ); 69 | } 70 | -------------------------------------------------------------------------------- /apps/hub/next.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = { 3 | reactStrictMode: true, 4 | } 5 | 6 | module.exports = nextConfig 7 | -------------------------------------------------------------------------------- /apps/hub/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hub", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "next dev", 7 | "build": "next build", 8 | "start": "next start", 9 | "lint": "next lint" 10 | }, 11 | "dependencies": { 12 | "@chakra-ui/icons": "^2.0.17", 13 | "@chakra-ui/react": "^2.5.1", 14 | "@emotion/react": "^11.10.6", 15 | "@emotion/styled": "^11.10.6", 16 | "@next/font": "13.1.6", 17 | "@types/node": "^18.11.18", 18 | "@types/react": "18.0.28", 19 | "@types/react-dom": "18.0.11", 20 | "eslint": "8.34.0", 21 | "eslint-config-next": "13.1.6", 22 | "framer-motion": "^9.0.4", 23 | "next": "13.1.6", 24 | "react": "18.2.0", 25 | "react-code-blocks": "0.0.9-0", 26 | "react-dom": "18.2.0", 27 | "typescript": "4.9.5" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /apps/hub/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/hub/public/favicon.ico -------------------------------------------------------------------------------- /apps/hub/public/google_sheets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/hub/public/google_sheets.png -------------------------------------------------------------------------------- /apps/hub/public/next.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /apps/hub/public/notion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/hub/public/notion.png -------------------------------------------------------------------------------- /apps/hub/public/slack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/hub/public/slack.png -------------------------------------------------------------------------------- /apps/hub/public/thirteen.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /apps/hub/public/vercel.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /apps/hub/src/pages/_app.tsx: -------------------------------------------------------------------------------- 1 | import "@/styles/globals.css"; 2 | import type { AppProps } from "next/app"; 3 | import { ChakraProvider } from "@chakra-ui/react"; 4 | 5 | 
export default function App({ Component, pageProps }: AppProps) { 6 | return ( 7 | <ChakraProvider> 8 | <Component {...pageProps} /> 9 | </ChakraProvider> 10 | ); 11 | } 12 | -------------------------------------------------------------------------------- /apps/hub/src/pages/_document.tsx: -------------------------------------------------------------------------------- 1 | import { Html, Head, Main, NextScript } from 'next/document' 2 | 3 | export default function Document() { 4 | return ( 5 | 6 | 7 | 8 |
9 | 10 | 11 | 12 | ) 13 | } 14 | -------------------------------------------------------------------------------- /apps/hub/src/pages/hub/[example].tsx: -------------------------------------------------------------------------------- 1 | import { 2 | Flex, 3 | Heading, 4 | Divider, 5 | VStack, 6 | Box, 7 | Text, 8 | Button, 9 | IconButton, 10 | } from "@chakra-ui/react"; 11 | import { CodeBlock, dracula } from "react-code-blocks"; 12 | import React from "react"; 13 | import { ArrowBackIcon } from "@chakra-ui/icons"; 14 | import { useRouter } from "next/router"; 15 | 16 | export async function getStaticPaths() { 17 | const paths = ["Slack", "Notion", "Google Sheets"].map((example) => { 18 | return { 19 | params: { example: example }, 20 | }; 21 | }); 22 | 23 | return { 24 | paths, 25 | fallback: false, // TODO: change to whatever hub ends up being 26 | }; 27 | } 28 | 29 | export async function getStaticProps({ 30 | params, 31 | }: { 32 | params: { example: string }; 33 | }) { 34 | const { example } = params; 35 | 36 | return { 37 | props: { example }, 38 | }; 39 | } 40 | 41 | export default function Example({ example }: { example: string }) { 42 | const router = useRouter(); 43 | 44 | function handleGitHubLink(example: string) {} 45 | 46 | function handleBack() { 47 | router.push("/hub"); 48 | } 49 | 50 | // Need to figure out how to center promptable text in header, viewport won't cooperate with current header structure. 51 | return ( 52 | <> 53 | 60 | } 63 | onClick={handleBack} 64 | /> 65 | 66 | Promptable 67 | 68 | 77 | 78 | 79 | 80 | 81 | 82 | {example + " Loader"} 83 | 84 | 85 | 86 | Text about the loader.... 87 | 88 | 89 | 90 | 91 | Usage 92 | 93 | 94 | 95 | Relevant text about the usage.... 96 | 97 | 98 | 99 | x+y;\nconst add = (x,y) => x+y;\nconst add = (x,y) => x+y;const add = (x,y) => x+y;\nconst add = (x,y) => x+y;\nconst add = (x,y) => x+y;" 102 | } 103 | language="typescript" 104 | theme={dracula} 105 | /> 106 | 107 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | ); 117 | } 118 | -------------------------------------------------------------------------------- /apps/hub/src/pages/hub/index.tsx: -------------------------------------------------------------------------------- 1 | import { Text } from "@chakra-ui/react"; 2 | 3 | import React from "react"; 4 | import { 5 | Box, 6 | Flex, 7 | Grid, 8 | GridItem, 9 | Heading, 10 | Input, 11 | InputGroup, 12 | InputLeftElement, 13 | SimpleGrid, 14 | Wrap, 15 | } from "@chakra-ui/react"; 16 | import { Search2Icon } from "@chakra-ui/icons"; 17 | import ExampleCard from "components/ExampleCard"; 18 | 19 | export default function Home() { 20 | function handleSearch() {} 21 | 22 | const promptableIntro = "Upgrade your tools with AI"; 23 | const promptableCTA = "Find Templates that simplify your work."; 24 | 25 | const examples: any[] = [ 26 | { 27 | title: "Slack", 28 | img: "/slack.png", 29 | enabled: true, 30 | description: "Build a GPT-3 powered slack bot.", 31 | }, 32 | { title: "Notion", img: "/notion.png", enabled: false }, 33 | { title: "Google Sheets", img: "/google_sheets.png", enabled: false }, 34 | ]; 35 | 36 | const exampleCards = examples.map((example) => { 37 | return ( 38 | 44 | ); 45 | }); 46 | 47 | return ( 48 | <> 49 | 56 | Promptable Hub 57 | 58 | 65 | 66 | 67 | {promptableIntro} 68 | 69 | 70 | {promptableCTA} 71 | 72 | 73 | {/* 74 | } /> 75 | 76 | */} 77 | 78 | {exampleCards.map((c) => { 79 | return c; 80 | })} 81 | 82 | 83 | 84 | ); 85 | } 86 | 
-------------------------------------------------------------------------------- /apps/hub/src/styles/globals.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --max-width: 1100px; 3 | --border-radius: 12px; 4 | --font-mono: ui-monospace, Menlo, Monaco, 'Cascadia Mono', 'Segoe UI Mono', 5 | 'Roboto Mono', 'Oxygen Mono', 'Ubuntu Monospace', 'Source Code Pro', 6 | 'Fira Mono', 'Droid Sans Mono', 'Courier New', monospace; 7 | 8 | --foreground-rgb: 0, 0, 0; 9 | --background-start-rgb: 214, 219, 220; 10 | --background-end-rgb: 255, 255, 255; 11 | 12 | --primary-glow: conic-gradient( 13 | from 180deg at 50% 50%, 14 | #16abff33 0deg, 15 | #0885ff33 55deg, 16 | #54d6ff33 120deg, 17 | #0071ff33 160deg, 18 | transparent 360deg 19 | ); 20 | --secondary-glow: radial-gradient( 21 | rgba(255, 255, 255, 1), 22 | rgba(255, 255, 255, 0) 23 | ); 24 | 25 | --tile-start-rgb: 239, 245, 249; 26 | --tile-end-rgb: 228, 232, 233; 27 | --tile-border: conic-gradient( 28 | #00000080, 29 | #00000040, 30 | #00000030, 31 | #00000020, 32 | #00000010, 33 | #00000010, 34 | #00000080 35 | ); 36 | 37 | --callout-rgb: 238, 240, 241; 38 | --callout-border-rgb: 172, 175, 176; 39 | --card-rgb: 180, 185, 188; 40 | --card-border-rgb: 131, 134, 135; 41 | } 42 | 43 | @media (prefers-color-scheme: dark) { 44 | :root { 45 | --foreground-rgb: 255, 255, 255; 46 | --background-start-rgb: 0, 0, 0; 47 | --background-end-rgb: 0, 0, 0; 48 | 49 | --primary-glow: radial-gradient(rgba(1, 65, 255, 0.4), rgba(1, 65, 255, 0)); 50 | --secondary-glow: linear-gradient( 51 | to bottom right, 52 | rgba(1, 65, 255, 0), 53 | rgba(1, 65, 255, 0), 54 | rgba(1, 65, 255, 0.3) 55 | ); 56 | 57 | --tile-start-rgb: 2, 13, 46; 58 | --tile-end-rgb: 2, 5, 19; 59 | --tile-border: conic-gradient( 60 | #ffffff80, 61 | #ffffff40, 62 | #ffffff30, 63 | #ffffff20, 64 | #ffffff10, 65 | #ffffff10, 66 | #ffffff80 67 | ); 68 | 69 | --callout-rgb: 20, 20, 20; 70 | --callout-border-rgb: 108, 108, 108; 71 | --card-rgb: 100, 100, 100; 72 | --card-border-rgb: 200, 200, 200; 73 | } 74 | } 75 | 76 | * { 77 | box-sizing: border-box; 78 | padding: 0; 79 | margin: 0; 80 | } 81 | 82 | html, 83 | body { 84 | max-width: 100vw; 85 | overflow-x: hidden; 86 | } 87 | 88 | body { 89 | color: rgb(var(--foreground-rgb)); 90 | background: linear-gradient( 91 | to bottom, 92 | transparent, 93 | rgb(var(--background-end-rgb)) 94 | ) 95 | rgb(var(--background-start-rgb)); 96 | } 97 | 98 | a { 99 | color: inherit; 100 | text-decoration: none; 101 | } 102 | 103 | @media (prefers-color-scheme: dark) { 104 | html { 105 | color-scheme: dark; 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /apps/hub/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "lib": ["dom", "dom.iterable", "esnext"], 5 | "allowJs": true, 6 | "skipLibCheck": true, 7 | "strict": true, 8 | "forceConsistentCasingInFileNames": true, 9 | "noEmit": true, 10 | "esModuleInterop": true, 11 | "module": "esnext", 12 | "moduleResolution": "node", 13 | "resolveJsonModule": true, 14 | "isolatedModules": true, 15 | "jsx": "preserve", 16 | "incremental": true, 17 | "baseUrl": ".", 18 | "paths": { 19 | "@/*": ["./src/*"] 20 | } 21 | }, 22 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"], 23 | "exclude": ["node_modules"] 24 | } 25 | -------------------------------------------------------------------------------- 
/apps/nextjs-promptable/.env.example: -------------------------------------------------------------------------------- 1 | # Since the ".env" file is gitignored, you can use the ".env.example" file to 2 | # build a new ".env" file when you clone the repo. Keep this file up-to-date 3 | # when you add new variables to `.env`. 4 | 5 | # This file will be committed to version control, so make sure not to have any 6 | # secrets in it. If you are cloning this repo, create a copy of this file named 7 | # ".env" and populate it with your secrets. 8 | 9 | # When adding additional environment variables, the schema in "/env/schema.mjs" 10 | # should be updated accordingly. 11 | 12 | # Example: 13 | # SERVERVAR="foo" 14 | # NEXT_PUBLIC_CLIENTVAR="bar" 15 | 16 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /apps/nextjs-promptable/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "overrides": [ 3 | { 4 | "extends": [ 5 | "plugin:@typescript-eslint/recommended-requiring-type-checking" 6 | ], 7 | "files": ["*.ts", "*.tsx"], 8 | "parserOptions": { 9 | "project": "tsconfig.json" 10 | } 11 | } 12 | ], 13 | "parser": "@typescript-eslint/parser", 14 | "parserOptions": { 15 | "project": "./tsconfig.json" 16 | }, 17 | "plugins": ["@typescript-eslint"], 18 | "extends": ["next/core-web-vitals", "plugin:@typescript-eslint/recommended"], 19 | "rules": { 20 | "@typescript-eslint/consistent-type-imports": "warn" 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # database 12 | /prisma/db.sqlite 13 | /prisma/db.sqlite-journal 14 | 15 | # next.js 16 | /.next/ 17 | /out/ 18 | next-env.d.ts 19 | 20 | # production 21 | /build 22 | 23 | # misc 24 | .DS_Store 25 | *.pem 26 | 27 | # debug 28 | npm-debug.log* 29 | yarn-debug.log* 30 | yarn-error.log* 31 | .pnpm-debug.log* 32 | 33 | # local env files 34 | # do not commit any .env files to git, except for the .env.example file. https://create.t3.gg/en/usage/env-variables#using-environment-variables 35 | .env 36 | .env*.local 37 | 38 | # vercel 39 | .vercel 40 | 41 | # typescript 42 | *.tsbuildinfo 43 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/README.md: -------------------------------------------------------------------------------- 1 | # Promptable + NextJS! 2 | 3 | Screen Shot 2023-02-15 at 11 18 59 AM 4 | 5 | ## One-Click Deploy 6 | 7 | 10 | 11 | # Create T3 App 12 | 13 | This is a [T3 Stack](https://create.t3.gg/) project bootstrapped with `create-t3-app`. 14 | 15 | ## What's next? How do I make an app with this? 16 | 17 | We try to keep this project as simple as possible, so you can start with just the scaffolding we set up for you, and add additional things later when they become necessary. 18 | 19 | If you are not familiar with the different technologies used in this project, please refer to the respective docs. If you still are in the wind, please join our [Discord](https://t3.gg/discord) and ask for help. 
20 | 21 | - [Next.js](https://nextjs.org) 22 | - [NextAuth.js](https://next-auth.js.org) 23 | - [Prisma](https://prisma.io) 24 | - [Tailwind CSS](https://tailwindcss.com) 25 | - [tRPC](https://trpc.io) 26 | 27 | ## Learn More 28 | 29 | To learn more about the [T3 Stack](https://create.t3.gg/), take a look at the following resources: 30 | 31 | - [Documentation](https://create.t3.gg/) 32 | - [Learn the T3 Stack](https://create.t3.gg/en/faq#what-learning-resources-are-currently-available) — Check out these awesome tutorials 33 | 34 | You can check out the [create-t3-app GitHub repository](https://github.com/t3-oss/create-t3-app) — your feedback and contributions are welcome! 35 | 36 | ## How do I deploy this? 37 | 38 | Follow our deployment guides for [Vercel](https://create.t3.gg/en/deployment/vercel), [Netlify](https://create.t3.gg/en/deployment/netlify) and [Docker](https://create.t3.gg/en/deployment/docker) for more information. 39 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/next.config.mjs: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | 3 | /** 4 | * Run `build` or `dev` with `SKIP_ENV_VALIDATION` to skip env validation. 5 | * This is especially useful for Docker builds. 6 | */ 7 | !process.env.SKIP_ENV_VALIDATION && (await import("./src/env.mjs")); 8 | 9 | /** @type {import("next").NextConfig} */ 10 | const config = { 11 | reactStrictMode: true, 12 | 13 | /** 14 | * If you have the "experimental: { appDir: true }" setting enabled, then you 15 | * must comment the below `i18n` config out. 16 | * 17 | * @see https://github.com/vercel/next.js/issues/41980 18 | */ 19 | i18n: { 20 | locales: ["en"], 21 | defaultLocale: "en", 22 | }, 23 | }; 24 | export default config; 25 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nextjs-promptable", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "build": "next build", 7 | "dev": "next dev", 8 | "lint": "next lint", 9 | "start": "next start" 10 | }, 11 | "dependencies": { 12 | "axios": "^1.3.3", 13 | "classnames": "^2.3.2", 14 | "eventsource-parser": "^0.1.0", 15 | "next": "13.1.6", 16 | "promptable": "workspace:*", 17 | "react": "18.2.0", 18 | "react-dom": "18.2.0", 19 | "react-textarea-autosize": "^8.4.0", 20 | "uuid": "^9.0.0", 21 | "zod": "^3.20.2" 22 | }, 23 | "devDependencies": { 24 | "@types/axios": "^0.14.0", 25 | "@types/node": "^18.11.18", 26 | "@types/prettier": "^2.7.2", 27 | "@types/react": "^18.0.26", 28 | "@types/react-dom": "^18.0.10", 29 | "@types/uuid": "^9.0.0", 30 | "@typescript-eslint/eslint-plugin": "^5.47.1", 31 | "@typescript-eslint/parser": "^5.47.1", 32 | "autoprefixer": "^10.4.7", 33 | "eslint": "^8.30.0", 34 | "eslint-config-next": "13.1.6", 35 | "postcss": "^8.4.14", 36 | "prettier": "^2.8.1", 37 | "prettier-plugin-tailwindcss": "^0.2.1", 38 | "tailwindcss": "^3.2.0", 39 | "typescript": "^4.9.4" 40 | }, 41 | "ct3aMetadata": { 42 | "initVersion": "7.5.1" 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/postcss.config.cjs: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | }; 7 | 
-------------------------------------------------------------------------------- /apps/nextjs-promptable/prettier.config.cjs: -------------------------------------------------------------------------------- 1 | /** @type {import("prettier").Config} */ 2 | module.exports = { 3 | plugins: [require.resolve("prettier-plugin-tailwindcss")], 4 | }; 5 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/nextjs-promptable/public/favicon.ico -------------------------------------------------------------------------------- /apps/nextjs-promptable/public/img/bg-lines.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/nextjs-promptable/public/img/bg-lines.png -------------------------------------------------------------------------------- /apps/nextjs-promptable/public/img/bg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/nextjs-promptable/public/img/bg.png -------------------------------------------------------------------------------- /apps/nextjs-promptable/public/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/nextjs-promptable/public/img/favicon.ico -------------------------------------------------------------------------------- /apps/nextjs-promptable/public/img/globe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/nextjs-promptable/public/img/globe.png -------------------------------------------------------------------------------- /apps/nextjs-promptable/public/img/globe.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/public/img/promptable-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/nextjs-promptable/public/img/promptable-icon.png -------------------------------------------------------------------------------- /apps/nextjs-promptable/public/img/tracing-expanded.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/nextjs-promptable/public/img/tracing-expanded.png -------------------------------------------------------------------------------- /apps/nextjs-promptable/public/img/tracing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/nextjs-promptable/public/img/tracing.png -------------------------------------------------------------------------------- 
/apps/nextjs-promptable/public/next.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/public/vercel.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/src/env.mjs: -------------------------------------------------------------------------------- 1 | /* eslint-disable @typescript-eslint/ban-ts-comment */ 2 | import { z } from "zod"; 3 | 4 | /** 5 | * Specify your server-side environment variables schema here. 6 | * This way you can ensure the app isn't built with invalid env vars. 7 | */ 8 | const server = z.object({ 9 | NODE_ENV: z.enum(["development", "test", "production"]), 10 | }); 11 | 12 | /** 13 | * Specify your client-side environment variables schema here. 14 | * This way you can ensure the app isn't built with invalid env vars. 15 | * To expose them to the client, prefix them with `NEXT_PUBLIC_`. 16 | */ 17 | const client = z.object({ 18 | // NEXT_PUBLIC_CLIENTVAR: z.string().min(1), 19 | }); 20 | 21 | /** 22 | * You can't destruct `process.env` as a regular object in the Next.js 23 | * edge runtimes (e.g. middlewares) or client-side so we need to destruct manually. 24 | * @type {Record | keyof z.infer, string | undefined>} 25 | */ 26 | const processEnv = { 27 | NODE_ENV: process.env.NODE_ENV, 28 | // NEXT_PUBLIC_CLIENTVAR: process.env.NEXT_PUBLIC_CLIENTVAR, 29 | }; 30 | 31 | // Don't touch the part below 32 | // -------------------------- 33 | 34 | const merged = server.merge(client); 35 | /** @type z.infer 36 | * @ts-ignore - can't type this properly in jsdoc */ 37 | let env = process.env; 38 | 39 | if (!!process.env.SKIP_ENV_VALIDATION == false) { 40 | const isServer = typeof window === "undefined"; 41 | 42 | const parsed = isServer 43 | ? merged.safeParse(processEnv) // on server we can validate all env vars 44 | : client.safeParse(processEnv); // on client we can only validate the ones that are exposed 45 | 46 | if (parsed.success === false) { 47 | console.error( 48 | "❌ Invalid environment variables:", 49 | parsed.error.flatten().fieldErrors, 50 | ); 51 | throw new Error("Invalid environment variables"); 52 | } 53 | 54 | /** @type z.infer 55 | * @ts-ignore - can't type this properly in jsdoc */ 56 | env = new Proxy(parsed.data, { 57 | get(target, prop) { 58 | if (typeof prop !== "string") return undefined; 59 | // Throw a descriptive error if a server-side env var is accessed on the client 60 | // Otherwise it would just be returning `undefined` and be annoying to debug 61 | if (!isServer && !prop.startsWith("NEXT_PUBLIC_")) 62 | throw new Error( 63 | process.env.NODE_ENV === "production" 64 | ? 
"❌ Attempted to access a server-side environment variable on the client" 65 | : `❌ Attempted to access server-side environment variable '${prop}' on the client`, 66 | ); 67 | /* @ts-ignore - can't type this properly in jsdoc */ 68 | return target[prop]; 69 | }, 70 | }); 71 | } 72 | 73 | export { env }; 74 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/src/pages/_app.tsx: -------------------------------------------------------------------------------- 1 | import { type AppType } from "next/dist/shared/lib/utils"; 2 | 3 | import "../styles/globals.css"; 4 | 5 | const MyApp: AppType = ({ Component, pageProps }) => { 6 | return ; 7 | }; 8 | 9 | export default MyApp; 10 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/src/pages/api/chat.ts: -------------------------------------------------------------------------------- 1 | // Next.js API route support: https://nextjs.org/docs/api-routes/introduction 2 | import type { NextApiRequest, NextApiResponse } from "next"; 3 | 4 | import * as promptable from "promptable"; 5 | 6 | const openai = new promptable.OpenAI(process.env.OPENAI_API_KEY || ""); 7 | 8 | // Note: this only works for one client at a time. 9 | const chatHistory = new promptable.BufferedChatMemory(); 10 | 11 | export default async function handler( 12 | req: NextApiRequest, 13 | res: NextApiResponse 14 | ) { 15 | const { userInput, clear } = JSON.parse(req.body); 16 | 17 | // clear the chat history 18 | if (clear) { 19 | chatHistory.clear(); 20 | return res.status(200).json({}); 21 | } 22 | 23 | // get a response 24 | 25 | const prompt = promptable.prompts.chatbot(); 26 | 27 | const memoryChain = new promptable.MemoryLLMChain( 28 | prompt, 29 | openai, 30 | chatHistory 31 | ); 32 | 33 | chatHistory.addUserMessage(userInput); 34 | const botOutput = await memoryChain.run({ userInput }); 35 | chatHistory.addBotMessage(botOutput); 36 | 37 | res.status(200).json({ text: botOutput }); 38 | } 39 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/src/pages/api/completion.ts: -------------------------------------------------------------------------------- 1 | import { NextApiRequest, NextApiResponse } from "next"; 2 | import { Prompt, OpenAI, LLMChain } from "promptable"; 3 | 4 | export default async function handler( 5 | req: NextApiRequest, 6 | res: NextApiResponse 7 | ) { 8 | type bulletBody = { 9 | organization: string; 10 | title: string; 11 | }; 12 | 13 | // Ensure key is properly processed 14 | if (!process.env.OPENAI_API_KEY) { 15 | throw new Error("Missing env var from OpenAI"); 16 | } 17 | 18 | // Retrieve organization and title params from request body 19 | const { organization, title }: bulletBody = req.body; 20 | 21 | const openai = new OpenAI(process.env.OPENAI_API_KEY); 22 | 23 | // Check for incorrect request 24 | if (!organization || !title) { 25 | return res.status(400).send("Improper request"); 26 | } 27 | 28 | // Create prompt with the given variables 29 | const generateBulletPrompt = new Prompt( 30 | `Write 3 resume professional "\u2022" points for a {{title}} at {{organization}}. 
31 | If relevant to the position, one should be quantifiable; do not label it as quantifiable.`, 32 | ["title", "organization"] 33 | ); 34 | 35 | // Create LLMChain with prompt and provider; run the chain 36 | const bulletChain = new LLMChain(generateBulletPrompt, openai); 37 | 38 | const bullets = await bulletChain.run({ 39 | title: title, 40 | organization: organization, 41 | }); 42 | 43 | // Return response if text generated successfully; otherwise fail and return 400 44 | if (bullets) { 45 | return res.status(200).json({ bullets: bullets }); 46 | } else { 47 | return res.status(400).send("Error generating bullets"); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/src/pages/api/resume-completions.ts: -------------------------------------------------------------------------------- 1 | import { NextApiRequest, NextApiResponse } from "next"; 2 | import { Prompt, OpenAI, LLMChain } from "promptable"; 3 | 4 | export default async function handler(req: NextApiRequest, res: NextApiResponse) { 5 | 6 | type bulletBody = { 7 | organization: string, 8 | title: string 9 | } 10 | 11 | // Ensure key is properly processed 12 | if (!process.env.OPENAI_API_KEY) { 13 | throw new Error("Missing env var from OpenAI"); 14 | } 15 | 16 | // Retrieve organization and title params from request body 17 | const { organization, title }: bulletBody = req.body; 18 | 19 | const openai = new OpenAI(process.env.OPENAI_API_KEY); 20 | 21 | // Check for incorrect request 22 | if (!organization || !title) { 23 | return res.status(400).send("Improper request") 24 | } 25 | 26 | // Create prompt with the given variables 27 | const generateBulletPrompt = new Prompt(`Write 3 resume professional "\u2022" points for a {{title}} at {{organization}}. 28 | If relevant to the position, one should be quantifiable; do not label it as quantifiable.`, ['title', 'organization']); 29 | 30 | // Create LLMChain with prompt and provider; run the chain 31 | const bulletChain = new LLMChain(generateBulletPrompt, openai) 32 | 33 | const bullets = await bulletChain.run({title: title, organization: organization}) 34 | 35 | // Return response if text generated successfully; otherwise fail and return 400 36 | if (bullets) { 37 | return res.status(200).json({bullets: bullets}) 38 | } else { 39 | return res.status(400).send("Error generating bullets") 40 | } 41 | } -------------------------------------------------------------------------------- /apps/nextjs-promptable/src/pages/api/stream.ts: -------------------------------------------------------------------------------- 1 | import axios from "axios"; 2 | import * as promptable from "promptable"; 3 | import { NextApiRequest, NextApiResponse } from "next"; 4 | 5 | // Note: this only works for one client at a time. 6 | const chatHistory = new promptable.BufferedChatMemory(); 7 | 8 | interface Message { 9 | isUserMessage: boolean; 10 | text: string; 11 | id: string; 12 | } 13 | 14 | export default async function handler( 15 | req: NextApiRequest, 16 | res: NextApiResponse 17 | ) { 18 | console.log("req.body", req.body); 19 | const { userInput, clear, prevMessages } = req.body; 20 | 21 | // clear the chat history 22 | if (clear) { 23 | chatHistory.clear(); 24 | return res.status(200).json({}); 25 | } 26 | 27 | const messages = prevMessages as Message[]; 28 | 29 | // We don't know what the last message was b/c we streamed it to the client. 30 | // so we need to find it in the list of previous messages.
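// (Caveat: `messages.reverse()` below mutates the array in place; a non-mutating sketch would be `[...messages].reverse().find(...)`.)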
31 | const lastBotMessage = messages.reverse().find((m) => !m.isUserMessage); 32 | console.log("The last bot message was:", lastBotMessage); 33 | chatHistory.addBotMessage(lastBotMessage?.text ?? ""); 34 | 35 | // then add the user message 36 | chatHistory.addUserMessage(userInput); 37 | 38 | const chatbotPrompt = promptable.prompts.chatbot(); 39 | 40 | const promptText = chatbotPrompt.format({ 41 | memory: chatHistory.get(), 42 | userInput, 43 | }); 44 | 45 | const oaiRes = await axios.post( 46 | "https://api.openai.com/v1/completions", 47 | { 48 | prompt: promptText, 49 | model: "text-davinci-003", 50 | max_tokens: 1000, 51 | stream: true, 52 | }, 53 | { 54 | headers: { 55 | "Content-Type": "application/json", 56 | Authorization: `Bearer ${process.env.OPENAI_API_KEY ?? ""}`, 57 | }, 58 | responseType: "stream", 59 | } 60 | ); 61 | 62 | oaiRes.data.pipe(res); 63 | } 64 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/src/pages/chat.tsx: -------------------------------------------------------------------------------- 1 | import { type NextPage } from "next"; 2 | import Head from "next/head"; 3 | import Image from "next/image"; 4 | import Link from "next/link"; 5 | import Chat from "../components/Chat"; 6 | 7 | const ChatPage: NextPage = () => { 8 | return ; 9 | }; 10 | 11 | export default ChatPage; 12 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/src/pages/index.tsx: -------------------------------------------------------------------------------- 1 | import { type NextPage } from "next"; 2 | import Head from "next/head"; 3 | import Image from "next/image"; 4 | import Link from "next/link"; 5 | 6 | const Home: NextPage = () => { 7 | return ( 8 | <> 9 | 10 | Create T3 App 11 | 12 | 13 | 14 |
15 |       <main className="flex min-h-screen flex-col items-center justify-center">
16 |         <div className="container flex flex-col items-center justify-center gap-12 px-4 py-16">
17 |           <div className="flex flex-row items-center gap-8">
18 |             <Image
19 |               src="/next.svg"
20 |               alt="Next.js Logo"
21 |               className="dark:invert"
22 |               width={180}
23 |               height={37}
24 |               priority
25 |             />
26 | 
27 |             <p className="text-4xl font-bold">
28 |               +
29 |             </p>
30 | 
31 |             <Image
32 |               src="/img/promptable-icon.png"
33 |               alt={"Promptable Icon"}
34 |               width={120}
35 |               height={120}
36 |               priority
37 |             />
38 |           </div>
39 | 
40 |           <div className="grid grid-cols-1 gap-4 sm:grid-cols-2 md:gap-8">
41 |             <Link
42 |               className="flex max-w-xs flex-col gap-4 rounded-xl border p-6 hover:bg-gray-100"
43 |               href="https://docs-promptable.vercel.app"
44 |               target="_blank"
45 |               rel="noreferrer"
46 |             >
47 |               <h3 className="text-2xl font-bold">
48 |                 Docs ->
49 |               </h3>
50 |               <div className="text-lg">
51 |                 Read the Promptable Docs!
52 |               </div>
53 |             </Link>
54 | 
55 |             <Link
56 |               className="flex max-w-xs flex-col gap-4 rounded-xl border p-6 hover:bg-gray-100"
57 |               href="/chat"
58 |             >
59 |               <h3 className="text-2xl font-bold">
60 |                 Chat Bot->
61 |               </h3>
62 |               <div className="text-lg">
63 |                 Prebuilt Chat Bot UI & API.
64 |               </div>
65 |             </Link>
66 |           </div>
67 |         </div>
68 | 
69 |         <div
70 |           aria-hidden="true"
71 |           className="absolute inset-0 -z-10"
72 |           style={{
73 |             backgroundImage: "url(/img/bg.png)",
74 |             backgroundSize: "cover",
75 |             backgroundPosition: "center",
76 |             opacity: 0.15,
77 |           }}
78 |         />
79 | 
80 | 
81 |       </main>
82 |     </>
83 | 84 | ); 85 | }; 86 | 87 | export default Home; 88 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/src/styles/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/tailwind.config.cjs: -------------------------------------------------------------------------------- 1 | /** @type {import('tailwindcss').Config} */ 2 | module.exports = { 3 | content: ["./src/**/*.{js,ts,jsx,tsx}"], 4 | theme: { 5 | extend: {}, 6 | }, 7 | plugins: [], 8 | }; 9 | -------------------------------------------------------------------------------- /apps/nextjs-promptable/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2017", 4 | "lib": ["dom", "dom.iterable", "esnext"], 5 | "allowJs": true, 6 | "checkJs": true, 7 | "skipLibCheck": true, 8 | "strict": true, 9 | "forceConsistentCasingInFileNames": true, 10 | "noEmit": true, 11 | "esModuleInterop": true, 12 | "module": "esnext", 13 | "moduleResolution": "node", 14 | "resolveJsonModule": true, 15 | "isolatedModules": true, 16 | "jsx": "preserve", 17 | "incremental": true, 18 | "noUncheckedIndexedAccess": true 19 | }, 20 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", "**/*.cjs", "**/*.mjs"], 21 | "exclude": ["node_modules"] 22 | } 23 | -------------------------------------------------------------------------------- /apps/web/.env.example: -------------------------------------------------------------------------------- 1 | # Since .env is gitignored, you can use .env.example to build a new `.env` file when you clone the repo. 2 | # Keep this file up-to-date when you add new variables to `.env`. 3 | 4 | # This file will be committed to version control, so make sure not to have any secrets in it. 5 | # If you are cloning this repo, create a copy of this file named `.env` and populate it with your secrets. 6 | 7 | # When adding additional env variables, the schema in /env/schema.mjs should be updated accordingly 8 | # Prisma 9 | DATABASE_URL=file:./db.sqlite 10 | -------------------------------------------------------------------------------- /apps/web/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "overrides": [ 3 | { 4 | "extends": [ 5 | "plugin:@typescript-eslint/recommended-requiring-type-checking" 6 | ], 7 | "files": [ 8 | "*.ts", 9 | "*.tsx" 10 | ], 11 | "parserOptions": { 12 | "project": "./apps/web/tsconfig.json" 13 | } 14 | } 15 | ], 16 | "parser": "@typescript-eslint/parser", 17 | "parserOptions": { 18 | "project": "./tsconfig.json" 19 | }, 20 | "plugins": [ 21 | "@typescript-eslint" 22 | ], 23 | "extends": [ 24 | "next/core-web-vitals", 25 | "plugin:@typescript-eslint/recommended" 26 | ], 27 | "rules": { 28 | "@typescript-eslint/consistent-type-imports": "warn" 29 | } 30 | } -------------------------------------------------------------------------------- /apps/web/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # database 12 | /prisma/db.sqlite 13 | /prisma/db.sqlite-journal 14 | 15 | # next.js 16 | /.next/ 17 | /out/ 18 | next-env.d.ts 19 | 20 | # production 21 | /build 22 | 23 | # misc 24 | .DS_Store 25 | *.pem 26 | 27 | # debug 28 | npm-debug.log* 29 | yarn-debug.log* 30 | yarn-error.log* 31 | .pnpm-debug.log* 32 | 33 | # local env files 34 | # do not commit any .env files to git, except for the .env.example file. https://create.t3.gg/en/usage/env-variables#using-environment-variables 35 | .env 36 | .env*.local 37 | 38 | # vercel 39 | .vercel 40 | 41 | # typescript 42 | *.tsbuildinfo 43 | -------------------------------------------------------------------------------- /apps/web/README.md: -------------------------------------------------------------------------------- 1 | # setup 2 | 3 | run "npx prisma db push" 4 | 5 | # Chains 6 | 7 | add a chain with "npx prisma db seed" 8 | 9 | # Create T3 App 10 | 11 | This is a [T3 Stack](https://create.t3.gg/) project bootstrapped with `create-t3-app`. 12 | 13 | ## What's next? How do I make an app with this? 14 | 15 | We try to keep this project as simple as possible, so you can start with just the scaffolding we set up for you, and add additional things later when they become necessary. 16 | 17 | If you are not familiar with the different technologies used in this project, please refer to the respective docs. If you still are in the wind, please join our [Discord](https://t3.gg/discord) and ask for help. 18 | 19 | - [Next.js](https://nextjs.org) 20 | - [NextAuth.js](https://next-auth.js.org) 21 | - [Prisma](https://prisma.io) 22 | - [Tailwind CSS](https://tailwindcss.com) 23 | - [tRPC](https://trpc.io) 24 | 25 | ## Learn More 26 | 27 | To learn more about the [T3 Stack](https://create.t3.gg/), take a look at the following resources: 28 | 29 | - [Documentation](https://create.t3.gg/) 30 | - [Learn the T3 Stack](https://create.t3.gg/en/faq#what-learning-resources-are-currently-available) — Check out these awesome tutorials 31 | 32 | You can check out the [create-t3-app GitHub repository](https://github.com/t3-oss/create-t3-app) — your feedback and contributions are welcome! 33 | 34 | ## How do I deploy this? 35 | 36 | Follow our deployment guides for [Vercel](https://create.t3.gg/en/deployment/vercel), [Netlify](https://create.t3.gg/en/deployment/netlify) and [Docker](https://create.t3.gg/en/deployment/docker) for more information. 37 | -------------------------------------------------------------------------------- /apps/web/next-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | /// 3 | 4 | // NOTE: This file should not be edited 5 | // see https://nextjs.org/docs/basic-features/typescript for more information. 6 | -------------------------------------------------------------------------------- /apps/web/next.config.mjs: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | /** 3 | * Run `build` or `dev` with `SKIP_ENV_VALIDATION` to skip env validation. 4 | * This is especially useful for Docker builds. 
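 * For example (assumed invocation): `SKIP_ENV_VALIDATION=1 npm run build`.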
5 | */ 6 | !process.env.SKIP_ENV_VALIDATION && (await import("./src/env/server.mjs")); 7 | 8 | /** @type {import("next").NextConfig} */ 9 | const config = { 10 | reactStrictMode: true, 11 | /* If trying out the experimental appDir, comment the i18n config out 12 | * @see https://github.com/vercel/next.js/issues/41980 */ 13 | i18n: { 14 | locales: ["en"], 15 | defaultLocale: "en", 16 | }, 17 | transpilePackages: ["ui"] 18 | }; 19 | export default config; 20 | -------------------------------------------------------------------------------- /apps/web/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "web", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "build": "next build", 7 | "dev": "next dev", 8 | "postinstall": "prisma generate", 9 | "disabled:lint": "next lint", 10 | "start": "next start" 11 | }, 12 | "dependencies": { 13 | "@prisma/client": "^4.8.0", 14 | "@tanstack/react-query": "^4.20.0", 15 | "@trpc/client": "^10.8.1", 16 | "@trpc/next": "^10.8.1", 17 | "@trpc/react-query": "^10.8.1", 18 | "@trpc/server": "^10.8.1", 19 | "classnames": "^2.3.2", 20 | "daisyui": "^2.49.0", 21 | "jotai": "^1.13.1", 22 | "next": "13.1.2", 23 | "react": "18.2.0", 24 | "react-dom": "18.2.0", 25 | "react-json-view": "^1.21.3", 26 | "react-query": "^3.39.3", 27 | "superjson": "1.9.1", 28 | "ui": "workspace:*", 29 | "zod": "^3.20.2" 30 | }, 31 | "devDependencies": { 32 | "@types/node": "^18.11.18", 33 | "@types/prettier": "^2.7.2", 34 | "@types/react": "^18.0.26", 35 | "@types/react-dom": "^18.0.10", 36 | "@typescript-eslint/eslint-plugin": "^5.47.1", 37 | "@typescript-eslint/parser": "^5.47.1", 38 | "autoprefixer": "^10.4.7", 39 | "eslint": "^8.30.0", 40 | "eslint-config-custom": "workspace:*", 41 | "eslint-config-next": "13.1.2", 42 | "postcss": "^8.4.14", 43 | "prettier": "^2.8.1", 44 | "prettier-plugin-tailwindcss": "^0.2.1", 45 | "prisma": "^4.8.0", 46 | "tailwindcss": "^3.2.0", 47 | "ts-node": "^10.9.1", 48 | "tsconfig": "workspace:*", 49 | "typescript": "^4.9.4" 50 | }, 51 | "ct3aMetadata": { 52 | "initVersion": "7.3.2" 53 | }, 54 | "prisma": { 55 | "seed": "ts-node --compiler-options {\"module\":\"CommonJS\"} prisma/seed.ts" 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /apps/web/postcss.config.cjs: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | }; 7 | -------------------------------------------------------------------------------- /apps/web/prettier.config.cjs: -------------------------------------------------------------------------------- 1 | /** @type {import("prettier").Config} */ 2 | module.exports = { 3 | plugins: [require.resolve("prettier-plugin-tailwindcss")], 4 | }; 5 | -------------------------------------------------------------------------------- /apps/web/prisma/schema.prisma: -------------------------------------------------------------------------------- 1 | // This is your Prisma schema file, 2 | // learn more about it in the docs: https://pris.ly/d/prisma-schema 3 | 4 | generator client { 5 | provider = "prisma-client-js" 6 | } 7 | 8 | datasource db { 9 | provider = "sqlite" 10 | url = env("DATABASE_URL") 11 | } 12 | 13 | model Example { 14 | id String @id @default(cuid()) 15 | createdAt DateTime @default(now()) 16 | updatedAt DateTime @updatedAt 17 | } 18 | 19 | model Trace { 20 | id String @id @default(cuid()) 21 | trace 
String 22 | } 23 | -------------------------------------------------------------------------------- /apps/web/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruvector/promptable/dd877ac53e7d406f07bcb701e99999a782a7604f/apps/web/public/favicon.ico -------------------------------------------------------------------------------- /apps/web/src/components/Tabs.tsx: -------------------------------------------------------------------------------- 1 | import { atom, useAtom } from "jotai"; 2 | import classnames from "classnames"; 3 | import { useState } from "react"; 4 | 5 | export const tabs = { 6 | Traces: "traces", 7 | }; 8 | 9 | export const tabAtom = atom(tabs.Traces); 10 | 11 | export const Tabs = () => { 12 | const [activeTab, setActiveTab] = useAtom(tabAtom); 13 | 14 | const handleClickTab = (tab: string) => { 15 | setActiveTab(tab); 16 | }; 17 | return ( 18 |
25 |       {Object.values(tabs).map((tab) => {
26 |         return (
27 |           <button
28 |             key={tab}
29 |             type="button"
30 |             className={classnames("daisy-tab daisy-tab-lifted", {
31 |               "daisy-tab-active": activeTab === tab,
32 |             })}
33 |             onClick={() => handleClickTab(tab)}
34 |           >
35 |             {tab}
36 |           </button>
37 |         );
38 |       })}
39 | 
40 |       {/* hack for border */}
41 |       <div className="daisy-tab flex-1 cursor-default" />
42 |     </div>
43 | ); 44 | }; 45 | -------------------------------------------------------------------------------- /apps/web/src/env/client.mjs: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | import { clientEnv, clientSchema } from "./schema.mjs"; 3 | 4 | const _clientEnv = clientSchema.safeParse(clientEnv); 5 | 6 | export const formatErrors = ( 7 | /** @type {import('zod').ZodFormattedError,string>} */ 8 | errors, 9 | ) => 10 | Object.entries(errors) 11 | .map(([name, value]) => { 12 | if (value && "_errors" in value) 13 | return `${name}: ${value._errors.join(", ")}\n`; 14 | }) 15 | .filter(Boolean); 16 | 17 | if (!_clientEnv.success) { 18 | console.error( 19 | "❌ Invalid environment variables:\n", 20 | ...formatErrors(_clientEnv.error.format()), 21 | ); 22 | throw new Error("Invalid environment variables"); 23 | } 24 | 25 | for (let key of Object.keys(_clientEnv.data)) { 26 | if (!key.startsWith("NEXT_PUBLIC_")) { 27 | console.warn( 28 | `❌ Invalid public environment variable name: ${key}. It must begin with 'NEXT_PUBLIC_'`, 29 | ); 30 | 31 | throw new Error("Invalid public environment variable name"); 32 | } 33 | } 34 | 35 | export const env = _clientEnv.data; 36 | -------------------------------------------------------------------------------- /apps/web/src/env/schema.mjs: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | import { z } from "zod"; 3 | 4 | /** 5 | * Specify your server-side environment variables schema here. 6 | * This way you can ensure the app isn't built with invalid env vars. 7 | */ 8 | export const serverSchema = z.object({ 9 | DATABASE_URL: z.string().url(), 10 | NODE_ENV: z.enum(["development", "test", "production"]), 11 | }); 12 | 13 | /** 14 | * You can't destruct `process.env` as a regular object in the Next.js 15 | * middleware, so you have to do it manually here. 16 | * @type {{ [k in keyof z.infer]: z.infer[k] | undefined }} 17 | */ 18 | export const serverEnv = { 19 | DATABASE_URL: process.env.DATABASE_URL, 20 | NODE_ENV: process.env.NODE_ENV, 21 | }; 22 | 23 | /** 24 | * Specify your client-side environment variables schema here. 25 | * This way you can ensure the app isn't built with invalid env vars. 26 | * To expose them to the client, prefix them with `NEXT_PUBLIC_`. 27 | */ 28 | export const clientSchema = z.object({ 29 | // NEXT_PUBLIC_CLIENTVAR: z.string(), 30 | }); 31 | 32 | /** 33 | * You can't destruct `process.env` as a regular object, so you have to do 34 | * it manually here. This is because Next.js evaluates this at build time, 35 | * and only used environment variables are included in the build. 36 | * @type {{ [k in keyof z.infer]: z.infer[k] | undefined }} 37 | */ 38 | export const clientEnv = { 39 | // NEXT_PUBLIC_CLIENTVAR: process.env.NEXT_PUBLIC_CLIENTVAR, 40 | }; 41 | -------------------------------------------------------------------------------- /apps/web/src/env/server.mjs: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | /** 3 | * This file is included in `/next.config.mjs` which ensures the app isn't built with invalid env vars. 4 | * It has to be a `.mjs`-file to be imported there. 
5 | */ 6 | import { serverSchema, serverEnv } from "./schema.mjs"; 7 | import { env as clientEnv, formatErrors } from "./client.mjs"; 8 | 9 | const _serverEnv = serverSchema.safeParse(serverEnv); 10 | 11 | if (!_serverEnv.success) { 12 | console.error( 13 | "❌ Invalid environment variables:\n", 14 | ...formatErrors(_serverEnv.error.format()), 15 | ); 16 | throw new Error("Invalid environment variables"); 17 | } 18 | 19 | for (let key of Object.keys(_serverEnv.data)) { 20 | if (key.startsWith("NEXT_PUBLIC_")) { 21 | console.warn("❌ You are exposing a server-side env-variable:", key); 22 | 23 | throw new Error("You are exposing a server-side env-variable"); 24 | } 25 | } 26 | 27 | export const env = { ..._serverEnv.data, ...clientEnv }; 28 | -------------------------------------------------------------------------------- /apps/web/src/pages/_app.tsx: -------------------------------------------------------------------------------- 1 | import { type AppType } from "next/app"; 2 | 3 | import { api } from "../utils/api"; 4 | 5 | import "../styles/globals.css"; 6 | 7 | const MyApp: AppType = ({ Component, pageProps }) => { 8 | return ; 9 | }; 10 | 11 | export default api.withTRPC(MyApp); 12 | -------------------------------------------------------------------------------- /apps/web/src/pages/api/completions.ts: -------------------------------------------------------------------------------- 1 | export {}; 2 | -------------------------------------------------------------------------------- /apps/web/src/pages/api/traces.ts: -------------------------------------------------------------------------------- 1 | import type { NextApiRequest, NextApiResponse } from "next"; 2 | import { NextApiHandler } from "next"; 3 | import { NextRequest, NextResponse } from "next/server"; 4 | import { api } from "src/utils/api"; 5 | import { PrismaClient } from "@prisma/client"; 6 | // change to get from prisma 7 | 8 | export default function handler(req: NextApiRequest, res: NextApiResponse) { 9 | if (req.method === "POST") { 10 | const prisma = new PrismaClient() 11 | const trace = req.body; 12 | console.log("adding trace:", trace) 13 | 14 | try { 15 | async function addTrace() { 16 | await prisma.trace.create({ 17 | data: { 18 | trace: JSON.stringify(trace) 19 | } 20 | }) 21 | } 22 | addTrace(); 23 | res.status(200).send(null); 24 | } catch (error) { 25 | console.error(`Error adding traces to server: ${(error as Error).message}`); 26 | } 27 | } 28 | 29 | // GET 30 | res.status(200).json("get response"); 31 | } 32 | -------------------------------------------------------------------------------- /apps/web/src/pages/api/trpc/[trpc].ts: -------------------------------------------------------------------------------- 1 | import { createNextApiHandler } from "@trpc/server/adapters/next"; 2 | 3 | import { env } from "../../../env/server.mjs"; 4 | import { createTRPCContext } from "../../../server/api/trpc"; 5 | import { appRouter } from "../../../server/api/root"; 6 | 7 | // export API handler 8 | export default createNextApiHandler({ 9 | router: appRouter, 10 | createContext: createTRPCContext, 11 | onError: 12 | env.NODE_ENV === "development" 13 | ? ({ path, error }) => { 14 | console.error( 15 | `❌ tRPC failed on ${path ?? 
""}: ${error.message}`, 16 | ); 17 | } 18 | : undefined, 19 | }); 20 | -------------------------------------------------------------------------------- /apps/web/src/pages/index.tsx: -------------------------------------------------------------------------------- 1 | import { Tabs } from "@components/Tabs"; 2 | import { type NextPage } from "next"; 3 | import Head from "next/head"; 4 | import Link from "next/link"; 5 | import { Provider as JotaiProvider } from "jotai"; 6 | 7 | import { api } from "../utils/api"; 8 | import { Content } from "@components/Content"; 9 | import { QueryClient, QueryClientProvider } from "react-query"; 10 | 11 | const queryClient = new QueryClient(); 12 | 13 | const Home: NextPage = () => { 14 | return ( 15 | 16 | 17 | 18 | Promptable UI 19 | 20 | 21 | 22 |
23 |           <Tabs />
24 |           <Content />
25 |           hello World!!!
26 |         </main>
27 |       </QueryClientProvider>
28 |     </JotaiProvider>
29 | ); 30 | }; 31 | 32 | export default Home; 33 | -------------------------------------------------------------------------------- /apps/web/src/server/api/root.ts: -------------------------------------------------------------------------------- 1 | import { createTRPCRouter } from "./trpc"; 2 | import { traceRouter } from "./routers/traces"; 3 | 4 | /** 5 | * This is the primary router for your server. 6 | * 7 | * All routers added in /api/routers should be manually added here 8 | */ 9 | export const appRouter = createTRPCRouter({ 10 | trace: traceRouter, 11 | }); 12 | 13 | // export type definition of API 14 | export type AppRouter = typeof appRouter; 15 | -------------------------------------------------------------------------------- /apps/web/src/server/api/routers/traces.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | 3 | import { createTRPCRouter, publicProcedure } from "../trpc"; 4 | 5 | export const traceRouter = createTRPCRouter({ 6 | getTraces: publicProcedure.query(async ({ ctx }) => { 7 | const data = await ctx.prisma.trace.findMany(); 8 | const traces: any[] = []; 9 | try { 10 | for (const d of data) { 11 | traces.push(JSON.parse(d.trace)); 12 | } 13 | } catch (error) { 14 | console.error(`Error getting traces from server: ${(error as Error).message}`); 15 | } 16 | 17 | return traces; 18 | }), 19 | add: publicProcedure 20 | .input(z.object({ trace: z.any() })) 21 | .mutation(async ({ input, ctx }) => { 22 | await ctx.prisma.trace.create({ 23 | data: { 24 | trace: JSON.stringify(input.trace) 25 | } 26 | }) 27 | }) 28 | }); -------------------------------------------------------------------------------- /apps/web/src/server/api/trpc.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * YOU PROBABLY DON'T NEED TO EDIT THIS FILE, UNLESS: 3 | * 1. You want to modify request context (see Part 1) 4 | * 2. You want to create a new middleware or type of procedure (see Part 3) 5 | * 6 | * tl;dr - this is where all the tRPC server stuff is created and plugged in. 7 | * The pieces you will need to use are documented accordingly near the end 8 | */ 9 | 10 | /** 11 | * 1. CONTEXT 12 | * 13 | * This section defines the "contexts" that are available in the backend API 14 | * 15 | * These allow you to access things like the database, the session, etc, when 16 | * processing a request 17 | * 18 | */ 19 | import { type CreateNextContextOptions } from "@trpc/server/adapters/next"; 20 | 21 | import { prisma } from "../db"; 22 | 23 | type CreateContextOptions = Record; 24 | 25 | /** 26 | * This helper generates the "internals" for a tRPC context. If you need to use 27 | * it, you can export it from here 28 | * 29 | * Examples of things you may need it for: 30 | * - testing, so we dont have to mock Next.js' req/res 31 | * - trpc's `createSSGHelpers` where we don't have req/res 32 | * @see https://create.t3.gg/en/usage/trpc#-servertrpccontextts 33 | */ 34 | const createInnerTRPCContext = (_opts: CreateContextOptions) => { 35 | return { 36 | prisma, 37 | }; 38 | }; 39 | 40 | /** 41 | * This is the actual context you'll use in your router. It will be used to 42 | * process every request that goes through your tRPC endpoint 43 | * @link https://trpc.io/docs/context 44 | */ 45 | export const createTRPCContext = (_opts: CreateNextContextOptions) => { 46 | return createInnerTRPCContext({}); 47 | }; 48 | 49 | /** 50 | * 2. 
INITIALIZATION 51 | * 52 | * This is where the trpc api is initialized, connecting the context and 53 | * transformer 54 | */ 55 | import { initTRPC } from "@trpc/server"; 56 | import superjson from "superjson"; 57 | 58 | const t = initTRPC.context().create({ 59 | transformer: superjson, 60 | errorFormatter({ shape }) { 61 | return shape; 62 | }, 63 | }); 64 | 65 | /** 66 | * 3. ROUTER & PROCEDURE (THE IMPORTANT BIT) 67 | * 68 | * These are the pieces you use to build your tRPC API. You should import these 69 | * a lot in the /src/server/api/routers folder 70 | */ 71 | 72 | /** 73 | * This is how you create new routers and subrouters in your tRPC API 74 | * @see https://trpc.io/docs/router 75 | */ 76 | export const createTRPCRouter = t.router; 77 | 78 | /** 79 | * Public (unauthed) procedure 80 | * 81 | * This is the base piece you use to build new queries and mutations on your 82 | * tRPC API. It does not guarantee that a user querying is authorized, but you 83 | * can still access user session data if they are logged in 84 | */ 85 | export const publicProcedure = t.procedure; 86 | -------------------------------------------------------------------------------- /apps/web/src/server/db.ts: -------------------------------------------------------------------------------- 1 | import { PrismaClient } from "@prisma/client"; 2 | 3 | import { env } from "../env/server.mjs"; 4 | 5 | declare global { 6 | // eslint-disable-next-line no-var 7 | var prisma: PrismaClient | undefined; 8 | } 9 | 10 | export const prisma = 11 | global.prisma || 12 | new PrismaClient({ 13 | log: 14 | env.NODE_ENV === "development" ? ["query", "error", "warn"] : ["error"], 15 | }); 16 | 17 | if (env.NODE_ENV !== "production") { 18 | global.prisma = prisma; 19 | } 20 | -------------------------------------------------------------------------------- /apps/web/src/styles/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | 6 | /* Set the height of the document full */ 7 | @layer base { 8 | html, 9 | body, 10 | body > div:first-child, 11 | div#__next { 12 | @apply h-full; 13 | } 14 | } -------------------------------------------------------------------------------- /apps/web/src/utils/api.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * This is the client-side entrypoint for your tRPC API. 3 | * It's used to create the `api` object which contains the Next.js App-wrapper 4 | * as well as your typesafe react-query hooks. 5 | * 6 | * We also create a few inference helpers for input and output types 7 | */ 8 | import { httpBatchLink, loggerLink } from "@trpc/client"; 9 | import { createTRPCNext } from "@trpc/next"; 10 | import { type inferRouterInputs, type inferRouterOutputs } from "@trpc/server"; 11 | import superjson from "superjson"; 12 | 13 | import { type AppRouter } from "../server/api/root"; 14 | 15 | const getBaseUrl = () => { 16 | if (typeof window !== "undefined") return ""; // browser should use relative url 17 | if (process.env.VERCEL_URL) return `https://${process.env.VERCEL_URL}`; // SSR should use vercel url 18 | return `http://localhost:${process.env.PORT ?? 
3000}`; // dev SSR should use localhost 19 | }; 20 | 21 | /** 22 | * A set of typesafe react-query hooks for your tRPC API 23 | */ 24 | export const api = createTRPCNext({ 25 | config() { 26 | return { 27 | /** 28 | * Transformer used for data de-serialization from the server 29 | * @see https://trpc.io/docs/data-transformers 30 | **/ 31 | transformer: superjson, 32 | 33 | /** 34 | * Links used to determine request flow from client to server 35 | * @see https://trpc.io/docs/links 36 | * */ 37 | links: [ 38 | loggerLink({ 39 | enabled: (opts) => 40 | process.env.NODE_ENV === "development" || 41 | (opts.direction === "down" && opts.result instanceof Error), 42 | }), 43 | httpBatchLink({ 44 | url: `${getBaseUrl()}/api/trpc`, 45 | }), 46 | ], 47 | }; 48 | }, 49 | /** 50 | * Whether tRPC should await queries when server rendering pages 51 | * @see https://trpc.io/docs/nextjs#ssr-boolean-default-false 52 | */ 53 | ssr: false, 54 | }); 55 | 56 | /** 57 | * Inference helper for inputs 58 | * @example type HelloInput = RouterInputs['example']['hello'] 59 | **/ 60 | export type RouterInputs = inferRouterInputs; 61 | /** 62 | * Inference helper for outputs 63 | * @example type HelloOutput = RouterOutputs['example']['hello'] 64 | **/ 65 | export type RouterOutputs = inferRouterOutputs; 66 | -------------------------------------------------------------------------------- /apps/web/tailwind.config.cjs: -------------------------------------------------------------------------------- 1 | /** @type {import('tailwindcss').Config} */ 2 | module.exports = { 3 | content: ["./src/**/*.{js,ts,jsx,tsx}"], 4 | theme: { 5 | extend: {}, 6 | }, 7 | plugins: [require("daisyui")], 8 | daisyui: { 9 | prefix: 'daisy-', 10 | themes: ["light", "dark", "cupcake", "bumblebee", "emerald", "corporate", "synthwave", "retro", "cyberpunk", "valentine", "halloween", "garden", "forest", "aqua", "lofi", "pastel", "fantasy", "wireframe", "black", "luxury", "dracula", "cmyk", "autumn", "business", "acid", "lemonade", "night", "coffee", "winter"], 11 | } 12 | }; 13 | -------------------------------------------------------------------------------- /apps/web/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2017", 4 | "lib": [ 5 | "dom", 6 | "dom.iterable", 7 | "esnext" 8 | ], 9 | "allowJs": true, 10 | "skipLibCheck": true, 11 | "strict": true, 12 | "forceConsistentCasingInFileNames": true, 13 | "noEmit": true, 14 | "esModuleInterop": true, 15 | "module": "esnext", 16 | "moduleResolution": "node", 17 | "resolveJsonModule": true, 18 | "isolatedModules": true, 19 | "jsx": "preserve", 20 | "incremental": true, 21 | "noUncheckedIndexedAccess": true, 22 | "baseUrl": ".", 23 | "paths": { 24 | "@components/*": [ 25 | "src/components/*" 26 | ], 27 | } 28 | }, 29 | "include": [ 30 | "next-env.d.ts", 31 | "**/*.ts", 32 | "**/*.tsx", 33 | "**/*.cjs", 34 | "**/*.mjs" 35 | ], 36 | "exclude": [ 37 | "node_modules" 38 | ] 39 | } -------------------------------------------------------------------------------- /examples/.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 |
3 | # dependencies
4 | node_modules
5 | .pnp
6 | .pnp.js
7 |
8 | # testing
9 | coverage
10 |
11 | # next.js
12 | .next/
13 | out/
14 | build
15 |
16 | # misc
17 | .DS_Store
18 | *.pem
19 |
20 | # debug
21 | npm-debug.log*
22 | yarn-debug.log*
23 | yarn-error.log*
24 | .pnpm-debug.log*
25 |
26 | # env files
27 | .env.local
28 | .env.development.local
29 | .env.test.local
30 | .env.production.local
31 | **/.env
32 |
33 | # turbo
34 | .turbo
35 |
36 | # output
37 | **dist/
38 | out.csv
39 |
40 | # vscode
41 | .vscode
42 |
43 |
44 | data/cache
--------------------------------------------------------------------------------
/examples/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "examples",
3 |   "private": true,
4 |   "main": "dist/index.js",
5 |   "types": "dist/index.d.ts",
6 |   "scripts": {
7 |     "start": "FORCE_COLOR=1 pnpm build && node dist/index.js",
8 |     "build": "tsup ./src/index.ts --format cjs --dts"
9 |   },
10 |   "dependencies": {
11 |     "@tensorflow/tfjs": "^4.2.0",
12 |     "axios": "^1.2.4",
13 |     "chalk": "^4.1.2",
14 |     "console-table-printer": "^2.11.1",
15 |     "csv-parse": "^5.3.4",
16 |     "danfojs-node": "^1.1.2",
17 |     "dotenv": "^16.0.3",
18 |     "enquirer": "^2.3.6",
19 |     "gpt3-tokenizer": "^1.1.5",
20 |     "handlebars": "^4.7.7",
21 |     "openai": "^3.1.0",
22 |     "promptable": "workspace:*",
23 |     "ramda": "^0.28.0",
24 |     "ramda-async": "^1.1.2",
25 |     "seedrandom": "^3.0.5"
26 |   },
27 |   "devDependencies": {
28 |     "@types/node": "^18.11.18",
29 |     "@types/ramda": "^0.28.23",
30 |     "eslint": "7.32.0",
31 |     "tsup": "^6.5.0",
32 |     "typescript": "^4.9.4"
33 |   },
34 |   "keywords": [
35 |     "llm",
36 |     "embeddings",
37 |     "qa",
38 |     "question answering",
39 |     "semantic search",
40 |     "chatbot",
41 |     "langchain",
42 |     "ai",
43 |     "gptindex",
44 |     "gpt3",
45 |     "chain",
46 |     "prompt",
47 |     "prompt programming",
48 |     "promptable",
49 |     "nlp",
50 |     "javascript",
51 |     "react",
52 |     "chatgpt",
53 |     "model",
54 |     "machine learning",
55 |     "ml",
56 |     "typescript"
57 |   ]
58 | }
59 |
--------------------------------------------------------------------------------
/examples/src/[example-template].ts:
--------------------------------------------------------------------------------
1 | const run = async (args: string[]) => {};
2 |
3 | export default run;
4 |
--------------------------------------------------------------------------------
/examples/src/chain-memory.ts:
--------------------------------------------------------------------------------
1 | /**
2 | Chains are pre-built workflows for executing specific tasks.
3 |
4 | The MemoryLLMChain is a chain which combines a prompt, a model provider and memory.
5 | Memory is a way to store and retrieve data between chain runs.
6 |
7 | This example uses MemoryLLMChain to create a simple Chatbot based on a prompt.
8 | BufferedChatMemory is a memory which stores the user and bot messages in a buffer,
9 | up to a max number of interactions (defaulting to Infinity).
10 | MemoryLLMChain will automatically extract the memory from the BufferedChatMemory and
11 | pass it to the prompt.
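For illustration only (a hypothetical transcript), after two turns the buffered
memory injected into the prompt looks roughly like:

  User: hi
  Bot: Hello! How can I help you today?
  User: tell me a joke
  Bot: Why don't programmers like nature? It has too many bugs.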
12 |
13 | Note:
14 | Since this example uses CLI input, which is not supported by pnpm, run this example
15 | by cd-ing into the examples directory and running `npm start chain-memory`
16 | **/
17 | import dotenv from "dotenv";
18 | dotenv.config();
19 | import {
20 |   OpenAI,
21 |   MemoryLLMChain,
22 |   prompts,
23 |   BufferedChatMemory,
24 | } from "promptable";
25 | import chalk from "chalk";
26 | import enquirer from "enquirer";
27 |
28 | const { prompt: query } = enquirer;
29 |
30 | const apiKey = process.env.OPENAI_API_KEY || "missing";
31 |
32 | export default async function run() {
33 |   const openai = new OpenAI(apiKey);
34 |   const memory = new BufferedChatMemory();
35 |   const memoryChain = new MemoryLLMChain(prompts.chatbot(), openai, memory);
36 |
37 |   while (true) {
38 |     const { userInput } = (await query({
39 |       type: "input",
40 |       name: "userInput",
41 |       message: "User: ",
42 |     })) as {
43 |       userInput: string;
44 |     };
45 |
46 |     if (userInput) {
47 |       if (userInput === "exit") break;
48 |       memory.addUserMessage(userInput);
49 |       const botOutput = await memoryChain.run({ userInput });
50 |       memory.addBotMessage(botOutput);
51 |       console.log(chalk.yellow("Assistant:", botOutput));
52 |     }
53 |   }
54 | }
55 |
--------------------------------------------------------------------------------
/examples/src/chain-simple.ts:
--------------------------------------------------------------------------------
1 | /**
2 | Chains are pre-built workflows for executing specific tasks.
3 | The simplest chain is the LLMChain, a chain which combines a prompt and a model provider.
4 | This example uses LLMChain with the OpenAI Completions API to generate a poem about the moon.
5 |
6 | This example also uses tracing to log the steps of the chain.
7 | Chains often have many steps, and tracing can help you understand what is happening in your chain.
8 | **/
9 | import dotenv from "dotenv";
10 | dotenv.config();
11 | import { Prompt, OpenAI, LLMChain, setTraceConfig, Trace, graphTraces } from "promptable";
12 |
13 | const apiKey = process.env.OPENAI_API_KEY || "missing";
14 |
15 | export default async function run() {
16 |   const traces: Trace[] = [];
17 |
18 |   setTraceConfig({
19 |     send: (trace) => {
20 |       console.log("Received Trace", trace);
21 |       traces.push(trace);
22 |     },
23 |   });
24 |
25 |   const openai = new OpenAI(apiKey);
26 |
27 |   const writePoemPrompt = new Prompt("Write a poem about {{topic}}:", [
28 |     "topic",
29 |   ]);
30 |
31 |   const llmChain = new LLMChain(writePoemPrompt, openai);
32 |
33 |   const poem = await llmChain.run({ topic: "the moon" });
34 |
35 |   console.log(poem);
36 |
37 |   graphTraces(traces);
38 | }
39 |
--------------------------------------------------------------------------------
/examples/src/chunk-sentences.ts:
--------------------------------------------------------------------------------
1 | import chalk from "chalk";
2 | import { CharacterTextSplitter, SentenceTextSplitter } from "promptable";
3 |
4 | /**
5 |  * Simple example showing how to use the TextSplitter to split and chunk text.
6 |  * This example splits a paragraph of text into sentences, then chunks the sentences into chunks of 50 tokens.
7 |  *
8 |  * outputs the following:
9 |  *
10 |  * [
11 |  *   "Fatima Whitbread (born 1961) is a retired British javelin thrower.
She broke the women's javelin throw world record with a throw of 77.44 metres (254 ft 3⁄4 in) at the 1986 European Athletics Championships in Stuttgart, and also won the European title that year.", 12 | * "She took the gold medal at the 1987 World Championships in Athletics and is a two-time Olympic medallist, winning bronze at the 1984 games and silver at the 1988 games. She was voted BBC Sports Personality of the Year in 1987. During her career, she had a well-publicised rivalry with another British javelin athlete, Tessa Sanderson.", 13 | * "Her later career was affected by a persistent shoulder injury, and in 1992 she retired from competition. She has since appeared on several television programmes, including I'm a Celebrity ... Get Me Out of Here! in 2011. Whitbread was named the Sports Writers' Association Sportswoman of the Year in 1986 and 1987.", 14 | * "She was appointed a Member of the Order of the British Empire for services to athletics." 15 | * ] 16 | */ 17 | export default async function run(args: string[]) { 18 | const splitter = new SentenceTextSplitter(); 19 | const text = ` 20 | Fatima Whitbread (born 1961) is a retired British javelin thrower. 21 | She broke the women's javelin throw world record with a throw of 77.44 metres (254 ft 3⁄4 in) at the 1986 European Athletics Championships in Stuttgart, and also won the European title that year. 22 | She took the gold medal at the 1987 World Championships in Athletics and is a two-time Olympic medallist, winning bronze at the 1984 games and silver at the 1988 games. 23 | She was voted BBC Sports Personality of the Year in 1987. 24 | During her career, she had a well-publicised rivalry with another British javelin athlete, Tessa Sanderson. 25 | Her later career was affected by a persistent shoulder injury, and in 1992 she retired from competition. 26 | She has since appeared on several television programmes, including I'm a Celebrity ... Get Me Out of Here! in 2011. 27 | Whitbread was named the Sports Writers' Association Sportswoman of the Year in 1986 and 1987. 
28 | She was appointed a Member of the Order of the British Empire for services to athletics.`;
29 |
30 |   const chunks = splitter.splitText(text, {
31 |     chunk: true,
32 |     chunkSize: 50, // 50 tokens
33 |     overlap: 0,
34 |   });
35 |   console.log(chalk.white(`Chunks:`));
36 |   console.log(chalk.green(JSON.stringify(chunks, undefined, 4)));
37 | }
38 |
--------------------------------------------------------------------------------
/examples/src/count-tokens.ts:
--------------------------------------------------------------------------------
1 | import fs from "fs";
2 | import chalk from "chalk";
3 | import { FileLoader, OpenAI, prompts } from "promptable";
4 |
5 | const apiKey = process.env.OPENAI_API_KEY || "";
6 |
7 | /**
8 |  * Simple example of using the OpenAI API to count the tokens used in a prompt
9 |  */
10 | export default async function run(args: string[]) {
11 |   const openai = new OpenAI(apiKey);
12 |   const prompt = prompts.QA();
13 |
14 |   const docs = await new FileLoader("./data/startup-mistakes.txt").load();
15 |   const promptText = prompt.format({ document: docs[0].content, question: "" });
16 |   const tokensUsed = openai.countTokens(promptText);
17 |
18 |   console.log(chalk.white(`Token Count`), chalk.green(tokensUsed));
19 | }
20 |
--------------------------------------------------------------------------------
/examples/src/embeddings-create.ts:
--------------------------------------------------------------------------------
1 | /**
2 | Question Answering
3 |
4 | Method for augmenting GPT-3 with a large body of additional contextual information by using document embeddings and retrieval.
5 |
6 | This method answers queries in two steps:
7 |
8 | 1. retrieves the information relevant to the query,
9 | 2. writes an answer tailored to the question based on the retrieved information.
10 |
11 | The first step uses the Embeddings API, the second step uses the Completions API.
12 |
13 | The steps are:
14 |
15 | 1. Preprocess the contextual information by splitting it into chunks and create an embedding vector for each chunk.
16 | 2. On receiving a query, embed the query in the same vector space as the context chunks and find the context embeddings which are most similar to the query.
17 | 3. Prepend the most relevant context embeddings to the query prompt.
18 | 4. Submit the question along with the most relevant context to GPT, and receive an answer which makes use of the provided contextual information.
19 | **/
20 |
21 | import {
22 |   FileLoader,
23 |   Embeddings,
24 |   OpenAI,
25 |   SentenceTextSplitter,
26 | } from "promptable";
27 | import dotenv from "dotenv";
28 | dotenv.config();
29 | import chalk from "chalk";
30 |
31 | const apiKey = process.env.OPENAI_API_KEY || "";
32 | const openai = new OpenAI(apiKey);
33 |
34 | /**
35 |  * A simple example of creating embeddings.
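 *
 * Under the hood, retrieval of this kind typically ranks chunks by cosine
 * similarity between embedding vectors. A minimal sketch of that measure
 * (assuming plain number[] vectors; not necessarily this library's internals):
 *
 *   const dot = (a: number[], b: number[]) =>
 *     a.reduce((sum, x, i) => sum + x * (b[i] ?? 0), 0);
 *   const cosine = (a: number[], b: number[]) =>
 *     dot(a, b) / (Math.sqrt(dot(a, a)) * Math.sqrt(dot(b, b)));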
36 | * 37 | * @param args 38 | */ 39 | const run = async (args: string[]) => { 40 | console.log(chalk.blue.bold("\nRunning Example: Create embeddings")); 41 | 42 | const filepath = "./data/startup-mistakes.txt"; 43 | const loader = new FileLoader(filepath); 44 | const splitter = new SentenceTextSplitter(); 45 | 46 | const documents = await loader.load(); 47 | 48 | // split the documents into sentences 49 | const sentences = splitter.splitDocuments(documents, { 50 | chunk: false, 51 | }); 52 | 53 | console.log(chalk.green("Sentences:")); 54 | console.log(sentences); 55 | 56 | // create your index 57 | const embeddings = new Embeddings("startup-mistakes", openai, sentences); 58 | await embeddings.index(); 59 | 60 | // query your index 61 | const query = "What is the worst mistake a startup can make?"; 62 | 63 | const results = await embeddings.query(query, 1); 64 | 65 | // results 66 | console.log(chalk.green("Results:")); 67 | console.log(results); 68 | }; 69 | 70 | export default run; 71 | -------------------------------------------------------------------------------- /examples/src/embeddings-qa.ts: -------------------------------------------------------------------------------- 1 | /** 2 | Question Answering 3 | 4 | Method for augmenting GPT-3 with a large body of additional contextual information by using document embeddings and retrieval. 5 | 6 | This method answers queries in two steps: 7 | 8 | 1. retrieves the information relevant to the query, 9 | 2. writes an answer tailored to the question based on the retrieved information. 10 | 11 | The first step uses the Embeddings API, the second step uses the Completions API. 12 | 13 | The steps are: 14 | 15 | 1. Preprocess the contextual information by splitting it into chunks and create an embedding vector for each chunk. 16 | 2. On receiving a query, embed the query in the same vector space as the context chunks and find the context embeddings which are most similar to the query. 17 | 3. Prepend the most relevant context embeddings to the query prompt. 18 | 4. Submit the question along with the most relevant context to GPT, and receive an answer which makes use of the provided contextual information. 
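
For illustration (a hypothetical shape, not necessarily the exact prompt this
library builds), the request assembled in step 4 looks roughly like:

  Context:
  <most relevant chunk 1>
  ---
  <most relevant chunk 2>

  Question: <query>
  Answer: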
19 | **/ 20 | 21 | import dotenv from "dotenv"; 22 | dotenv.config(); 23 | import * as dfd from "danfojs-node"; 24 | import chalk from "chalk"; 25 | import { Embeddings, OpenAI, prompts } from "promptable"; 26 | 27 | const apiKey = process.env.OPENAI_API_KEY || ""; 28 | 29 | // Using Openai cookbook embeddings 30 | const loadData = async () => { 31 | const df = await dfd.readCSV( 32 | "https://cdn.openai.com/API/examples/data/olympics_sections_text.csv", 33 | {} 34 | ); 35 | const embeddings = await dfd.readCSV( 36 | "https://cdn.openai.com/API/examples/data/olympics_sections_document_embeddings.csv", 37 | {} 38 | ); 39 | 40 | // dont need the title and heading columns 41 | // embeddings.drop({ columns: ["title", "heading"] }); 42 | const maxDim = embeddings.shape[1]; 43 | const embeddingsOnly = embeddings.iloc({ 44 | columns: [`0:${maxDim - 2}`], 45 | }); 46 | 47 | const documents = df.values.map((row, i) => { 48 | const title = df.column("title").values[i]; 49 | const heading = df.column("heading").values[i]; 50 | const content = df.column("content").values[i] as string; 51 | 52 | return { 53 | content, 54 | meta: { 55 | title, 56 | heading, 57 | } as any, 58 | }; 59 | }); 60 | 61 | return { 62 | documents, 63 | embeddings: embeddingsOnly.values as number[][], 64 | }; 65 | }; 66 | 67 | const run = async (args: string[]) => { 68 | console.log( 69 | chalk.blueBright(`Running Embeddings QA: 2020 olympics wikipedia`) 70 | ); 71 | 72 | console.log(chalk.white("Loading data...")); 73 | const { documents, embeddings: embeddingsVector } = await loadData(); 74 | 75 | const openai = new OpenAI(apiKey); 76 | const prompt = prompts.QA(); 77 | 78 | // create your index 79 | const embeddings = new Embeddings("olympics", openai, documents); 80 | await embeddings.index(embeddingsVector); 81 | 82 | // query your index 83 | const query = "Who won the men's high jump?"; 84 | 85 | const results = await embeddings.query(query, 5); 86 | 87 | const top5Documents = results.map((r: any) => r.document.content); 88 | 89 | // results 90 | console.log(chalk.blue(`Running QA Bot...`)); 91 | console.log(chalk.white(`${prompt.text}`)); 92 | 93 | const promptText = prompt.format({ 94 | document: top5Documents.join("\n---\n"), 95 | question: query, 96 | }); 97 | 98 | const answer = await openai.generate(promptText); 99 | 100 | console.log(chalk.greenBright(`${answer}`)); 101 | }; 102 | 103 | export default run; 104 | -------------------------------------------------------------------------------- /examples/src/embeddings-search.ts: -------------------------------------------------------------------------------- 1 | import dotenv from "dotenv"; 2 | dotenv.config(); 3 | import { readCSV } from "danfojs-node"; 4 | import { Embeddings, OpenAI } from "promptable"; 5 | import chalk from "chalk"; 6 | 7 | /** 8 | * Semantic Search with Embeddings 9 | * 10 | The simplest way to use embeddings for search is as follows: 11 | Before the search (precompute): 12 | - Split your text corpus into chunks smaller than the token limit (8,191 tokens for text-embedding-ada-002) 13 | - Embed each chunk of text 14 | - Store those embeddings in your own database or in a vector search provider like Pinecone or Weaviate 15 | 16 | At the time of the search (live compute): 17 | 18 | - Embed the search query 19 | - Find the closest embeddings in your database 20 | - Return the top results 21 | * 22 | */ 23 | 24 | const apiKey = process.env.OPENAI_API_KEY || "missing"; 25 | 26 | export default async function run() { 27 | const openai = new 
OpenAI(apiKey);
28 |
29 |   // just loading embeddings from oai for now
30 |   const df = await readCSV(
31 |     "https://github.com/openai/openai-cookbook/raw/main/examples/data/fine_food_reviews_with_embeddings_1k.csv"
32 |   );
33 |
34 |   const documents = df.column("Text").values.map((x: any) => {
35 |     return {
36 |       content: x,
37 |       meta: {},
38 |     };
39 |   });
40 |   const embeddingsVector = df.column("embedding").values.map((x: any) => {
41 |     return JSON.parse(x);
42 |   });
43 |
44 |   const query = "delicious beans";
45 |
46 |   // todo: build index around this idea
47 |
48 |   const embeddings = new Embeddings("fine-food", openai, documents);
49 |   await embeddings.index(embeddingsVector);
50 |
51 |   console.log(chalk.blue("Query: " + query));
52 |   const result = await embeddings.query(query, 1);
53 |
54 |   console.log(chalk.green(JSON.stringify(result[0])));
55 | }
56 |
--------------------------------------------------------------------------------
/examples/src/embeddings.ts:
--------------------------------------------------------------------------------
1 | /**
2 | Question Answering
3 |
4 | Method for augmenting GPT-3 with a large body of additional contextual information by using document embeddings and retrieval.
5 |
6 | This method answers queries in two steps:
7 |
8 | 1. retrieves the information relevant to the query,
9 | 2. writes an answer tailored to the question based on the retrieved information.
10 |
11 | The first step uses the Embeddings API, the second step uses the Completions API.
12 |
13 | The steps are:
14 |
15 | 1. Preprocess the contextual information by splitting it into chunks and create an embedding vector for each chunk.
16 | 2. On receiving a query, embed the query in the same vector space as the context chunks and find the context embeddings which are most similar to the query.
17 | 3. Prepend the most relevant context embeddings to the query prompt.
18 | 4. Submit the question along with the most relevant context to GPT, and receive an answer which makes use of the provided contextual information.
19 | **/
20 |
21 | import { Embeddings, OpenAI } from "promptable";
22 | import dotenv from "dotenv";
23 | dotenv.config();
24 | import fs from "fs";
25 | import chalk from "chalk";
26 | import { cwd } from "process";
27 | import * as dfd from "danfojs-node";
28 |
29 | const apiKey = process.env.OPENAI_API_KEY || "";
30 | const openai = new OpenAI(apiKey);
31 |
32 | // Using OpenAI cookbook embeddings
33 | const loadData = async () => {
34 |   const df = await dfd.readCSV(
35 |     "https://cdn.openai.com/API/examples/data/olympics_sections_text.csv",
36 |     {}
37 |   );
38 |   const embeddings = await dfd.readCSV(
39 |     "https://cdn.openai.com/API/examples/data/olympics_sections_document_embeddings.csv",
40 |     {}
41 |   );
42 |
43 |   // don't need the title and heading columns
44 |   // embeddings.drop({ columns: ["title", "heading"] });
45 |   const maxDim = embeddings.shape[1];
46 |   const embeddingsOnly = embeddings.iloc({
47 |     columns: [`0:${maxDim - 2}`],
48 |   });
49 |
50 |   const documents = df.values.map((row, i) => {
51 |     const title = df.column("title").values[i];
52 |     const heading = df.column("heading").values[i];
53 |     const content = df.column("content").values[i] as string;
54 |
55 |     return {
56 |       content,
57 |       meta: {
58 |         title,
59 |         heading,
60 |       } as any,
61 |     };
62 |   });
63 |
64 |   return {
65 |     documents,
66 |     embeddings: embeddingsOnly.values as number[][],
67 |   };
68 | };
69 |
70 | /**
71 |  * Example of using OpenAI embeddings to find documents similar to a query.
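 * (Note: the document embeddings here are precomputed and loaded from the
 * OpenAI cookbook CSVs above, so presumably only the query needs to be
 * embedded at run time.)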
72 | * 73 | * Embeds the documents and the query, then computes the cosine similarity between the embeddings. 74 | */ 75 | const run = async (args: string[]) => { 76 | console.log(chalk.blue.bold("\nRunning Example: Embeddings Olympic Games")); 77 | 78 | console.log(chalk.white("Loading data...")); 79 | const { documents, embeddings: embeddingsVector } = await loadData(); 80 | 81 | // create your index 82 | const embeddings = new Embeddings("olympics", openai, documents); 83 | await embeddings.index(embeddingsVector); 84 | 85 | // query your index 86 | const query = "Who won the men's high jump?"; 87 | 88 | const results = await embeddings.query(query, 1); 89 | 90 | // results 91 | console.log(chalk.green("Results:")); 92 | console.log(results); 93 | }; 94 | 95 | export default run; 96 | -------------------------------------------------------------------------------- /examples/src/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import promptSimple from "./prompt-simple"; 4 | import promptSequential from "./prompt-sequential"; 5 | import promptParallel from "./prompt-parallel"; 6 | 7 | import embeddingsCreate from "./embeddings-create"; 8 | import embeddings from "./embeddings"; 9 | import embeddingsQA from "./embeddings-qa"; 10 | import embeddingsSearch from "./embeddings-search"; 11 | 12 | import qaSimple from "./qa-simple"; 13 | import qaChunks from "./qa-chunks"; 14 | import qaExtract from "./qa-extract"; 15 | import qaFromNotes from "./qa-from-notes"; 16 | 17 | import summarize from "./summarize"; 18 | import summarizeChunks from "./summarize-chunks"; 19 | import summarizeRecursive from "./summarize-recursive"; 20 | 21 | import parseJson from "./parse-json"; 22 | // import parseCSV from "./parse-csv"; 23 | 24 | import countTokens from "./count-tokens"; 25 | 26 | import splitNewlines from "./split-newlines"; 27 | import splitParagraphs from "./split-paragraphs"; 28 | import splitSentences from "./split-sentences"; 29 | import splitTokens from "./split-tokens"; 30 | import splitWords from "./split-words"; 31 | 32 | import chunkSentences from "./chunk-sentences"; 33 | 34 | import streamCompletions from "./stream-completions"; 35 | 36 | import tracing from "./tracing"; 37 | import tracingWeb from "./tracing-web"; 38 | import tracingWebPrompt from "./tracing-web-prompt" 39 | 40 | import chainSimple from "./chain-simple"; 41 | import chainMemory from "./chain-memory"; 42 | 43 | // Add examples here! 
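// For instance (hypothetical name): create src/my-example.ts that
// default-exports an async run(args: string[]) function, import it above as
//   import myExample from "./my-example";
// and register it in the map below as
//   "my-example": myExample,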
44 |
45 | const examples = {
46 |   "prompt-simple": promptSimple,
47 |   "prompt-sequential": promptSequential,
48 |   "prompt-parallel": promptParallel,
49 |
50 |   embeddings: embeddings,
51 |   "embeddings-create": embeddingsCreate,
52 |   "embeddings-qa": embeddingsQA,
53 |   "embeddings-search": embeddingsSearch,
54 |
55 |   "qa-simple": qaSimple,
56 |   "qa-chunks": qaChunks,
57 |   "qa-extract": qaExtract,
58 |   "qa-from-notes": qaFromNotes,
59 |
60 |   summarize: summarize,
61 |   "summarize-chunks": summarizeChunks,
62 |   "summarize-recursive": summarizeRecursive,
63 |
64 |   "parse-json": parseJson,
65 |   // disabled until we can figure out how to get the CSV parser to work
66 |   // "parse-csv": parseCSV,
67 |
68 |   "count-tokens": countTokens,
69 |
70 |   "split-newlines": splitNewlines,
71 |   "split-paragraphs": splitParagraphs,
72 |   "split-sentences": splitSentences,
73 |   "split-tokens": splitTokens,
74 |   "split-words": splitWords,
75 |
76 |   "chunk-sentences": chunkSentences,
77 |
78 |   "stream-completions": streamCompletions,
79 |
80 |   tracing,
81 |   "tracing-web": tracingWeb,
82 |   "tracing-web-prompt": tracingWebPrompt,
83 |
84 |   "chain-simple": chainSimple,
85 |   "chain-memory": chainMemory,
86 | };
87 |
88 | const isExample = (arg: string): arg is keyof typeof examples =>
89 |   arg in examples;
90 |
91 | async function run(args: string[]) {
92 |   const example = args[0];
93 |   const params = args.slice(1);
94 |
95 |   if (!isExample(example)) {
96 |     console.error(`Unrecognized example: ${example}`);
97 |     return;
98 |   }
99 |
100 |   await examples[example](params);
101 | }
102 |
103 | run(process.argv.slice(2));
104 |
105 | export { };
106 |
--------------------------------------------------------------------------------
/examples/src/model-providers.ts:
--------------------------------------------------------------------------------
1 | import dotenv from "dotenv";
2 | dotenv.config();
3 | import fs from "fs";
4 | import chalk from "chalk";
5 | import { OpenAI, prompts } from "promptable";
6 |
7 | const apiKey = process.env.OPENAI_API_KEY || "";
8 |
9 | /**
10 |  * Simple example of calling a model provider directly.
11 |  *
12 |  * Counts the tokens in a short prompt, then generates a completion for it.
13 |  *
14 |  * @param args
15 |  */
16 | const run = async (args: string[]) => {
17 |   const openai = new OpenAI(apiKey);
18 |
19 |   const text = "This is a test";
20 |   const tokensUsed = openai.countTokens(text);
21 |   const response = await openai.generate(text);
22 |
23 |   console.log("Tokens: ", tokensUsed);
24 |   console.log(response);
25 | };
26 |
27 | export default run;
28 |
--------------------------------------------------------------------------------
/examples/src/parse-csv.ts:
--------------------------------------------------------------------------------
1 | // import dotenv from "dotenv";
2 | // dotenv.config();
3 | // import { prompts, FileLoader, OpenAI, CharacterTextSplitter } from "promptable";
4 |
5 | // /**
6 | //  * Extract CSV from Data
7 | //  *
8 | //  * The ExtractCSVPrompt is a prompt that takes data and header definitions
9 | //  * and then returns the CSV representation of the data.
10 | //  *
11 | //  * It also has a parser that parses the CSV output into a CSV object
12 | //  * or throws an error if the output is invalid.
13 | //  *
14 | //  *
15 | //  * NOTE: Doesn't work reliably yet.
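// * (One plausible reason, though unverified: model output that contains
// * unescaped commas or quotes inside fields breaks naive CSV parsing, which
// * is why the JSON-based parse-json example tends to be more reliable.)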
16 | // */ 17 | export {}; 18 | // export default async function run() { 19 | // const apiKey = process.env.OPENAI_API_KEY || "missing"; 20 | 21 | // const openai = new OpenAI(apiKey); 22 | 23 | // const prompt = prompts.extractCSV(); 24 | 25 | // // Load the file 26 | // const filepath = "./data/bens-bites-email.txt"; 27 | // const loader = new FileLoader(filepath); 28 | // const splitter = new CharacterTextSplitter("\n"); 29 | 30 | // // load and split the documents 31 | // let docs = await loader.load(); 32 | // docs = splitter.splitDocuments(docs); 33 | 34 | // const headers = ["link", "title", "tldr", "category"]; 35 | 36 | // const rows = await Promise.all( 37 | // docs.map((docs) => { 38 | // const promptText = prompt.format({ 39 | // data: docs.content, 40 | // headers: headers.join(","), 41 | // }); 42 | 43 | // return openai.generate(promptText); 44 | // }) 45 | // ); 46 | 47 | // console.log(rows); 48 | 49 | // const csvs = prompt.parse(rows); 50 | 51 | // console.log(JSON.stringify(csvs, null, 2)); 52 | // } 53 | -------------------------------------------------------------------------------- /examples/src/parse-json.ts: -------------------------------------------------------------------------------- 1 | import dotenv from "dotenv"; 2 | dotenv.config(); 3 | import { OpenAI, prompts } from "promptable"; 4 | import chalk from "chalk"; 5 | 6 | /** 7 | * Extract json from data. 8 | * 9 | * The ExtractJSONPrompt is a prompt that asks for data and a type definition 10 | * and then returns the JSON representation of the data. 11 | * 12 | * It also has a parser that parses the JSON output into a JavaScript object 13 | * or throws an error if the JSON is invalid. 14 | */ 15 | export default async function run() { 16 | const apiKey = process.env.OPENAI_API_KEY || "missing"; 17 | 18 | const openai = new OpenAI(apiKey); 19 | const prompt = prompts.extractJSON(); 20 | 21 | // Calendly email 22 | const data = ` 23 | Hi Colin, 24 | 25 | A new event has been scheduled. 26 | 27 | Event Type: 28 | 30 Minute Meeting 29 | 30 | Invitee: 31 | Your mom 32 | 33 | Invitee Email: 34 | yourmom@gmail.com 35 | 36 | Event Date/Time: 37 | 11:00am - Tuesday, February 7, 2023 (Pacific Time - US & Canada) 38 | 39 | Location: 40 | 41 | This is a Google Meet web conference. 
Join now
42 | Invitee Time Zone:
43 | Pacific Time - US & Canada`;
44 |
45 |   const promptText = prompt.format({
46 |     data,
47 |     type: `{
48 |       meeting_type: string,
49 |       Date: Date,
50 |       Location: string,
51 |       invitee_name: string,
52 |       invitee_email: string,
53 |     }`,
54 |   });
55 |
56 |   const json = await openai.generate(promptText);
57 |
58 |   // Consider creating a zod -> typed object parser
59 |   const output = prompt.parse(json);
60 |
61 |   console.log(chalk.greenBright(`JSON`, JSON.stringify(output, undefined, 4)));
62 | }
63 |
--------------------------------------------------------------------------------
/examples/src/prompt-parallel.ts:
--------------------------------------------------------------------------------
1 | import dotenv from "dotenv";
2 | dotenv.config();
3 | import { OpenAI, Prompt, CharacterTextSplitter } from "promptable";
4 |
5 | /**
6 |  * Evaluate a poem on each paragraph
7 |  */
8 | export default async function run() {
9 |   const apiKey = process.env.OPENAI_API_KEY || "missing";
10 |
11 |   const openai = new OpenAI(apiKey);
12 |
13 |   const text = `Sports, what a way to have some fun\nCompeting in a game or two\nThe thrill of victory, or the agony of defeat\nIt all comes down to what you do\n\nThe camaraderie shared by teammates\nThe drive to work to be the best\nCheering crowds, a stadium alive\nWhat an amazing way to test\n\nYour will, your strength, your skill\nThe drive for greatness, it's all there\nThe thrill of running, jumping, and throwing\nAthletes everywhere at their share\n\nA reminder that we're all capable\nOf more than we know, and`;
14 |
15 |   const splitter = new CharacterTextSplitter("\n\n", {
16 |     overlap: 0,
17 |   });
18 |
19 |   const chunks = splitter.splitText(text);
20 |
21 |   const evalPoemChunksPrompt = new Prompt(
22 |     `Rate the following poem phrase on its creativity:\n\nPoem:{{poem}}\n\n\nRating: Give the phrase a rating (1-5) and an explanation:`,
23 |     ["poem"]
24 |   );
25 |
26 |   const evaluations = await Promise.all(
27 |     chunks.map((chunk) => {
28 |       const promptText = evalPoemChunksPrompt.format({
29 |         poem: chunk,
30 |       });
31 |
32 |       return openai.generate(promptText);
33 |     })
34 |   );
35 |
36 |   console.log(evaluations);
37 | }
38 |
--------------------------------------------------------------------------------
/examples/src/prompt-sequential.ts:
--------------------------------------------------------------------------------
1 | import dotenv from "dotenv";
2 | dotenv.config();
3 | import { Prompt, OpenAI } from "promptable";
4 |
5 | const apiKey = process.env.OPENAI_API_KEY || "missing";
6 |
7 | export default async function run() {
8 |   const openai = new OpenAI(apiKey);
9 |
10 |   const writePoemPromptText = new Prompt("Write a poem about {{topic}}:", [
11 |     "topic",
12 |   ]).format({
13 |     topic: "hi",
14 |   });
15 |
16 |   const poem = await openai.generate(writePoemPromptText);
17 |
18 |   const evalPoemPromptText = new Prompt(
19 |     "Rate the following poem on its creativity\n{{poem}}\nRating",
20 |     ["poem"]
21 |   ).format({
22 |     poem,
23 |   });
24 |
25 |   const evaluation = await openai.generate(evalPoemPromptText);
26 |
27 |   console.log(evaluation);
28 | }
29 |
--------------------------------------------------------------------------------
/examples/src/prompt-simple.ts:
--------------------------------------------------------------------------------
1 | import dotenv from "dotenv";
2 | dotenv.config();
3 | import { Prompt, OpenAI } from "promptable";
4 |
5 | const apiKey = process.env.OPENAI_API_KEY || "missing";
6 |
7 | export default async
function run() { 8 | const openai = new OpenAI(apiKey); 9 | 10 | const writePoemPrompt = new Prompt("Write a poem about {{topic}}:", [ 11 | "topic", 12 | ]); 13 | 14 | const promptText = writePoemPrompt.format({ 15 | topic: "hi", 16 | }); 17 | 18 | const poem = await openai.generate(promptText); 19 | 20 | console.log(poem); 21 | } 22 | -------------------------------------------------------------------------------- /examples/src/qa-chunks.ts: -------------------------------------------------------------------------------- 1 | import dotenv from "dotenv"; 2 | dotenv.config(); 3 | import fs from "fs"; 4 | import chalk from "chalk"; 5 | import { 6 | OpenAI, 7 | Prompt, 8 | prompts, 9 | FileLoader, 10 | CharacterTextSplitter, 11 | } from "promptable"; 12 | 13 | const apiKey = process.env.OPENAI_API_KEY || ""; 14 | 15 | /** 16 | * Run QA on a Document split into chunks. 17 | * 18 | * Each chunk is sent to the model as a separate request. 19 | * 20 | * @param args 21 | */ 22 | export default async function run(args: string[]) { 23 | const openai = new OpenAI(apiKey); 24 | const prompt = prompts.QA(); 25 | 26 | const filepath = "./data/startup-mistakes.txt"; 27 | const loader = new FileLoader(filepath); 28 | const splitter = new CharacterTextSplitter("\n"); 29 | 30 | // load and split the documents 31 | let docs = await loader.load(); 32 | docs = splitter.splitDocuments(docs, { 33 | chunk: true, 34 | }); 35 | 36 | // Run the Question-Answer prompt on each chunk 37 | const question = args[0] || "What is the most common mistake founders make?"; 38 | 39 | console.log(chalk.blue.bold("\nRunning QA Example: startup-mistakes.txt")); 40 | console.log(chalk.white(`Question: ${question}`)); 41 | 42 | for (const doc of docs) { 43 | const tokensUsed = openai.countTokens( 44 | prompt.format({ document: doc.content, question }) 45 | ); 46 | 47 | console.log( 48 | `\n${doc.content.substring(0, 100).trim()}...\n\n...${doc.content 49 | .slice(-100) 50 | .trim()}\n` + chalk.gray(`${"Tokens: " + tokensUsed}`) 51 | ); 52 | 53 | const promptText = prompt.format({ 54 | document: doc.content, 55 | question, 56 | }); 57 | 58 | const answer = await openai.generate(promptText); 59 | 60 | console.log(chalk.greenBright(`Answer: ${answer}`)); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /examples/src/qa-extract.ts: -------------------------------------------------------------------------------- 1 | import dotenv from "dotenv"; 2 | dotenv.config(); 3 | import fs from "fs"; 4 | import chalk from "chalk"; 5 | import { CharacterTextSplitter, FileLoader, OpenAI, prompts } from "promptable"; 6 | 7 | const apiKey = process.env.OPENAI_API_KEY || ""; 8 | 9 | /** 10 | * Run Prompt Document to Extract notes relevant to a Question. 11 | * 12 | * First, chunks the document into smaller chunks. 13 | * 14 | * Then, runs the prompt on each chunk to extract notes. 15 | * 16 | * Returns a list of notes. 
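 *
 * For illustration, the printed result has the shape:
 *
 *   { "question": "...", "notes": ["note from chunk 1", "note from chunk 2", ...] }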
17 |  *
18 |  * @param args
19 |  */
20 | const run = async (args: string[]) => {
21 |   const openai = new OpenAI(apiKey);
22 |   const prompt = prompts.extractText();
23 |
24 |   // Load the file
25 |   const filepath = "./data/startup-mistakes.txt";
26 |   const loader = new FileLoader(filepath);
27 |   const splitter = new CharacterTextSplitter("\n");
28 |
29 |   // load and split the documents
30 |   let docs = await loader.load();
31 |   docs = splitter.splitDocuments(docs, {
32 |     chunk: true,
33 |   });
34 |
35 |   // The Question to use for extraction
36 |   const question = args[0] || "What is the most common mistake founders make?";
37 |
38 |   console.log(chalk.blue.bold("\nRunning QA Extraction: startup-mistakes.txt"));
39 |   console.log(chalk.white(`Question: ${question}`));
40 |
41 |   // Run the Question-Answer prompt on each chunk asynchronously
42 |   const notes = await Promise.all(
43 |     docs.map((doc) => {
44 |       const promptText = prompt.format({
45 |         document: doc.content,
46 |         question,
47 |       });
48 |
49 |       return openai.generate(promptText);
50 |     })
51 |   );
52 |
53 |   console.log(
54 |     chalk.greenBright(
55 |       `Notes: ${JSON.stringify(
56 |         {
57 |           question,
58 |           notes,
59 |         },
60 |         null,
61 |         4
62 |       )}`
63 |     )
64 |   );
65 | };
66 |
67 | export default run;
68 |
--------------------------------------------------------------------------------
/examples/src/qa-from-notes.ts:
--------------------------------------------------------------------------------
1 | import dotenv from "dotenv";
2 | dotenv.config();
3 | import fs from "fs";
4 | import chalk from "chalk";
5 | import {
6 |   CharacterTextSplitter,
7 |   FileLoader,
8 |   ListParser,
9 |   OpenAI,
10 |   prompts,
11 | } from "promptable";
12 |
13 | const apiKey = process.env.OPENAI_API_KEY || "";
14 |
15 | /**
16 |  * Run QA on a Document by first extracting the most important notes,
17 |  *
18 |  * then running a Question Answer prompt on each note.
19 |  *
20 |  * @param args
21 |  */
22 | const run = async (args: string[]) => {
23 |   const openai = new OpenAI(apiKey);
24 |   const extractPrompt = prompts.extractText();
25 |   const qaPrompt = prompts.QA();
26 |   const summarizePrompt = prompts.summarize();
27 |
28 |   // Load the file
29 |   const filepath = "./data/startup-mistakes.txt";
30 |   const loader = new FileLoader(filepath);
31 |   const splitter = new CharacterTextSplitter("\n");
32 |
33 |   // load and split the documents
34 |   let docs = await loader.load();
35 |   docs = splitter.splitDocuments(docs, {
36 |     chunk: true,
37 |   });
38 |
39 |   // The Question to use for extraction
40 |   const question = args[0] || "What is the most common mistake founders make?";
41 |
42 |   console.log(chalk.blue.bold("\nRunning QA From Notes: startup-mistakes.txt"));
43 |   console.log(chalk.white(`Question: ${question}`));
44 |
45 |   // Run the Question-Answer prompt on each chunk asynchronously
46 |   const notes = await Promise.all(
47 |     docs.map((doc) => {
48 |       const promptText = extractPrompt.format({
49 |         document: doc.content,
50 |         question,
51 |       });
52 |
53 |       return openai.generate(promptText);
54 |     })
55 |   );
56 |
57 |   // note: selecting the most important notes
58 |   // and summarizing them is really important
59 |   // how do you avoid the token limit?
60 |   const noteSummaries = await Promise.all(
61 |     notes.map((note) => {
62 |       const promptText = summarizePrompt.format({
63 |         document: note,
64 |       });
65 |
66 |       return openai.generate(promptText);
67 |     })
68 |   );
69 |
70 |   // TODO: Is there a way to handle this for the user?
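  // One possible approach (a sketch, not part of this example): trim summaries
  // from the end until the assembled prompt fits under a token budget, e.g.
  //
  //   let doc = `NOTES:\n${splitter.mergeText(noteSummaries, "\n---\n")}`;
  //   while (openai.countTokens(qaPrompt.format({ question, document: doc })) > 3500) {
  //     noteSummaries.pop();
  //     doc = `NOTES:\n${splitter.mergeText(noteSummaries, "\n---\n")}`;
  //   }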
71 | const document = `NOTES:\n${splitter.mergeText(noteSummaries, "\n---\n")}`; 72 | 73 | const tokenCount = openai.countTokens( 74 | qaPrompt.format({ 75 | question, 76 | document, 77 | }) 78 | ); 79 | console.log("token count ", tokenCount); 80 | 81 | // note: it's difficult to ensure that your prompt doesn't exceed the token limit 82 | // note: having openai do the formatting is bad and we should change it 83 | // note: joining your notes together is the most basic selector, we should add one 84 | // & formatting the notes is very simple. but useful. 85 | // generally just making it easy to format prompts. 86 | 87 | const qaPromptText = qaPrompt.format({ 88 | question, 89 | document, 90 | }); 91 | 92 | const answer = await openai.generate(qaPromptText); 93 | 94 | console.log(chalk.greenBright(`Answer: ${answer}`)); 95 | }; 96 | 97 | export default run; 98 | -------------------------------------------------------------------------------- /examples/src/qa-simple.ts: -------------------------------------------------------------------------------- 1 | import dotenv from "dotenv"; 2 | dotenv.config(); 3 | import fs from "fs"; 4 | import chalk from "chalk"; 5 | import { OpenAI, prompts } from "promptable"; 6 | 7 | const apiKey = process.env.OPENAI_API_KEY || ""; 8 | 9 | /** 10 | * Run QA on a Document 11 | * 12 | * Adds the entire document to the prompt. 13 | * 14 | * @param args 15 | */ 16 | const run = async (args: string[]) => { 17 | const openai = new OpenAI(apiKey); 18 | const prompt = prompts.QA(); 19 | 20 | // Load the file 21 | const filepath = "./data/beyond-smart.txt"; 22 | let doc = fs.readFileSync(filepath, "utf8"); 23 | 24 | // Run the Question-Answer prompt on the document. 25 | const question = args[0] || "What does Paul Graham mean by Beyond Smart??"; 26 | 27 | console.log(chalk.blue.bold("\nRunning Simple QA: beyond-smart.txt")); 28 | console.log(chalk.white(`Question: ${question}`)); 29 | 30 | const promptText = prompt.format({ 31 | document: doc, 32 | question, 33 | }); 34 | 35 | const tokensUsed = openai.countTokens(promptText); 36 | 37 | console.log( 38 | `\n${doc.substring(0, 100).trim()}...\n\n...${doc.slice(-100).trim()}\n` + 39 | chalk.gray(`${"Tokens: " + tokensUsed}`) 40 | ); 41 | 42 | const answer = await openai.generate(promptText); 43 | 44 | console.log(chalk.greenBright(`Answer: ${answer}`)); 45 | }; 46 | 47 | export default run; 48 | -------------------------------------------------------------------------------- /examples/src/split-newlines.ts: -------------------------------------------------------------------------------- 1 | import chalk from "chalk"; 2 | import { CharacterTextSplitter } from "promptable"; 3 | 4 | /** 5 | * Simple example showing how to split text on new lines 6 | */ 7 | export default async function run(args: string[]) { 8 | const splitter = new CharacterTextSplitter("\n"); 9 | const text = ` 10 | Sports, what a way to have some fun 11 | Competing in a game or two 12 | The thrill of victory, or the agony of defeat 13 | It all comes down to what you do 14 | 15 | The camaraderie shared by teammates 16 | The drive to work to be the best 17 | Cheering crowds, a stadium alive 18 | What an amazing way to test 19 | 20 | Your will, your strength, your skill 21 | The drive for greatness, it's all there 22 | The thrill of running, jumping, and throwing 23 | Athletes everywhere at their share 24 | 25 | A reminder that we're all capable 26 | Of more than we know, and`; 27 | 28 | const splits = splitter.splitText(text); 29 | 
console.log(chalk.white(`lines:`)); 30 | console.log(JSON.stringify(splits, undefined, 4)); 31 | } 32 | -------------------------------------------------------------------------------- /examples/src/split-paragraphs.ts: -------------------------------------------------------------------------------- 1 | import chalk from "chalk"; 2 | import { CharacterTextSplitter } from "promptable"; 3 | 4 | /** 5 | * Simple example showing how to split text on paragraphs 6 | */ 7 | export default async function run(args: string[]) { 8 | const splitter = new CharacterTextSplitter("\n\n"); 9 | const text = 10 | "Sports, what a way to have some fun\nCompeting in a game or two\nThe thrill of victory, or the agony of defeat\nIt all comes down to what you do\n\nThe camaraderie shared by teammates\nThe drive to work to be the best\nCheering crowds, a stadium alive\nWhat an amazing way to test\n\nYour will, your strength, your skill\nThe drive for greatness, it's all there\nThe thrill of running, jumping, and throwing\nAthletes everywhere at their share\n\nA reminder that we're all capable\nOf more than we know."; 11 | 12 | const paragraphs = splitter.splitText(text); 13 | console.log(chalk.bold.black(`paragraphs:`)); 14 | console.log(chalk.green(JSON.stringify(paragraphs, undefined, 4))); 15 | } 16 | -------------------------------------------------------------------------------- /examples/src/split-sentences.ts: -------------------------------------------------------------------------------- 1 | import chalk from "chalk"; 2 | import { SentenceTextSplitter } from "promptable"; 3 | 4 | /** 5 | * Simple example showing how to split text on new sentences 6 | */ 7 | export default async function run(args: string[]) { 8 | const splitter = new SentenceTextSplitter(); 9 | const text = ` 10 | Fatima Whitbread (born 1961) is a retired British javelin thrower. 11 | She broke the women's javelin throw world record with a throw of 77.44 metres (254 ft 3⁄4 in) at the 1986 European Athletics Championships in Stuttgart, and also won the European title that year. 12 | She took the gold medal at the 1987 World Championships in Athletics and is a two-time Olympic medallist, winning bronze at the 1984 games and silver at the 1988 games. 13 | She was voted BBC Sports Personality of the Year in 1987. 14 | During her career, she had a well-publicised rivalry with another British javelin athlete, Tessa Sanderson. 15 | Her later career was affected by a persistent shoulder injury, and in 1992 she retired from competition. 16 | She has since appeared on several television programmes, including I'm a Celebrity ... Get Me Out of Here! in 2011. 17 | Whitbread was named the Sports Writers' Association Sportswoman of the Year in 1986 and 1987. 
18 | She was appointed a Member of the Order of the British Empire for services to athletics.`;
19 |
20 |   const chunks = splitter.splitText(text);
21 |   console.log(chalk.white(`Sentences:`));
22 |   console.log(chalk.greenBright(JSON.stringify(chunks, undefined, 4)));
23 | }
24 |
--------------------------------------------------------------------------------
/examples/src/split-tokens.ts:
--------------------------------------------------------------------------------
1 | import chalk from "chalk";
2 | import { FileLoader, OpenAI, TokenSplitter } from "promptable";
3 |
4 | const apiKey = process.env.OPENAI_API_KEY || "";
5 |
6 | /**
7 |  * Simple example showing how to split text on tokens
8 |  */
9 | export default async function run(args: string[]) {
10 |   const openai = new OpenAI(apiKey);
11 |
12 |   const filepath = "./data/startup-mistakes.txt";
13 |   const loader = new FileLoader(filepath);
14 |
15 |   // load doc
16 |   let docs = await loader.load();
17 |
18 |   const splitter = new TokenSplitter();
19 |
20 |   const chunks = splitter.splitText(docs[0].content);
21 |
22 |   // Count the tokens used in each chunk
23 |   chunks.forEach((chunk, i) => {
24 |     const tokensUsed = openai.countTokens(chunk);
25 |     console.log(
26 |       chalk.white(`Chunk`),
27 |       chalk.blue(i),
28 |       `Token Count`,
29 |       chalk.greenBright(tokensUsed)
30 |     );
31 |   });
32 | }
33 |
--------------------------------------------------------------------------------
/examples/src/split-words.ts:
--------------------------------------------------------------------------------
1 | import chalk from "chalk";
2 | import { CharacterTextSplitter } from "promptable";
3 |
4 | /**
5 |  * Simple example showing how to split text into words
6 |  */
7 | export default async function run(args: string[]) {
8 |   const splitter = new CharacterTextSplitter(" ");
9 |   const words = splitter.splitText("Hello world!
How are you?"); 10 | console.log(chalk.white(`Words:`)); 11 | console.log(chalk.green(JSON.stringify(words, undefined, 4))); 12 | } 13 | -------------------------------------------------------------------------------- /examples/src/stream-completions-fetch.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIApi, Configuration } from "openai"; 2 | 3 | const apiKey = process.env.OPENAI_API_KEY || "missing"; 4 | 5 | const run = async (args: string[]) => { 6 | const config = new Configuration({ 7 | apiKey, 8 | }); 9 | 10 | const openai = new OpenAIApi(config); 11 | 12 | const res: any = await openai.createCompletion( 13 | { 14 | prompt: "Write a poem about dogs", 15 | model: "text-davinci-003", 16 | max_tokens: 128, 17 | stream: true, 18 | }, 19 | { responseType: "stream" } 20 | ); 21 | 22 | try { 23 | res.data.on("data", (data: any) => { 24 | const lines = data 25 | .toString() 26 | .split("\n") 27 | .filter((line: any) => line.trim() !== ""); 28 | 29 | for (const line of lines) { 30 | const message = line.replace(/^data: /, ""); 31 | if (message === "[DONE]") { 32 | return; // Stream finished 33 | } 34 | try { 35 | const parsed = JSON.parse(message); 36 | console.log(parsed.choices[0].text); 37 | } catch (error) { 38 | console.error("Could not JSON parse stream message", message, error); 39 | } 40 | } 41 | }); 42 | } catch (error) { 43 | console.error("An error occurred during OpenAI request: ", error); 44 | } 45 | }; 46 | 47 | export default run; 48 | -------------------------------------------------------------------------------- /examples/src/stream-completions.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIApi, Configuration } from "openai"; 2 | import { utils, OpenAI, Prompt } from "promptable"; 3 | 4 | // const apiKey = process.env.OPENAI_API_KEY || "missing"; 5 | 6 | // const run = async (args: string[]) => { 7 | // const openai = new OpenAI(apiKey); 8 | 9 | // const prompt = new Prompt("Write a poem about {{topic}}:", ["topic"]); 10 | 11 | // const promptText = prompt.format({ 12 | // topic: "dogs", 13 | // }); 14 | 15 | // await openai.stream( 16 | // promptText, 17 | // (chunk) => { 18 | // console.log(chunk); 19 | // }, 20 | // () => { 21 | // console.log("done"); 22 | // } 23 | // ); 24 | // }; 25 | 26 | const apiKey = process.env.OPENAI_API_KEY || "missing"; 27 | 28 | const run = async (args: string[]) => { 29 | const config = new Configuration({ 30 | apiKey, 31 | }); 32 | 33 | const openai = new OpenAIApi(config); 34 | 35 | const res: any = await openai.createCompletion( 36 | { 37 | prompt: "Write a poem about dogs", 38 | model: "text-davinci-003", 39 | max_tokens: 128, 40 | stream: true, 41 | }, 42 | { responseType: "stream" } 43 | ); 44 | 45 | try { 46 | res.data.on("data", (data: any) => { 47 | const lines = data 48 | .toString() 49 | .split("\n") 50 | .filter((line: any) => line.trim() !== ""); 51 | 52 | for (const line of lines) { 53 | const message = line.replace(/^data: /, ""); 54 | if (message === "[DONE]") { 55 | return; // Stream finished 56 | } 57 | try { 58 | const parsed = JSON.parse(message); 59 | console.log(parsed.choices[0].text); 60 | } catch (error) { 61 | console.error("Could not JSON parse stream message", message, error); 62 | } 63 | } 64 | }); 65 | } catch (error) { 66 | console.error("An error occurred during OpenAI request: ", error); 67 | } 68 | }; 69 | 70 | export default run; 71 | 
--------------------------------------------------------------------------------
/examples/src/summarize-chunks.ts:
--------------------------------------------------------------------------------
1 | import dotenv from "dotenv";
2 | dotenv.config();
3 | import fs from "fs";
4 | import chalk from "chalk";
5 | import { OpenAI, prompts } from "promptable";
6 |
7 | const apiKey = process.env.OPENAI_API_KEY || "";
8 |
9 | /**
10 |  * Summarize a Document split into chunks.
11 |  *
12 |  * Each chunk is sent to the model as a separate request.
13 |  *
14 |  * @param args
15 |  */
16 | const run = async (args: string[]) => {
17 |   const openai = new OpenAI(apiKey);
18 |   const prompt = prompts.summarize();
19 |
20 |   // Load the file
21 |   const filepath = "./data/startup-mistakes.txt";
22 |   let doc = fs.readFileSync(filepath, "utf8");
23 |
24 |   // Split the doc by the separator
25 |   const separator = "\n\n";
26 |   const texts = doc.split(separator);
27 |
28 |   const chunkSize = 1000;
29 |   const chunkOverlap = 100;
30 |
31 |   // Create chunks to send to the model
32 |   const chunks = texts.reduce((chunks: string[], text) => {
33 |     let chunk = chunks.pop() || "";
34 |     const chunkLength = openai.countTokens(chunk);
35 |     if (chunkLength >= chunkSize + chunkOverlap) {
36 |       chunks.push(chunk);
37 |       chunk = "";
38 |     }
39 |     chunk = chunk === "" ? text : chunk + separator + text;
40 |     chunks.push(chunk);
41 |     return chunks;
42 |   }, []);
43 |
44 |   console.log(
45 |     chalk.blue.bold("\nRunning Summarize Chunks: startup-mistakes.txt")
46 |   );
47 |
48 |   // summarize each chunk
49 |   const summaries = await Promise.all(
50 |     chunks.map((chunk) => {
51 |       const promptText = prompt.format({
52 |         document: chunk,
53 |       });
54 |
55 |       return openai.generate(promptText);
56 |     })
57 |   );
58 |
59 |   // output
60 |   summaries.forEach((sum, i) => {
61 |     console.log(
62 |       chalk.blue(`
63 | \n
64 | Chunk: ${chunks[i].slice(0, 50)}...
65 | `),
66 |       chalk.greenBright(`
67 | Summary: ${sum.slice(0, 200)}...`)
68 |     );
69 |   });
70 | };
71 |
72 | export default run;
73 |
--------------------------------------------------------------------------------
/examples/src/summarize-recursive.ts:
--------------------------------------------------------------------------------
1 | import dotenv from "dotenv";
2 | dotenv.config();
3 | import fs from "fs";
4 | import chalk from "chalk";
5 | import { OpenAI, prompts } from "promptable";
6 |
7 | const apiKey = process.env.OPENAI_API_KEY || "";
8 |
9 | /**
10 |  * Summarize a Document split into chunks, then summarize the combined summaries.
11 |  *
12 |  * Each chunk is sent to the model as a separate request.
13 |  *
14 |  * @param args
15 |  */
16 | const run = async (args: string[]) => {
17 |   const openai = new OpenAI(apiKey);
18 |
19 |   const prompt = prompts.summarize();
20 |
21 |   // Load the file
22 |   const filepath = "./data/startup-mistakes.txt";
23 |   let doc = fs.readFileSync(filepath, "utf8");
24 |
25 |   // Split the doc by the separator
26 |   const separator = "\n\n";
27 |   const texts = doc.split(separator);
28 |
29 |   const chunkSize = 1000;
30 |   const chunkOverlap = 100;
31 |
32 |   // Create chunks to send to the model
33 |   const chunks = texts.reduce((chunks: string[], text) => {
34 |     let chunk = chunks.pop() || "";
35 |     const chunkLength = openai.countTokens(chunk);
36 |     if (chunkLength >= chunkSize + chunkOverlap) {
37 |       chunks.push(chunk);
38 |       chunk = "";
39 |     }
40 |     chunk = chunk === "" ?
text : chunk + separator + text; 41 | chunks.push(chunk); 42 | return chunks; 43 | }, []); 44 | 45 | console.log( 46 | chalk.blue.bold("\nRunning Summarize Chunks: startup-mistakes.txt") 47 | ); 48 | 49 | // summarize each chunk 50 | const summaries = await Promise.all( 51 | chunks.map((chunk) => { 52 | const promptText = prompt.format({ 53 | document: chunk, 54 | }); 55 | return openai.generate(promptText, { 56 | max_tokens: 1000, 57 | }); 58 | }) 59 | ); 60 | 61 | // summarize all summaries 62 | const summariesStr = summaries.reduce( 63 | (acc, sum, i) => acc + separator + `${sum}`, 64 | `` 65 | ); 66 | 67 | const promptText = prompt.format({ 68 | document: summariesStr, 69 | }); 70 | 71 | const finalSummary = await openai.generate(promptText); 72 | 73 | console.log(prompt.format({ document: summariesStr })); 74 | console.log(chalk.greenBright(finalSummary)); 75 | }; 76 | 77 | export default run; 78 | -------------------------------------------------------------------------------- /examples/src/summarize.ts: -------------------------------------------------------------------------------- 1 | import dotenv from "dotenv"; 2 | dotenv.config(); 3 | import fs from "fs"; 4 | import chalk from "chalk"; 5 | import { OpenAI, prompts } from "promptable"; 6 | 7 | const apiKey = process.env.OPENAI_API_KEY || ""; 8 | 9 | /** 10 | * Summarize a document naively. 11 | * 12 | * @param args 13 | */ 14 | const run = async (args: string[]) => { 15 | const openai = new OpenAI(apiKey); 16 | const prompt = prompts.summarize(); 17 | 18 | // Load the file 19 | const filepath = "./data/beyond-smart.txt"; 20 | let doc = fs.readFileSync(filepath, "utf8"); 21 | 22 | console.log(chalk.blue.bold("\nRunning Summarize Example: beyond-smart.txt")); 23 | 24 | const tokensUsed = openai.countTokens(prompt.format({ document: doc })); 25 | 26 | console.log( 27 | `\n${doc.substring(0, 100).trim()}...\n\n...${doc.slice(-100).trim()}\n` + 28 | chalk.gray(`${"Tokens: " + tokensUsed}`) 29 | ); 30 | 31 | const promptText = prompt.format({ 32 | document: doc, 33 | }); 34 | const answer = await openai.generate(promptText); 35 | 36 | console.log(chalk.greenBright(`Summary: ${answer}`)); 37 | }; 38 | 39 | export default run; 40 | -------------------------------------------------------------------------------- /examples/src/tracing-web-prompt.ts: -------------------------------------------------------------------------------- 1 | import { trace, setTraceConfig, sendTraceToServer, Prompt, OpenAI } from "promptable"; 2 | import { pipeAsync } from "ramda-async"; 3 | import dotenv from "dotenv"; 4 | dotenv.config(); 5 | 6 | const apiKey = process.env.OPENAI_API_KEY || "missing"; 7 | 8 | 9 | /** 10 | * An example showing how to send traces to the promptable web server. 
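 * (This assumes a local tracing server is running to receive the POSTs made by
 * sendTraceToServer; without it, the traces have nowhere to be delivered.)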
11 | * 12 | * @param args 13 | */ 14 | const run = async (args: string[]) => { 15 | const openai = new OpenAI(apiKey); 16 | 17 | 18 | setTraceConfig({ 19 | send: (trace) => { 20 | console.log("Received Trace", trace); 21 | sendTraceToServer(trace); 22 | }, 23 | }); 24 | 25 | const t = trace("tracing-web-prompt", async () => { 26 | const step1 = trace( 27 | "Create Poem about Animal", 28 | async (animal: string) => { 29 | const writePoemPromptText = new Prompt("Write a poem about {{topic}}:", [ 30 | "topic", 31 | ]).format({ 32 | topic: animal, 33 | }); 34 | const poem = await openai.generate(writePoemPromptText); 35 | 36 | return { 37 | poem, 38 | }; 39 | }, 40 | ["animal", "poem"] 41 | ); 42 | const step2 = trace( 43 | "Remix Poem into Pirate Style", 44 | async (props: { poem: string }) => { 45 | const remixPoemPromptText = new Prompt( 46 | "Rewrite the poem in the style of a Pirate's writing: \n{{poem}}", 47 | ["poem"] 48 | ).format({ 49 | poem: props.poem, 50 | }); 51 | const remixedPoem = await openai.generate(remixPoemPromptText) 52 | return { 53 | remixedPoem 54 | }; 55 | }, 56 | ["remix", "pirate"] 57 | ); 58 | const step3 = trace( 59 | "Review the Poem", 60 | async (props: { remixedPoem: string }) => { 61 | const reviewPoemPromptText = new Prompt( 62 | "You are given the following poem\n{{poem}}\n Describe the author and subject of the poem", 63 | ["poem"] 64 | ).format({ 65 | poem: props.remixedPoem, 66 | }); 67 | const review = await openai.generate(reviewPoemPromptText) 68 | return { 69 | review 70 | }; 71 | }, 72 | ["review"] 73 | ); 74 | 75 | // pipe a few functions together 76 | const pipeline = pipeAsync(step1, step2, step3); 77 | 78 | const result = await pipeline("dog"); 79 | console.log(result.review); 80 | }); 81 | 82 | await Promise.all([ 83 | t(), 84 | ]); 85 | 86 | }; 87 | 88 | export default run; 89 | -------------------------------------------------------------------------------- /examples/src/tracing-web.ts: -------------------------------------------------------------------------------- 1 | import { trace, setTraceConfig, sendTraceToServer } from "promptable"; 2 | import { pipeAsync } from "ramda-async"; 3 | 4 | /** 5 | * An example showing how to send traces to the promptable web server. 
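 * (pipeAsync composes the traced steps left to right, awaiting each result, so
 * the pipeline call below behaves roughly like
 * await step3(await step2(await step1("dog"))).)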
 *
 * @param args
 */
const run = async (args: string[]) => {
  setTraceConfig({
    send: (trace) => {
      console.log("Received Trace", trace);
      sendTraceToServer(trace);
    },
  });

  const t = trace("tracing-web", async () => {
    const step1 = trace(
      "step1",
      async (dog: string) => {
        await new Promise((resolve) => {
          resolve(1);
        });
        return {
          dog,
        };
      },
      ["example"]
    );
    const step2 = trace(
      "step2",
      (props: { dog: string }) => {
        return {
          dog: {
            dog: props.dog,
          },
        };
      },
      ["example"]
    );
    const step3 = trace(
      "step3",
      (props: { dog: { dog: string } }) => {
        console.log("Finished!", props);
      },
      ["example"]
    );

    // pipe a few functions together
    const pipeline = pipeAsync(step1, step2, step3);

    await pipeline("dog");
  });

  await Promise.all([t()]);
};

export default run;

-------------------------------------------------------------------------------- /examples/src/tracing.ts: --------------------------------------------------------------------------------

import { trace, setTraceConfig, Trace, graphTraces } from "promptable";
import { pipeAsync } from "ramda-async";

const run = async (args: string[]) => {
  const traces: Trace[] = [];

  setTraceConfig({
    send: (trace) => {
      console.log("Received Trace", trace);
      // collect the traces so they can be graphed below
      traces.push(trace);
    },
  });

  const scopedFns = async () => {
    const step1 = trace(
      "step1",
      async (dog: string) => {
        return {
          dog,
        };
      },
      ["example"]
    );
    const step2 = trace(
      "step2",
      (props: { dog: string }) => {
        return {
          dog: {
            dog: props.dog,
          },
        };
      },
      ["example"]
    );
    const step3 = trace(
      "step3",
      (props: { dog: { dog: string } }) => {
        console.log("Finished!", props);
        return props;
      },
      ["example"]
    );

    // pipe a few functions together
    const pipeline = pipeAsync(
      step1,
      step2,
      // trace("substep", pipeAsync(step2, step3)),
      step3
    );

    await pipeline("dog");
  };

  await Promise.all([
    trace("first", scopedFns)(),
    trace("second", scopedFns)(),
  ]);

  // create a graph of the traces
  graphTraces(traces);
};
export default run;

-------------------------------------------------------------------------------- /examples/tsconfig.json: --------------------------------------------------------------------------------

{
  "compilerOptions": {
    "target": "es2017",
    "lib": [
      "dom",
      "dom.iterable",
      "esnext"
    ],
    "module": "esnext",
    "esModuleInterop": true,
    "forceConsistentCasingInFileNames": true,
    "isolatedModules": true,
    "moduleResolution": "node",
    "preserveWatchOutput": true,
    "skipLibCheck": true,
    "noEmit": true,
    "strict": true,
    "baseUrl": "."
  },
  "exclude": [
    "node_modules"
  ]
}

-------------------------------------------------------------------------------- /package.json: --------------------------------------------------------------------------------

{
  "name": "root",
  "private": true,
  "workspaces": [
    "apps/*",
    "starter-kits/*",
    "packages/*",
    "examples"
  ],
  "scripts": {
    "build": "FORCE_COLOR=1 NODE_ENV=development turbo run build",
    "dev": "FORCE_COLOR=1 turbo run dev --parallel",
    "lint": "FORCE_COLOR=1 turbo run lint",
    "test": "FORCE_COLOR=1 turbo run test",
    "format": "prettier --write \"**/*.{ts,tsx,md}\"",
    "publish-packages": "FORCE_COLOR=1 turbo run build lint test --filter promptable && changeset version && changeset publish",
    "example": "FORCE_COLOR=1 pnpm --filter examples run start"
  },
  "engines": {
    "node": ">=14.0.0"
  },
  "dependencies": {
    "@changesets/cli": "^2.26.0",
    "@manypkg/cli": "^0.20.0",
    "eslint-config-custom": "0.0.0",
    "prettier": "latest",
    "turbo": "latest"
  },
  "packageManager": "pnpm@7.18.2",
  "keywords": [
    "llm",
    "langchain",
    "ai",
    "gptindex",
    "gpt3",
    "chain",
    "prompt",
    "prompt programming",
    "promptable",
    "nlp",
    "javascript",
    "react",
    "chatgpt",
    "model",
    "machine learning",
    "ml",
    "typescript"
  ]
}

-------------------------------------------------------------------------------- /packages/eslint-config-custom/index.js: --------------------------------------------------------------------------------

module.exports = {
  extends: ["next", "turbo", "prettier"],
  rules: {
    "@next/next/no-html-link-for-pages": "off",
    "react/jsx-key": "off",
  },
};

-------------------------------------------------------------------------------- /packages/eslint-config-custom/package.json: --------------------------------------------------------------------------------

{
  "name": "eslint-config-custom",
  "private": true,
  "version": "0.0.0",
  "main": "index.js",
  "license": "MIT",
  "dependencies": {
    "eslint": "7.32.0",
    "eslint-config-next": "13.0.0",
    "eslint-config-prettier": "^8.3.0",
    "eslint-config-turbo": "latest",
    "eslint-plugin-react": "7.31.8"
  },
  "devDependencies": {
    "typescript": "^4.7.4"
  },
  "publishConfig": {
    "access": "public"
  }
}

-------------------------------------------------------------------------------- /packages/promptable-query/README.md: --------------------------------------------------------------------------------

# GPT React Query

GPT-3 mocking React Query.

# How to run

Make sure you're in the `gpt-prisma-seed` directory.
Copy `.env.example` to `.env` and add your OpenAI API key.

All of the relevant code is in `prisma/seed.ts`.

Run the following commands:

- pnpm i
- npx prisma db push
- npx prisma db seed
- npx prisma studio

-------------------------------------------------------------------------------- /packages/promptable-query/babel.config.js: --------------------------------------------------------------------------------

module.exports = {
  presets: [
    ["@babel/preset-env", { targets: { node: "current" } }],
    "@babel/preset-typescript",
  ],
};

-------------------------------------------------------------------------------- /packages/promptable-query/jest.config.js: --------------------------------------------------------------------------------

const { pathsToModuleNameMapper } = require("ts-jest");
// In the following statement, replace `./tsconfig` with the path to your `tsconfig` file
// which contains the path mapping (ie the `compilerOptions.paths` option):
const { compilerOptions } = require("./tsconfig");

/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
  preset: "ts-jest",
  testEnvironment: "node",
  verbose: true,
  silent: false,
  roots: ["<rootDir>"],
  modulePaths: [compilerOptions.baseUrl], // <-- This will be set to 'baseUrl' value
  moduleNameMapper: pathsToModuleNameMapper(compilerOptions.paths, {
    prefix: "/",
  }),
};

-------------------------------------------------------------------------------- /packages/promptable-query/package.json: --------------------------------------------------------------------------------

{
  "name": "gpt-react-query",
  "version": "1.0.0",
  "description": "",
  "main": "index.js",
  "private": true,
  "scripts": {
    "start": "node dist/index.mjs",
    "dev": "rm -rf dist && tsup ./src/index.ts --format=esm --watch --dts --external react",
    "lint": "TIMING=1 eslint \"**/*.ts*\"",
    "lint:ci": "pnpm run lint",
    "build": "rm -rf dist && tsup ./src/index.ts --format=esm --dts --external react",
    "build:ci": "pnpm run build"
  },
  "keywords": [],
  "author": "Colin Fortuner",
  "license": "ISC",
  "dependencies": {
    "promptable": "workspace:*",
    "react": "^18.2.0",
    "react-query": "^3.39.3",
    "typescript": "latest",
    "zod": "^3.20.2",
    "zod-to-ts": "^1.1.2"
  },
  "devDependencies": {
    "tsup": "^6.5.0"
  }
}

-------------------------------------------------------------------------------- /packages/promptable-query/tsconfig.json: --------------------------------------------------------------------------------

{
  "compilerOptions": {
    "target": "es2017",
    "lib": [
      "dom",
      "dom.iterable",
      "esnext"
    ],
    "module": "esnext",
    "esModuleInterop": true,
    "forceConsistentCasingInFileNames": true,
    "isolatedModules": true,
    "moduleResolution": "node",
    "preserveWatchOutput": true,
    "skipLibCheck": true,
    "noEmit": true,
    "strict": true,
    "baseUrl": ".",
    "paths": {}
  },
  "exclude": [
    "node_modules"
  ]
}

-------------------------------------------------------------------------------- /packages/promptable/README.md: --------------------------------------------------------------------------------

# Promptable

Promptable is a TypeScript library for building fullstack AI applications.

Promptable consists of a collection of utilities and interfaces that help you build applications with LLMs. It is designed to be flexible and extensible so that you can use it with any LLM or Embeddings provider.

The goal of this library is to provide a set of general tools for TypeScript and JavaScript developers to help them build fullstack AI-first applications quickly and easily.

> Right now the library is in early development and is very much experimental. Don't use this in production yet! The API is subject to change as we get feedback.
> 💻

[Github Repo](https://github.com/cfortuner/promptable).
[Discord](https://discord.gg/SYmACWTf6V).
[Twitter](https://twitter.com/promptableai).

## Use Cases:

- 💬 Chatbots & Conversational AI
- ❓ Question Answering Bots
- ✍️ Writing Apps
- 🧑‍✈️ Copilot apps built with Chrome Extensions, VSCode Extensions, and more!
- 🔍 AI Semantic Search apps
- 🛠️ AI-first Automations, Workflows and Tools
- 🤖 Autonomous Agents & Personal Assistants

### Features

- [Prompts](./modules/prompts.md) for templating and formatting
- [Model Providers](./modules/model-providers.md) for Text Generation and Embedding Generation
- [Embeddings](./modules/embeddings.md) for creating Embeddings, Indexing and Search
- [Tracing](./modules/tracing.md) for debugging your applications!
- [Utilities](./modules/utilities.md) for working with text and data.

## Install Library

`npm i promptable`

## Usage

See our docs for more info on how to use the library:
[Documentation](https://docs-promptable.vercel.app/)

### Run the Examples

To run an example, clone the repo and run the following commands:

```
pnpm i
pnpm dev
pnpm run example
```

### Web UI

To assist in debugging, we also provide a Tracing UI that helps you visualize the steps taken by the LLM to generate the output.

See our [Docs](https://docs-promptable.vercel.app/docs/modules/tracing#tracing-ui).

## Motivation

Large Language Models are emerging as a powerful tool for a variety of tasks. With OpenAI models like GPT-3 only an API call away, it has become possible to build applications that use AI as a core software component for business logic, data processing, content generation, and more. Traditionally, AI tooling has only been built in Python to power backend systems, but with the success of ChatGPT, we have learned that the UI/UX of an app is just as important as the backend.

This project aims to provide a set of general tools for TypeScript and JavaScript developers to help them build fullstack AI-first applications.

## Community

If you have any questions about anything related to Promptable, or if you want to discuss with us and the community, you are welcome to join our **[discord](https://discord.gg/SYmACWTf6V)**.
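
## Quick Example

Here is a minimal sketch (adapted from the examples in this repo) showing the core flow: format a `Prompt`, then generate a completion with the `OpenAI` provider. It assumes `OPENAI_API_KEY` is set in your environment.

```ts
import { OpenAI, Prompt } from "promptable";

const run = async () => {
  const openai = new OpenAI(process.env.OPENAI_API_KEY || "");

  // Prompts are templates with {{variable}} placeholders
  const prompt = new Prompt("Write a poem about {{topic}}:", ["topic"]);
  const promptText = prompt.format({ topic: "dogs" });

  // Generate a completion for the formatted prompt text
  const poem = await openai.generate(promptText);
  console.log(poem);
};

run();
```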

-------------------------------------------------------------------------------- /packages/promptable/babel.config.js: --------------------------------------------------------------------------------

module.exports = {
  presets: [
    ["@babel/preset-env", { targets: { node: "current" } }],
    "@babel/preset-typescript",
  ],
};

-------------------------------------------------------------------------------- /packages/promptable/jest.config.js: --------------------------------------------------------------------------------

const { pathsToModuleNameMapper } = require("ts-jest");
// In the following statement, replace `./tsconfig` with the path to your `tsconfig` file
// which contains the path mapping (ie the `compilerOptions.paths` option):
// const { compilerOptions } = require("./tsconfig");

/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
  preset: "ts-jest",
  testEnvironment: "node",
  verbose: true,
  silent: false,
  // roots: "",
  // modulePaths: [compilerOptions.baseUrl], // <-- This will be set to 'baseUrl' value
  // moduleNameMapper: pathsToModuleNameMapper(compilerOptions.paths, {
  //   prefix: "/",
  // }),
};

-------------------------------------------------------------------------------- /packages/promptable/package.json: --------------------------------------------------------------------------------

{
  "name": "promptable",
  "version": "0.0.10",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "scripts": {
    "test": "jest",
    "build": "tsup src/index.ts --format cjs --dts",
    "dev": "pnpm run build --watch"
  },
  "devDependencies": {
    "@babel/core": "^7.20.12",
    "@babel/preset-env": "^7.20.2",
    "@babel/preset-typescript": "^7.18.6",
    "@jest/globals": "^29.4.1",
    "@types/jest": "^29.4.0",
    "@types/uuid": "^9.0.0",
    "babel-jest": "^29.4.1",
    "jest": "^29.4.1",
    "ts-jest": "^29.0.5",
    "tsup": "^6.5.0"
  },
  "dependencies": {
    "@stdlib/nlp-sentencize": "^0.0.2",
    "axios": "^1.2.4",
    "chalk": "^4.1.2",
    "csv-parse": "^5.3.4",
    "gpt3-tokenizer": "^1.1.4",
    "openai": "^3.1.0",
    "typescript": "latest",
    "uuid": "^9.0.0",
    "zod": "^3.20.2"
  }
}

-------------------------------------------------------------------------------- /packages/promptable/src/chains/LLMChain.ts: --------------------------------------------------------------------------------

import { NoopParser, Parser } from "@prompts/Parser";
import { Prompt } from "@prompts/Prompt";
import { CompletionsModelProvider } from "@providers/ModelProvider";
import { trace } from "../tracing";

export class LLMChain<
  T extends string = string,
  P extends Parser = NoopParser
> {
  constructor(
    public prompt: Prompt<T, P>,
    public provider: CompletionsModelProvider
  ) {}

  protected async _run(variables: Record<T, string>) {
    // TODO: fix trace so that the anonymous function isn't needed
    const formattedPrompt = await trace("prompt.format", (variables) =>
      this.prompt.format(variables)
    )(variables);
    const completion = await trace("provider.complete", (prompt) =>
      (this.provider as CompletionsModelProvider).generate(prompt)
    )(formattedPrompt);
    const parsed = await trace("prompt.parse", (completion) =>
      this.prompt.parse(completion)
    )(completion);
    return parsed;
  }

  async run(variables: Record<T, string>) {
    return trace("llmchain.run", (variables) => this._run(variables))(
      variables
    );
  }
}

-------------------------------------------------------------------------------- /packages/promptable/src/chains/MemoryLLMChain.ts: --------------------------------------------------------------------------------

import { NoopParser, Parser } from "@prompts/Parser";
import { Prompt } from "@prompts/Prompt";
import { CompletionsModelProvider } from "@providers/ModelProvider";
import { trace } from "../tracing";
import { Memory } from "src/memories/index";
import { LLMChain } from "@chains/LLMChain";

// TODO: Trace currently requires an anonymous function to be passed in
// to the trace function. There is some issue with the binding of this

export class MemoryLLMChain<
  T extends "memory" | "userInput",
  P extends Parser = NoopParser
> extends LLMChain<T, P> {
  constructor(
    public prompt: Prompt<T, P>,
    public provider: CompletionsModelProvider,
    public memory: Memory
  ) {
    super(prompt, provider);
    this.prompt = prompt;
  }

  protected async _run(variables: Record<T, string>) {
    const formattedPrompt = await trace("prompt.format", (vars) =>
      this.prompt.format(vars)
    )(variables);

    const completion = await trace("provider.complete", (p) =>
      this.provider.generate(p)
    )(formattedPrompt);

    const parsed = await trace("prompt.parse", (c) => this.prompt.parse(c))(
      completion
    );
    return parsed;
  }

  async run(variables: Omit<Record<T, string>, "memory">) {
    const vars = { ...variables, memory: this.memory.get() } as Record<
      T,
      string
    >;

    return await trace("llmchain.run", (v) => this._run(v))(vars);
  }
}

-------------------------------------------------------------------------------- /packages/promptable/src/chains/index.ts: --------------------------------------------------------------------------------

export { LLMChain } from "./LLMChain";
export { MemoryLLMChain } from "./MemoryLLMChain";

-------------------------------------------------------------------------------- /packages/promptable/src/index.test.ts: --------------------------------------------------------------------------------

import * as p from ".";

test("promptable", async () => {
  console.log("ok");
});

-------------------------------------------------------------------------------- /packages/promptable/src/index.ts: --------------------------------------------------------------------------------

export { LLMChain, MemoryLLMChain } from "@chains/index";
export { BufferedChatMemory } from "src/memories/BufferedChatMemory";

import { Prompt } from "@prompts/Prompt";
import { ModelProvider } from "@providers/ModelProvider";

export interface Document {
  content: string;
  meta: Record<string, any>;
}

import { Embeddings } from "./embeddings";
export { Embeddings };

// Prebuilt prompts
import * as prompts from "@prompts/prompts";
export { prompts };

// Providers
import { OpenAI } from "@providers/OpenAI";
export { OpenAI };

// Loaders
import { Loader, FileLoader } from "@loaders/index";
export type { Loader };
export { FileLoader };

// Parsing
import { JSONParser, CSVParser, Parser, ListParser } from "@prompts/Parser";
export type { Parser };
export { JSONParser, CSVParser, ListParser };

// Splitting
import {
  TextSplitter,
  CharacterTextSplitter,
  SentenceTextSplitter,
  TokenSplitter,
} from "@utils/TextSplitter";
export {
  TokenSplitter,
  TextSplitter,
  CharacterTextSplitter,
  SentenceTextSplitter,
};

import { unescapeStopTokens } from "@utils/unescape-stop-tokens";
import { injectVariables } from "@utils/inject-variables";
import { parseJsonSSE } from "@utils/parse-json-sse";
export const utils = {
  unescapeStopTokens,
  injectVariables,
  parseJsonSSE,
};

export { Prompt, ModelProvider };

import { graphTraces, trace, setTraceConfig, sendTraceToServer } from "./tracing";
import type { Trace } from "./tracing";
export { graphTraces, trace, setTraceConfig, sendTraceToServer };
export type { Trace };

-------------------------------------------------------------------------------- /packages/promptable/src/internal/Logger.ts: --------------------------------------------------------------------------------

export interface Logger {
  log(message?: any, ...optionalParams: any[]): void;
  error(message?: any, ...optionalParams: any[]): void;
  warn(message?: any, ...optionalParams: any[]): void;
  debug(message?: any, ...optionalParams: any[]): void;
  info(message?: any, ...optionalParams: any[]): void;
}

class DefaultLogger implements Logger {
  log(message?: any, ...optionalParams: any[]) {
    console.log(message, ...optionalParams);
  }
  error(message?: any, ...optionalParams: any[]) {
    console.error(message, ...optionalParams);
  }
  warn(message?: any, ...optionalParams: any[]) {
    console.warn(message, ...optionalParams);
  }
  debug(message?: any, ...optionalParams: any[]) {
    console.debug(message, ...optionalParams);
  }
  info(message?: any, ...optionalParams: any[]) {
    console.info(message, ...optionalParams);
  }
}

export class LoggerService {
  private static instance: LoggerService;
  private logger: Logger;

  level = "info";

  private constructor(logger?: Logger) {
    this.logger = logger || new DefaultLogger();
  }

  public static getInstance(logger?: Logger): LoggerService {
    if (!LoggerService.instance) {
      LoggerService.instance = new LoggerService(logger);
    }
    return LoggerService.instance;
  }

  public log(message: string): void {
    this.logger.log(message);
  }

  public error(message: string): void {
    this.logger.error(message);
  }

  public warn(message: string): void {
    this.logger.warn(message);
  }

  public info(message: string): void {
    this.logger.info(message);
  }

  public debug(message: string): void {
    if (this.level !== "debug") return;
    this.logger.debug(message);
  }

  public setLogger(logger: Logger) {
    this.logger = logger;
  }

  public setLevel(level: string) {
    this.level = level;
  }
}

export const logger = LoggerService.getInstance();

-------------------------------------------------------------------------------- /packages/promptable/src/loaders/index.ts: --------------------------------------------------------------------------------

import fs from "fs";
import { Document } from "..";

export interface Loader {
  load(): Promise<Document[]>;
}

export class FileLoader implements Loader {
  path: string;
  meta?: Record<string, any>;
  constructor(path: string, meta?: Record<string, any>) {
    this.path = path;
    this.meta = meta;
  }

  /**
   * Load a file from the filesystem
   *
   * @returns {Promise<Document[]>} A promise that resolves to an array of documents
   */
  async load(): Promise<Document[]> {
    const content = await fs.promises.readFile(this.path, "utf-8");
    return [
      {
        content,
        meta: {
          source: this.path,
          ...this.meta,
        },
      },
    ];
  }
}

-------------------------------------------------------------------------------- /packages/promptable/src/memories/BufferedChatMemory.ts: --------------------------------------------------------------------------------

import { Memory } from "src/memories/index";

export class BufferedChatMemory implements Memory {
  botMessages: string[] = [];
  userMessages: string[] = [];

  clear() {
    this.botMessages = [];
    this.userMessages = [];
  }

  constructor(
    protected botName = "Assistant",
    protected userName = "User",
    protected startingSpeaker: "user" | "bot" = "user",
    protected maxInteractionTurns = Infinity
  ) {}

  /**
   * get the interaction history
   *
   * @example
   *
   * const memory = new BufferedChatMemory();
   *
   * memory.addUserMessage("Hi");
   * memory.addBotMessage("Hello");
   * memory.addUserMessage("How are you?");
   * memory.addBotMessage("I'm fine, thanks");
   *
   * memory.get();
   * // User: Hi
   * // Assistant: Hello
   * // User: How are you?
   * // Assistant: I'm fine, thanks
   *
   * @returns a string containing the interaction history
   */
  get() {
    const {
      firstSpeakerMessage,
      secondSpeakerMessage,
      firstSpeakerName,
      secondSpeakerName,
    } =
      this.startingSpeaker === "user"
        ? {
            firstSpeakerMessage: this.userMessages,
            secondSpeakerMessage: this.botMessages,
            firstSpeakerName: this.userName,
            secondSpeakerName: this.botName,
          }
        : {
            firstSpeakerMessage: this.botMessages,
            secondSpeakerMessage: this.userMessages,
            firstSpeakerName: this.botName,
            secondSpeakerName: this.userName,
          };

    const numInteractionTurns = Math.min(
      firstSpeakerMessage.length,
      secondSpeakerMessage.length,
      this.maxInteractionTurns
    );
    let buffer = "";
    // We iterate the shorter array
    // because we don't want to put incomplete interaction turns in the buffer
    for (let i = 0; i < numInteractionTurns; i++) {
      buffer += `${firstSpeakerName}: ${firstSpeakerMessage[i]}\n`;
      buffer += `${secondSpeakerName}: ${secondSpeakerMessage[i]}\n`;
    }
    return buffer.trim();
  }

  addBotMessage(botMessage: string) {
    this.botMessages.push(botMessage);
  }

  addUserMessage(userMessage: string) {
    this.userMessages.push(userMessage);
  }
}

-------------------------------------------------------------------------------- /packages/promptable/src/memories/index.ts: --------------------------------------------------------------------------------

export { BufferedChatMemory } from "./BufferedChatMemory";

export interface Memory {
  get: () => string;
  clear: () => void;
}

-------------------------------------------------------------------------------- /packages/promptable/src/prompts/Parser.ts: --------------------------------------------------------------------------------

import { logger } from "src/internal/Logger";
import { parse } from "csv-parse/sync";

export interface Parser<T = string> {
  parse(text: string): T;
}

export class NoopParser implements Parser<string> {
  parse(text: string) {
    return text;
  }
}

export class JSONParser implements Parser<any> {
  /**
   * Parses JSON text into an object
   *
   * @example
   * const parser = new JSONParser();
   * parser.parse('{"a": 1, "b": 2, "c": 3}'); // outputs {a: 1, b: 2, c: 3}
   *
   * @param text a string of JSON text
   * @returns an object
   */
  parse(text: string) {
    try {
      return JSON.parse(text);
    } catch (e) {
      console.error(e as any);
      throw e;
    }
  }
}

/**
 * Parser that parses CSV text into an array of objects
 */
export class CSVParser implements Parser<any[]> {
  /**
   * Parses CSV text into an array of objects
   *
   * @example
   * const parser = new CSVParser();
   * parser.parse("a,b,c\n1,2,3"); // outputs [{a: 1, b: 2, c: 3}]
   *
   * @param text a string of CSV text
   * @returns an array of objects
   */
  parse(text: string) {
    try {
      return parse(text, {
        relax_column_count: true,
        relax_quotes: true,
        columns: true,
        skip_empty_lines: true,
      });
    } catch (e) {
      console.error(e as any);
      throw e;
    }
  }
}

export class ListParser implements Parser<Array<string>> {
  /**
   * Parses a list of items separated by a character
   *
   * @example
   * const parser = new ListParser();
   * parser.parse("a, b, c"); // outputs ["a", "b", "c"]
   *
   * @param text a string of items separated by a character
   * @param char a character to split the text by
   * @returns an array of items
   */
  parse(text: string, char = ",") {
    try {
      return text.split(char).map((t) => t.trim());
    } catch (e) {
      logger.error(e as any);
      throw e;
    }
  }
}

-------------------------------------------------------------------------------- /packages/promptable/src/prompts/Prompt.ts: --------------------------------------------------------------------------------

import { injectVariables } from "@utils/inject-variables";
import { NoopParser, Parser } from "@prompts/Parser";

export class Prompt<
  T extends string = string,
  P extends Parser = NoopParser
> implements Parser
{
  text: string;
  variableNames: T[];

  private parser: P;

  constructor(text: string, variableNames: T[], parser?: P) {
    this.text = text;
    this.variableNames = variableNames;

    this.parser = new NoopParser() as P;
    if (typeof parser !== "undefined") {
      this.parser = parser;
    }
  }

  parse(completion: string) {
    return this.parser.parse(completion);
  }

  format(variables: Record<T, string>) {
    const formattedPrompt = injectVariables(this.text, variables);
    return formattedPrompt;
  }

  toJson() {
    return {
      text: this.text,
      variableNames: this.variableNames,
    };
  }
}

//TODO: This is very unwieldy. I need to figure out a better way to do this.
// how to handle injecting documents / context into the base prompt.
// How to select context for the prompt? Ranking?
// how to check the token size of the prompt + context?

// Maybe a builder pattern?
// like this:
// const prompt = new PromptBuilder()
//   .text("What is your name?")
//   .variable("name")
//   .examples(["John", "Jane", "Joe"])
//   .build();
//
// Instead of a builder pattern, I could use a
// factory function:
// const prompt = prompt("What is your name?", ["name"], ["John", "Jane", "Joe"]);
//
// Or I could use a class factory:
// const prompt = Prompt("What is your name?", ["name"], ["John", "Jane", "Joe"]);
//

export const prompt = (
  text: string,
  variableNames: string[],
  parser?: Parser
) => new Prompt(text, variableNames, parser);

-------------------------------------------------------------------------------- /packages/promptable/src/providers/ModelProvider.ts: --------------------------------------------------------------------------------

import { Prompt } from "@prompts/Prompt";
import { Document } from "src";

export enum ModelProviderType {
  OpenAI,
}

export abstract class ModelProvider {
  type: ModelProviderType;

  constructor(type: ModelProviderType) {
    this.type = type;
  }
}

export interface CompletionsModelProvider extends ModelProvider {
  generate(promptText: string, ...args: any[]): Promise<string>;
}

export interface CompletionStreamModelProvider extends ModelProvider {
  stream(
    prompt: Prompt,
    variables: Record<string, string>,
    ...args: any[]
  ): Promise<any>;
}

export interface EmbeddingsModelProvider extends ModelProvider {
  embed(texts: string[], ...args: any[]): Promise<number[][]>;
  embed(text: string, ...args: any[]): Promise<number[]>;
}

export interface Tokenizer {
  encode(text: string): { tokens: number[]; texts: string[] };
  decode(tokens: number[]): string;
  truncate(text: string, maxTokens: number): string;
  countTokens(text: string): number;
  countDocumentTokens(doc: Document): number;
}

-------------------------------------------------------------------------------- /packages/promptable/src/tracing.ts: --------------------------------------------------------------------------------

import chalk from "chalk";
import { AsyncLocalStorage } from "node:async_hooks";
import { v4 } from "uuid";
import axios, { AxiosError } from "axios";

export interface TraceConfig {
  serverUrl: string;
  send: (trace: Trace) => Promise<void> | void;
}

const defaultConfig: TraceConfig = {
  // TODO: what if localhost:3000 is not the port where promptable visualizer is, i.e. another app uses localhost:3000?
  serverUrl: "http://localhost:3000/api/traces",
  send: () => {},
};

let config: TraceConfig = defaultConfig;

/**
 * Set the trace config for your application
 *
 * This is useful for setting the server URL or the send function.
 *
 * @param newConfig
 */
export const setTraceConfig = (newConfig: Partial<TraceConfig>) => {
  console.log("Setting trace config:", newConfig);
  config = { ...defaultConfig, ...newConfig };
};

// Log the trace on the server
export async function sendTraceToServer(trace: Trace) {
  try {
    await axios.post(config.serverUrl, {
      ...trace,
    });
  } catch (error) {
    console.error(`Error logging to server: ${(error as AxiosError).message}`);
  }
}

export type Trace = {
  name: string;
  inputs: any[];
  outputs: any | null;
  tags: string[];
  id: string;
  parentId: string | undefined;
  children: Trace[];
  error?: any;
  timestamp: number;
};

// Need to use AsyncLocalStorage because the trace context
// needs to be passed to child traces
const traceContext = new AsyncLocalStorage<Trace>();

export const trace = <T extends any[], R>(
  name: string,
  fn: (this: any, ...args: T) => R | Promise<R>,
  tags?: string[]
) => {
  return async (...args: T) => {
    // Get the parent trace context
    const parent = traceContext.getStore();

    // Create a new trace context
    const trace: Trace = {
      name,
      inputs: args,
      outputs: null,
      tags: tags || [],
      id: v4(),
      parentId: parent?.id,
      children: [],
      timestamp: Date.now(),
    };

    return await traceContext.run(trace, async () => {
      try {
        const result = await Promise.resolve(fn(...args));
        trace.outputs = result;
        return result;
      } catch (error) {
        console.error(`Error in step: ${name} - Error:`, error);
        trace.error = error as any;
        throw error;
      } finally {
        // Add the trace to the parent
        if (parent) {
          parent.children.push(trace);
        }

        recordTrace(trace);
      }
    });
  };
};

/**
 * Record a trace to the server.
 *
 * Uses the configured send function to send the trace to the server.
 *
 * @param trace
 */
const recordTrace = async (trace: Trace) => {
  config.send({
    ...trace,
  });
};

export function graphTraces(traces: Trace[], indent = 0) {
  traces
    .filter((trace) => !trace.parentId)
    .forEach((trace) => {
      const indentation = "--->".repeat(indent);
      console.log(chalk.blueBright(`${indentation}${trace.name}`), "(root)");
      printChildren(trace, indent + 1);
    });
}

function printChildren(trace: Trace, indent: number) {
  trace.children.forEach((child) => {
    const indentation = "---".repeat(indent);
    console.log(`${indentation}>`, chalk.blue(`${child.name}`));
    printChildren(child, indent + 1);
  });
}

-------------------------------------------------------------------------------- /packages/promptable/src/utils/extract-variable-names.ts: --------------------------------------------------------------------------------

/**
 * Extracts *unique* {{variables}} from a string
 *
 * @param text
 * @returns
 */
export const extractVariableNames = (text: string) => {
  const matches = text.match(/{{(.*?)}}/g);
  const inputsFound = new Set<string>();

  return (
    matches
      ?.map((match) => {
        return match.slice(2, -2).trim().replaceAll(" ", "-");
      })
      .filter((s) => {
        if (inputsFound.has(s)) {
          return false;
        }
        inputsFound.add(s);
        return true;
      })
      .map((s) => s) || []
  );
};

-------------------------------------------------------------------------------- /packages/promptable/src/utils/inject-variables.ts: --------------------------------------------------------------------------------

export function injectVariables(
  template: string,
  variables: { [key: string]: any }
): string {
  let result = template;
  for (const key in variables) {
    result = result.replaceAll(`{{${key}}}`, variables[key]);
  }
  return result;
}

-------------------------------------------------------------------------------- /packages/promptable/src/utils/parse-json-sse.ts: --------------------------------------------------------------------------------

// todo: https://github.com/openai/openai-node/issues/18#issuecomment-1369996933
// there is definitely a better api that we should create for making it simple
// to stream completions on the frontend or backend.

/**
 * Handle Server Sent Events (SSE) and parse JSON
 *
 * Taken from
 * https://www.beskar.co/blog/streaming-openai-completions-vercel-edge
 *
 * Usage:
 * ```
 * const res = await fetch("https://api.openai.com/v1/completions", {
 *   headers: {
 *     "Content-Type": "application/json",
""}`, 17 | * }, 18 | * method: "POST", 19 | * body: JSON.stringify({ 20 | * prompt: data.prompt, 21 | * model: "text-davinci-003", 22 | * max_tokens: 128, 23 | * temperature: 0.7, 24 | * stream: true, 25 | * }), 26 | * }); 27 | 28 | * parseJsonSSE({ 29 | * data: res.body, 30 | * onParse: (data) => onMessage(data.choices?.[0].text); 31 | * onFinish: onClose, 32 | * }); 33 | * ``` 34 | * */ 35 | export const parseJsonSSE = async ({ 36 | data, 37 | onParse, 38 | onFinish, 39 | }: { 40 | data: ReadableStream; 41 | onParse: (object: T) => void; 42 | onFinish: () => void; 43 | }) => { 44 | const reader = data.getReader(); 45 | const decoder = new TextDecoder(); 46 | 47 | let done = false; 48 | let tempState = ""; 49 | 50 | while (!done) { 51 | const { value, done: doneReading } = await reader.read(); 52 | done = doneReading; 53 | const newValue = decoder.decode(value).split("\n\n").filter(Boolean); 54 | 55 | if (tempState) { 56 | newValue[0] = tempState + newValue[0]; 57 | tempState = ""; 58 | } 59 | 60 | // On the edge (vercel), events can be fragmented and json can be invalid 61 | // https://socket.dev/npm/package/@beskar-labs/parse-json-sse 62 | newValue.forEach((newVal) => { 63 | try { 64 | const json = JSON.parse(newVal.replace("data: ", "")) as T; 65 | 66 | onParse(json); 67 | } catch (error) { 68 | tempState = newVal; 69 | } 70 | }); 71 | } 72 | 73 | onFinish(); 74 | }; 75 | -------------------------------------------------------------------------------- /packages/promptable/src/utils/unescape-stop-tokens.ts: -------------------------------------------------------------------------------- 1 | import { logger } from "../internal/Logger"; 2 | 3 | /** 4 | * Replace any escaped stop tokens like "\\n" their unescaped versions 5 | * 6 | * @param stop_tokens 7 | * @returns 8 | */ 9 | export const unescapeStopTokens = (stop_tokens: string | string[]) => { 10 | logger.debug(`Unescaping stop tokens: ${stop_tokens}`); 11 | if (Array.isArray(stop_tokens)) { 12 | return stop_tokens.map((token) => { 13 | return JSON.parse(`"${token}"`); 14 | }); 15 | } else { 16 | return JSON.parse(`"${stop_tokens}"`); 17 | } 18 | }; 19 | -------------------------------------------------------------------------------- /packages/promptable/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2017", 4 | "lib": [ 5 | "dom", 6 | "dom.iterable", 7 | "esnext", 8 | ], 9 | "module": "esnext", 10 | "esModuleInterop": true, 11 | "forceConsistentCasingInFileNames": true, 12 | "isolatedModules": true, 13 | "moduleResolution": "node", 14 | "preserveWatchOutput": true, 15 | "skipLibCheck": true, 16 | "noEmit": true, 17 | "strict": true, 18 | "baseUrl": ".", 19 | "paths": { 20 | "@memory/*": [ 21 | "src/memories/*" 22 | ], 23 | "@chains/*": [ 24 | "src/chains/*" 25 | ], 26 | "@steps/*": [ 27 | "src/steps/*" 28 | ], 29 | "@utils/*": [ 30 | "src/utils/*" 31 | ], 32 | "@prompts/*": [ 33 | "src/prompts/*" 34 | ], 35 | "@providers/*": [ 36 | "src/providers/*" 37 | ], 38 | "@loaders/*": [ 39 | "src/loaders/*" 40 | ], 41 | }, 42 | }, 43 | "exclude": [ 44 | "node_modules" 45 | ] 46 | } -------------------------------------------------------------------------------- /packages/tsconfig/README.md: -------------------------------------------------------------------------------- 1 | # `tsconfig` 2 | 3 | These are base shared `tsconfig.json`s from which all other `tsconfig.json`'s inherit from. 
4 | -------------------------------------------------------------------------------- /packages/tsconfig/base.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/tsconfig", 3 | "display": "Default", 4 | "compilerOptions": { 5 | "composite": false, 6 | "declaration": true, 7 | "declarationMap": true, 8 | "esModuleInterop": true, 9 | "forceConsistentCasingInFileNames": true, 10 | "inlineSources": false, 11 | "isolatedModules": true, 12 | "moduleResolution": "node", 13 | "noUnusedLocals": false, 14 | "noUnusedParameters": false, 15 | "preserveWatchOutput": true, 16 | "skipLibCheck": true, 17 | "strict": true 18 | }, 19 | "exclude": ["node_modules"] 20 | } 21 | -------------------------------------------------------------------------------- /packages/tsconfig/nextjs.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/tsconfig", 3 | "display": "Next.js", 4 | "extends": "./base.json", 5 | "compilerOptions": { 6 | "target": "es5", 7 | "lib": ["dom", "dom.iterable", "esnext"], 8 | "allowJs": true, 9 | "skipLibCheck": true, 10 | "strict": true, 11 | "forceConsistentCasingInFileNames": true, 12 | "noEmit": true, 13 | "incremental": true, 14 | "esModuleInterop": true, 15 | "module": "esnext", 16 | "resolveJsonModule": true, 17 | "isolatedModules": true, 18 | "jsx": "preserve" 19 | }, 20 | "include": ["src", "next-env.d.ts"], 21 | "exclude": ["node_modules"] 22 | } 23 | -------------------------------------------------------------------------------- /packages/tsconfig/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "tsconfig", 3 | "version": "0.0.0", 4 | "private": true, 5 | "files": [ 6 | "base.json", 7 | "nextjs.json", 8 | "react-library.json" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /packages/tsconfig/react-library.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/tsconfig", 3 | "display": "React Library", 4 | "extends": "./base.json", 5 | "compilerOptions": { 6 | "jsx": "react-jsx", 7 | "lib": ["ES2015"], 8 | "module": "ESNext", 9 | "target": "es6" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /packages/ui/Button.tsx: -------------------------------------------------------------------------------- 1 | import * as React from "react"; 2 | export const Button = () => { 3 | return ; 4 | }; 5 | -------------------------------------------------------------------------------- /packages/ui/index.tsx: -------------------------------------------------------------------------------- 1 | import * as React from "react"; 2 | export * from "./Button"; 3 | -------------------------------------------------------------------------------- /packages/ui/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ui", 3 | "private": true, 4 | "version": "0.0.0", 5 | "main": "./index.tsx", 6 | "types": "./index.tsx", 7 | "license": "MIT", 8 | "scripts": { 9 | "lint": "eslint *.ts*" 10 | }, 11 | "devDependencies": { 12 | "@types/react": "^18.0.22", 13 | "@types/react-dom": "^18.0.7", 14 | "eslint": "7.32.0", 15 | "eslint-config-custom": "0.0.0", 16 | "react": "^18.2.0", 17 | "tsconfig": "0.0.0", 18 | "typescript": "^4.7.4" 19 | } 20 | } 21 
-------------------------------------------------------------------------------- /packages/ui/tsconfig.json: --------------------------------------------------------------------------------

{
  "extends": "tsconfig/react-library.json",
  "include": ["."],
  "exclude": ["dist", "build", "node_modules"]
}

-------------------------------------------------------------------------------- /pnpm-workspace.yaml: --------------------------------------------------------------------------------

packages:
  - "apps/*"
  - "packages/*"
  - "examples"

-------------------------------------------------------------------------------- /turbo.json: --------------------------------------------------------------------------------

{
  "$schema": "https://turbo.build/schema.json",
  "globalDependencies": ["**/.env.*local", "**/apps/examples.env", "**/examples/.env"],
  "pipeline": {
    "build": {
      "dependsOn": ["^build"],
      "env": ["OPENAI_API_KEY", "NODE_ENV"],
      "outputs": ["dist/**", ".next/**"]
    },
    "lint": {
      "outputs": []
    },
    "test": {},
    "dev": {
      "cache": false,
      "persistent": true
    },
    "start": {
      "cache": false
    }
  }
}
--------------------------------------------------------------------------------