├── .dockerignore ├── .eslintignore ├── .eslintrc.json ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── dependabot.yml ├── .gitignore ├── .husky └── pre-commit ├── .lintstagedrc.json ├── .prettierrc.js ├── Dockerfile ├── LICENSE ├── README.md ├── app ├── api │ ├── chat │ │ ├── engine │ │ │ ├── chat.ts │ │ │ ├── generate.ts │ │ │ └── index.ts │ │ ├── llamaindex │ │ │ └── streaming │ │ │ │ └── events.ts │ │ ├── route.ts │ │ └── upload │ │ │ └── route.ts │ └── share │ │ └── route.ts ├── b │ └── [botId] │ │ └── page.tsx ├── components │ ├── bot │ │ ├── bot-item.tsx │ │ ├── bot-list.tsx │ │ ├── bot-options │ │ │ ├── delete-bot-dialog.tsx │ │ │ ├── edit-bot-dialog.tsx │ │ │ ├── index.tsx │ │ │ └── share-bot-dialog.tsx │ │ ├── bot-settings │ │ │ ├── bot-config.tsx │ │ │ ├── config-item.tsx │ │ │ ├── context-prompt.tsx │ │ │ ├── index.tsx │ │ │ └── model-config.tsx │ │ └── use-bot.tsx │ ├── chat │ │ ├── chat-header.tsx │ │ ├── chat-session.tsx │ │ ├── index.tsx │ │ └── useChatSession.ts │ ├── home.tsx │ ├── layout │ │ ├── error.tsx │ │ ├── sidebar.tsx │ │ ├── theme-provider.tsx │ │ └── theme-toggle.tsx │ ├── settings.tsx │ └── ui │ │ ├── alert-dialog.tsx │ │ ├── badge.tsx │ │ ├── button.tsx │ │ ├── card.tsx │ │ ├── checkbox.tsx │ │ ├── dialog.tsx │ │ ├── dropdown-menu.tsx │ │ ├── emoji.tsx │ │ ├── hover-card.tsx │ │ ├── image-preview.tsx │ │ ├── input.tsx │ │ ├── loading.tsx │ │ ├── markdown.tsx │ │ ├── popover.tsx │ │ ├── progress.tsx │ │ ├── scroll-area.tsx │ │ ├── select.tsx │ │ ├── separator.tsx │ │ ├── textarea.tsx │ │ ├── toast.tsx │ │ ├── toaster.tsx │ │ ├── tooltip.tsx │ │ ├── typography.tsx │ │ └── use-toast.ts ├── constant.ts ├── layout.tsx ├── lib │ └── utils.ts ├── locales │ ├── en.ts │ └── index.ts ├── page.tsx ├── store │ ├── bot.data.ts │ └── bot.ts ├── styles │ ├── globals.css │ └── lib │ │ ├── highlight.css │ │ └── markdown.css └── utils │ ├── clipboard.ts │ ├── download.ts │ └── mobile.ts ├── components.json ├── datasources 
└── .gitignore ├── docker-compose.yml ├── next.config.mjs ├── package.json ├── pnpm-lock.yaml ├── postcss.config.js ├── public ├── android-chrome-192x192.png ├── android-chrome-512x512.png ├── apple-touch-icon.png ├── favicon-16x16.png ├── favicon-2048x2048.png ├── favicon-32x32.png ├── favicon.ico ├── llama.png ├── robots.txt ├── screenshot.png ├── serviceWorker.js ├── serviceWorkerRegister.js └── site.webmanifest ├── scripts ├── create-llama.sh ├── generate-demo.sh └── get-demo.sh ├── sentry.client.config.ts ├── sentry.edge.config.ts ├── sentry.server.config.ts ├── tailwind.config.ts ├── test └── data │ └── .gitignore └── tsconfig.json /.dockerignore: -------------------------------------------------------------------------------- 1 | .github 2 | .env 3 | .env.template 4 | .env.development.local 5 | .env.* 6 | Dockerfile 7 | docker-compose.yml 8 | -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | public/serviceWorker.js 2 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "next/core-web-vitals", 3 | "plugins": ["prettier"], 4 | "rules": { 5 | "no-unused-vars": ["warn", { "args": "none" }], 6 | "@next/next/no-img-element": "off" 7 | }, 8 | "ignorePatterns": ["**/*.css"] 9 | } 10 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[Bug] " 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 
16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Deployment** 27 | - [ ] Docker 28 | - [ ] Vercel 29 | - [ ] Server 30 | 31 | **Desktop (please complete the following information):** 32 | - OS: [e.g. iOS] 33 | - Browser [e.g. chrome, safari] 34 | - Version [e.g. 22] 35 | 36 | **Smartphone (please complete the following information):** 37 | - Device: [e.g. iPhone6] 38 | - OS: [e.g. iOS8.1] 39 | - Browser [e.g. stock browser, safari] 40 | - Version [e.g. 22] 41 | 42 | **Additional Logs** 43 | Add any logs about the problem here. 44 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[Feature] " 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "npm" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "weekly" 12 | target-branch: "develop" 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # next.js 12 | /.next/ 13 | /out/ 14 | 15 | # production 16 | /build 17 | 18 | # misc 19 | .DS_Store 20 | *.pem 21 | 22 | # debug 23 | npm-debug.log* 24 | yarn-debug.log* 25 | yarn-error.log* 26 | .pnpm-debug.log* 27 | 28 | # local env files 29 | .env*.local 30 | 31 | # vercel 32 | .vercel 33 | 34 | # typescript 35 | *.tsbuildinfo 36 | next-env.d.ts 37 | dev 38 | 39 | .vscode 40 | .idea 41 | 42 | # docker-compose env files 43 | .env 44 | 45 | *.key 46 | *.key.pub 47 | # Sentry Config File 48 | .sentryclirc 49 | 50 | # create-llama copies 51 | app/api/chat/config/ 52 | app/api/files/ 53 | cl/ 54 | 55 | # uploaded files 56 | output/ -------------------------------------------------------------------------------- /.husky/pre-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | . 
"$(dirname -- "$0")/_/husky.sh" 3 | 4 | npx lint-staged -------------------------------------------------------------------------------- /.lintstagedrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "./app/**/*.{js,ts,jsx,tsx,json,html,css,md}": [ 3 | "eslint --fix", 4 | "prettier --write" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /.prettierrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | printWidth: 80, 3 | tabWidth: 2, 4 | useTabs: false, 5 | semi: true, 6 | singleQuote: false, 7 | trailingComma: 'all', 8 | bracketSpacing: true, 9 | arrowParens: 'always', 10 | }; 11 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # ---- Build Stage ---- 2 | FROM node:18-bookworm-slim AS build 3 | 4 | # Install ca-certificates. Issue: #89 5 | RUN apt-get update 6 | RUN apt-get install -y ca-certificates 7 | 8 | # Install Python, g++, and make for building native dependencies 9 | # Issue: https://github.com/docker/getting-started/issues/124 10 | RUN apt-get install -y python3 g++ make && \ 11 | apt-get clean 12 | 13 | # Set the working directory 14 | WORKDIR /usr/src/app 15 | 16 | # Copy the application's package.json and pnpm-lock.yaml to the container 17 | COPY package.json pnpm-lock.yaml ./ 18 | 19 | # Install pnpm and application dependencies 20 | RUN npm install -g pnpm && \ 21 | pnpm install 22 | 23 | # Copy the rest of the application to the container 24 | COPY . . 
25 | 26 | # Build the application for production 27 | RUN pnpm build 28 | 29 | # ---- Production Stage ---- 30 | FROM node:18-bookworm-slim AS runtime 31 | 32 | # Use a non-root user 33 | USER node 34 | 35 | # Set the working directory 36 | WORKDIR /usr/src/app 37 | 38 | # Copy the build artifacts from the build stage 39 | COPY --from=build /usr/src/app/.next ./.next 40 | COPY --from=build /usr/src/app/public ./public 41 | COPY --from=build /usr/src/app/package.json ./package.json 42 | COPY --from=build /usr/src/app/node_modules ./node_modules 43 | 44 | # Expose port 3000 to be accessed outside the container 45 | EXPOSE 3000 46 | 47 | # Start the application in production mode 48 | CMD ["npx", "pnpm", "start"] 49 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 LlamaIndex 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 |

4 | LlamaIndex Chat Logo 5 |

6 | 7 |

LlamaIndex Chat

8 |

Create chat bots that know your data

9 | 10 |

11 | LlamaIndex Chat Screen 16 |

17 | 18 | Welcome to [LlamaIndex Chat](https://github.com/run-llama/chat-llamaindex). You can create and share LLM chatbots that know your data (PDF or text documents). 19 | 20 | Getting started with LlamaIndex Chat is a breeze. Visit https://chat.llamaindex.ai - a hosted version of LlamaIndex Chat with no user authentication that provides an immediate start. 21 | 22 | ## 🚀 Features 23 | 24 | LlamaIndex Chat is an example chatbot application for [LlamaIndexTS](https://github.com/run-llama/LlamaIndexTS) featuring [LlamaCloud](https://cloud.llamaindex.ai/). 25 | 26 | You can: 27 | 28 | - Create bots using prompt engineering and share them with other users. 29 | - Modify the demo bots by using the UI or directly editing the [./app/store/bot.data.ts](./app/store/bot.data.ts) file. 30 | - Integrate your data by uploading documents or generating new [data sources](#📀-data-sources). 31 | 32 | ## ⚡️ Quick start 33 | 34 | ### Local Development 35 | 36 | Requirement: [NodeJS](https://nodejs.org) 18 37 | 38 | - Clone the repository 39 | 40 | ```bash 41 | git clone https://github.com/run-llama/chat-llamaindex 42 | cd chat-llamaindex 43 | ``` 44 | 45 | - Prepare the project 46 | 47 | ```bash 48 | pnpm install 49 | pnpm run create-llama 50 | ``` 51 | 52 | > **Note**: The last step copies the chat UI component and file server route from the [create-llama](https://github.com/run-llama/create-llama) project, see [./scripts/create-llama.sh](./scripts/create-llama.sh). 53 | 54 | - Set the environment variables 55 | 56 | Edit environment variables in `.env.development.local`. Especially check your `OPENAI_API_KEY` and `LLAMA_CLOUD_API_KEY` and the LlamaCloud project to use (`LLAMA_CLOUD_PROJECT_NAME`). 
57 | 58 | - Download the demo datasources 59 | 60 | ```bash 61 | pnpm run get-demo 62 | ``` 63 | 64 | - Upload the demo datasources to your LlamaCloud account 65 | 66 | ```bash 67 | pnpm run generate-demo 68 | ``` 69 | 70 | - Run the dev server 71 | 72 | ```bash 73 | pnpm dev 74 | ``` 75 | 76 | ### 🐳 Docker 77 | 78 | Note: This section has not been used for a while and might be outdated. 79 | 80 | You can use Docker for development and deployment of LlamaIndex Chat. 81 | 82 | #### Building the Docker Image 83 | 84 | ```bash 85 | docker build -t chat-llamaindex . 86 | ``` 87 | 88 | #### Running in a Docker Container 89 | 90 | ```bash 91 | docker run -p 3000:3000 --env-file .env.development.local chat-llamaindex 92 | ``` 93 | 94 | #### Docker Compose 95 | 96 | For those preferring Docker Compose, we've included a docker-compose.yml file. To run using Docker Compose: 97 | 98 | ```bash 99 | docker compose up 100 | ``` 101 | 102 | Go to http://localhost:3000 in your web browser. 103 | 104 | **Note**: By default, the Docker Compose setup maps the `cache` and `datasources` directories from your host machine to the Docker container, ensuring data persistence and accessibility between container restarts. 105 | 106 | ### Vercel Deployment 107 | 108 | Deploying to Vercel is simple; click the button below and follow the instructions: 109 | 110 | [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Frun-llama%2Fchat-llamaindex&env=OPENAI_API_KEY) 111 | 112 | If you're deploying to a [Vercel Hobby](https://vercel.com/docs/accounts/plans#hobby) account, [change the running time](./app/api/llm/route.ts#L196) to 10 seconds, as this is the limit for the free plan. 113 | 114 | If you want to use the [sharing](#🔄-sharing) functionality, then you need to create a Vercel KV store and connect it to your project. 
115 | Just follow [this step from the quickstart](https://vercel.com/docs/storage/vercel-kv/quickstart#create-a-kv-database). No further configuration is necessary as the app automatically uses a connected KV store. 116 | 117 | ## 🔄 Sharing 118 | 119 | LlamaIndex Chat supports the sharing of bots via URLs. Demo bots are read-only and can't be shared. But you can create new bots (or clone and modify a demo bot) and call the share functionality in the context menu. It will create a unique URL that you can share with others. Opening the URL, users can directly use the shared bot. 120 | 121 | ## 📀 Data Sources 122 | 123 | The app is using a [`ChatEngine`](https://ts.llamaindex.ai/modules/chat_engine) for each bot with a [`LlamaCloudIndex`](https://ts.llamaindex.ai/modules/llamacloud) attached. 124 | 125 | To set which `LlamaCloudIndex` is used for a bot, change the `datasource` attribute in the [bot's data](./app/store/bot.data.ts). 126 | 127 | > **Note**: To use the changed bots, you have to clear your local storage. Otherwise, the old bots are still used. You can clear your local storage by opening the developer tools and running `localStorage.clear()` in the console and reloading the page. 128 | 129 | ### Generate Data Sources 130 | 131 | To generate a new data source, create a new subfolder in the `datasources` directory and add the data files (e.g., PDFs). 132 | 133 | Then, run the following command to create an index in the `Default` project on LlamaCloud 134 | 135 | ```bash 136 | pnpm run generate <datasource> 137 | ``` 138 | 139 | Where `<datasource>` is the name of the subfolder with your data files. 140 | 141 | > **Note**: On Windows, use `pnpm run generate:win <datasource>` instead. 142 | 143 | ## 🙏 Thanks 144 | 145 | Thanks go to @Yidadaa for his [ChatGPT-Next-Web](https://github.com/Yidadaa/ChatGPT-Next-Web) project, which was used as a starter template for this project. 
146 | -------------------------------------------------------------------------------- /app/api/chat/engine/chat.ts: -------------------------------------------------------------------------------- 1 | import { ContextChatEngine, Settings } from "llamaindex"; 2 | import { getDataSource, LlamaCloudDataSourceParams } from "./index"; 3 | import { generateFilters } from "@/cl/app/api/chat/engine/queryFilter"; 4 | 5 | interface ChatEngineOptions { 6 | datasource: LlamaCloudDataSourceParams; 7 | documentIds?: string[]; 8 | } 9 | 10 | export async function createChatEngine({ 11 | datasource, 12 | documentIds, 13 | }: ChatEngineOptions) { 14 | const index = await getDataSource(datasource); 15 | if (!index) { 16 | throw new Error( 17 | `StorageContext is empty - call 'pnpm run generate ${datasource}' to generate the storage first`, 18 | ); 19 | } 20 | const retriever = index.asRetriever({ 21 | similarityTopK: process.env.TOP_K ? parseInt(process.env.TOP_K) : 3, 22 | filters: generateFilters(documentIds || []) as any, 23 | }); 24 | return new ContextChatEngine({ 25 | chatModel: Settings.llm, 26 | retriever, 27 | }); 28 | } 29 | -------------------------------------------------------------------------------- /app/api/chat/engine/generate.ts: -------------------------------------------------------------------------------- 1 | import * as dotenv from "dotenv"; 2 | import * as fs from "fs/promises"; 3 | import * as path from "path"; 4 | import { getDataSource } from "."; 5 | import { FilesService, PipelinesService } from "@llamaindex/cloud/api"; 6 | import { initService } from "llamaindex/cloud/utils"; 7 | 8 | const DATA_DIR = "./datasources"; 9 | 10 | // Load environment variables from local .env.development.local file 11 | dotenv.config({ path: ".env.development.local" }); 12 | 13 | async function getRuntime(func: any) { 14 | const start = Date.now(); 15 | await func(); 16 | const end = Date.now(); 17 | return end - start; 18 | } 19 | 20 | async function* walk(dir: string): 
AsyncGenerator { 21 | const directory = await fs.opendir(dir); 22 | 23 | for await (const dirent of directory) { 24 | const entryPath = path.join(dir, dirent.name); 25 | 26 | if (dirent.isDirectory()) { 27 | yield* walk(entryPath); // Recursively walk through directories 28 | } else if (dirent.isFile()) { 29 | yield entryPath; // Yield file paths 30 | } 31 | } 32 | } 33 | 34 | // TODO: should be moved to LlamaCloudFileService of LlamaIndexTS 35 | async function addFileToPipeline( 36 | projectId: string, 37 | pipelineId: string, 38 | uploadFile: File | Blob, 39 | customMetadata: Record = {}, 40 | ) { 41 | const file = await FilesService.uploadFileApiV1FilesPost({ 42 | projectId, 43 | formData: { 44 | upload_file: uploadFile, 45 | }, 46 | }); 47 | const files = [ 48 | { 49 | file_id: file.id, 50 | custom_metadata: { file_id: file.id, ...customMetadata }, 51 | }, 52 | ]; 53 | await PipelinesService.addFilesToPipelineApiV1PipelinesPipelineIdFilesPut({ 54 | pipelineId, 55 | requestBody: files, 56 | }); 57 | } 58 | 59 | async function generateDatasource() { 60 | const datasource = process.argv[2]; 61 | if (!datasource) { 62 | console.error("Please provide a datasource as an argument."); 63 | process.exit(1); 64 | } 65 | 66 | console.log(`Generating storage context for datasource '${datasource}'...`); 67 | 68 | const ms = await getRuntime(async () => { 69 | const index = await getDataSource({ 70 | pipeline: datasource, 71 | ensureIndex: true, 72 | }); 73 | const projectId = await index.getProjectId(); 74 | const pipelineId = await index.getPipelineId(); 75 | 76 | // walk through the data directory and upload each file to LlamaCloud 77 | for await (const filePath of walk(path.join(DATA_DIR, datasource))) { 78 | const buffer = await fs.readFile(filePath); 79 | const filename = path.basename(filePath); 80 | const file = new File([buffer], filename); 81 | await addFileToPipeline(projectId, pipelineId, file, { 82 | private: "false", 83 | }); 84 | } 85 | }); 86 | console.log( 
87 | `Successfully uploaded documents to LlamaCloud in ${ms / 1000}s.`, 88 | ); 89 | } 90 | 91 | (async () => { 92 | initService(); 93 | await generateDatasource(); 94 | console.log("Finished generating storage."); 95 | })(); 96 | -------------------------------------------------------------------------------- /app/api/chat/engine/index.ts: -------------------------------------------------------------------------------- 1 | import { LlamaCloudIndex } from "llamaindex/cloud/LlamaCloudIndex"; 2 | import type { CloudConstructorParams } from "llamaindex/cloud/constants"; 3 | 4 | export type LlamaCloudDataSourceParams = { 5 | project?: string; 6 | pipeline?: string; 7 | ensureIndex?: boolean; 8 | }; 9 | 10 | export function parseDataSource( 11 | datasource: string, 12 | ): LlamaCloudDataSourceParams { 13 | try { 14 | return JSON.parse(datasource) as LlamaCloudDataSourceParams; 15 | } catch (e) { 16 | return {}; 17 | } 18 | } 19 | 20 | export async function getDataSource(params: LlamaCloudDataSourceParams) { 21 | checkEnvVars(); 22 | if (params.ensureIndex) { 23 | // ensure that the index exists 24 | try { 25 | await LlamaCloudIndex.fromDocuments({ 26 | ...createParams(params), 27 | documents: [], 28 | }); 29 | } catch (e) { 30 | if ((e as any).status === 400) { 31 | // ignore 400 error, it's caused by calling fromDocuments with empty documents 32 | // TODO: fix in LLamaIndexTS 33 | } else { 34 | throw e; 35 | } 36 | } 37 | } 38 | return new LlamaCloudIndex(createParams(params)); 39 | } 40 | 41 | function createParams({ 42 | project, 43 | pipeline, 44 | }: LlamaCloudDataSourceParams): CloudConstructorParams { 45 | if (!pipeline) { 46 | throw new Error("Set pipeline in the params."); 47 | } 48 | const params = { 49 | organizationId: process.env.LLAMA_CLOUD_ORGANIZATION_ID, 50 | name: pipeline, 51 | projectName: project ?? 
process.env.LLAMA_CLOUD_PROJECT_NAME!, 52 | apiKey: process.env.LLAMA_CLOUD_API_KEY, 53 | baseUrl: process.env.LLAMA_CLOUD_BASE_URL, 54 | }; 55 | return params; 56 | } 57 | 58 | function checkEnvVars() { 59 | if ( 60 | !process.env.LLAMA_CLOUD_PROJECT_NAME || 61 | !process.env.LLAMA_CLOUD_API_KEY 62 | ) { 63 | throw new Error( 64 | "LLAMA_CLOUD_PROJECT_NAME and LLAMA_CLOUD_API_KEY environment variables must be set.", 65 | ); 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /app/api/chat/llamaindex/streaming/events.ts: -------------------------------------------------------------------------------- 1 | import { StreamData } from "ai"; 2 | import { 3 | CallbackManager, 4 | LLamaCloudFileService, 5 | Metadata, 6 | MetadataMode, 7 | NodeWithScore, 8 | ToolCall, 9 | ToolOutput, 10 | } from "llamaindex"; 11 | 12 | export async function appendSourceData( 13 | data: StreamData, 14 | sourceNodes?: NodeWithScore[], 15 | ) { 16 | if (!sourceNodes?.length) return; 17 | try { 18 | const nodes = await Promise.all( 19 | sourceNodes.map(async (node) => ({ 20 | metadata: node.node.metadata, 21 | id: node.node.id_, 22 | score: node.score ?? 
null, 23 | url: await getNodeUrl(node.node.metadata), 24 | text: node.node.getContent(MetadataMode.NONE), 25 | })), 26 | ); 27 | data.appendMessageAnnotation({ 28 | type: "sources", 29 | data: { 30 | nodes, 31 | }, 32 | }); 33 | } catch (error) { 34 | console.error("Error appending source data:", error); 35 | } 36 | } 37 | 38 | export function appendEventData(data: StreamData, title?: string) { 39 | if (!title) return; 40 | data.appendMessageAnnotation({ 41 | type: "events", 42 | data: { 43 | title, 44 | }, 45 | }); 46 | } 47 | 48 | export function appendToolData( 49 | data: StreamData, 50 | toolCall: ToolCall, 51 | toolOutput: ToolOutput, 52 | ) { 53 | data.appendMessageAnnotation({ 54 | type: "tools", 55 | data: { 56 | toolCall: { 57 | id: toolCall.id, 58 | name: toolCall.name, 59 | input: toolCall.input, 60 | }, 61 | toolOutput: { 62 | output: toolOutput.output, 63 | isError: toolOutput.isError, 64 | }, 65 | }, 66 | }); 67 | } 68 | 69 | export function createStreamTimeout(stream: StreamData) { 70 | const timeout = Number(process.env.STREAM_TIMEOUT ?? 
1000 * 60 * 5); // default to 5 minutes 71 | const t = setTimeout(() => { 72 | appendEventData(stream, `Stream timed out after ${timeout / 1000} seconds`); 73 | stream.close(); 74 | }, timeout); 75 | return t; 76 | } 77 | 78 | export function createCallbackManager(stream: StreamData) { 79 | const callbackManager = new CallbackManager(); 80 | 81 | callbackManager.on("retrieve-end", (data: any) => { 82 | const { nodes, query } = data.detail; 83 | appendSourceData(stream, nodes); 84 | appendEventData(stream, `Retrieving context for query: '${query}'`); 85 | appendEventData( 86 | stream, 87 | `Retrieved ${nodes.length} sources to use as context for the query`, 88 | ); 89 | }); 90 | 91 | callbackManager.on("llm-tool-call", (event: any) => { 92 | const { name, input } = event.detail.toolCall; 93 | const inputString = Object.entries(input) 94 | .map(([key, value]) => `${key}: ${value}`) 95 | .join(", "); 96 | appendEventData( 97 | stream, 98 | `Using tool: '${name}' with inputs: '${inputString}'`, 99 | ); 100 | }); 101 | 102 | callbackManager.on("llm-tool-result", (event: any) => { 103 | const { toolCall, toolResult } = event.detail; 104 | appendToolData(stream, toolCall, toolResult); 105 | }); 106 | 107 | return callbackManager; 108 | } 109 | 110 | async function getNodeUrl(metadata: Metadata) { 111 | try { 112 | const fileName = metadata["file_name"]; 113 | const pipelineId = metadata["pipeline_id"]; 114 | if (fileName && pipelineId) { 115 | // file has been uploaded to LlamaCloud, so we can get the URL from there 116 | const downloadUrl = await LLamaCloudFileService.getFileUrl( 117 | pipelineId, 118 | fileName, 119 | ); 120 | if (downloadUrl) { 121 | console.log(`Retrieved documents URL from LlamaCloud: ${downloadUrl}`); 122 | return downloadUrl; 123 | } 124 | } 125 | } catch (error) { 126 | console.error("Error retrieving document URL:", error); 127 | } 128 | console.warn( 129 | "Couldn't retrieve document URL from LlamaCloud for node with metadata", 130 | metadata, 
131 | ); 132 | return null; 133 | } 134 | -------------------------------------------------------------------------------- /app/api/chat/route.ts: -------------------------------------------------------------------------------- 1 | import { JSONValue, Message, StreamData, StreamingTextResponse } from "ai"; 2 | import { 3 | ChatMessage, 4 | OpenAI, 5 | Settings, 6 | SimpleChatHistory, 7 | SummaryChatHistory, 8 | } from "llamaindex"; 9 | import { NextRequest, NextResponse } from "next/server"; 10 | import { createChatEngine } from "./engine/chat"; 11 | import { LlamaIndexStream } from "@/cl/app/api/chat/llamaindex/streaming/stream"; 12 | import { 13 | convertMessageContent, 14 | retrieveDocumentIds, 15 | } from "@/cl/app/api/chat/llamaindex/streaming/annotations"; 16 | import { 17 | createCallbackManager, 18 | createStreamTimeout, 19 | } from "./llamaindex/streaming/events"; 20 | import { LLMConfig } from "@/app/store/bot"; 21 | import { parseDataSource } from "./engine"; 22 | 23 | export const runtime = "nodejs"; 24 | export const dynamic = "force-dynamic"; 25 | 26 | interface ChatRequestBody { 27 | messages: Message[]; 28 | context: Message[]; 29 | modelConfig: LLMConfig; 30 | datasource?: string; 31 | } 32 | 33 | export async function POST(request: NextRequest) { 34 | // Init Vercel AI StreamData and timeout 35 | const vercelStreamData = new StreamData(); 36 | const streamTimeout = createStreamTimeout(vercelStreamData); 37 | 38 | try { 39 | const body = await request.json(); 40 | const { messages, context, modelConfig, datasource } = 41 | body as ChatRequestBody; 42 | const userMessage = messages.pop(); 43 | if ( 44 | !messages || 45 | !userMessage || 46 | userMessage.role !== "user" || 47 | !datasource 48 | ) { 49 | return NextResponse.json( 50 | { 51 | detail: 52 | "datasource and messages are required in the request body and the last message must be from the user", 53 | }, 54 | { status: 400 }, 55 | ); 56 | } 57 | 58 | let annotations = userMessage.annotations; 
59 | if (!annotations) { 60 | // the user didn't send any new annotations with the last message 61 | // so use the annotations from the last user message that has annotations 62 | // REASON: GPT4 doesn't consider MessageContentDetail from previous messages, only strings 63 | annotations = messages 64 | .slice() 65 | .reverse() 66 | .find( 67 | (message) => message.role === "user" && message.annotations, 68 | )?.annotations; 69 | } 70 | 71 | // retrieve document Ids from the annotations of all messages (if any) and create chat engine with index 72 | const allAnnotations: JSONValue[] = [...messages, userMessage].flatMap( 73 | (message) => { 74 | return message.annotations ?? []; 75 | }, 76 | ); 77 | 78 | const ids = retrieveDocumentIds(allAnnotations); 79 | 80 | // Create chat engine instance with llm config from request 81 | const llm = new OpenAI(modelConfig); 82 | const chatEngine = await Settings.withLLM(llm, async () => { 83 | return await createChatEngine({ 84 | datasource: parseDataSource(datasource), 85 | documentIds: ids, 86 | }); 87 | }); 88 | 89 | // Convert message content from Vercel/AI format to LlamaIndex/OpenAI format 90 | const userMessageContent = convertMessageContent( 91 | userMessage.content, 92 | annotations, 93 | ); 94 | 95 | // Setup callbacks 96 | const callbackManager = createCallbackManager(vercelStreamData); 97 | 98 | // Append context messages to the top of the chat history 99 | const chatMessages = context.concat(messages) as ChatMessage[]; 100 | const chatHistory = modelConfig.sendMemory 101 | ? 
new SummaryChatHistory({ messages: chatMessages, llm }) 102 | : new SimpleChatHistory({ messages: chatMessages }); 103 | 104 | // Calling LlamaIndex's ChatEngine to get a streamed response 105 | const response = await Settings.withCallbackManager(callbackManager, () => { 106 | return chatEngine.chat({ 107 | message: userMessageContent, 108 | chatHistory, 109 | stream: true, 110 | }); 111 | }); 112 | 113 | // Transform LlamaIndex stream to Vercel/AI format 114 | const stream = LlamaIndexStream(response, vercelStreamData, chatMessages); 115 | 116 | // Return a StreamingTextResponse, which can be consumed by the Vercel/AI client 117 | return new StreamingTextResponse(stream, {}, vercelStreamData); 118 | } catch (error) { 119 | console.error("[LlamaIndex]", error); 120 | return NextResponse.json( 121 | { 122 | detail: (error as Error).message, 123 | }, 124 | { 125 | status: 500, 126 | }, 127 | ); 128 | } finally { 129 | clearTimeout(streamTimeout); 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /app/api/chat/upload/route.ts: -------------------------------------------------------------------------------- 1 | import { NextRequest, NextResponse } from "next/server"; 2 | import { uploadDocument } from "@/cl/app/api/chat/llamaindex/documents/upload"; 3 | import { getDataSource, parseDataSource } from "../engine"; 4 | 5 | export const runtime = "nodejs"; 6 | export const dynamic = "force-dynamic"; 7 | 8 | // Custom upload API to use datasource from request body 9 | export async function POST(request: NextRequest) { 10 | try { 11 | const { 12 | filename, 13 | base64, 14 | datasource, 15 | }: { filename: string; base64: string; datasource: string } = 16 | await request.json(); 17 | if (!base64 || !datasource) { 18 | return NextResponse.json( 19 | { error: "base64 and datasource is required in the request body" }, 20 | { status: 400 }, 21 | ); 22 | } 23 | const index = await getDataSource(parseDataSource(datasource)); 24 
| if (!index) { 25 | throw new Error( 26 | `StorageContext is empty - call 'pnpm run generate ${datasource}' to generate the storage first`, 27 | ); 28 | } 29 | return NextResponse.json(await uploadDocument(index, filename, base64)); 30 | } catch (error) { 31 | console.error("[Upload API]", error); 32 | return NextResponse.json( 33 | { error: (error as Error).message }, 34 | { status: 500 }, 35 | ); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /app/api/share/route.ts: -------------------------------------------------------------------------------- 1 | import { kv } from "@vercel/kv"; 2 | import { NextRequest, NextResponse } from "next/server"; 3 | import { nanoid } from "nanoid"; 4 | import { Bot } from "@/app/store/bot"; 5 | 6 | const DAYS_TO_LIVE = 30; 7 | const TTL = 60 * 60 * 24 * DAYS_TO_LIVE; 8 | const MAX_KEY_GENERATION_RETRY = 100; 9 | 10 | export interface ShareResponse { 11 | key: string; 12 | url: string; 13 | } 14 | 15 | async function getKey() { 16 | let key; 17 | let counter = 0; 18 | 19 | do { 20 | key = nanoid(); 21 | counter++; 22 | } while ((await kv.exists(key)) && counter < MAX_KEY_GENERATION_RETRY); 23 | 24 | if (counter === MAX_KEY_GENERATION_RETRY) { 25 | // Handle the case when a unique key was not found within the maximum allowed iterations 26 | throw new Error("Failed to generate a unique key"); 27 | } 28 | return key; 29 | } 30 | 31 | export async function POST(req: NextRequest) { 32 | try { 33 | const body: { bot: Bot } = await req.json(); 34 | 35 | const key = await getKey(); 36 | body.bot.share = { ...body.bot.share, id: key }; 37 | const data = await kv.set<{ bot: Bot }>(key, body, { 38 | ex: TTL, 39 | }); 40 | if (!data) { 41 | throw new Error(`Can't store bot with key ${key}`); 42 | } 43 | 44 | const protocol = req.headers.get("x-forwarded-proto") || "http"; 45 | const url = `${protocol}://${req.headers.get("host")}/b/${key}`; 46 | 47 | console.log(`[Share] shared bot 
'${body.bot.name}' created at ${url}`); 48 | 49 | return NextResponse.json({ 50 | key: key, 51 | url: url, 52 | data: data, 53 | days: DAYS_TO_LIVE, 54 | } as ShareResponse); 55 | } catch (error) { 56 | console.error("[Share] error while sharing bot", error); 57 | return NextResponse.json( 58 | { 59 | error: true, 60 | msg: (error as Error).message, 61 | }, 62 | { 63 | status: 500, 64 | }, 65 | ); 66 | } 67 | } 68 | 69 | export const runtime = "edge"; 70 | -------------------------------------------------------------------------------- /app/b/[botId]/page.tsx: -------------------------------------------------------------------------------- 1 | import { Home } from "@/app/components/home"; 2 | import { Bot } from "@/app/store/bot"; 3 | import { Analytics } from "@vercel/analytics/react"; 4 | import { kv } from "@vercel/kv"; 5 | 6 | export default async function App({ params }: { params: { botId: string } }) { 7 | console.log(`[Share] try loading bot with key ${params.botId}`); 8 | let bot: Bot | null = null; 9 | try { 10 | const res: { bot: Bot } | null = await kv.get(params.botId); 11 | bot = res?.bot || null; 12 | } catch (e) { 13 | console.error(`[Share] failed to load bot with key ${params.botId}`, e); 14 | } 15 | 16 | if (!bot) { 17 | console.log(`[Share] requested unknown bot with id ${params.botId}`); 18 | return ( 19 | <> 20 | Sorry, there is no bot at this URL. Try  21 | creating your own bot. 
22 | 23 | ); 24 | } 25 | 26 | console.debug("[Share] bot loaded", bot); 27 | 28 | return ( 29 | <> 30 | 31 | 32 | 33 | ); 34 | } 35 | -------------------------------------------------------------------------------- /app/components/bot/bot-item.tsx: -------------------------------------------------------------------------------- 1 | import { cn } from "@/app/lib/utils"; 2 | import { Bot } from "../../store/bot"; 3 | import BotOptions from "./bot-options"; 4 | import { BotItemContextProvider, useBot } from "./use-bot"; 5 | import { BotAvatar } from "@/app/components/ui/emoji"; 6 | 7 | function BotItemUI() { 8 | const { bot, isActive, ensureSession } = useBot(); 9 | return ( 10 |
16 |
20 |
21 | 22 |
23 |
{bot.name}
24 |
25 |
26 | 27 |
28 |
29 | ); 30 | } 31 | 32 | export default function BotItem(props: { bot: Bot }) { 33 | return ( 34 | 35 | 36 | 37 | ); 38 | } 39 | -------------------------------------------------------------------------------- /app/components/bot/bot-list.tsx: -------------------------------------------------------------------------------- 1 | import EditBotDialogContent from "@/app/components/bot/bot-options/edit-bot-dialog"; 2 | import { BotItemContextProvider } from "@/app/components/bot/use-bot"; 3 | import { Dialog, DialogTrigger } from "@/app/components/ui/dialog"; 4 | import { PlusCircle } from "lucide-react"; 5 | import { useState } from "react"; 6 | import { useNavigate } from "react-router-dom"; 7 | import { Path } from "../../constant"; 8 | import Locale from "../../locales"; 9 | import { useBotStore } from "../../store/bot"; 10 | import { Button } from "../ui/button"; 11 | import { Input } from "../ui/input"; 12 | import { ScrollArea } from "../ui/scroll-area"; 13 | import BotItem from "./bot-item"; 14 | 15 | export default function BotList() { 16 | const botStore = useBotStore(); 17 | const navigate = useNavigate(); 18 | const [searchText, setSearchText] = useState(""); 19 | const [editBotId, setEditBotId] = useState(undefined); 20 | 21 | const onClickContainer = (e: React.MouseEvent) => { 22 | if (e.target === e.currentTarget) { 23 | navigate(Path.Home); 24 | } 25 | }; 26 | 27 | const onClickCreate = () => { 28 | const newBot = botStore.create(); 29 | botStore.selectBot(newBot.id); 30 | setEditBotId(newBot.id); 31 | }; 32 | 33 | const allBots = botStore.getAll(); 34 | const filteredBots = allBots.filter((b) => 35 | b.name.toLowerCase().includes(searchText.toLowerCase()), 36 | ); 37 | const botList = searchText.length > 0 ? filteredBots : allBots; 38 | const editBot = editBotId ? botStore.get(editBotId) : undefined; 39 | 40 | return ( 41 |
42 |
43 | 44 | 45 | 48 | 49 | {editBot && ( 50 | 51 | 52 | 53 | )} 54 | 55 | setSearchText(e.currentTarget.value)} 60 | /> 61 |
62 | 63 | {botList.map((b) => ( 64 | 65 | ))} 66 | 67 |
68 | ); 69 | } 70 | -------------------------------------------------------------------------------- /app/components/bot/bot-options/delete-bot-dialog.tsx: -------------------------------------------------------------------------------- 1 | import { cn } from "@/app/lib/utils"; 2 | import Locale from "../../../locales"; 3 | import { 4 | AlertDialogAction, 5 | AlertDialogCancel, 6 | AlertDialogContent, 7 | AlertDialogDescription, 8 | AlertDialogFooter, 9 | AlertDialogHeader, 10 | AlertDialogTitle, 11 | } from "../../ui/alert-dialog"; 12 | import { useBot } from "../use-bot"; 13 | import { buttonVariants } from "@/app/components/ui/button"; 14 | 15 | export default function DeleteBotDialogContent() { 16 | const { deleteBot } = useBot(); 17 | return ( 18 | 19 | 20 | Are you absolutely sure? 21 | 22 | {Locale.Bot.Item.DeleteConfirm} 23 | 24 | 25 | 26 | Cancel 27 | 31 | Continue 32 | 33 | 34 | 35 | ); 36 | } 37 | -------------------------------------------------------------------------------- /app/components/bot/bot-options/edit-bot-dialog.tsx: -------------------------------------------------------------------------------- 1 | import Locale from "../../../locales"; 2 | import { DialogContent, DialogHeader, DialogTitle } from "../../ui/dialog"; 3 | import { ScrollArea } from "../../ui/scroll-area"; 4 | import { Separator } from "../../ui/separator"; 5 | import BotSettings from "../bot-settings"; 6 | 7 | export default function EditBotDialogContent() { 8 | return ( 9 | 10 | 11 | {Locale.Bot.EditModal.Title} 12 | 13 | 14 | 15 | 16 | 17 | 18 | ); 19 | } 20 | -------------------------------------------------------------------------------- /app/components/bot/bot-options/index.tsx: -------------------------------------------------------------------------------- 1 | import { 2 | ClipboardEdit, 3 | Copy, 4 | MoreHorizontal, 5 | Share2, 6 | XCircle, 7 | } from "lucide-react"; 8 | import { useState } from "react"; 9 | import Locale from "../../../locales"; 10 | import { 
AlertDialog, AlertDialogTrigger } from "../../ui/alert-dialog"; 11 | import { Button } from "../../ui/button"; 12 | import { Dialog, DialogTrigger } from "../../ui/dialog"; 13 | import { 14 | DropdownMenu, 15 | DropdownMenuContent, 16 | DropdownMenuItem, 17 | DropdownMenuLabel, 18 | DropdownMenuSeparator, 19 | DropdownMenuTrigger, 20 | } from "../../ui/dropdown-menu"; 21 | import { useBot } from "../use-bot"; 22 | import DeleteBotDialogContent from "./delete-bot-dialog"; 23 | import EditBotDialogContent from "./edit-bot-dialog"; 24 | import ShareBotDialogContent from "./share-bot-dialog"; 25 | 26 | export default function BotOptions() { 27 | const { isReadOnly, isShareble, cloneBot } = useBot(); 28 | const [dialogContent, setDialogContent] = useState(null); 29 | 30 | return ( 31 | 32 | 33 | 34 | 35 | 39 | 40 | 41 | Options 42 | 43 | 44 | 45 | {Locale.Bot.EditModal.Clone} 46 | 47 | 48 | setDialogContent()} 51 | > 52 | 53 | {Locale.Bot.Item.Edit} 54 | 55 | 56 | 57 | setDialogContent()} 60 | > 61 | 62 | {Locale.Bot.Item.Delete} 63 | 64 | 65 | 66 | setDialogContent()} 69 | > 70 | 71 | {Locale.Bot.Item.Share} 72 | 73 | 74 | 75 | 76 | {dialogContent} 77 | 78 | 79 | ); 80 | } 81 | -------------------------------------------------------------------------------- /app/components/bot/bot-options/share-bot-dialog.tsx: -------------------------------------------------------------------------------- 1 | import { ShareResponse } from "@/app/api/share/route"; 2 | import { Card, CardContent } from "@/app/components/ui/card"; 3 | import { Input } from "@/app/components/ui/input"; 4 | import { Loading } from "@/app/components/ui/loading"; 5 | import { useToast } from "@/app/components/ui/use-toast"; 6 | import { Bot } from "@/app/store/bot"; 7 | import { copyToClipboard } from "@/app/utils/clipboard"; 8 | import { Copy } from "lucide-react"; 9 | import { useEffect } from "react"; 10 | import { useMutation } from "react-query"; 11 | import Locale from "../../../locales"; 12 | import { 
Button } from "../../ui/button"; 13 | import { 14 | DialogContent, 15 | DialogFooter, 16 | DialogHeader, 17 | DialogTitle, 18 | } from "../../ui/dialog"; 19 | import { useBot } from "../use-bot"; 20 | 21 | async function share(bot: Bot): Promise { 22 | const res = await fetch("/api/share", { 23 | method: "POST", 24 | body: JSON.stringify({ bot: bot }), 25 | }); 26 | const json = await res.json(); 27 | console.log("[Share]", json); 28 | if (!res.ok) { 29 | throw new Error(json.msg); 30 | } 31 | return json; 32 | } 33 | 34 | export default function ShareBotDialogContent() { 35 | const { toast } = useToast(); 36 | const { bot, updateBot } = useBot(); 37 | 38 | const shareMutation = useMutation(share, { 39 | onSuccess: (data) => { 40 | updateBot((bot) => { 41 | bot.share = { ...bot.share, id: data.key }; 42 | }); 43 | }, 44 | }); 45 | 46 | // FIXME: check dependency warning 47 | useEffect(() => { 48 | shareMutation.mutate(bot); 49 | }, []); 50 | 51 | return ( 52 | 53 | 54 | {Locale.Share.Title} 55 | 56 |
57 | {!shareMutation.error && ( 58 | 59 | 60 |
61 |
{Locale.Share.Url.Title}
62 | {shareMutation.data ? ( 63 |
64 | 70 | 79 |
80 | ) : ( 81 |
82 | 83 | Loading... 84 |
85 | )} 86 |
87 |
88 |
89 | )} 90 |
91 | 92 |
93 | {shareMutation.error ? ( 94 | {Locale.Share.Url.Error} 95 | ) : ( 96 |
{Locale.Share.Url.Hint}
97 | )} 98 |
99 |
100 |
101 | ); 102 | } 103 | -------------------------------------------------------------------------------- /app/components/bot/bot-settings/bot-config.tsx: -------------------------------------------------------------------------------- 1 | import { useBot } from "@/app/components/bot/use-bot"; 2 | import { LlamaCloudSelector } from "@/cl/app/components/ui/chat/widgets/LlamaCloudSelector"; 3 | import Locale from "../../../locales"; 4 | import { Card, CardContent } from "../../ui/card"; 5 | import { Input } from "../../ui/input"; 6 | import ConfigItem from "./config-item"; 7 | 8 | export default function BotConfig() { 9 | const { bot, updateBot } = useBot(); 10 | return ( 11 | <> 12 |
{Locale.Bot.Config.Title}
13 | 14 | 15 | 16 | 20 | updateBot((bot) => { 21 | bot.name = e.currentTarget.value; 22 | }) 23 | } 24 | /> 25 | 26 | 27 | { 33 | if (pipeline) { 34 | updateBot((bot) => { 35 | bot.datasource = JSON.stringify(pipeline); // stringify configs as datasource 36 | }); 37 | } 38 | }} 39 | /> 40 | 41 | 42 | 43 | 44 | ); 45 | } 46 | -------------------------------------------------------------------------------- /app/components/bot/bot-settings/config-item.tsx: -------------------------------------------------------------------------------- 1 | export default function ConfigItem(props: { 2 | title: string; 3 | subTitle?: string; 4 | children: JSX.Element; 5 | }) { 6 | return ( 7 |
8 |
9 |
{props.title}
10 |
{props.subTitle}
11 |
12 |
{props.children}
13 |
14 | ); 15 | } 16 | -------------------------------------------------------------------------------- /app/components/bot/bot-settings/context-prompt.tsx: -------------------------------------------------------------------------------- 1 | import { Button } from "@/app/components/ui/button"; 2 | import { 3 | Select, 4 | SelectContent, 5 | SelectItem, 6 | SelectTrigger, 7 | SelectValue, 8 | } from "@/app/components/ui/select"; 9 | import { Textarea } from "@/app/components/ui/textarea"; 10 | import { ArrowDownLeftSquare, PlusCircle, XCircle } from "lucide-react"; 11 | import Locale from "../../../locales"; 12 | import { Message as ChatMessage } from "ai"; 13 | import { v4 as uuidv4 } from "uuid"; 14 | import { MESSAGE_ROLES } from "@/app/store/bot"; 15 | 16 | function ContextPromptItem(props: { 17 | index: number; 18 | prompt: ChatMessage; 19 | update: (prompt: ChatMessage) => void; 20 | remove: () => void; 21 | insert: () => void; 22 | }) { 23 | const handleUpdatePrompt = async (input: string) => { 24 | props.update({ 25 | ...props.prompt, 26 | content: input, 27 | }); 28 | }; 29 | 30 | return ( 31 | <> 32 |
33 |
34 | 54 |
55 | 56 |