├── .dockerignore
├── .env.example
├── .eslintrc.json
├── .gitignore
├── Dockerfile
├── README.md
├── data
│   ├── args.json
│   ├── docstore.json
│   └── hnswlib.index
├── download.sh
├── fly.toml
├── ingest.ts
├── ingest
│   ├── ingest.py
│   └── requirements.txt
├── next.config.js
├── package.json
├── pages
│   ├── _app.tsx
│   ├── _document.tsx
│   ├── api
│   │   ├── chat-stream.ts
│   │   ├── chat.ts
│   │   └── util.ts
│   └── index.tsx
├── public
│   ├── favicon.ico
│   ├── next.svg
│   ├── parroticon.png
│   ├── thirteen.svg
│   ├── usericon.png
│   └── vercel.svg
├── styles
│   ├── Home.module.css
│   └── globals.css
├── tsconfig.json
├── vercel.json
└── yarn.lock

/.dockerignore:
--------------------------------------------------------------------------------
1 | fly.toml
2 | Dockerfile
3 | .dockerignore
4 | node_modules
5 | npm-debug.log
6 | README.md
7 | .next
8 | .git
9 |
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=""
--------------------------------------------------------------------------------
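The only secret the app needs is this OpenAI key. A minimal sketch (not a file in the repo) of how it is consumed: the `ingest` script preloads dotenv via `tsx -r dotenv/config`, Next.js reads `.env` on its own, and the LangChain OpenAI wrappers pick the key up from `process.env`:

```ts
// Hypothetical sanity check, not part of the repo.
import "dotenv/config";
import { OpenAIEmbeddings } from "langchain/embeddings";

if (!process.env.OPENAI_API_KEY) {
  throw new Error("OPENAI_API_KEY is not set. Copy .env.example to .env and fill it in.");
}

// The LangChain OpenAI wrappers used throughout this repo (embeddings and LLM)
// read OPENAI_API_KEY from the environment, so no key is passed explicitly.
const embeddings = new OpenAIEmbeddings();
console.log("OpenAI embeddings client constructed:", Boolean(embeddings));
```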
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 |   "extends": "next/core-web-vitals"
3 | }
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 |
8 | # testing
9 | /coverage
10 |
11 | # next.js
12 | /.next/
13 | /out/
14 |
15 | # production
16 | /build
17 |
18 | # misc
19 | .DS_Store
20 | *.pem
21 |
22 | # debug
23 | npm-debug.log*
24 | yarn-debug.log*
25 | yarn-error.log*
26 | .pnpm-debug.log*
27 |
28 | # local env files
29 | .env*.local
30 |
31 | # Used env file
32 | .env
33 |
34 | # vercel
35 | .vercel
36 |
37 | # typescript
38 | *.tsbuildinfo
39 | next-env.d.ts
40 |
41 | ingested_data/
42 | langchain.readthedocs.io/
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Install dependencies only when needed
2 | FROM node:16-alpine AS builder
3 | # Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
4 | RUN apk add --no-cache libc6-compat
5 |
6 | # Add build dependencies for HNSWLib
7 | ENV PYTHONUNBUFFERED=1
8 | RUN apk add --no-cache make g++
9 | RUN apk add --update --no-cache python3 && ln -sf python3 /usr/bin/python
10 | RUN python3 -m ensurepip
11 | RUN pip3 install --no-cache --upgrade pip setuptools
12 |
13 | WORKDIR /app
14 | COPY . .
15 | RUN yarn install --frozen-lockfile
16 |
17 | # If using npm with a `package-lock.json` comment out above and use below instead
18 | # RUN npm ci
19 |
20 | ENV NEXT_TELEMETRY_DISABLED 1
21 |
22 | # Add `ARG` instructions below if you need `NEXT_PUBLIC_` variables
23 | # then put the value on your fly.toml
24 | # Example:
25 | # ARG NEXT_PUBLIC_EXAMPLE="value here"
26 |
27 | RUN yarn build
28 |
29 | # If using npm comment out above and use below instead
30 | # RUN npm run build
31 |
32 | # Production image, copy all the files and run next
33 | FROM node:16-alpine AS runner
34 | WORKDIR /app
35 |
36 | ENV NODE_ENV production
37 | ENV NEXT_TELEMETRY_DISABLED 1
38 |
39 | RUN addgroup --system --gid 1001 nodejs
40 | RUN adduser --system --uid 1001 nextjs
41 |
42 | COPY --from=builder /app ./
43 |
44 | USER nextjs
45 |
46 | CMD ["yarn", "start"]
47 |
48 | # If using npm comment out above and use below instead
49 | # CMD ["npm", "run", "start"]
50 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app).
2 |
3 | ## Getting Started
4 |
5 | First, create a new `.env` file from `.env.example` and add your OpenAI API key found [here](https://platform.openai.com/account/api-keys).
6 |
7 | ```bash
8 | cp .env.example .env
9 | ```
10 |
11 | ### Prerequisites
12 |
13 | - [Node.js](https://nodejs.org/en/download/) (v16 or higher)
14 | - [Yarn](https://classic.yarnpkg.com/en/docs/install/#mac-stable)
15 | - `wget` (on macOS, you can install this with `brew install wget`)
16 |
17 | Next, we'll need to load our data source.
18 |
19 | ### Data Ingestion
20 |
21 | Data ingestion happens in two steps.
22 |
23 | First, you should run
24 |
25 | ```bash
26 | sh download.sh
27 | ```
28 |
29 | This will download our data source (in this case the LangChain docs).
30 |
31 | Next, install dependencies and run the ingestion script:
32 |
33 | ```bash
34 | yarn && yarn ingest
35 | ```
36 |
37 | _Note: If on Node v16, use `NODE_OPTIONS='--experimental-fetch' yarn ingest`_
38 |
39 | This will parse the data, split text, create embeddings, store them in a vectorstore, and
40 | then save it to the `data/` directory.
41 |
42 | We save it to a directory because we only want to run the (expensive) data ingestion process once.
43 |
44 | The Next.js server relies on the presence of the `data/` directory. Please
45 | make sure to run this before moving on to the next step.
46 |
47 | ### Running the Server
48 |
49 | Then, run the development server:
50 |
51 | ```bash
52 | yarn dev
53 | ```
54 |
55 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
56 |
57 | ### Deploying the server
58 |
59 | The production version of this repo is hosted on
60 | [fly](https://chat-langchainjs.fly.dev/). To deploy your own server on Fly, you
61 | can use the provided `fly.toml` and `Dockerfile` as a starting point.
62 |
63 | **Note:** As a Next.js app, Vercel seems like a natural place to
64 | host this site. Unfortunately, there are
65 | [limitations](https://github.com/websockets/ws/issues/1786#issuecomment-678315435)
66 | to using secure websockets via `ws` with Next.js: it requires a custom
67 | server, which cannot be hosted on Vercel. Even with server-sent events,
68 | Vercel's serverless functions appear to prohibit streaming responses
69 | (e.g. see
70 | [here](https://github.com/vercel/next.js/issues/9965#issuecomment-820156947)).
71 |
72 | ## Inspirations
73 |
74 | This repo borrows heavily from
75 |
76 | - [ChatLangChain](https://github.com/hwchase17/chat-langchain) - for the backend and data ingestion logic.
77 | - [LangChain Chat NextJS](https://github.com/zahidkhawaja/langchain-chat-nextjs) - for the frontend.
78 |
79 | ## How To Run on Your Example
80 |
81 | If you'd like to chat with your own data, you need to:
82 |
83 | 1. Set up your own ingestion pipeline, and create a similar `data/` directory with a vectorstore in it.
84 | 2. Change the prompt used in `pages/api/util.ts` - right now it tells the chatbot to only respond to questions about LangChain, so you'll need to update it for your data accordingly.
85 |
86 | The server should work just the same 😄
87 |
--------------------------------------------------------------------------------
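Once `yarn ingest` has written the `data/` directory, a quick sanity check before starting the server is to load the saved index and run a similarity search against it. The following is a sketch rather than a file in the repo; the file name, the test query, and the use of the vectorstore's `similaritySearch` helper are assumptions on my part:

```ts
// check-index.ts - hypothetical helper, not part of the repo.
// Run with: tsx -r dotenv/config check-index.ts
import { HNSWLib } from "langchain/vectorstores";
import { OpenAIEmbeddings } from "langchain/embeddings";

(async () => {
  // Load the vectorstore that ingest.ts saved to data/ (args.json, docstore.json, hnswlib.index).
  const vectorstore = await HNSWLib.load("data", new OpenAIEmbeddings());

  // Ask for the 3 closest chunks to a test query; each result carries the
  // source file path that ingest.ts stored in the document metadata.
  const results = await vectorstore.similaritySearch("How do I use an LLMChain?", 3);
  for (const doc of results) {
    console.log(doc.metadata.source, "=>", doc.pageContent.slice(0, 120));
  }
})();
```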
/data/args.json:
--------------------------------------------------------------------------------
1 | {"space":"ip","numDimensions":1536}
--------------------------------------------------------------------------------
/data/hnswlib.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sullivan-sean/chat-langchainjs/35f2249bb99f9cb88f9fae6f905aa4169634e761/data/hnswlib.index
--------------------------------------------------------------------------------
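The `data/` directory is the vectorstore that `ingest.ts` (below) serializes: `args.json` records that the HNSW index uses inner-product ("ip") distance over 1536-dimensional vectors (the output size of OpenAI's text-embedding-ada-002 model), `docstore.json` holds the document chunks, and `hnswlib.index` is the binary index itself. A hedged sketch of checking that the recorded dimensionality matches the embedding model; this script is not part of the repo:

```ts
// Hypothetical dimension check, not part of the repo.
import * as fs from "fs";
import { OpenAIEmbeddings } from "langchain/embeddings";

(async () => {
  const args = JSON.parse(fs.readFileSync("data/args.json", "utf8")) as {
    space: string;         // "ip" = inner-product distance
    numDimensions: number; // must match the embedding model's output size
  };

  // Embed a probe string and compare its length to the stored index dimensionality.
  const probe = await new OpenAIEmbeddings().embedQuery("dimension probe");
  console.log(args.space, args.numDimensions, probe.length === args.numDimensions);
})();
```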
/download.sh:
--------------------------------------------------------------------------------
1 | # Bash script to ingest data
2 | # This involves scraping the data from the web, then cleaning it up and ingesting it into the vectorstore.
3 | # Error if any command fails
4 | set -e
5 | echo Downloading docs...
6 | wget -q -r -A.html https://langchain.readthedocs.io/en/latest/
7 |
--------------------------------------------------------------------------------
/fly.toml:
--------------------------------------------------------------------------------
1 | # fly.toml file generated for chat-langchainjs on 2023-02-18T22:00:39-08:00
2 |
3 | app = "chat-langchainjs"
4 | kill_signal = "SIGINT"
5 | kill_timeout = 5
6 | processes = []
7 |
8 | [build]
9 |   [build.args]
10 |     NEXT_PUBLIC_EXAMPLE = "Value goes here"
11 |
12 | [env]
13 |   PORT = "8080"
14 |
15 | [experimental]
16 |   auto_rollback = true
17 |
18 | [[services]]
19 |   http_checks = []
20 |   internal_port = 8080
21 |   processes = ["app"]
22 |   protocol = "tcp"
23 |   script_checks = []
24 |   [services.concurrency]
25 |     hard_limit = 25
26 |     soft_limit = 20
27 |     type = "connections"
28 |
29 |   [[services.ports]]
30 |     force_https = true
31 |     handlers = ["http"]
32 |     port = 80
33 |
34 |   [[services.ports]]
35 |     handlers = ["tls", "http"]
36 |     port = 443
37 |
38 |   [[services.tcp_checks]]
39 |     grace_period = "1s"
40 |     interval = "15s"
41 |     restart_limit = 0
42 |     timeout = "2s"
43 |
--------------------------------------------------------------------------------
/ingest.ts:
--------------------------------------------------------------------------------
1 | import { HNSWLib } from "langchain/vectorstores";
2 | import { OpenAIEmbeddings } from "langchain/embeddings";
3 | import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
4 | import * as fs from "fs";
5 | import { Document } from "langchain/document";
6 | import { BaseDocumentLoader } from "langchain/document_loaders";
7 | import path from "path";
8 | import { load } from "cheerio";
9 |
10 | async function processFile(filePath: string): Promise<Document> {
11 |   return await new Promise<Document>((resolve, reject) => {
12 |     fs.readFile(filePath, "utf8", (err, fileContents) => {
13 |       if (err) {
14 |         reject(err);
15 |       } else {
16 |         const text = load(fileContents).text();
17 |         const metadata = { source: filePath };
18 |         const doc = new Document({ pageContent: text, metadata: metadata });
19 |         resolve(doc);
20 |       }
21 |     });
22 |   });
23 | }
24 |
25 | async function processDirectory(directoryPath: string): Promise<Document[]> {
26 |   const docs: Document[] = [];
27 |   let files: string[];
28 |   try {
29 |     files = fs.readdirSync(directoryPath);
30 |   } catch (err) {
31 |     console.error(err);
32 |     throw new Error(
33 |       `Could not read directory: ${directoryPath}. Did you run \`sh download.sh\`?`
34 |     );
35 |   }
36 |   for (const file of files) {
37 |     const filePath = path.join(directoryPath, file);
38 |     const stat = fs.statSync(filePath);
39 |     if (stat.isDirectory()) {
40 |       const newDocs = processDirectory(filePath);
41 |       const nestedDocs = await newDocs;
42 |       docs.push(...nestedDocs);
43 |     } else {
44 |       const newDoc = processFile(filePath);
45 |       const doc = await newDoc;
46 |       docs.push(doc);
47 |     }
48 |   }
49 |   return docs;
50 | }
51 |
52 | class ReadTheDocsLoader extends BaseDocumentLoader {
53 |   constructor(public filePath: string) {
54 |     super();
55 |   }
56 |   async load(): Promise<Document[]> {
57 |     return await processDirectory(this.filePath);
58 |   }
59 | }
60 |
61 | const directoryPath = "langchain.readthedocs.io";
62 | const loader = new ReadTheDocsLoader(directoryPath);
63 |
64 | export const run = async () => {
65 |   const rawDocs = await loader.load();
66 |   console.log("Loader created.");
67 |   /* Split the text into chunks */
68 |   const textSplitter = new RecursiveCharacterTextSplitter({
69 |     chunkSize: 1000,
70 |     chunkOverlap: 200,
71 |   });
72 |   const docs = await textSplitter.splitDocuments(rawDocs);
73 |   console.log("Docs split.");
74 |
75 |   console.log("Creating vector store...");
76 |   /* Create the vectorstore */
77 |   const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
78 |   await vectorStore.save("data");
79 | };
80 |
81 | (async () => {
82 |   await run();
83 |   console.log("done");
84 | })();
85 |
--------------------------------------------------------------------------------
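`ingest.ts` above is specific to the ReadTheDocs HTML dump. For step 1 of the README's "How To Run on Your Example", the same pipeline should work with any loader that yields `Document`s. Below is a hedged sketch for a folder of plain-text or Markdown files; the file name `ingest-local.ts`, the `my-docs` folder, and reusing the same chunk settings are assumptions:

```ts
// ingest-local.ts - hypothetical variant of ingest.ts for a folder of .txt/.md files.
import * as fs from "fs";
import path from "path";
import { Document } from "langchain/document";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
import { HNSWLib } from "langchain/vectorstores";
import { OpenAIEmbeddings } from "langchain/embeddings";

const SOURCE_DIR = "my-docs"; // assumption: any folder of .txt/.md files

(async () => {
  const rawDocs = fs
    .readdirSync(SOURCE_DIR)
    .filter((f) => f.endsWith(".txt") || f.endsWith(".md"))
    .map((f) => {
      const filePath = path.join(SOURCE_DIR, f);
      return new Document({
        pageContent: fs.readFileSync(filePath, "utf8"),
        metadata: { source: filePath },
      });
    });

  // Same splitter settings as ingest.ts; tune chunkSize/chunkOverlap for your data.
  const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });
  const docs = await splitter.splitDocuments(rawDocs);

  // Writing to data/ is what the Next.js API routes expect to load at request time.
  const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
  await vectorStore.save("data");
})();
```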
"eslint-config-next": "13.1.6", 24 | "hnswlib-node": "^1.2.0", 25 | "langchain": "0.0.15", 26 | "next": "13.1.6", 27 | "openai": "^3.1.0", 28 | "react": "18.2.0", 29 | "react-dom": "18.2.0", 30 | "react-markdown": "^8.0.5", 31 | "ws": "^8.12.1" 32 | }, 33 | "devDependencies": { 34 | "@types/node": "18.13.0", 35 | "@types/react": "18.0.28", 36 | "@types/react-dom": "18.0.11", 37 | "@types/ws": "^8.5.4", 38 | "cohere-ai": "^5.0.2", 39 | "ts-node": "^10.9.1", 40 | "tsx": "^3.12.3", 41 | "typescript": "4.9.5" 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /pages/_app.tsx: -------------------------------------------------------------------------------- 1 | import '@/styles/globals.css' 2 | import type { AppProps } from 'next/app' 3 | 4 | export default function App({ Component, pageProps }: AppProps) { 5 | return 6 | } 7 | -------------------------------------------------------------------------------- /pages/_document.tsx: -------------------------------------------------------------------------------- 1 | import { Html, Head, Main, NextScript } from 'next/document' 2 | 3 | export default function Document() { 4 | return ( 5 | 6 | 7 | 8 |
/pages/api/chat-stream.ts:
--------------------------------------------------------------------------------
1 | // Next.js API route support: https://nextjs.org/docs/api-routes/introduction
2 | import type { NextApiRequest, NextApiResponse } from 'next'
3 | import type { Server as HttpServer } from "http";
4 | import type { Server as HttpsServer } from "https";
5 | import { WebSocketServer } from 'ws';
6 | import { HNSWLib } from "langchain/vectorstores";
7 | import { OpenAIEmbeddings } from 'langchain/embeddings';
8 | import { makeChain } from "./util";
9 |
10 | export default async function handler(req: NextApiRequest, res: NextApiResponse) {
11 |   if ((res.socket as any).server.wss) {
12 |     res.end();
13 |     return;
14 |   }
15 |
16 |   const server = (res.socket as any).server as HttpsServer | HttpServer;
17 |   const wss = new WebSocketServer({ noServer: true });
18 |   (res.socket as any).server.wss = wss;
19 |
20 |   server.on('upgrade', (req, socket, head) => {
21 |     if (!req.url?.includes('/_next/webpack-hmr')) {
22 |       wss.handleUpgrade(req, socket, head, (ws) => {
23 |         wss.emit('connection', ws, req);
24 |       });
25 |     }
26 |   });
27 |
28 |   wss.on('connection', (ws) => {
29 |     const sendResponse = ({ sender, message, type }: { sender: string, message: string, type: string }) => {
30 |       ws.send(JSON.stringify({ sender, message, type }));
31 |     };
32 |
33 |     const onNewToken = (token: string) => {
34 |       sendResponse({ sender: 'bot', message: token, type: 'stream' });
35 |     }
36 |
37 |     const chainPromise = HNSWLib.load("data", new OpenAIEmbeddings()).then((vs) => makeChain(vs, onNewToken));
38 |     const chatHistory: [string, string][] = [];
39 |     const encoder = new TextEncoder();
40 |
41 |
42 |     ws.on('message', async (data) => {
43 |       try {
44 |         const question = data.toString();
45 |         sendResponse({ sender: 'you', message: question, type: 'stream' });
46 |
47 |         sendResponse({ sender: 'bot', message: "", type: 'start' });
48 |         const chain = await chainPromise;
49 |
50 |         const result = await chain.call({
51 |           question,
52 |           chat_history: chatHistory,
53 |         });
54 |         chatHistory.push([question, result.answer]);
55 |
56 |         sendResponse({ sender: 'bot', message: "", type: 'end' });
57 |       } catch (e) {
58 |         sendResponse({
59 |           sender: 'bot',
60 |           message: "Sorry, something went wrong. Try again.",
61 |           type: 'error'
62 |         });
63 |       }
64 |     })
65 |   });
66 |
67 |   res.end();
68 | }
69 |
--------------------------------------------------------------------------------
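`chat-stream.ts` attaches a `ws` WebSocketServer to the underlying Next.js HTTP server and speaks a small JSON protocol: every frame is `{ sender, message, type }`, with `type` going `start`, then repeated `stream` tokens, then `end` (or `error`). A rough browser-side consumer is sketched below; it is not part of the repo, and the exact socket URL is an assumption (the upgrade handler accepts any path except webpack HMR once the route has been hit):

```ts
// Hypothetical browser-side client for the protocol above, not part of the repo.
// Assumes the page has already hit /api/chat-stream once so the WebSocketServer is attached.
type StreamFrame = { sender: string; message: string; type: "start" | "stream" | "end" | "error" };

const proto = location.protocol === "https:" ? "wss" : "ws";
const socket = new WebSocket(`${proto}://${location.host}/api/chat-stream`);

let answer = "";
socket.onmessage = (event) => {
  const frame: StreamFrame = JSON.parse(event.data);
  if (frame.type === "start") answer = "";                                        // bot begins a new answer
  if (frame.type === "stream" && frame.sender === "bot") answer += frame.message; // token-by-token
  if (frame.type === "end") console.log("Answer:", answer);
  if (frame.type === "error") console.error(frame.message);
};

// The server treats each raw text frame it receives as the next question.
socket.onopen = () => socket.send("What is a prompt template?");
```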
/pages/api/chat.ts:
--------------------------------------------------------------------------------
1 | // Next.js API route support: https://nextjs.org/docs/api-routes/introduction
2 | import type { NextApiRequest, NextApiResponse } from "next";
3 | import path from "path";
4 | import { HNSWLib } from "langchain/vectorstores";
5 | import { OpenAIEmbeddings } from "langchain/embeddings";
6 | import { makeChain } from "./util";
7 |
8 | export default async function handler(
9 |   req: NextApiRequest,
10 |   res: NextApiResponse
11 | ) {
12 |   const body = req.body;
13 |   const dir = path.resolve(process.cwd(), "data");
14 |
15 |   const vectorstore = await HNSWLib.load(dir, new OpenAIEmbeddings());
16 |   res.writeHead(200, {
17 |     "Content-Type": "text/event-stream",
18 |     // Important to set no-transform to avoid compression, which will delay
19 |     // writing response chunks to the client.
20 |     // See https://github.com/vercel/next.js/issues/9965
21 |     "Cache-Control": "no-cache, no-transform",
22 |     Connection: "keep-alive",
23 |   });
24 |
25 |   const sendData = (data: string) => {
26 |     res.write(`data: ${data}\n\n`);
27 |   };
28 |
29 |   sendData(JSON.stringify({ data: "" }));
30 |   const chain = makeChain(vectorstore, (token: string) => {
31 |     sendData(JSON.stringify({ data: token }));
32 |   });
33 |
34 |   try {
35 |     await chain.call({
36 |       question: body.question,
37 |       chat_history: body.history,
38 |     });
39 |   } catch (err) {
40 |     console.error(err);
41 |     // Ignore error
42 |   } finally {
43 |     sendData("[DONE]");
44 |     res.end();
45 |   }
46 | }
47 |
--------------------------------------------------------------------------------
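`chat.ts` is the HTTP fallback to the websocket route: the client POSTs `{ question, history }` and reads back a `text/event-stream` in which each `data:` line is either a JSON token payload or the literal `[DONE]`. Below is a condensed sketch of a consumer using the same `@microsoft/fetch-event-source` package as the frontend; the real UI logic lives in `pages/index.tsx` further down, and the sample question here is made up:

```ts
// Hypothetical standalone consumer of /api/chat, not part of the repo.
import { fetchEventSource } from "@microsoft/fetch-event-source";

async function askOnce(question: string, history: [string, string][]) {
  let answer = "";
  await fetchEventSource("/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ question, history }),
    onmessage: (event) => {
      if (event.data === "[DONE]") {
        console.log("Answer:", answer);        // server closes the stream after this
      } else {
        answer += JSON.parse(event.data).data; // accumulate streamed tokens
      }
    },
  });
  return answer;
}

// e.g. from browser code served by this app: await askOnce("How do I create an agent?", []);
```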
/pages/api/util.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms";
2 | import { LLMChain, ChatVectorDBQAChain, loadQAChain } from "langchain/chains";
3 | import { HNSWLib } from "langchain/vectorstores";
4 | import { PromptTemplate } from "langchain/prompts";
5 |
6 | const CONDENSE_PROMPT = PromptTemplate.fromTemplate(`Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
7 |
8 | Chat History:
9 | {chat_history}
10 | Follow Up Input: {question}
11 | Standalone question:`);
12 |
13 | const QA_PROMPT = PromptTemplate.fromTemplate(
14 |   `You are an AI assistant for the open source library LangChain. The documentation is located at https://langchain.readthedocs.io.
15 | You are given the following extracted parts of a long document and a question. Provide a conversational answer with a hyperlink to the documentation.
16 | You should only use hyperlinks that are explicitly listed as a source in the context. Do NOT make up a hyperlink that is not listed.
17 | If the question includes a request for code, provide a code block directly from the documentation.
18 | If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
19 | If the question is not about LangChain, politely inform them that you are tuned to only answer questions about LangChain.
20 | Question: {question}
21 | =========
22 | {context}
23 | =========
24 | Answer in Markdown:`);
25 |
26 | export const makeChain = (vectorstore: HNSWLib, onTokenStream?: (token: string) => void) => {
27 |   const questionGenerator = new LLMChain({
28 |     llm: new OpenAI({ temperature: 0 }),
29 |     prompt: CONDENSE_PROMPT,
30 |   });
31 |   const docChain = loadQAChain(
32 |     new OpenAI({
33 |       temperature: 0,
34 |       streaming: Boolean(onTokenStream),
35 |       callbackManager: {
36 |         handleNewToken: onTokenStream,
37 |       }
38 |     }),
39 |     { prompt: QA_PROMPT },
40 |   );
41 |
42 |   return new ChatVectorDBQAChain({
43 |     vectorstore,
44 |     combineDocumentsChain: docChain,
45 |     questionGeneratorChain: questionGenerator,
46 |   });
47 | }
48 |
49 |
--------------------------------------------------------------------------------
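`makeChain` wires together two chains: an `LLMChain` with `CONDENSE_PROMPT` that rewrites the chat history plus follow-up into a standalone question, and a QA chain with `QA_PROMPT` that answers it over the retrieved chunks, streaming tokens through `callbackManager.handleNewToken` when a callback is provided. A sketch of driving it outside the API routes follows; the script name and the sample question are assumptions:

```ts
// ask.ts - hypothetical command-line driver for the chain, not part of the repo.
// Run with: tsx -r dotenv/config ask.ts
import { HNSWLib } from "langchain/vectorstores";
import { OpenAIEmbeddings } from "langchain/embeddings";
import { makeChain } from "./pages/api/util";

(async () => {
  const vectorstore = await HNSWLib.load("data", new OpenAIEmbeddings());
  const chain = makeChain(vectorstore, (token) => process.stdout.write(token));

  const chatHistory: [string, string][] = [];
  const result = await chain.call({
    question: "What is a vectorstore?",
    chat_history: chatHistory,
  });

  // The same [question, answer] pair shape the API routes maintain.
  chatHistory.push(["What is a vectorstore?", result.answer]);
  console.log("\nTurns recorded so far:", chatHistory.length);
})();
```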
"") + data.data, 95 | })); 96 | } 97 | } 98 | }); 99 | } 100 | 101 | // Prevent blank submissions and allow for multiline input 102 | const handleEnter = (e: any) => { 103 | if (e.key === "Enter" && userInput) { 104 | if(!e.shiftKey && userInput) { 105 | handleSubmit(e); 106 | } 107 | } else if (e.key === "Enter") { 108 | e.preventDefault(); 109 | } 110 | }; 111 | 112 | const chatMessages = useMemo(() => { 113 | return [...messages, ...(pending ? [{ type: "apiMessage", message: pending }] : [])]; 114 | }, [messages, pending]); 115 | 116 | return ( 117 | <> 118 | 119 | LangChain Chat 120 | 121 | 122 | 123 | 124 |
125 |
126 | LangChain 127 |
128 | 144 |
145 |
146 |
147 |
148 | {chatMessages.map((message, index) => { 149 | let icon; 150 | let className; 151 | 152 | if (message.type === "apiMessage") { 153 | icon = AI; 154 | className = styles.apimessage; 155 | } else { 156 | icon = Me 157 | 158 | // The latest message sent by the user will be animated while waiting for a response 159 | className = loading && index === chatMessages.length - 1 160 | ? styles.usermessagewaiting 161 | : styles.usermessage; 162 | } 163 | return ( 164 |
165 | {icon} 166 |
167 | {message.message} 168 |
169 |
170 | ) 171 | })} 172 |
173 |
174 |
175 |
176 |
177 |